PolymorphicAccess.cpp
/*
 * Copyright (C) 2014-2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PolymorphicAccess.h"

#if ENABLE(JIT)

#include "BinarySwitch.h"
#include "CCallHelpers.h"
#include "CacheableIdentifierInlines.h"
#include "CodeBlock.h"
#include "FullCodeOrigin.h"
#include "Heap.h"
#include "JITOperations.h"
#include "LinkBuffer.h"
#include "StructureInlines.h"
#include "StructureStubClearingWatchpoint.h"
#include "StructureStubInfo.h"
#include "SuperSampler.h"
#include <wtf/CommaPrinter.h>
#include <wtf/ListDump.h>

namespace JSC {

namespace PolymorphicAccessInternal {
static constexpr bool verbose = false;
}

DEFINE_ALLOCATOR_WITH_HEAP_IDENTIFIER(PolymorphicAccess);

void AccessGenerationResult::dump(PrintStream& out) const
{
    out.print(m_kind);
    if (m_code)
        out.print(":", m_code);
}

void AccessGenerationState::installWatchpoint(const ObjectPropertyCondition& condition)
{
    WatchpointsOnStructureStubInfo::ensureReferenceAndInstallWatchpoint(
        watchpoints, jit->codeBlock(), stubInfo, condition);
}

void AccessGenerationState::restoreScratch()
{
    allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
}

void AccessGenerationState::succeed()
{
    restoreScratch();
    success.append(jit->jump());
}

const RegisterSet& AccessGenerationState::liveRegistersForCall()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersForCall;
}

const RegisterSet& AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
}

static RegisterSet calleeSaveRegisters()
{
    RegisterSet result = RegisterSet::registersToNotSaveForJSCall();
    result.filter(RegisterSet::registersToNotSaveForCCall());
    return result;
}

const RegisterSet& AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling) {
        m_calculatedRegistersForCallAndExceptionHandling = true;

        m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
        m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
        if (m_needsToRestoreRegistersIfException)
            RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));

        m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
        m_liveRegistersForCall.exclude(calleeSaveRegisters());
    }
    return m_liveRegistersForCall;
}

auto AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra) -> SpillState
{
    RegisterSet liveRegisters = liveRegistersForCall();
    liveRegisters.merge(extra);

    unsigned extraStackPadding = 0;
    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
    return SpillState {
        WTFMove(liveRegisters),
        numberOfStackBytesUsedForRegisterPreservation
    };
}

auto AccessGenerationState::preserveLiveRegistersToStackForCallWithoutExceptions() -> SpillState
{
    RegisterSet liveRegisters = allocator->usedRegisters();
    liveRegisters.exclude(calleeSaveRegisters());

    constexpr unsigned extraStackPadding = 0;
    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
    return SpillState {
        WTFMove(liveRegisters),
        numberOfStackBytesUsedForRegisterPreservation
    };
}
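// The SpillState returned by the preserveLiveRegistersToStackForCall* helpers above must be
// threaded through to one of the matching restore helpers below once the slow-path call has
// completed. Roughly, with illustrative register-set names:
//
//     SpillState spillState = state.preserveLiveRegistersToStackForCall(extraRegs);
//     // ... emit the operation call ...
//     state.restoreLiveRegistersFromStackForCall(spillState, dontRestoreRegs);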
void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException(const SpillState& spillState)
{
    // Even if we're a getter, we don't want to ignore the result value like we normally do
    // because the getter threw, and therefore, didn't return a value that means anything.
    // Instead, we want to restore that register to what it was upon entering the getter
    // inline cache. The subtlety here is if the base and the result are the same register,
    // and the getter threw, we want OSR exit to see the original base value, not the result
    // of the getter call.
    RegisterSet dontRestore = spillState.spilledRegisters;
    // As an optimization here, we only need to restore what is live for exception handling.
    // We can construct the dontRestore set to accomplish this goal by having it contain only
    // what is live for call but not live for exception handling. By ignoring things that are
    // only live at the call but not the exception handler, we will only restore things live
    // at the exception handler.
    dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
    restoreLiveRegistersFromStackForCall(spillState, dontRestore);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCall(const SpillState& spillState, const RegisterSet& dontRestore)
{
    unsigned extraStackPadding = 0;
    ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, spillState.spilledRegisters, dontRestore, spillState.numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
}

CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    if (!m_calculatedCallSiteIndex) {
        m_calculatedCallSiteIndex = true;

        if (m_needsToRestoreRegistersIfException)
            m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
        else
            m_callSiteIndex = originalCallSiteIndex();
    }

    return m_callSiteIndex;
}

DisposableCallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandling()
{
    RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    RELEASE_ASSERT(m_calculatedCallSiteIndex);
    return DisposableCallSiteIndex::fromCallSiteIndex(m_callSiteIndex);
}

const HandlerInfo& AccessGenerationState::originalExceptionHandler()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
    RELEASE_ASSERT(exceptionHandler);
    return *exceptionHandler;
}

CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
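// Emits the explicit exception check that follows a call made from a stub. Roughly: if the
// stub's caller has registers live at an exception-handling call site, we jump straight to its
// OSR exit handler after modeling what genericUnwind() would have done; otherwise we call
// operationLookupExceptionHandler() and unwind from there.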
void AccessGenerationState::emitExplicitExceptionHandler()
{
    restoreScratch();
    jit->pushToSave(GPRInfo::regT0);
    jit->loadPtr(&m_vm.topEntryFrame, GPRInfo::regT0);
    jit->copyCalleeSavesToEntryFrameCalleeSavesBuffer(GPRInfo::regT0);
    jit->popToRestore(GPRInfo::regT0);

    if (needsToRestoreRegistersIfException()) {
        // The JIT that produced the original exception handling call site expects the OSR exit
        // to be arrived at from genericUnwind. Therefore we must model what genericUnwind
        // does here. I.e, set callFrameForCatch and copy callee saves.

        jit->storePtr(GPRInfo::callFrameRegister, m_vm.addressOfCallFrameForCatch());
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();

        // We don't need to insert a new exception handler in the table
        // because we're doing a manual exception check here. i.e, we'll
        // never arrive here from genericUnwind().
        HandlerInfo originalHandler = originalExceptionHandler();
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
            });
    } else {
        jit->setupArguments<decltype(operationLookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(&m_vm));
        jit->prepareCallOperation(m_vm);
        CCallHelpers::Call lookupExceptionHandlerCall = jit->call(OperationPtrTag);
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(operationLookupExceptionHandler));
            });
        jit->jumpToExceptionHandler(m_vm);
    }
}

PolymorphicAccess::PolymorphicAccess() { }
PolymorphicAccess::~PolymorphicAccess() { }

AccessGenerationResult PolymorphicAccess::addCases(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
    Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
{
    SuperSamplerScope superSamplerScope(false);

    // This method will add the originalCasesToAdd to the list one at a time while preserving the
    // invariants:
    // - If a newly added case canReplace() any existing case, then the existing case is removed before
    //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
    //   can be removed via the canReplace() rule.
    // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
    //   cascade through the cases in reverse order, you will get the most recent cases first.
    // - If this method fails (returns null, doesn't add the cases), then both the previous case list
    //   and the previous stub are kept intact and the new cases are destroyed. It's OK to attempt to
    //   add more things after failure.

    // First ensure that the originalCasesToAdd doesn't contain duplicates.
    Vector<std::unique_ptr<AccessCase>> casesToAdd;
    for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
        std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);

        // Add it only if it is not replaced by the subsequent cases in the list.
        bool found = false;
        for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
            if (originalCasesToAdd[j]->canReplace(*myCase)) {
                found = true;
                break;
            }
        }

        if (found)
            continue;

        casesToAdd.append(WTFMove(myCase));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("casesToAdd: ", listDump(casesToAdd), "\n");

    // If there aren't any cases to add, then fail on the grounds that there's no point to generating a
    // new stub that will be identical to the old one. Returning null should tell the caller to just
    // keep doing what they were doing before.
    if (casesToAdd.isEmpty())
        return AccessGenerationResult::MadeNoChanges;
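    // Scan every (new, existing) and (new, new) pair of cases for a poly-proto opportunity:
    // when two structures differ only in their prototypes, it is usually better to reset the
    // stub and let it re-cache with a poly-proto case than to keep accumulating one case per
    // prototype. The delayed-fire subtlety is explained inside considerPolyProtoReset below.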
    if (stubInfo.accessType != AccessType::InstanceOf) {
        bool shouldReset = false;
        AccessGenerationResult resetResult(AccessGenerationResult::ResetStubAndFireWatchpoints);
        auto considerPolyProtoReset = [&] (Structure* a, Structure* b) {
            if (Structure::shouldConvertToPolyProto(a, b)) {
                // For now, we only reset if this is our first time invalidating this watchpoint.
                // The reason we don't immediately fire this watchpoint is that we may be already
                // watching the poly proto watchpoint, which if fired, would destroy us. We let
                // the handler of the result do a delayed fire.
                ASSERT(a->rareData()->sharedPolyProtoWatchpoint().get() == b->rareData()->sharedPolyProtoWatchpoint().get());
                if (a->rareData()->sharedPolyProtoWatchpoint()->isStillValid()) {
                    shouldReset = true;
                    resetResult.addWatchpointToFire(*a->rareData()->sharedPolyProtoWatchpoint(), StringFireDetail("Detected poly proto optimization opportunity."));
                }
            }
        };

        for (auto& caseToAdd : casesToAdd) {
            for (auto& existingCase : m_list) {
                Structure* a = caseToAdd->structure();
                Structure* b = existingCase->structure();
                considerPolyProtoReset(a, b);
            }
        }
        for (unsigned i = 0; i < casesToAdd.size(); ++i) {
            for (unsigned j = i + 1; j < casesToAdd.size(); ++j) {
                Structure* a = casesToAdd[i]->structure();
                Structure* b = casesToAdd[j]->structure();
                considerPolyProtoReset(a, b);
            }
        }

        if (shouldReset)
            return resetResult;
    }

    // Now add things to the new list. Note that at this point, we will still have old cases that
    // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
    for (auto& caseToAdd : casesToAdd) {
        commit(locker, vm, m_watchpoints, codeBlock, stubInfo, *caseToAdd);
        m_list.append(WTFMove(caseToAdd));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("After addCases: m_list: ", listDump(m_list), "\n");

    return AccessGenerationResult::Buffered;
}

AccessGenerationResult PolymorphicAccess::addCase(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, std::unique_ptr<AccessCase> newAccess)
{
    Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
    newAccesses.append(WTFMove(newAccess));
    return addCases(locker, vm, codeBlock, stubInfo, WTFMove(newAccesses));
}

bool PolymorphicAccess::visitWeak(VM& vm) const
{
    for (unsigned i = 0; i < size(); ++i) {
        if (!at(i).visitWeak(vm))
            return false;
    }
    if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
        for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
            if (!vm.heap.isMarked(weakReference.get()))
                return false;
        }
    }
    return true;
}

bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
{
    bool result = true;
    for (unsigned i = 0; i < size(); ++i)
        result &= at(i).propagateTransitions(visitor);
    return result;
}

void PolymorphicAccess::visitAggregate(SlotVisitor& visitor)
{
    for (unsigned i = 0; i < size(); ++i)
        at(i).visitAggregate(visitor);
}

void PolymorphicAccess::dump(PrintStream& out) const
{
    out.print(RawPointer(this), ":[");
    CommaPrinter comma;
    for (auto& entry : m_list)
        out.print(comma, *entry);
    out.print("]");
}
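// Registers the access case's watchpoint dependencies: for every WatchpointSet that
// AccessCase::commit() reports, we add a stub-clearing watchpoint, so the generated code can
// assume those conditions hold for as long as the stub is alive.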
void PolymorphicAccess::commit(
    const GCSafeConcurrentJSLocker&, VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
    StructureStubInfo& stubInfo, AccessCase& accessCase)
{
    // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
    // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
    // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
    // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
    // Those common kinds of JSC object accesses don't hit this case.

    for (WatchpointSet* set : accessCase.commit(vm)) {
        Watchpoint* watchpoint =
            WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
                watchpoints, codeBlock, &stubInfo);

        set->add(watchpoint);
    }
}

AccessGenerationResult PolymorphicAccess::regenerate(const GCSafeConcurrentJSLocker& locker, VM& vm, JSGlobalObject* globalObject, CodeBlock* codeBlock, ECMAMode ecmaMode, StructureStubInfo& stubInfo)
{
    SuperSamplerScope superSamplerScope(false);

    if (PolymorphicAccessInternal::verbose)
        dataLog("Regenerate with m_list: ", listDump(m_list), "\n");

    AccessGenerationState state(vm, globalObject, ecmaMode);

    state.access = this;
    state.stubInfo = &stubInfo;

    state.baseGPR = stubInfo.baseGPR;
    state.u.thisGPR = stubInfo.regs.thisGPR;
    state.valueRegs = stubInfo.valueRegs();

    // Regenerating is our opportunity to figure out what our list of cases should look like. We
    // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
    // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
    // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
    // from the code of the current stub (aka previous).
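    // The loop below filters m_list down to the cases worth regenerating. A case is dropped if
    // it can no longer succeed, or if some later case B satisfies B->canReplace(A): anything A
    // would handle, B handles too, so keeping A would only add a redundant guard to the new stub.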
    ListType cases;
    unsigned srcIndex = 0;
    unsigned dstIndex = 0;
    while (srcIndex < m_list.size()) {
        std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);

        // If the case had been generated, then we have to keep the original in m_list in case we
        // fail to regenerate. That case may have data structures that are used by the code that it
        // had generated. If the case had not been generated, then we want to remove it from m_list.
        bool isGenerated = someCase->state() == AccessCase::Generated;

        [&] () {
            if (!someCase->couldStillSucceed())
                return;

            // Figure out if this is replaced by any later case. Given two cases A and B where A
            // comes first in the case list, we know that A would have triggered first if we had
            // generated the cases in a cascade. That's why this loop asks B->canReplace(A) but not
            // A->canReplace(B). If A->canReplace(B) was true then A would never have requested
            // repatching in cases where Repatch.cpp would have then gone on to generate B. If that
            // did happen by some fluke, then we'd just miss the redundancy here, which wouldn't be
            // incorrect - just slow. However, if A's checks failed and Repatch.cpp concluded that
            // this new condition could be handled by B and B->canReplace(A), then this says that we
            // don't need A anymore.
            //
            // If we can generate a binary switch, then A->canReplace(B) == B->canReplace(A). So,
            // it doesn't matter that we only do the check in one direction.
            for (unsigned j = srcIndex; j < m_list.size(); ++j) {
                if (m_list[j]->canReplace(*someCase))
                    return;
            }

            if (isGenerated)
                cases.append(someCase->clone());
            else
                cases.append(WTFMove(someCase));
        }();

        if (isGenerated)
            m_list[dstIndex++] = WTFMove(someCase);
    }
    m_list.resize(dstIndex);

    ScratchRegisterAllocator allocator(stubInfo.usedRegisters);
    state.allocator = &allocator;
    allocator.lock(state.baseGPR);
    if (state.u.thisGPR != InvalidGPRReg)
        allocator.lock(state.u.thisGPR);
    allocator.lock(state.valueRegs);
#if USE(JSVALUE32_64)
    allocator.lock(stubInfo.baseTagGPR);
    if (stubInfo.v.thisTagGPR != InvalidGPRReg)
        allocator.lock(stubInfo.v.thisTagGPR);
#endif

    state.scratchGPR = allocator.allocateScratchGPR();

    for (auto& accessCase : cases) {
        if (accessCase->needsScratchFPR()) {
            state.scratchFPR = allocator.allocateScratchFPR();
            break;
        }
    }

    CCallHelpers jit(codeBlock);
    state.jit = &jit;

    state.preservedReusedRegisterState =
        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);

    bool generatedFinalCode = false;

    // If the resulting set of cases is so big that we would stop caching and this is InstanceOf,
    // then we want to generate the generic InstanceOf and then stop.
    if (cases.size() >= Options::maxAccessVariantListSize()
        && stubInfo.accessType == AccessType::InstanceOf) {
        while (!cases.isEmpty())
            m_list.append(cases.takeLast());
        cases.append(AccessCase::create(vm, codeBlock, AccessCase::InstanceOfGeneric, nullptr));
        generatedFinalCode = true;
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("Optimized cases: ", listDump(cases), "\n");

    // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
    // won't change that set anymore.

    bool allGuardedByStructureCheck = true;
    bool hasJSGetterSetterCall = false;
    bool needsInt32PropertyCheck = false;
    bool needsStringPropertyCheck = false;
    bool needsSymbolPropertyCheck = false;
    for (auto& newCase : cases) {
        if (!stubInfo.hasConstantIdentifier) {
            if (newCase->requiresIdentifierNameMatch()) {
                if (newCase->uid()->isSymbol())
                    needsSymbolPropertyCheck = true;
                else
                    needsStringPropertyCheck = true;
            } else if (newCase->requiresInt32PropertyCheck())
                needsInt32PropertyCheck = true;
        }
        commit(locker, vm, state.watchpoints, codeBlock, stubInfo, *newCase);
        allGuardedByStructureCheck &= newCase->guardedByStructureCheck(stubInfo);
        if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
            hasJSGetterSetterCall = true;
    }
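    // Choose a dispatch strategy. Roughly: if every case is guarded purely by a structure check
    // and there is more than one case, a binary switch on the structure ID gives logarithmic
    // dispatch; otherwise we fall back to a linear cascade of guards, newest case first.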
    if (cases.isEmpty()) {
        // This is super unlikely, but we make it legal anyway.
        state.failAndRepatch.append(jit.jump());
    } else if (!allGuardedByStructureCheck || cases.size() == 1) {
        // If there are any proxies in the list, we cannot just use a binary switch over the structure.
        // We need to resort to a cascade. A cascade also happens to be optimal if we only have just
        // one case.
        CCallHelpers::JumpList fallThrough;
        if (needsInt32PropertyCheck || needsStringPropertyCheck || needsSymbolPropertyCheck) {
            if (needsInt32PropertyCheck) {
                CCallHelpers::Jump notInt32;

                if (!stubInfo.propertyIsInt32) {
#if USE(JSVALUE64)
                    notInt32 = jit.branchIfNotInt32(state.u.propertyGPR);
#else
                    notInt32 = jit.branchIfNotInt32(state.stubInfo->v.propertyTagGPR);
#endif
                }
                for (unsigned i = cases.size(); i--;) {
                    fallThrough.link(&jit);
                    fallThrough.clear();
                    if (cases[i]->requiresInt32PropertyCheck())
                        cases[i]->generateWithGuard(state, fallThrough);
                }

                if (needsStringPropertyCheck || needsSymbolPropertyCheck) {
                    if (notInt32.isSet())
                        notInt32.link(&jit);
                    fallThrough.link(&jit);
                    fallThrough.clear();
                } else {
                    if (notInt32.isSet())
                        state.failAndRepatch.append(notInt32);
                }
            }

            if (needsStringPropertyCheck) {
                CCallHelpers::JumpList notString;
                GPRReg propertyGPR = state.u.propertyGPR;
                if (!stubInfo.propertyIsString) {
#if USE(JSVALUE32_64)
                    GPRReg propertyTagGPR = state.stubInfo->v.propertyTagGPR;
                    notString.append(jit.branchIfNotCell(propertyTagGPR));
#else
                    notString.append(jit.branchIfNotCell(propertyGPR));
#endif
                    notString.append(jit.branchIfNotString(propertyGPR));
                }

                jit.loadPtr(MacroAssembler::Address(propertyGPR, JSString::offsetOfValue()), state.scratchGPR);

                state.failAndRepatch.append(jit.branchIfRopeStringImpl(state.scratchGPR));

                for (unsigned i = cases.size(); i--;) {
                    fallThrough.link(&jit);
                    fallThrough.clear();
                    if (cases[i]->requiresIdentifierNameMatch() && !cases[i]->uid()->isSymbol())
                        cases[i]->generateWithGuard(state, fallThrough);
                }

                if (needsSymbolPropertyCheck) {
                    notString.link(&jit);
                    fallThrough.link(&jit);
                    fallThrough.clear();
                } else
                    state.failAndRepatch.append(notString);
            }

            if (needsSymbolPropertyCheck) {
                CCallHelpers::JumpList notSymbol;
                if (!stubInfo.propertyIsSymbol) {
                    GPRReg propertyGPR = state.u.propertyGPR;
#if USE(JSVALUE32_64)
                    GPRReg propertyTagGPR = state.stubInfo->v.propertyTagGPR;
                    notSymbol.append(jit.branchIfNotCell(propertyTagGPR));
#else
                    notSymbol.append(jit.branchIfNotCell(propertyGPR));
#endif
                    notSymbol.append(jit.branchIfNotSymbol(propertyGPR));
                }

                for (unsigned i = cases.size(); i--;) {
                    fallThrough.link(&jit);
                    fallThrough.clear();
                    if (cases[i]->requiresIdentifierNameMatch() && cases[i]->uid()->isSymbol())
                        cases[i]->generateWithGuard(state, fallThrough);
                }

                state.failAndRepatch.append(notSymbol);
            }
        } else {
            // Cascade through the list, preferring newer entries.
            for (unsigned i = cases.size(); i--;) {
                fallThrough.link(&jit);
                fallThrough.clear();
                cases[i]->generateWithGuard(state, fallThrough);
            }
        }

        state.failAndRepatch.append(fallThrough);

    } else {
        jit.load32(
            CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
            state.scratchGPR);

        Vector<int64_t> caseValues(cases.size());
        for (unsigned i = 0; i < cases.size(); ++i)
            caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());

        BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
        while (binarySwitch.advance(jit))
            cases[binarySwitch.caseIndex()]->generate(state);
        state.failAndRepatch.append(binarySwitch.fallThrough());
    }
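    // Two failure lists flow out of case generation: failAndIgnore is for transient conditions
    // where we should take the slow path without counting toward repatching, while
    // failAndRepatch is for conditions that mean this stub no longer covers the access, so the
    // slow path should consider generating a new stub.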
    if (!state.failAndIgnore.empty()) {
        state.failAndIgnore.link(&jit);

        // Make sure that the inline cache optimization code knows that we are taking slow path because
        // of something that isn't patchable. The slow path will decrement "countdown" and will only
        // patch things if the countdown reaches zero. We increment the slow path count here to ensure
        // that the slow path does not try to patch.
#if CPU(X86) || CPU(X86_64)
        jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
        jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
#else
        jit.load8(&stubInfo.countdown, state.scratchGPR);
        jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
        jit.store8(state.scratchGPR, &stubInfo.countdown);
#endif
    }

    CCallHelpers::JumpList failure;
    if (allocator.didReuseRegisters()) {
        state.failAndRepatch.link(&jit);
        state.restoreScratch();
    } else
        failure = state.failAndRepatch;
    failure.append(jit.jump());
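    // If a JS getter/setter call in this stub can throw, register a "makeshift" catch handler
    // for the new call site: genericUnwind will land there, and the handler restores the spilled
    // registers and the stack pointer before forwarding to the original (OSR exit) handler.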
    CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
    DisposableCallSiteIndex callSiteIndexForExceptionHandling;
    if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
        // Emit the exception handler.
        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
        // Note also that this is not reachable from custom getter/setter. Custom getter/setters will have
        // their own exception handling logic that doesn't go through genericUnwind.
        MacroAssembler::Label makeshiftCatchHandler = jit.label();

        int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
        AccessGenerationState::SpillState spillStateForJSGetterSetter = state.spillStateForJSGetterSetter();
        ASSERT(!spillStateForJSGetterSetter.isEmpty());
        stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
        stackPointerOffset -= spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation;

        jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
        jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

        state.restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSGetterSetter);
        state.restoreScratch();
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();

        HandlerInfo oldHandler = state.originalExceptionHandler();
        DisposableCallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);

                HandlerInfo handlerToRegister = oldHandler;
                handlerToRegister.nativeCode = linkBuffer.locationOf<ExceptionHandlerPtrTag>(makeshiftCatchHandler);
                handlerToRegister.start = newExceptionHandlingCallSite.bits();
                handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
                codeBlock->appendExceptionHandler(handlerToRegister);
            });

        // We set these to indicate that the stub should remove itself from the CodeBlock's
        // exception handler table when it is deallocated.
        codeBlockThatOwnsExceptionHandlers = codeBlock;
        ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
        callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
    }

    LinkBuffer linkBuffer(jit, codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate()) {
        if (PolymorphicAccessInternal::verbose)
            dataLog("Did fail to allocate.\n");
        return AccessGenerationResult::GaveUp;
    }

    CodeLocationLabel<JSInternalPtrTag> successLabel = stubInfo.doneLocation;

    linkBuffer.link(state.success, successLabel);

    linkBuffer.link(failure, stubInfo.slowPathStartLocation);

    if (PolymorphicAccessInternal::verbose)
        dataLog(FullCodeOrigin(codeBlock, stubInfo.codeOrigin), ": Generating polymorphic access stub for ", listDump(cases), "\n");

    MacroAssemblerCodeRef<JITStubRoutinePtrTag> code = FINALIZE_CODE_FOR(
        codeBlock, linkBuffer, JITStubRoutinePtrTag,
        "%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data());

    bool doesCalls = false;
    Vector<JSCell*> cellsToMark;
    for (auto& entry : cases)
        doesCalls |= entry->doesCalls(vm, &cellsToMark);

    m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, WTFMove(state.m_callLinkInfos), codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
    m_watchpoints = WTFMove(state.watchpoints);
    if (!state.weakReferences.isEmpty()) {
        state.weakReferences.shrinkToFit();
        m_weakReferences = makeUnique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
    }
    if (PolymorphicAccessInternal::verbose)
        dataLog("Returning: ", code.code(), "\n");

    m_list = WTFMove(cases);
    m_list.shrinkToFit();
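    // Report what we generated. GeneratedFinalCode tells the caller that this stub is as good as
    // it will get: once the case list has hit maxAccessVariantListSize() (or we emitted the
    // generic InstanceOf case above), further repatching is pointless and should stop.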
    AccessGenerationResult::Kind resultKind;
    if (m_list.size() >= Options::maxAccessVariantListSize() || generatedFinalCode)
        resultKind = AccessGenerationResult::GeneratedFinalCode;
    else
        resultKind = AccessGenerationResult::GeneratedNewCode;

    return AccessGenerationResult(resultKind, code.code());
}

void PolymorphicAccess::aboutToDie()
{
    if (m_stubRoutine)
        m_stubRoutine->aboutToDie();
}

} // namespace JSC

namespace WTF {

using namespace JSC;

void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
{
    switch (kind) {
    case AccessGenerationResult::MadeNoChanges:
        out.print("MadeNoChanges");
        return;
    case AccessGenerationResult::GaveUp:
        out.print("GaveUp");
        return;
    case AccessGenerationResult::Buffered:
        out.print("Buffered");
        return;
    case AccessGenerationResult::GeneratedNewCode:
        out.print("GeneratedNewCode");
        return;
    case AccessGenerationResult::GeneratedFinalCode:
        out.print("GeneratedFinalCode");
        return;
    case AccessGenerationResult::ResetStubAndFireWatchpoints:
        out.print("ResetStubAndFireWatchpoints");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::AccessType type)
{
    switch (type) {
    case AccessCase::Load:
        out.print("Load");
        return;
    case AccessCase::Transition:
        out.print("Transition");
        return;
    case AccessCase::Delete:
        out.print("Delete");
        return;
    case AccessCase::DeleteNonConfigurable:
        out.print("DeleteNonConfigurable");
        return;
    case AccessCase::DeleteMiss:
        out.print("DeleteMiss");
        return;
    case AccessCase::Replace:
        out.print("Replace");
        return;
    case AccessCase::Miss:
        out.print("Miss");
        return;
    case AccessCase::GetGetter:
        out.print("GetGetter");
        return;
    case AccessCase::Getter:
        out.print("Getter");
        return;
    case AccessCase::Setter:
        out.print("Setter");
        return;
    case AccessCase::CustomValueGetter:
        out.print("CustomValueGetter");
        return;
    case AccessCase::CustomAccessorGetter:
        out.print("CustomAccessorGetter");
        return;
    case AccessCase::CustomValueSetter:
        out.print("CustomValueSetter");
        return;
    case AccessCase::CustomAccessorSetter:
        out.print("CustomAccessorSetter");
        return;
    case AccessCase::IntrinsicGetter:
        out.print("IntrinsicGetter");
        return;
    case AccessCase::InHit:
        out.print("InHit");
        return;
    case AccessCase::InMiss:
        out.print("InMiss");
        return;
    case AccessCase::ArrayLength:
        out.print("ArrayLength");
        return;
    case AccessCase::StringLength:
        out.print("StringLength");
        return;
    case AccessCase::DirectArgumentsLength:
        out.print("DirectArgumentsLength");
        return;
    case AccessCase::ScopedArgumentsLength:
        out.print("ScopedArgumentsLength");
        return;
    case AccessCase::ModuleNamespaceLoad:
        out.print("ModuleNamespaceLoad");
        return;
    case AccessCase::InstanceOfHit:
        out.print("InstanceOfHit");
        return;
    case AccessCase::InstanceOfMiss:
        out.print("InstanceOfMiss");
        return;
    case AccessCase::InstanceOfGeneric:
        out.print("InstanceOfGeneric");
        return;
    case AccessCase::IndexedInt32Load:
        out.print("IndexedInt32Load");
        return;
    case AccessCase::IndexedDoubleLoad:
        out.print("IndexedDoubleLoad");
        return;
    case AccessCase::IndexedContiguousLoad:
out.print("IndexedContiguousLoad"); 887 return; 888 case AccessCase::IndexedArrayStorageLoad: 889 out.print("IndexedArrayStorageLoad"); 890 return; 891 case AccessCase::IndexedScopedArgumentsLoad: 892 out.print("IndexedScopedArgumentsLoad"); 893 return; 894 case AccessCase::IndexedDirectArgumentsLoad: 895 out.print("IndexedDirectArgumentsLoad"); 896 return; 897 case AccessCase::IndexedTypedArrayInt8Load: 898 out.print("IndexedTypedArrayInt8Load"); 899 return; 900 case AccessCase::IndexedTypedArrayUint8Load: 901 out.print("IndexedTypedArrayUint8Load"); 902 return; 903 case AccessCase::IndexedTypedArrayUint8ClampedLoad: 904 out.print("IndexedTypedArrayUint8ClampedLoad"); 905 return; 906 case AccessCase::IndexedTypedArrayInt16Load: 907 out.print("IndexedTypedArrayInt16Load"); 908 return; 909 case AccessCase::IndexedTypedArrayUint16Load: 910 out.print("IndexedTypedArrayUint16Load"); 911 return; 912 case AccessCase::IndexedTypedArrayInt32Load: 913 out.print("IndexedTypedArrayInt32Load"); 914 return; 915 case AccessCase::IndexedTypedArrayUint32Load: 916 out.print("IndexedTypedArrayUint32Load"); 917 return; 918 case AccessCase::IndexedTypedArrayFloat32Load: 919 out.print("IndexedTypedArrayFloat32Load"); 920 return; 921 case AccessCase::IndexedTypedArrayFloat64Load: 922 out.print("IndexedTypedArrayFloat64Load"); 923 return; 924 case AccessCase::IndexedStringLoad: 925 out.print("IndexedStringLoad"); 926 return; 927 } 928 929 RELEASE_ASSERT_NOT_REACHED(); 930 } 931 932 void printInternal(PrintStream& out, AccessCase::State state) 933 { 934 switch (state) { 935 case AccessCase::Primordial: 936 out.print("Primordial"); 937 return; 938 case AccessCase::Committed: 939 out.print("Committed"); 940 return; 941 case AccessCase::Generated: 942 out.print("Generated"); 943 return; 944 } 945 946 RELEASE_ASSERT_NOT_REACHED(); 947 } 948 949 } // namespace WTF 950 951 #endif // ENABLE(JIT) 952 953