/*
 * Copyright (C) 2008-2020 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "ArrayProfile.h"
#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CodeBlockHash.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompilationResult.h"
#include "ConcurrentJSLock.h"
#include "DFGCodeOriginPool.h"
#include "DFGCommon.h"
#include "DirectEvalCodeCache.h"
#include "EvalExecutable.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "FunctionExecutable.h"
#include "HandlerInfo.h"
#include "ICStatusMap.h"
#include "Instruction.h"
#include "InstructionStream.h"
#include "JITCode.h"
#include "JITCodeMap.h"
#include "JITMathICForwards.h"
#include "JSCast.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "MetadataTable.h"
#include "ModuleProgramExecutable.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "Printer.h"
#include "ProfilerJettisonReason.h"
#include "ProgramExecutable.h"
#include "PutPropertySlot.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
#include <wtf/FastMalloc.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

#if ENABLE(DFG_JIT)
namespace DFG {
struct OSRExitState;
} // namespace DFG
#endif

class UnaryArithProfile;
class BinaryArithProfile;
class BytecodeLivenessAnalysis;
class CodeBlockSet;
class ExecutableToCodeBlockEdge;
class JSModuleEnvironment;
class LLIntOffsetsExtractor;
class LLIntPrototypeLoadAdaptiveStructureWatchpoint;
class MetadataTable;
class PCToCodeOriginMap;
class RegisterAtOffsetList;
class StructureStubInfo;
struct ByValInfo;

DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(CodeBlockRareData);

enum class AccessType : int8_t;

struct OpCatch;

enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };

class CodeBlock : public JSCell {
    typedef JSCell Base;
    friend class BytecodeLivenessAnalysis;
    friend class JIT;
    friend class LLIntOffsetsExtractor;

public:

    enum CopyParsedBlockTag { CopyParsedBlock };

    static constexpr unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
    static constexpr bool needsDestruction = true;

    template<typename, SubspaceAccess>
    static void subspaceFor(VM&)
    {
        RELEASE_ASSERT_NOT_REACHED();
    }
    // GC strongly assumes CodeBlock is not a PreciseAllocation for now.
    static constexpr uint8_t numberOfLowerTierCells = 0;

    DECLARE_INFO;

protected:
    CodeBlock(VM&, Structure*, CopyParsedBlockTag, CodeBlock& other);
    CodeBlock(VM&, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);

    void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other);
    bool finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*);

    void finishCreationCommon(VM&);

    WriteBarrier<JSGlobalObject> m_globalObject;

public:
    JS_EXPORT_PRIVATE ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString hashAsStringIfPossible() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITType) const;
    JS_EXPORT_PRIVATE void dump(PrintStream&) const;

    MetadataTable* metadataTable() const { return m_metadata.get(); }

    unsigned numParameters() const { return m_numParameters; }
    void setNumParameters(unsigned newValue);

    unsigned numberOfArgumentsToSkip() const { return m_numberOfArgumentsToSkip; }

    unsigned numCalleeLocals() const { return m_numCalleeLocals; }

    unsigned numVars() const { return m_numVars; }
    unsigned numTmps() const { return m_unlinkedCode->hasCheckpoints() * maxNumCheckpointTmps; }

    unsigned* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
    void setAlternative(VM&, CodeBlock*);

    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
    {
        Functor f(std::forward<Functor>(functor));
        Vector<CodeBlock*, 4> codeBlocks;
        codeBlocks.append(this);

        while (!codeBlocks.isEmpty()) {
            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
            f(currentCodeBlock);

            if (CodeBlock* alternative = currentCodeBlock->alternative())
                codeBlocks.append(alternative);
            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
                codeBlocks.append(osrEntryBlock);
        }
    }
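
    // Editorial sketch (not part of the original header): forEachRelatedCodeBlock
    // walks this block, its alternatives, and any special OSR entry blocks, so a
    // caller can apply one action across the whole tier family. Assuming
    // `codeBlock` is a live CodeBlock*:
    //
    //     codeBlock->forEachRelatedCodeBlock([](CodeBlock* related) {
    //         dataLogLn("related block: ", *related);
    //     });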

    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(isConstructor());
    }

    CodeBlock* alternativeForJettison();
    JS_EXPORT_PRIVATE CodeBlock* baselineAlternative();

    // FIXME: Get rid of this.
    // https://bugs.webkit.org/show_bug.cgi?id=123677
    CodeBlock* baselineVersion();

    static size_t estimatedSize(JSCell*, VM&);
    static void visitChildren(JSCell*, SlotVisitor&);
    static void destroy(JSCell*);
    void visitChildren(SlotVisitor&);
    void finalizeUnconditionally(VM&);

    void notifyLexicalBindingUpdate();

    void dumpSource();
    void dumpSource(PrintStream&);

    void dumpBytecode();
    void dumpBytecode(PrintStream&);
    void dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& = ICStatusMap());
    void dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& = ICStatusMap());

    void dumpExceptionHandlers(PrintStream&);
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    void dumpMathICStats();

    bool isConstructor() const { return m_unlinkedCode->isConstructor(); }
    CodeType codeType() const { return m_unlinkedCode->codeType(); }

    JSParserScriptMode scriptMode() const { return m_unlinkedCode->scriptMode(); }

    bool hasInstalledVMTrapBreakpoints() const;
    bool installVMTrapBreakpoints();

    inline bool isKnownCell(VirtualRegister reg)
    {
        // FIXME: Consider adding back the optimization where we return true if `reg` is `this` and we're in sloppy mode.
        // https://bugs.webkit.org/show_bug.cgi?id=210145
        if (reg.isConstant())
            return getConstant(reg).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegister(VirtualRegister reg)
    {
        return reg.offset() >= static_cast<int>(m_numVars);
    }

    HandlerInfo* handlerForBytecodeIndex(BytecodeIndex, RequiredHandler = RequiredHandler::AnyHandler);
    HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
    void removeExceptionHandlerForCallSite(DisposableCallSiteIndex);
    unsigned lineNumberForBytecodeIndex(BytecodeIndex);
    unsigned columnNumberForBytecodeIndex(BytecodeIndex);
    void expressionRangeForBytecodeIndex(BytecodeIndex, int& divot,
        int& startOffset, int& endOffset, unsigned& line, unsigned& column) const;

    Optional<BytecodeIndex> bytecodeIndexFromCallSiteIndex(CallSiteIndex);

    // Because we might throw out baseline JIT code and all its baseline JIT data
    // (m_jitData), be careful about how long you hold on to this function's result.
    // The result may contain raw pointers into data structures that get thrown away.
    // Specifically, ensure that no GC can be finalized (typically that means no
    // allocations) between calling this and the last use of the result.
    void getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result);
    void getICStatusMap(ICStatusMap& result);
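
    // Editorial sketch (not part of the original header): snapshot the IC statuses
    // under the lock and consume the map before anything can allocate, since the
    // map may hold raw pointers into m_jitData:
    //
    //     ICStatusMap statusMap;
    //     {
    //         ConcurrentJSLocker locker(codeBlock->m_lock);
    //         codeBlock->getICStatusMap(locker, statusMap);
    //     }
    //     // ... use statusMap immediately, before any GC-triggering allocation ...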

#if ENABLE(JIT)
    struct JITData {
        WTF_MAKE_STRUCT_FAST_ALLOCATED;

        Bag<StructureStubInfo> m_stubInfos;
        Bag<JITAddIC> m_addICs;
        Bag<JITMulIC> m_mulICs;
        Bag<JITNegIC> m_negICs;
        Bag<JITSubIC> m_subICs;
        Bag<ByValInfo> m_byValInfos;
        Bag<CallLinkInfo> m_callLinkInfos;
        SentinelLinkedList<CallLinkInfo, PackedRawSentinelNode<CallLinkInfo>> m_incomingCalls;
        SentinelLinkedList<PolymorphicCallNode, PackedRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
        RefCountedArray<RareCaseProfile> m_rareCaseProfiles;
        std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
        std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
        JITCodeMap m_jitCodeMap;
    };

    JITData& ensureJITData(const ConcurrentJSLocker& locker)
    {
        if (LIKELY(m_jitData))
            return *m_jitData;
        return ensureJITDataSlow(locker);
    }
    JITData& ensureJITDataSlow(const ConcurrentJSLocker&);

    JITAddIC* addJITAddIC(BinaryArithProfile*);
    JITMulIC* addJITMulIC(BinaryArithProfile*);
    JITNegIC* addJITNegIC(UnaryArithProfile*);
    JITSubIC* addJITSubIC(BinaryArithProfile*);

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type>
    JITAddIC* addMathIC(BinaryArithProfile* profile) { return addJITAddIC(profile); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type>
    JITMulIC* addMathIC(BinaryArithProfile* profile) { return addJITMulIC(profile); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type>
    JITNegIC* addMathIC(UnaryArithProfile* profile) { return addJITNegIC(profile); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type>
    JITSubIC* addMathIC(BinaryArithProfile* profile) { return addJITSubIC(profile); }
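
    // Editorial sketch (not part of the original header): the enable_if overloads
    // above let JIT code request the right math IC generically by generator type.
    // Assuming `profile` and `unaryProfile` are live arith profiles:
    //
    //     JITAddIC* addIC = codeBlock->addMathIC<JITAddGenerator>(profile);
    //     JITNegIC* negIC = codeBlock->addMathIC<JITNegGenerator>(unaryProfile);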

    StructureStubInfo* addStubInfo(AccessType, CodeOrigin);

    // O(n) operation. Use getICStatusMap() unless you really only intend to get one stub info.
    StructureStubInfo* findStubInfo(CodeOrigin);
    // O(n) operation. Use getICStatusMap() unless you really only intend to get one by-val-info.
    ByValInfo* findByValInfo(CodeOrigin);

    ByValInfo* addByValInfo(BytecodeIndex);

    CallLinkInfo* addCallLinkInfo(CodeOrigin);

    // This is a slow function call used primarily for compiling OSR exits in the case
    // that there had been inlining. Chances are if you want to use this, you're really
    // looking for a CallLinkInfoMap to amortize the cost of calling this.
    CallLinkInfo* getCallLinkInfoForBytecodeIndex(BytecodeIndex);

    void setJITCodeMap(JITCodeMap&& jitCodeMap)
    {
        ConcurrentJSLocker locker(m_lock);
        ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap);
    }
    const JITCodeMap& jitCodeMap()
    {
        ConcurrentJSLocker locker(m_lock);
        return ensureJITData(locker).m_jitCodeMap;
    }

    void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
    Optional<CodeOrigin> findPC(void* pc);

    void setCalleeSaveRegisters(RegisterSet);
    void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>);

    void setRareCaseProfiles(RefCountedArray<RareCaseProfile>&&);
    RareCaseProfile* rareCaseProfileForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex);
    unsigned rareCaseProfileCountForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex);

    bool likelyToTakeSlowCase(BytecodeIndex bytecodeIndex)
    {
        if (!hasBaselineJITProfiling())
            return false;
        ConcurrentJSLocker locker(m_lock);
        unsigned value = rareCaseProfileCountForBytecodeIndex(locker, bytecodeIndex);
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(BytecodeIndex bytecodeIndex)
    {
        if (!hasBaselineJITProfiling())
            return false;
        ConcurrentJSLocker locker(m_lock);
        unsigned value = rareCaseProfileCountForBytecodeIndex(locker, bytecodeIndex);
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }
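
    // Editorial sketch (not part of the original header): an optimizing compiler
    // might consult these counters when deciding whether a speculative fast path
    // is worthwhile. Hypothetically:
    //
    //     if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
    //         emitGenericPath();     // hypothetical helper
    //     else
    //         emitSpeculativePath(); // hypothetical helper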

    // We call this when we want to reattempt compiling something with the baseline JIT. Ideally
    // the baseline JIT would not add data to CodeBlock, but instead it would put its data into
    // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we
    // would be able to get rid of this silly function.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061
    void resetJITData();
#endif // ENABLE(JIT)

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void linkIncomingCall(CallFrame* callerFrame, CallLinkInfo*);
    void linkIncomingPolymorphicCall(CallFrame* callerFrame, PolymorphicCallNode*);
#endif // ENABLE(JIT)

    void linkIncomingCall(CallFrame* callerFrame, LLIntCallLinkInfo*);

    const Instruction* outOfLineJumpTarget(const Instruction* pc);
    int outOfLineJumpOffset(InstructionStream::Offset offset)
    {
        return m_unlinkedCode->outOfLineJumpOffset(offset);
    }
    int outOfLineJumpOffset(const Instruction* pc);
    int outOfLineJumpOffset(const InstructionStream::Ref& instruction)
    {
        return outOfLineJumpOffset(instruction.ptr());
    }

    inline unsigned bytecodeOffset(const Instruction* returnAddress)
    {
        const auto* instructionsBegin = instructions().at(0).ptr();
        const auto* instructionsEnd = reinterpret_cast<const Instruction*>(reinterpret_cast<uintptr_t>(instructionsBegin) + instructions().size());
        RELEASE_ASSERT(returnAddress >= instructionsBegin && returnAddress < instructionsEnd);
        return returnAddress - instructionsBegin;
    }

    inline BytecodeIndex bytecodeIndex(const Instruction* returnAddress)
    {
        return BytecodeIndex(bytecodeOffset(returnAddress));
    }

    const InstructionStream& instructions() const { return m_unlinkedCode->instructions(); }
    const Instruction* instructionAt(BytecodeIndex index) const { return instructions().at(index).ptr(); }

    size_t predictedMachineCodeSize();

    unsigned instructionsSize() const { return instructions().size(); }
    unsigned bytecodeCost() const { return m_bytecodeCost; }

    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    CodeBlock* newReplacement();

    void setJITCode(Ref<JITCode>&& code)
    {
        if (!code->isShared())
            heap()->reportExtraMemoryAllocated(code->size());

        ConcurrentJSLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = WTFMove(code);
    }

    RefPtr<JITCode> jitCode() { return m_jitCode; }
    static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); }
    JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITType::BaselineJIT;
    }

#if ENABLE(JIT)
    CodeBlock* replacement();

    DFG::CapabilityLevel computeCapabilityLevel();
    DFG::CapabilityLevel capabilityLevel();
    DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }

    CodeBlock* optimizedReplacement(JITType typeToReplace);
    CodeBlock* optimizedReplacement(); // the typeToReplace is my JITType
    bool hasOptimizedReplacement(JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif

    void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    ExecutableToCodeBlockEdge* ownerEdge() const { return m_ownerEdge.get(); }

    VM& vm() const { return *m_vm; }

    VirtualRegister thisRegister() const { return m_unlinkedCode->thisRegister(); }

    bool usesEval() const { return m_unlinkedCode->usesEval(); }

    void setScopeRegister(VirtualRegister scopeRegister)
    {
        ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
        m_scopeRegister = scopeRegister;
    }

    VirtualRegister scopeRegister() const
    {
        return m_scopeRegister;
    }

    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }

    const SourceCode& source() const { return m_ownerExecutable->source(); }
    unsigned sourceOffset() const { return m_ownerExecutable->source().startOffset(); }
    unsigned firstLineColumnOffset() const { return m_ownerExecutable->startColumn(); }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    String nameForRegister(VirtualRegister);

    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters) || !Options::useJIT());
        return m_argumentValueProfiles.size();
    }

    ValueProfile& valueProfileForArgument(unsigned argumentIndex)
    {
        ASSERT(Options::useJIT()); // This is only called from the various JIT compilers or places that first check numberOfArgumentValueProfiles before calling this.
        ValueProfile& result = m_argumentValueProfiles[argumentIndex];
        return result;
    }

    ValueProfile& valueProfileForBytecodeIndex(BytecodeIndex);
    SpeculatedType valueProfilePredictionForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex);

    template<typename Functor> void forEachValueProfile(const Functor&);
    template<typename Functor> void forEachArrayProfile(const Functor&);
    template<typename Functor> void forEachArrayAllocationProfile(const Functor&);
    template<typename Functor> void forEachObjectAllocationProfile(const Functor&);
    template<typename Functor> void forEachLLIntCallLinkInfo(const Functor&);

    BinaryArithProfile* binaryArithProfileForBytecodeIndex(BytecodeIndex);
    UnaryArithProfile* unaryArithProfileForBytecodeIndex(BytecodeIndex);
    BinaryArithProfile* binaryArithProfileForPC(const Instruction*);
    UnaryArithProfile* unaryArithProfileForPC(const Instruction*);

    bool couldTakeSpecialArithFastCase(BytecodeIndex bytecodeOffset);

    ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, BytecodeIndex);
    ArrayProfile* getArrayProfile(BytecodeIndex);

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(DFG_JIT)
    DFG::CodeOriginPool& codeOrigins();

    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }

    bool canGetCodeOrigin(CallSiteIndex index)
    {
        if (!hasCodeOrigins())
            return false;
        return index.bits() < codeOrigins().size();
    }

    CodeOrigin codeOrigin(CallSiteIndex index)
    {
        return codeOrigins().get(index.bits());
    }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles(const ConcurrentJSLocker&)
    {
        return m_lazyOperandValueProfiles;
    }
#endif // ENABLE(DFG_JIT)

    // Constant Pool
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const;
    const Identifier& identifier(int index) const;
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif

    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
    Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
    unsigned addConstant(const ConcurrentJSLocker&, JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(*m_vm, this, v);
        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
        return result;
    }

    unsigned addConstantLazily(const ConcurrentJSLocker&)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
        return result;
    }
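
    // Editorial sketch (not part of the original header): addConstant() fills the
    // slot immediately, while addConstantLazily() reserves a slot whose barrier is
    // written later (e.g. by the DFG once the value is known). Hypothetically:
    //
    //     ConcurrentJSLocker locker(codeBlock->m_lock);
    //     unsigned index = codeBlock->addConstant(locker, jsNumber(42));
    //     JSValue v = codeBlock->getConstant(VirtualRegister(FirstConstantRegisterIndex + index));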

    const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
    WriteBarrier<Unknown>& constantRegister(VirtualRegister reg) { return m_constantRegisters[reg.toConstantIndex()]; }
    ALWAYS_INLINE JSValue getConstant(VirtualRegister reg) const { return m_constantRegisters[reg.toConstantIndex()].get(); }
    ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(VirtualRegister reg) const { return m_constantsSourceCodeRepresentation[reg.toConstantIndex()]; }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    const BitVector& bitVector(size_t i) { return m_unlinkedCode->bitVector(i); }

    Heap* heap() const { return &m_vm->heap; }
    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    BytecodeLivenessAnalysis& livenessAnalysis()
    {
        return m_unlinkedCode->livenessAnalysis(this);
    }

    void validate();

    // Jump Tables

    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }
#if ENABLE(DFG_JIT)
    void addSwitchJumpTableFromProfiledCodeBlock(SimpleJumpTable& profiled)
    {
        createRareDataIfNecessary();
        m_rareData->m_switchJumpTables.append(profiled.cloneNonJITPart());
    }
#endif

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    DirectEvalCodeCache& directEvalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_directEvalCodeCache; }

    enum class ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink,
    };
    void shrinkToFit(const ConcurrentJSLocker&, ShrinkMode);

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    int32_t thresholdForJIT(int32_t threshold);
    void jitAfterWarmUp();
    void jitSoon();

    const BaselineExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
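
    // Editorial sketch (not part of the original header): the LLInt bumps
    // m_llintExecuteCounter on entries and loop back-edges; once the counter
    // crosses its threshold, a baseline compile is requested. Roughly, with
    // hypothetical helpers:
    //
    //     if (codeBlock->checkIfJITThresholdReached())
    //         requestBaselineCompile(codeBlock); // hypothetical
    //     else
    //         continueInLLInt();                 // hypothetical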

    typedef HashMap<std::tuple<StructureID, unsigned>, Vector<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap;
    StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; }

    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
    void countReoptimization();
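
    // Editorial note (not part of the original header): concretely, each retry
    // roughly doubles the required warm-up; see adjustedCounterValue() and
    // adjustedExitCountThreshold() below, which scale a desired threshold by
    // (1 << reoptimizationRetryCounter()) with clamping.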

#if !ENABLE(C_LOOP)
    const RegisterAtOffsetList* calleeSaveRegisters() const;

    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); }
    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters();
    size_t calleeSaveSpaceAsVirtualRegisters();
#else
    static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; }
    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 1; }
    size_t calleeSaveSpaceAsVirtualRegisters() { return 0; }
#endif

#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }

    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);

    BytecodeIndex bytecodeIndexForExit(BytecodeIndex) const;
    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    enum class OptimizeAction { None, ReoptimizeNow };
#if ENABLE(DFG_JIT)
    OptimizeAction updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState&);
#endif

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();

#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();

    unsigned frameRegisterCount();
    int stackPointerOffset();

    bool hasOpDebugForLineAndColumn(unsigned line, Optional<unsigned> column);

    bool hasDebuggerRequests() const { return m_debuggerRequests; }
    void* debuggerRequestsAddress() { return &m_debuggerRequests; }

    void addBreakpoint(unsigned numBreakpoints);
    void removeBreakpoint(unsigned numBreakpoints)
    {
        ASSERT(m_numBreakpoints >= numBreakpoints);
        m_numBreakpoints -= numBreakpoints;
    }

    enum SteppingMode {
        SteppingModeDisabled,
        SteppingModeEnabled
    };
    void setSteppingMode(SteppingMode);

    void clearDebuggerRequests()
    {
        m_steppingMode = SteppingModeDisabled;
        m_numBreakpoints = 0;
    }

    bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); }

    // This is intentionally public; it's the responsibility of anyone doing any
    // of the following to hold the lock:
    //
    // - Modifying any inline cache in this code block.
    //
    // - Querying any inline cache in this code block, from a thread other than
    //   the main thread.
    //
    // Additionally, it's only legal to modify the inline cache on the main
    // thread. This means that the main thread can query the inline cache without
    // locking. This is crucial since executing the inline cache is effectively
    // "querying" it.
    //
    // Another exception to the rules is that the GC can do whatever it wants
    // without holding any locks, because the GC is guaranteed to wait until any
    // concurrent compilation threads finish what they're doing.
    mutable ConcurrentJSLock m_lock;
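
    // Editorial sketch (not part of the original header): per the rules above, a
    // concurrent compiler thread must take the lock before querying any inline
    // cache, while the main thread may query without it:
    //
    //     {
    //         ConcurrentJSLocker locker(profiledBlock->m_lock);
    //         // ... query profiledBlock's inline caches here ...
    //     }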

    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.

#if ENABLE(JIT)
    unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel
#endif

    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.

    bool m_didFailJITCompilation : 1;
    bool m_didFailFTLCompilation : 1;
    bool m_hasBeenCompiledWithFTL : 1;

    bool m_hasLinkedOSRExit : 1;
    bool m_isEligibleForLLIntDowngrade : 1;

    // These are internal methods for use by validation code. They would be private
    // if it weren't for the fact that we use them from anonymous namespaces.
    void beginValidationDidFail();
    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();

    struct RareData {
        WTF_MAKE_STRUCT_FAST_ALLOCATED_WITH_HEAP_IDENTIFIER(CodeBlockRareData);
    public:
        Vector<HandlerInfo> m_exceptionHandlers;

        // Jump Tables
        Vector<SimpleJumpTable> m_switchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        Vector<std::unique_ptr<ValueProfileAndVirtualRegisterBuffer>> m_catchProfiles;

        DirectEvalCodeCache m_directEvalCodeCache;
    };

    void clearExceptionHandlers()
    {
        if (m_rareData)
            m_rareData->m_exceptionHandlers.clear();
    }

    void appendExceptionHandler(const HandlerInfo& handler)
    {
        createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame.
        m_rareData->m_exceptionHandlers.append(handler);
    }

    DisposableCallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);

    void ensureCatchLivenessIsComputedForBytecodeIndex(BytecodeIndex);

    bool hasTailCalls() const { return m_unlinkedCode->hasTailCalls(); }

    template<typename Metadata>
    Metadata& metadata(OpcodeID opcodeID, unsigned metadataID)
    {
        ASSERT(m_metadata);
        return bitwise_cast<Metadata*>(m_metadata->get(opcodeID))[metadataID];
    }
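
    // Editorial sketch (not part of the original header): metadata is stored as a
    // per-opcode table indexed by each instruction's metadata ID. Illustratively,
    // for a generated bytecode struct such as OpAdd:
    //
    //     auto& meta = codeBlock->metadata<OpAdd::Metadata>(op_add, bytecode.m_metadataID);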

    size_t metadataSizeInBytes()
    {
        return m_unlinkedCode->metadataSizeInBytes();
    }

    MetadataTable* metadataTable() { return m_metadata.get(); }
    const void* instructionsRawPointer() { return m_instructionsRawPointer; }

    bool loopHintsAreEligibleForFuzzingEarlyReturn()
    {
        // Some builtins are required to always complete the loops they run.
        return !m_unlinkedCode->isBuiltinFunction();
    }

protected:
    void finalizeLLIntInlineCaches();
#if ENABLE(JIT)
    void finalizeBaselineJITInlineCaches();
#endif
#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

private:
    friend class CodeBlockSet;
    friend class ExecutableToCodeBlockEdge;

    BytecodeLivenessAnalysis& livenessAnalysisSlow();

    CodeBlock* specialOSREntryBlockOrNull();

    void noticeIncomingCall(CallFrame* callerFrame);

    double optimizationThresholdScalingFactor();

    void updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);

    void setConstantIdentifierSetRegisters(VM&, const RefCountedArray<ConstantIdentifierSetEntry>& constants);

    void setConstantRegisters(const RefCountedArray<WriteBarrier<Unknown>>& constants, const RefCountedArray<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable);

    void replaceConstant(VirtualRegister reg, JSValue value)
    {
        ASSERT(reg.isConstant() && static_cast<size_t>(reg.toConstantIndex()) < m_constantRegisters.size());
        m_constantRegisters[reg.toConstantIndex()].set(*m_vm, this, value);
    }

    bool shouldVisitStrongly(const ConcurrentJSLocker&);
    bool shouldJettisonDueToWeakReference(VM&);
    bool shouldJettisonDueToOldAge(const ConcurrentJSLocker&);

    void propagateTransitions(const ConcurrentJSLocker&, SlotVisitor&);
    void determineLiveness(const ConcurrentJSLocker&, SlotVisitor&);

    void stronglyVisitStrongReferences(const ConcurrentJSLocker&, SlotVisitor&);
    void stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor&);
    void visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor&);

    unsigned numberOfNonArgumentValueProfiles() { return m_numberOfNonArgumentValueProfiles; }
    unsigned totalNumberOfValueProfiles() { return numberOfArgumentValueProfiles() + numberOfNonArgumentValueProfiles(); }
    ValueProfile* tryGetValueProfileForBytecodeIndex(BytecodeIndex);

    Seconds timeSinceCreation()
    {
        return MonotonicTime::now() - m_creationTime;
    }

    void createRareDataIfNecessary()
    {
        if (!m_rareData) {
            auto rareData = makeUnique<RareData>();
            WTF::storeStoreFence(); // m_catchProfiles can be touched from compiler threads.
            m_rareData = WTFMove(rareData);
        }
    }

    void insertBasicBlockBoundariesForControlFlowProfiler();
    void ensureCatchLivenessIsComputedForBytecodeIndexSlow(const OpCatch&, BytecodeIndex);

    unsigned m_numCalleeLocals;
    unsigned m_numVars;
    unsigned m_numParameters;
    unsigned m_numberOfArgumentsToSkip { 0 };
    unsigned m_numberOfNonArgumentValueProfiles { 0 };
    union {
        unsigned m_debuggerRequests;
        struct {
            unsigned m_hasDebuggerStatement : 1;
            unsigned m_steppingMode : 1;
            unsigned m_numBreakpoints : 30;
        };
    };
    unsigned m_bytecodeCost { 0 };
    VirtualRegister m_scopeRegister;
    mutable CodeBlockHash m_hash;

    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
    WriteBarrier<ScriptExecutable> m_ownerExecutable;
    WriteBarrier<ExecutableToCodeBlockEdge> m_ownerEdge;
    // m_vm must be a pointer (instead of a reference) because the JSCLLIntOffsetsExtractor
    // cannot handle it being a reference.
    VM* m_vm;

    const void* m_instructionsRawPointer { nullptr };
    SentinelLinkedList<LLIntCallLinkInfo, PackedRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
    StructureWatchpointMap m_llintGetByIdWatchpointMap;
    RefPtr<JITCode> m_jitCode;
#if ENABLE(JIT)
    std::unique_ptr<JITData> m_jitData;
#endif
#if ENABLE(DFG_JIT)
    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
    RefCountedArray<ValueProfile> m_argumentValueProfiles;

    // Constant Pool
    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    // FIXME: This could just be a pointer to m_unlinkedCode's data, but the DFG
    // mutates it, so we're stuck with it for now.
    Vector<WriteBarrier<Unknown>> m_constantRegisters;
    Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls;
    RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs;

    WriteBarrier<CodeBlock> m_alternative;

    BaselineExecutionCounter m_llintExecuteCounter;

    BaselineExecutionCounter m_jitExecuteCounter;
    uint32_t m_osrExitCounter;

    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;

    RefPtr<MetadataTable> m_metadata;

    MonotonicTime m_creationTime;
    double m_previousCounter { 0 };

    std::unique_ptr<RareData> m_rareData;
};

template <typename ExecutableType>
Exception* ScriptExecutable::prepareForExecution(VM& vm, JSFunction* function, JSScope* scope, CodeSpecializationKind kind, CodeBlock*& resultCodeBlock)
{
    if (hasJITCodeFor(kind)) {
        if constexpr (std::is_same<ExecutableType, EvalExecutable>::value) {
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ExecutableType*>(this)->codeBlock());
            return nullptr;
        }
        if constexpr (std::is_same<ExecutableType, ProgramExecutable>::value) {
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ExecutableType*>(this)->codeBlock());
            return nullptr;
        }
        if constexpr (std::is_same<ExecutableType, ModuleProgramExecutable>::value) {
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ExecutableType*>(this)->codeBlock());
            return nullptr;
        }
        if constexpr (std::is_same<ExecutableType, FunctionExecutable>::value) {
            resultCodeBlock = jsCast<CodeBlock*>(jsCast<ExecutableType*>(this)->codeBlockFor(kind));
            return nullptr;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return nullptr;
    }
    return prepareForExecutionImpl(vm, function, scope, kind, resultCodeBlock);
}
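
// Editorial sketch (not part of the original header): callers name the concrete
// executable type so the fast path above can fetch an existing CodeBlock without
// virtual dispatch. Hypothetically, from a call-setup path:
//
//     CodeBlock* codeBlock = nullptr;
//     Exception* error = executable->prepareForExecution<FunctionExecutable>(
//         vm, function, scope, CodeForCall, codeBlock);
//     if (error)
//         return error;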

#define CODEBLOCK_LOG_EVENT(codeBlock, summary, details) \
    do { \
        if (codeBlock) \
            (codeBlock->vm().logEvent(codeBlock, summary, [&] () { return toCString details; })); \
    } while (0)
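
// Editorial sketch (not part of the original header): `details` is a parenthesized
// argument list handed to toCString, evaluated lazily inside the lambda only when
// the event is actually logged. Hypothetical call:
//
//     CODEBLOCK_LOG_EVENT(codeBlock, "jettison", ("due to ", reason));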


void setPrinter(Printer::PrintRecord&, CodeBlock*);

} // namespace JSC

namespace WTF {

JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::CodeBlock*);

} // namespace WTF