// assembler/ARMv7Assembler.h
   1  /*
   2   * Copyright (C) 2009-2019 Apple Inc. All rights reserved.
   3   * Copyright (C) 2010 University of Szeged
   4   *
   5   * Redistribution and use in source and binary forms, with or without
   6   * modification, are permitted provided that the following conditions
   7   * are met:
   8   * 1. Redistributions of source code must retain the above copyright
   9   *    notice, this list of conditions and the following disclaimer.
  10   * 2. Redistributions in binary form must reproduce the above copyright
  11   *    notice, this list of conditions and the following disclaimer in the
  12   *    documentation and/or other materials provided with the distribution.
  13   *
  14   * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
  15   * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  16   * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  17   * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
  18   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  19   * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  20   * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  21   * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
  22   * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  23   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24   * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  25   */
  26  
  27  #pragma once
  28  
  29  #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
  30  
  31  #include "AssemblerBuffer.h"
  32  #include "AssemblerCommon.h"
  33  #include "RegisterInfo.h"
  34  #include <limits.h>
  35  #include <wtf/Assertions.h>
  36  #include <wtf/Vector.h>
  37  #include <stdint.h>
  38  
  39  namespace JSC {
  40  
namespace RegisterNames {

    // General purpose registers (plus aliases such as sp/lr/pc), generated
    // from the platform's register description macros.
    typedef enum : int8_t {
#define REGISTER_ID(id, name, r, cs) id,
        FOR_EACH_GP_REGISTER(REGISTER_ID)
#undef REGISTER_ID

#define REGISTER_ALIAS(id, name, alias) id = alias,
        FOR_EACH_REGISTER_ALIAS(REGISTER_ALIAS)
#undef REGISTER_ALIAS
        InvalidGPRReg = -1,
    } RegisterID;

    // Special purpose registers (status/control registers, e.g. apsr..fpscr).
    typedef enum : int8_t {
#define REGISTER_ID(id, name) id,
        FOR_EACH_SP_REGISTER(REGISTER_ID)
#undef REGISTER_ID
    } SPRegisterID;

    // Single precision (32-bit) floating point registers.
    typedef enum : int8_t {
#define REGISTER_ID(id, name, r, cs) id,
        FOR_EACH_FP_SINGLE_REGISTER(REGISTER_ID)
#undef REGISTER_ID
    } FPSingleRegisterID;

    // Double precision (64-bit) floating point registers.
    typedef enum : int8_t {
#define REGISTER_ID(id, name, r, cs) id,
        FOR_EACH_FP_DOUBLE_REGISTER(REGISTER_ID)
#undef REGISTER_ID
        InvalidFPRReg = -1,
    } FPDoubleRegisterID;

#if CPU(ARM_NEON)
    // Quad (128-bit) NEON registers.
    typedef enum : int8_t {
#define REGISTER_ID(id, name, r, cs) id,
        FOR_EACH_FP_QUAD_REGISTER(REGISTER_ID)
#undef REGISTER_ID
    } FPQuadRegisterID;
#endif // CPU(ARM_NEON)

    // Conversions between the double- and single-precision numbering schemes.
    // The <<1 / >>1 mapping pairs d<n> with s<2n>/s<2n+1>; the assertions
    // restrict the conversions to d0-d15 (the range these helpers support).

    // Low (even-numbered) single register corresponding to a double register.
    inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
    {
        ASSERT(reg <= d15);
        return (FPSingleRegisterID)(reg << 1);
    }

    // High (odd-numbered) single register corresponding to a double register.
    inline FPSingleRegisterID asSingleUpper(FPDoubleRegisterID reg)
    {
        ASSERT(reg <= d15);
        return (FPSingleRegisterID)((reg << 1) + 1);
    }

    // Double register corresponding to an even-numbered single register.
    inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
    {
        ASSERT(!(reg & 1));
        return (FPDoubleRegisterID)(reg >> 1);
    }

} // namespace RegisterNames
 100  
 101  class ARMv7Assembler;
 102  class ARMThumbImmediate {
 103      friend class ARMv7Assembler;
 104  
 105      typedef uint8_t ThumbImmediateType;
 106      static constexpr ThumbImmediateType TypeInvalid = 0;
 107      static constexpr ThumbImmediateType TypeEncoded = 1;
 108      static constexpr ThumbImmediateType TypeUInt16 = 2;
 109  
 110      typedef union {
 111          int16_t asInt;
 112          struct {
 113              unsigned imm8 : 8;
 114              unsigned imm3 : 3;
 115              unsigned i    : 1;
 116              unsigned imm4 : 4;
 117          };
 118          // If this is an encoded immediate, then it may describe a shift, or a pattern.
 119          struct {
 120              unsigned shiftValue7 : 7;
 121              unsigned shiftAmount : 5;
 122          };
 123          struct {
 124              unsigned immediate   : 8;
 125              unsigned pattern     : 4;
 126          };
 127      } ThumbImmediateValue;
 128  
 129      // byte0 contains least significant bit; not using an array to make client code endian agnostic.
 130      typedef union {
 131          int32_t asInt;
 132          struct {
 133              uint8_t byte0;
 134              uint8_t byte1;
 135              uint8_t byte2;
 136              uint8_t byte3;
 137          };
 138      } PatternBytes;
 139  
 140      ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
 141      {
 142          if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
 143              value >>= N;             /* if any were set, lose the bottom N */
 144          else                         /* if none of the top N bits are set, */
 145              zeros += N;              /* then we have identified N leading zeros */
 146      }
 147  
 148      static int32_t countLeadingZeros(uint32_t value)
 149      {
 150          if (!value)
 151              return 32;
 152  
 153          int32_t zeros = 0;
 154          countLeadingZerosPartial(value, zeros, 16);
 155          countLeadingZerosPartial(value, zeros, 8);
 156          countLeadingZerosPartial(value, zeros, 4);
 157          countLeadingZerosPartial(value, zeros, 2);
 158          countLeadingZerosPartial(value, zeros, 1);
 159          return zeros;
 160      }
 161  
 162      ARMThumbImmediate()
 163          : m_type(TypeInvalid)
 164      {
 165          m_value.asInt = 0;
 166      }
 167          
 168      ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
 169          : m_type(type)
 170          , m_value(value)
 171      {
 172      }
 173  
 174      ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
 175          : m_type(TypeUInt16)
 176      {
 177          // Make sure this constructor is only reached with type TypeUInt16;
 178          // this extra parameter makes the code a little clearer by making it
 179          // explicit at call sites which type is being constructed
 180          ASSERT_UNUSED(type, type == TypeUInt16);
 181  
 182          m_value.asInt = value;
 183      }
 184  
 185  public:
 186      static ARMThumbImmediate makeEncodedImm(uint32_t value)
 187      {
 188          ThumbImmediateValue encoding;
 189          encoding.asInt = 0;
 190  
 191          // okay, these are easy.
 192          if (value < 256) {
 193              encoding.immediate = value;
 194              encoding.pattern = 0;
 195              return ARMThumbImmediate(TypeEncoded, encoding);
 196          }
 197  
 198          int32_t leadingZeros = countLeadingZeros(value);
 199          // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
 200          ASSERT(leadingZeros < 24);
 201  
 202          // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
 203          // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
 204          // zero.  count(B) == 8, so the count of bits to be checked is 24 - count(Z).
 205          int32_t rightShiftAmount = 24 - leadingZeros;
 206          if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
 207              // Shift the value down to the low byte position.  The assign to 
 208              // shiftValue7 drops the implicit top bit.
 209              encoding.shiftValue7 = value >> rightShiftAmount;
 210              // The endoded shift amount is the magnitude of a right rotate.
 211              encoding.shiftAmount = 8 + leadingZeros;
 212              return ARMThumbImmediate(TypeEncoded, encoding);
 213          }
 214          
 215          PatternBytes bytes;
 216          bytes.asInt = value;
 217  
 218          if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
 219              encoding.immediate = bytes.byte0;
 220              encoding.pattern = 3;
 221              return ARMThumbImmediate(TypeEncoded, encoding);
 222          }
 223  
 224          if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
 225              encoding.immediate = bytes.byte0;
 226              encoding.pattern = 1;
 227              return ARMThumbImmediate(TypeEncoded, encoding);
 228          }
 229  
 230          if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
 231              encoding.immediate = bytes.byte1;
 232              encoding.pattern = 2;
 233              return ARMThumbImmediate(TypeEncoded, encoding);
 234          }
 235  
 236          return ARMThumbImmediate();
 237      }
 238  
 239      static ARMThumbImmediate makeUInt12(int32_t value)
 240      {
 241          return (!(value & 0xfffff000))
 242              ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
 243              : ARMThumbImmediate();
 244      }
 245  
 246      static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
 247      {
 248          // If this is not a 12-bit unsigned it, try making an encoded immediate.
 249          return (!(value & 0xfffff000))
 250              ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
 251              : makeEncodedImm(value);
 252      }
 253  
 254      // The 'make' methods, above, return a !isValid() value if the argument
 255      // cannot be represented as the requested type.  This methods  is called
 256      // 'get' since the argument can always be represented.
 257      static ARMThumbImmediate makeUInt16(uint16_t value)
 258      {
 259          return ARMThumbImmediate(TypeUInt16, value);
 260      }
 261      
 262      bool isValid()
 263      {
 264          return m_type != TypeInvalid;
 265      }
 266  
 267      uint16_t asUInt16() const { return m_value.asInt; }
 268  
 269      // These methods rely on the format of encoded byte values.
 270      bool isUInt3() { return !(m_value.asInt & 0xfff8); }
 271      bool isUInt4() { return !(m_value.asInt & 0xfff0); }
 272      bool isUInt5() { return !(m_value.asInt & 0xffe0); }
 273      bool isUInt6() { return !(m_value.asInt & 0xffc0); }
 274      bool isUInt7() { return !(m_value.asInt & 0xff80); }
 275      bool isUInt8() { return !(m_value.asInt & 0xff00); }
 276      bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
 277      bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
 278      bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
 279      bool isUInt16() { return m_type == TypeUInt16; }
 280      uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
 281      uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
 282      uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
 283      uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
 284      uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
 285      uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
 286      uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
 287      uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
 288      uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
 289      uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
 290  
 291      bool isEncodedImm() { return m_type == TypeEncoded; }
 292  
 293  private:
 294      ThumbImmediateType m_type;
 295      ThumbImmediateValue m_value;
 296  };
 297  
 298  typedef enum {
 299      SRType_LSL,
 300      SRType_LSR,
 301      SRType_ASR,
 302      SRType_ROR,
 303  
 304      SRType_RRX = SRType_ROR
 305  } ARMShiftType;
 306  
 307  class ShiftTypeAndAmount {
 308      friend class ARMv7Assembler;
 309  
 310  public:
 311      ShiftTypeAndAmount()
 312      {
 313          m_u.type = (ARMShiftType)0;
 314          m_u.amount = 0;
 315      }
 316      
 317      ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
 318      {
 319          m_u.type = type;
 320          m_u.amount = amount & 31;
 321      }
 322      
 323      unsigned lo4() { return m_u.lo4; }
 324      unsigned hi4() { return m_u.hi4; }
 325      
 326  private:
 327      union {
 328          struct {
 329              unsigned lo4 : 4;
 330              unsigned hi4 : 4;
 331          };
 332          struct {
 333              unsigned type   : 2;
 334              unsigned amount : 6;
 335          };
 336      } m_u;
 337  };
 338  
 339  class ARMv7Assembler {
 340  public:
    typedef ARMRegisters::RegisterID RegisterID;
    typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
    typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
#if CPU(ARM_NEON)
    typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
#endif
    typedef ARMRegisters::SPRegisterID SPRegisterID;
    // Double precision registers are this port's canonical FP register type.
    typedef FPDoubleRegisterID FPRegisterID;

    // Inclusive register-file ranges; the counts size the name tables below.
    static constexpr RegisterID firstRegister() { return ARMRegisters::r0; }
    static constexpr RegisterID lastRegister() { return ARMRegisters::r15; }
    static constexpr unsigned numberOfRegisters() { return lastRegister() - firstRegister() + 1; }

    static constexpr SPRegisterID firstSPRegister() { return ARMRegisters::apsr; }
    static constexpr SPRegisterID lastSPRegister() { return ARMRegisters::fpscr; }
    static constexpr unsigned numberOfSPRegisters() { return lastSPRegister() - firstSPRegister() + 1; }

    static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
#if CPU(ARM_NEON) || CPU(ARM_VFP_V3_D32)
    // With 32 double registers available (VFPv3-D32 / NEON), expose them all.
    static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
#else
    static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d15; }
#endif
    static constexpr unsigned numberOfFPRegisters() { return lastFPRegister() - firstFPRegister() + 1; }
 365  
    // Returns the printable name of a general purpose register.
    static const char* gprName(RegisterID id)
    {
        ASSERT(id >= firstRegister() && id <= lastRegister());
        // Name table generated from the same macro list as the RegisterID enum,
        // so indices line up by construction.
        static const char* const nameForRegister[numberOfRegisters()] = {
#define REGISTER_NAME(id, name, r, cs) name,
        FOR_EACH_GP_REGISTER(REGISTER_NAME)
#undef REGISTER_NAME
        };
        return nameForRegister[id];
    }
 376  
    // Returns the printable name of a special purpose register.
    static const char* sprName(SPRegisterID id)
    {
        ASSERT(id >= firstSPRegister() && id <= lastSPRegister());
        static const char* const nameForRegister[numberOfSPRegisters()] = {
#define REGISTER_NAME(id, name) name,
        FOR_EACH_SP_REGISTER(REGISTER_NAME)
#undef REGISTER_NAME
        };
        return nameForRegister[id];
    }
 387  
    // Returns the printable name of a (double precision) FP register.
    // NOTE(review): assumes FOR_EACH_FP_DOUBLE_REGISTER yields exactly
    // numberOfFPRegisters() entries for the current configuration; a mismatch
    // would fail to compile because the array is sized by the count.
    static const char* fprName(FPRegisterID id)
    {
        ASSERT(id >= firstFPRegister() && id <= lastFPRegister());
        static const char* const nameForRegister[numberOfFPRegisters()] = {
#define REGISTER_NAME(id, name, r, cs) name,
        FOR_EACH_FP_DOUBLE_REGISTER(REGISTER_NAME)
#undef REGISTER_NAME
        };
        return nameForRegister[id];
    }
 398  
    // ARM condition codes, in architectural encoding order.  The x86-style
    // aliases for the unsigned/overflow conditions are:
    // (HS, LO, HI, LS) -> (AE, B, A, BE)
    // (VS, VC) -> (O, NO)
    typedef enum {
        ConditionEQ, // Zero / Equal.
        ConditionNE, // Non-zero / Not equal.
        ConditionHS, ConditionCS = ConditionHS, // Unsigned higher or same.
        ConditionLO, ConditionCC = ConditionLO, // Unsigned lower.
        ConditionMI, // Negative.
        ConditionPL, // Positive or zero.
        ConditionVS, // Overflowed.
        ConditionVC, // Not overflowed.
        ConditionHI, // Unsigned higher.
        ConditionLS, // Unsigned lower or same.
        ConditionGE, // Signed greater than or equal.
        ConditionLT, // Signed less than.
        ConditionGT, // Signed greater than.
        ConditionLE, // Signed less than or equal.
        ConditionAL, // Unconditional / Always execute.
        ConditionInvalid
    } Condition;
 419  
#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
    // Each enumerator packs an index (low 3 bits) together with the number of
    // bytes the corresponding jump/link sequence may occupy (remaining bits,
    // extracted with JUMP_ENUM_SIZE).

    // The kind of jump requested when it is emitted; "FixedSize" variants
    // reserve the worst-case size regardless of the final displacement.
    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
                    JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
                    JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
                    JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
                    JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
    };
    // The concrete branch sequence selected at link time, with its size.
    enum JumpLinkType {
        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
        LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
        LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
        LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
        LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
        LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
        LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
        LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
    };
 438  
    // A pending jump recorded at assembly time and resolved at link time:
    // where the jump was emitted (m_from), its target (m_to), the kind the
    // client requested (m_type), and the concrete sequence chosen by the
    // linker (m_linkType, initially LinkInvalid).  Packed into three 32-bit
    // words (see the COMPILE_ASSERT below).
    class LinkRecord {
    public:
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
        }
        // Copies the three raw words rather than the individual bitfields.
        // NOTE(review): presumably so the copy touches whole words only
        // (cheap, layout-independent) -- verify before changing.
        void operator=(const LinkRecord& other)
        {
            data.copyTypes.content[0] = other.data.copyTypes.content[0];
            data.copyTypes.content[1] = other.data.copyTypes.content[1];
            data.copyTypes.content[2] = other.data.copyTypes.content[2];
        }
        intptr_t from() const { return data.realTypes.m_from; }
        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
        intptr_t to() const { return data.realTypes.m_to; }
        JumpType type() const { return data.realTypes.m_type; }
        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
        // The link type may be decided only once.
        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
        Condition condition() const { return data.realTypes.m_condition; }
    private:
        union {
            // Bitfield-packed view used by the accessors.
            struct RealTypes {
                intptr_t m_from : 31;
                intptr_t m_to : 31;
                JumpType m_type : 8;
                JumpLinkType m_linkType : 8;
                Condition m_condition : 16;
            } realTypes;
            // Raw-word view used only by operator=.
            struct CopyTypes {
                uint32_t content[3];
            } copyTypes;
            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
        } data;
    };
 477  
    ARMv7Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
        // INT_MIN acts as the "no watchpoint emitted yet" sentinel for the
        // watchpoint bookkeeping members (declared elsewhere in this class).
    }

    // Direct access to the underlying instruction buffer.
    AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
 485  
 486  private:
 487  
    // ARMv7, Appx-A.6.3
    // True for registers (sp, pc) that many Thumb-2 encodings disallow as operands.
    static bool BadReg(RegisterID reg)
    {
        return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
    }
 493  
 494      uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
 495      {
 496          uint32_t rdMask = (rdNum >> 1) << highBitsShift;
 497          if (rdNum & 1)
 498              rdMask |= 1 << lowBitShift;
 499          return rdMask;
 500      }
 501  
 502      uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
 503      {
 504          uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
 505          if (rdNum & 16)
 506              rdMask |= 1 << highBitShift;
 507          return rdMask;
 508      }
 509  
    // Templates for 16-bit (single halfword) Thumb instructions.  Register
    // and immediate operands are OR'ed into the low bits by the formatter.
    typedef enum {
        OP_ADD_reg_T1       = 0x1800,
        OP_SUB_reg_T1       = 0x1A00,
        OP_ADD_imm_T1       = 0x1C00,
        OP_SUB_imm_T1       = 0x1E00,
        OP_MOV_imm_T1       = 0x2000,
        OP_CMP_imm_T1       = 0x2800,
        OP_ADD_imm_T2       = 0x3000,
        OP_SUB_imm_T2       = 0x3800,
        OP_AND_reg_T1       = 0x4000,
        OP_EOR_reg_T1       = 0x4040,
        OP_TST_reg_T1       = 0x4200,
        OP_RSB_imm_T1       = 0x4240,
        OP_CMP_reg_T1       = 0x4280,
        OP_ORR_reg_T1       = 0x4300,
        OP_MVN_reg_T1       = 0x43C0,
        OP_ADD_reg_T2       = 0x4400,
        OP_MOV_reg_T1       = 0x4600,
        OP_BLX              = 0x4700, // Shares the 0x4700 template with OP_BX.
        OP_BX               = 0x4700,
        OP_STR_reg_T1       = 0x5000,
        OP_STRH_reg_T1      = 0x5200,
        OP_STRB_reg_T1      = 0x5400,
        OP_LDRSB_reg_T1     = 0x5600,
        OP_LDR_reg_T1       = 0x5800,
        OP_LDRH_reg_T1      = 0x5A00,
        OP_LDRB_reg_T1      = 0x5C00,
        OP_LDRSH_reg_T1     = 0x5E00,
        OP_STR_imm_T1       = 0x6000,
        OP_LDR_imm_T1       = 0x6800,
        OP_STRB_imm_T1      = 0x7000,
        OP_LDRB_imm_T1      = 0x7800,
        OP_STRH_imm_T1      = 0x8000,
        OP_LDRH_imm_T1      = 0x8800,
        OP_STR_imm_T2       = 0x9000,
        OP_LDR_imm_T2       = 0x9800,
        OP_ADD_SP_imm_T1    = 0xA800,
        OP_ADD_SP_imm_T2    = 0xB000,
        OP_SUB_SP_imm_T1    = 0xB080,
        OP_PUSH_T1          = 0xB400,
        OP_POP_T1           = 0xBC00,
        OP_BKPT             = 0xBE00,
        OP_IT               = 0xBF00, // Shares the 0xBF00 template with OP_NOP_T1.
        OP_NOP_T1           = 0xBF00,
    } OpcodeID;
 555  
    // First halfword templates.  Entries below 0xE800 are complete 16-bit
    // instructions; the rest are the leading halfword of 32-bit Thumb-2
    // instructions, several of which share a value and are distinguished by
    // the trailing halfword (see OpcodeID2).
    typedef enum {
        OP_B_T1         = 0xD000,
        OP_B_T2         = 0xE000,
        OP_POP_T2       = 0xE8BD,
        OP_PUSH_T2      = 0xE92D,
        OP_AND_reg_T2   = 0xEA00,
        OP_TST_reg_T2   = 0xEA10,
        OP_ORR_reg_T2   = 0xEA40,
        OP_ORR_S_reg_T2 = 0xEA50,
        OP_ASR_imm_T1   = 0xEA4F, // The four shift-immediate forms share 0xEA4F.
        OP_LSL_imm_T1   = 0xEA4F,
        OP_LSR_imm_T1   = 0xEA4F,
        OP_ROR_imm_T1   = 0xEA4F,
        OP_MVN_reg_T2   = 0xEA6F,
        OP_EOR_reg_T2   = 0xEA80,
        OP_ADD_reg_T3   = 0xEB00,
        OP_ADD_S_reg_T3 = 0xEB10,
        OP_SUB_reg_T2   = 0xEBA0,
        OP_SUB_S_reg_T2 = 0xEBB0,
        OP_CMP_reg_T2   = 0xEBB0,
        OP_VMOV_CtoD    = 0xEC00,
        OP_VMOV_DtoC    = 0xEC10,
        OP_FSTS         = 0xED00,
        OP_VSTR         = 0xED00,
        OP_FLDS         = 0xED10,
        OP_VLDR         = 0xED10,
        OP_VMOV_CtoS    = 0xEE00,
        OP_VMOV_StoC    = 0xEE10,
        OP_VMUL_T2      = 0xEE20,
        OP_VADD_T2      = 0xEE30,
        OP_VSUB_T2      = 0xEE30,
        OP_VDIV         = 0xEE80,
        OP_VABS_T2      = 0xEEB0,
        OP_VCMP         = 0xEEB0,
        OP_VCVT_FPIVFP  = 0xEEB0,
        OP_VMOV_T2      = 0xEEB0,
        OP_VMOV_IMM_T2  = 0xEEB0,
        OP_VMRS         = 0xEEB0,
        OP_VNEG_T2      = 0xEEB0,
        OP_VSQRT_T1     = 0xEEB0,
        OP_VCVTSD_T1    = 0xEEB0,
        OP_VCVTDS_T1    = 0xEEB0,
        OP_VAND_T1      = 0xEF00,
        OP_VORR_T1      = 0xEF20,
        OP_B_T3a        = 0xF000,
        OP_B_T4a        = 0xF000,
        OP_AND_imm_T1   = 0xF000,
        OP_TST_imm      = 0xF010,
        OP_ORR_imm_T1   = 0xF040,
        OP_MOV_imm_T2   = 0xF040,
        OP_MVN_imm      = 0xF060,
        OP_EOR_imm_T1   = 0xF080,
        OP_ADD_imm_T3   = 0xF100,
        OP_ADD_S_imm_T3 = 0xF110,
        OP_CMN_imm      = 0xF110,
        OP_ADC_imm      = 0xF140,
        OP_SUB_imm_T3   = 0xF1A0,
        OP_SUB_S_imm_T3 = 0xF1B0,
        OP_CMP_imm_T2   = 0xF1B0,
        OP_RSB_imm_T2   = 0xF1C0,
        OP_RSB_S_imm_T2 = 0xF1D0,
        OP_ADD_imm_T4   = 0xF200,
        OP_MOV_imm_T3   = 0xF240,
        OP_SUB_imm_T4   = 0xF2A0,
        OP_MOVT         = 0xF2C0,
        OP_UBFX_T1      = 0xF3C0,
        OP_NOP_T2a      = 0xF3AF,
        OP_DMB_T1a      = 0xF3BF,
        OP_STRB_imm_T3  = 0xF800,
        OP_STRB_reg_T2  = 0xF800,
        OP_LDRB_imm_T3  = 0xF810,
        OP_LDRB_reg_T2  = 0xF810,
        OP_STRH_imm_T3  = 0xF820,
        OP_STRH_reg_T2  = 0xF820,
        OP_LDRH_reg_T2  = 0xF830,
        OP_LDRH_imm_T3  = 0xF830,
        OP_STR_imm_T4   = 0xF840,
        OP_STR_reg_T2   = 0xF840,
        OP_LDR_imm_T4   = 0xF850,
        OP_LDR_reg_T2   = 0xF850,
        OP_STRB_imm_T2  = 0xF880,
        OP_LDRB_imm_T2  = 0xF890,
        OP_STRH_imm_T2  = 0xF8A0,
        OP_LDRH_imm_T2  = 0xF8B0,
        OP_STR_imm_T3   = 0xF8C0,
        OP_LDR_imm_T3   = 0xF8D0,
        OP_LDRSB_reg_T2 = 0xF910,
        OP_LDRSH_reg_T2 = 0xF930,
        OP_LSL_reg_T2   = 0xFA00,
        OP_LSR_reg_T2   = 0xFA20,
        OP_ASR_reg_T2   = 0xFA40,
        OP_ROR_reg_T2   = 0xFA60,
        OP_CLZ          = 0xFAB0,
        OP_SMULL_T1     = 0xFB80,
#if HAVE(ARM_IDIV_INSTRUCTIONS)
        OP_SDIV_T1      = 0xFB90,
        OP_UDIV_T1      = 0xFBB0,
#endif
        OP_MRS_T1       = 0xF3EF,
    } OpcodeID1;
 656  
    // Second halfword templates for 32-bit instructions; the 'b' suffix pairs
    // each entry with its OpcodeID1 leading halfword (e.g. OP_VLDR/OP_VLDRb).
    typedef enum {
        OP_VAND_T1b      = 0x0010,
        OP_VORR_T1b      = 0x0010,
        OP_VADD_T2b      = 0x0A00,
        OP_VDIVb         = 0x0A00,
        OP_FLDSb         = 0x0A00,
        OP_VLDRb         = 0x0A00,
        OP_VMOV_IMM_T2b  = 0x0A00,
        OP_VMOV_T2b      = 0x0A40,
        OP_VMUL_T2b      = 0x0A00,
        OP_FSTSb         = 0x0A00,
        OP_VSTRb         = 0x0A00,
        OP_VMOV_StoCb    = 0x0A10,
        OP_VMOV_CtoSb    = 0x0A10,
        OP_VMOV_DtoCb    = 0x0A10,
        OP_VMOV_CtoDb    = 0x0A10,
        OP_VMRSb         = 0x0A10,
        OP_VABS_T2b      = 0x0A40,
        OP_VCMPb         = 0x0A40,
        OP_VCVT_FPIVFPb  = 0x0A40,
        OP_VNEG_T2b      = 0x0A40,
        OP_VSUB_T2b      = 0x0A40,
        OP_VSQRT_T1b     = 0x0A40,
        OP_VCVTSD_T1b    = 0x0A40,
        OP_VCVTDS_T1b    = 0x0A40,
        OP_NOP_T2b       = 0x8000,
        OP_DMB_SY_T1b    = 0x8F5F,
        OP_DMB_ISHST_T1b = 0x8F5A,
        OP_B_T3b         = 0x8000,
        OP_B_T4b         = 0x9000,
    } OpcodeID2;
 688  
 689      struct FourFours {
 690          FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
 691          {
 692              m_u.f0 = f0;
 693              m_u.f1 = f1;
 694              m_u.f2 = f2;
 695              m_u.f3 = f3;
 696          }
 697  
 698          union {
 699              unsigned value;
 700              struct {
 701                  unsigned f0 : 4;
 702                  unsigned f1 : 4;
 703                  unsigned f2 : 4;
 704                  unsigned f3 : 4;
 705              };
 706          } m_u;
 707      };
 708  
    class ARMInstructionFormatter;

    // Helpers for building the operand byte of an IT (If-Then) instruction.
    // The result is (condition << 4) | mask, where the mask carries one
    // then/else bit per additional conditional instruction followed by a
    // terminating 1 bit.

    // false means else!
    static bool ifThenElseConditionBit(Condition condition, bool isIf)
    {
        return isIf ? (condition & 1) : !(condition & 1);
    }
    // IT block covering four instructions; inst2if..inst4if choose then/else
    // for the second through fourth instructions.
    static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
    {
        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
            | (ifThenElseConditionBit(condition, inst3if) << 2)
            | (ifThenElseConditionBit(condition, inst4if) << 1)
            | 1;
        // AL cannot take an 'else': its mask must have exactly one bit set.
        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
        return (condition << 4) | mask;
    }
    // IT block covering three instructions.
    static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
    {
        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
            | (ifThenElseConditionBit(condition, inst3if) << 2)
            | 2;
        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
        return (condition << 4) | mask;
    }
    // IT block covering two instructions.
    static uint8_t ifThenElse(Condition condition, bool inst2if)
    {
        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
            | 4;
        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
        return (condition << 4) | mask;
    }

    // IT covering a single conditional instruction.
    static uint8_t ifThenElse(Condition condition)
    {
        int mask = 8;
        return (condition << 4) | mask;
    }
 746  
 747  public:
 748      
    // ADC (add with carry) with a modified-immediate operand: rd = rn + imm + carry.
    // Emits the 32-bit T1 immediate encoding.
    void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
    }
 759  
    // Immediate add: rd = rn + imm.  Selects the shortest available encoding:
    // the 16-bit SP-relative or low-register forms when possible, otherwise
    // the 32-bit T3 (modified immediate) or T4 (plain 12-bit) encodings.
    void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if (rn == ARMRegisters::sp && imm.isUInt16()) {
            // The 16-bit SP-relative forms encode the offset in words, so it
            // must be 4-byte aligned (hence the >> 2 below).
            ASSERT(!(imm.getUInt16() & 3));
            if (!(rd & 8) && imm.isUInt10()) {
                // T1: ADD rd, sp, #imm with rd a low register.
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
                return;
            } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
                // T2: ADD sp, sp, #imm.
                m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
                return;
            }
        } else if (!((rd | rn) & 8)) {
            // Both registers are low registers; try the 16-bit immediate forms.
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        // 32-bit fallbacks: T3 takes a modified immediate, T4 a 12-bit one.
        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
        }
    }
 794  
    // ADD (register, shifted): rd = rn + (rm shifted). 32-bit T3 encoding.
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
 803  
 804      // NOTE: In an IT block, add doesn't modify the flags register.
    // ADD (register): rd = rn + rm, choosing a 16-bit encoding when possible.
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        // When rd is SP, copy rn into SP first so the rd == rn path below
        // emits "add sp, rm" via the T2 encoding.
        if (rd == ARMRegisters::sp) {
            mov(rd, rn);
            rn = rd;
        }

        if (rd == rn)
            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
        else if (rd == rm)
            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
        else if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
        else
            // High registers with three distinct operands need the 32-bit form.
            add(rd, rn, rm, ShiftTypeAndAmount());
    }
 821  
 822      // Not allowed in an IT (if then) block.
    // ADDS (immediate): flag-setting add. Uses the 16-bit T1/T2 encodings for
    // low registers, otherwise the 32-bit flag-setting T3 form.
    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
    }
 843  
 844      // Not allowed in an IT (if then) block?
    // ADDS (register, shifted): flag-setting add, 32-bit T3 encoding.
    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
 853  
 854      // Not allowed in an IT (if then) block.
 855      ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
 856      {
 857          if (!((rd | rn | rm) & 8))
 858              m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
 859          else
 860              add_S(rd, rn, rm, ShiftTypeAndAmount());
 861      }
 862  
    // AND (immediate): rd = rn & imm, 32-bit encoding.
    // (Named ARM_and because "and" is a reserved alternative token in C++.)
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
    }
 870  
    // AND (register, shifted): rd = rn & (rm shifted), 32-bit encoding.
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
 878  
 879      ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
 880      {
 881          if ((rd == rn) && !((rd | rm) & 8))
 882              m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
 883          else if ((rd == rm) && !((rd | rn) & 8))
 884              m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
 885          else
 886              ARM_and(rd, rn, rm, ShiftTypeAndAmount());
 887      }
 888  
    // ASR (immediate): rd = rm arithmetically shifted right by shiftAmount.
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
 896  
    // ASR (register): rd = rn arithmetically shifted right by rm.
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
 904      
 905      // Only allowed in IT (if then) block if last instruction.
    // Unconditional branch (32-bit T4 encoding). Returns a label identifying
    // the branch instruction so the target can be linked later.
    ALWAYS_INLINE AssemblerLabel b()
    {
        m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
        return m_formatter.label();
    }
 911      
 912      // Only allowed in IT (if then) block if last instruction.
    // BLX (register): branch with link and exchange to the address in rm.
    // Returns a label identifying the call site.
    ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
    {
        ASSERT(rm != ARMRegisters::pc);
        m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
        return m_formatter.label();
    }
 919  
 920      // Only allowed in IT (if then) block if last instruction.
    // BX (register): branch and exchange to the address in rm.
    // Returns a label identifying the branch instruction.
    ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
    {
        m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
        return m_formatter.label();
    }
 926  
    // BKPT: software breakpoint carrying an 8-bit immediate payload.
    void bkpt(uint8_t imm = 0)
    {
        m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
    }
 931  
 932      static bool isBkpt(void* address)
 933      {
 934          unsigned short expected = OP_BKPT;
 935          unsigned short immediateMask = 0xff;
 936          unsigned short candidateInstruction = *reinterpret_cast<unsigned short*>(address);
 937          return (candidateInstruction & ~immediateMask) == expected;
 938      }
 939  
    // CLZ: rd = count of leading zero bits in rm.
    // Note the encoding carries rm in both halfwords.
    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
    }
 946  
    // CMN (immediate): sets flags for rn + imm.
    // The Rd field of the encoding is fixed at 0xf.
    ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
    }
 954  
    // CMP (immediate): sets flags for rn - imm. 16-bit T1 encoding when rn is
    // low and the immediate fits in 8 bits, otherwise the 32-bit T2 form.
    ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        if (!(rn & 8) && imm.isUInt8())
            m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
    }
 965  
    // CMP (register, shifted): sets flags for rn - (rm shifted).
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }
 972  
 973      ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
 974      {
 975          if ((rn | rm) & 8)
 976              cmp(rn, rm, ShiftTypeAndAmount());
 977          else
 978              m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
 979      }
 980  
 981      // xor is not spelled with an 'e'. :-(
    // xor is not spelled with an 'e'. :-(
    // EOR (immediate): rd = rn ^ imm, 32-bit encoding.
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
    }
 989  
 990      // xor is not spelled with an 'e'. :-(
    // xor is not spelled with an 'e'. :-(
    // EOR (register, shifted): rd = rn ^ (rm shifted), 32-bit encoding.
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
 998  
 999      // xor is not spelled with an 'e'. :-(
1000      void eor(RegisterID rd, RegisterID rn, RegisterID rm)
1001      {
1002          if ((rd == rn) && !((rd | rm) & 8))
1003              m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
1004          else if ((rd == rm) && !((rd | rn) & 8))
1005              m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
1006          else
1007              eor(rd, rn, rm, ShiftTypeAndAmount());
1008      }
1009  
    // IT: conditionalizes the single following instruction on |cond|.
    ALWAYS_INLINE void it(Condition cond)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
    }
1014  
    // IT: conditionalizes the next two instructions; inst2if selects
    // then (true) or else (false) for the second.
    ALWAYS_INLINE void it(Condition cond, bool inst2if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
    }
1019  
    // IT: conditionalizes the next three instructions (then/else per flag).
    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
    }
1024  
    // IT: conditionalizes the next four instructions (then/else per flag).
    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
    }
1029  
1030      // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // LDR (immediate): word load from rn + imm. Picks the shortest encoding
    // that fits: 16-bit T1 (low regs, word-aligned imm), 16-bit T2
    // (SP-relative), or the 32-bit T3 form with a full 12-bit immediate.
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7() && !(imm.getUInt7() % 4)) {
            // We can only use Encoding T1 when imm is a multiple of 4.
            // For details see A8.8.63 on ARM Architecture Reference
            // Manual ARMv7-A and ARMv7-R edition available on
            // https://static.docs.arm.com/ddi0406/cd/DDI0406C_d_armv7ar_arm.pdf
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        } else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
    }
1047      
    // Always emits the 32-bit T3 LDR even when a 16-bit encoding would fit —
    // presumably so the instruction has a fixed width for later patching;
    // confirm against callers before relying on that.
    ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
    {
        ASSERT(rn != ARMRegisters::pc);
        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
    }
1053  
    // Emits only the 16-bit T1 LDR; the caller must guarantee low registers
    // and an offset small enough for the 5-bit scaled immediate.
    ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt7());
        ASSERT(!((rt | rn) & 8));
        m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
    }
1061  
    // If index is set, this is a regular offset or a pre-indexed load;
    // if index is not set then it is a post-index load.
    //
    // If wback is set rn is updated - this is a pre or post index load,
    // if wback is not set this is a regular offset memory access.
    //
    // (-255 <= offset <= 255)
    // _reg = REG[rn]
    // _tmp = _reg + offset
    // REG[rt] = MEM[index ? _tmp : _reg]
    // if (wback) REG[rn] = _tmp
1073      ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1074      {
1075          ASSERT(rt != ARMRegisters::pc);
1076          ASSERT(rn != ARMRegisters::pc);
1077          ASSERT(index || wback);
1078          ASSERT(!wback | (rt != rn));
1079      
1080          bool add = true;
1081          if (offset < 0) {
1082              add = false;
1083              offset = -offset;
1084          }
1085          ASSERT((offset & ~0xff) == 0);
1086          
1087          offset |= (wback << 8);
1088          offset |= (add   << 9);
1089          offset |= (index << 10);
1090          offset |= (1 << 11);
1091          
1092          m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
1093      }
1094  
1095      // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // LDR (register): word load from rn + (rm << shift).
    // 16-bit T1 encoding when all registers are low and shift is zero.
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1107  
1108      // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // LDRH (immediate): halfword load from rn + imm; imm must be halfword
    // aligned. 16-bit T1 encoding for low registers and small offsets.
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());
        ASSERT(!(imm.getUInt12() & 1));

        if (!((rt | rn) & 8) && imm.isUInt6())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
    }
1120  
    // If index is set, this is a regular offset or a pre-indexed load;
    // if index is not set then it is a post-index load.
    //
    // If wback is set rn is updated - this is a pre or post index load,
    // if wback is not set this is a regular offset memory access.
    //
    // (-255 <= offset <= 255)
    // _reg = REG[rn]
    // _tmp = _reg + offset
    // REG[rt] = MEM[index ? _tmp : _reg]
    // if (wback) REG[rn] = _tmp
1132      ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1133      {
1134          ASSERT(rt != ARMRegisters::pc);
1135          ASSERT(rn != ARMRegisters::pc);
1136          ASSERT(index || wback);
1137          ASSERT(!wback | (rt != rn));
1138      
1139          bool add = true;
1140          if (offset < 0) {
1141              add = false;
1142              offset = -offset;
1143          }
1144          ASSERT((offset & ~0xff) == 0);
1145          
1146          offset |= (wback << 8);
1147          offset |= (add   << 9);
1148          offset |= (index << 10);
1149          offset |= (1 << 11);
1150          
1151          m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
1152      }
1153  
    // LDRH (register): halfword load from rn + (rm << shift).
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(!BadReg(rt));   // Memory hint
        ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1166  
    // LDRB (immediate): zero-extending byte load from rn + imm.
    // 16-bit T1 encoding when registers are low and imm fits in 5 bits.
    void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt5())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
    }
1177  
1178      void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1179      {
1180          ASSERT(rt != ARMRegisters::pc);
1181          ASSERT(rn != ARMRegisters::pc);
1182          ASSERT(index || wback);
1183          ASSERT(!wback | (rt != rn));
1184  
1185          bool add = true;
1186          if (offset < 0) {
1187              add = false;
1188              offset = -offset;
1189          }
1190  
1191          ASSERT(!(offset & ~0xff));
1192  
1193          offset |= (wback << 8);
1194          offset |= (add   << 9);
1195          offset |= (index << 10);
1196          offset |= (1 << 11);
1197  
1198          m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
1199      }
1200  
    // LDRB (register): zero-extending byte load from rn + (rm << shift).
    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1212      
    // LDRSB (register): sign-extending byte load from rn + (rm << shift).
    void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);
        
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1224  
    // LDRSH (register): sign-extending halfword load from rn + (rm << shift).
    void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);
        
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1236  
    // LSL (immediate): rd = rm << shiftAmount.
    void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1244  
    // LSL (register): rd = rn << rm.
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1252  
    // LSR (immediate): rd = rm logically shifted right by shiftAmount.
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1260  
    // LSR (register): rd = rn logically shifted right by rm.
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1268  
    // MOV (immediate, T3 / MOVW): loads a raw 16-bit immediate into rd.
    // Only used when the value is not expressible as a Thumb expanded
    // immediate (see mov() below); the imm4 field supplies the top nibble.
    ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!imm.isEncodedImm());
        ASSERT(!BadReg(rd));
        
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
    }
1277      
#if OS(LINUX)
    // Rewrites a patchable jump site back to its original five-halfword
    // sequence: movT3 (lo16) + movt (hi16) materializing |imm| into |right|,
    // followed by a 16-bit cmp against |left|. Flushes the icache afterwards.
    static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
    {
        uint16_t* address = static_cast<uint16_t*>(instructionStart);
        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
        uint16_t instruction[] = {
            twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16),
            twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16),
            twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16),
            twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16),
            static_cast<uint16_t>(OP_CMP_reg_T2 | left)
        };
        performJITMemcpy(address, instruction, sizeof(uint16_t) * 5);
        cacheFlush(address, sizeof(uint16_t) * 5);
    }
#else
    // Rewrites a patchable jump site back to a two-halfword movT3 loading
    // |imm| into |rd|. Flushes the icache afterwards.
    static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!imm.isEncodedImm());
        ASSERT(!BadReg(rd));
        
        uint16_t* address = static_cast<uint16_t*>(instructionStart);
        uint16_t instruction[] = {
            twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm),
            twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm)
        };
        performJITMemcpy(address, instruction, sizeof(uint16_t) * 2);
        cacheFlush(address, sizeof(uint16_t) * 2);
    }
#endif
1310  
    // MOV (immediate): 16-bit T1 for a low rd with an 8-bit value, 32-bit T2
    // for a Thumb expanded immediate, else a raw 16-bit MOVW via movT3.
    ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!BadReg(rd));
        
        if ((rd < 8) && imm.isUInt8())
            m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
        else if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
        else
            movT3(rd, imm);
    }
1323  
    // MOV (register), 16-bit T1 encoding; valid for high registers too.
    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
    {
        m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
    }
1328  
    // MOVT: writes imm16 into the top halfword of rd, leaving the bottom
    // halfword untouched. Pairs with movT3 to build a full 32-bit constant.
    ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isUInt16());
        ASSERT(!BadReg(rd));
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
    }
1335  
    // MVN (immediate): rd = ~imm, 32-bit encoding.
    ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isEncodedImm());
        ASSERT(!BadReg(rd));
        
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
    }
1343  
    // MVN (register, shifted): rd = ~(rm shifted), 32-bit encoding.
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1350  
1351      ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
1352      {
1353          if (!((rd | rm) & 8))
1354              m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
1355          else
1356              mvn(rd, rm, ShiftTypeAndAmount());
1357      }
1358  
    // MRS: reads a special register into rd. Only APSR is supported (see the
    // assert); the ternary keeps the encoding's special-register bit explicit.
    ALWAYS_INLINE void mrs(RegisterID rd, SPRegisterID specReg)
    {
        ASSERT(specReg == ARMRegisters::apsr);
        ASSERT(!BadReg(rd));
        unsigned short specialRegisterBit = (specReg == ARMRegisters::apsr) ? 0 : (1 << 4);
        OpcodeID1 mrsOp = static_cast<OpcodeID1>(OP_MRS_T1 | specialRegisterBit);
        m_formatter.twoWordOp16FourFours(mrsOp, FourFours(0x8, rd, 0, 0));
    }
1367  
1368      ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
1369      {
1370          ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1371          sub(rd, zero, rm);
1372      }
1373  
    // ORR (immediate): rd = rn | imm, 32-bit encoding.
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
    }
1381  
    // ORR (register, shifted): rd = rn | (rm shifted), 32-bit encoding.
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1389  
1390      void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1391      {
1392          if ((rd == rn) && !((rd | rm) & 8))
1393              m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1394          else if ((rd == rm) && !((rd | rn) & 8))
1395              m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1396          else
1397              orr(rd, rn, rm, ShiftTypeAndAmount());
1398      }
1399  
    // ORRS (register, shifted): flag-setting rd = rn | (rm shifted).
    ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1407  
1408      void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
1409      {
1410          if ((rd == rn) && !((rd | rm) & 8))
1411              m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1412          else if ((rd == rm) && !((rd | rn) & 8))
1413              m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1414          else
1415              orr_S(rd, rn, rm, ShiftTypeAndAmount());
1416      }
1417  
    // ROR (immediate): rd = rm rotated right by shiftAmount.
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1425  
    // ROR (register): rd = rn rotated right by rm.
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1433  
1434      ALWAYS_INLINE void pop(RegisterID dest)
1435      {
1436          if (dest < ARMRegisters::r8)
1437              m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
1438          else {
1439              // Load postindexed with writeback.
1440              ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
1441          }
1442      }
1443  
    // POP (multiple): pops the registers in the bitmask. SP may not be in the
    // list, and PC and LR are mutually exclusive per the encoding.
    ALWAYS_INLINE void pop(uint32_t registerList)
    {
        ASSERT(WTF::bitCount(registerList) > 1);
        ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
        ASSERT(!((1 << ARMRegisters::sp) & registerList));
        m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
    }
1451  
1452      ALWAYS_INLINE void push(RegisterID src)
1453      {
1454          if (src < ARMRegisters::r8)
1455              m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
1456          else if (src == ARMRegisters::lr)
1457              m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
1458          else {
1459              // Store preindexed with writeback.
1460              str(src, ARMRegisters::sp, -sizeof(void*), true, true);
1461          }
1462      }
1463  
    // PUSH (multiple): pushes the registers in the bitmask.
    // Neither PC nor SP may appear in the list.
    ALWAYS_INLINE void push(uint32_t registerList)
    {
        ASSERT(WTF::bitCount(registerList) > 1);
        ASSERT(!((1 << ARMRegisters::pc) & registerList));
        ASSERT(!((1 << ARMRegisters::sp) & registerList));
        m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
    }
1471  
#if HAVE(ARM_IDIV_INSTRUCTIONS)
    // SDIV: signed 32-bit divide, rd = rn / rm. Only available when the
    // target provides hardware integer divide (e.g. armv7s).
    template<int datasize>
    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
    }
#endif
1483  
    // SMULL: signed 32x32->64 multiply, {rdHi:rdLo} = rn * rm.
    ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rdLo));
        ASSERT(!BadReg(rdHi));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        ASSERT(rdLo != rdHi);
        m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
    }
1493  
1494      // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STR (immediate): word store to rn + imm. Picks the shortest encoding:
    // 16-bit T1 (low regs, word-scaled imm), 16-bit T2 (SP-relative), or the
    // 32-bit T3 form with a full 12-bit immediate.
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
    }
1508  
    // If index is set, this is a regular offset or a pre-indexed store;
    // if index is not set then it is a post-index store.
    //
    // If wback is set rn is updated - this is a pre or post index store,
    // if wback is not set this is a regular offset memory access.
    //
    // (-255 <= offset <= 255)
    // _reg = REG[rn]
    // _tmp = _reg + offset
    // MEM[index ? _tmp : _reg] = REG[rt]
    // if (wback) REG[rn] = _tmp
1520      ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1521      {
1522          ASSERT(rt != ARMRegisters::pc);
1523          ASSERT(rn != ARMRegisters::pc);
1524          ASSERT(index || wback);
1525          ASSERT(!wback | (rt != rn));
1526      
1527          bool add = true;
1528          if (offset < 0) {
1529              add = false;
1530              offset = -offset;
1531          }
1532          ASSERT((offset & ~0xff) == 0);
1533          
1534          offset |= (wback << 8);
1535          offset |= (add   << 9);
1536          offset |= (index << 10);
1537          offset |= (1 << 11);
1538          
1539          m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1540      }
1541  
    // STR (register) — store word: MEM[rn + (rm << shift)] = rt.
    // Uses the 16-bit T1 encoding when there is no shift and all registers are low.
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3); // T2 encodes a left shift of 0..3 on the index register.

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1554  
1555      // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1556      ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1557      {
1558          ASSERT(rt != ARMRegisters::pc);
1559          ASSERT(rn != ARMRegisters::pc);
1560          ASSERT(imm.isUInt12());
1561  
1562          if (!((rt | rn) & 8) && imm.isUInt7())
1563              m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
1564          else
1565              m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
1566      }
1567  
1568      // If index is set, this is a regular offset or a pre-indexed store;
1569      // if index is not set then is is a post-index store.
1570      //
1571      // If wback is set rn is updated - this is a pre or post index store,
1572      // if wback is not set this is a regular offset memory access.
1573      //
1574      // (-255 <= offset <= 255)
1575      // _reg = REG[rn]
1576      // _tmp = _reg + offset
1577      // MEM[index ? _tmp : _reg] = REG[rt]
1578      // if (wback) REG[rn] = _tmp
1579      ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1580      {
1581          ASSERT(rt != ARMRegisters::pc);
1582          ASSERT(rn != ARMRegisters::pc);
1583          ASSERT(index || wback);
1584          ASSERT(!wback | (rt != rn));
1585      
1586          bool add = true;
1587          if (offset < 0) {
1588              add = false;
1589              offset = -offset;
1590          }
1591          ASSERT((offset & ~0xff) == 0);
1592          
1593          offset |= (wback << 8);
1594          offset |= (add   << 9);
1595          offset |= (index << 10);
1596          offset |= (1 << 11);
1597          
1598          m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
1599      }
1600  
    // STRB (register) — store byte: MEM[rn + (rm << shift)] = rt[7:0].
    // Uses the 16-bit T1 encoding when there is no shift and all registers are low.
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3); // T2 encodes a left shift of 0..3 on the index register.

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1613      
    // STRH (immediate) — store the least-significant halfword of rt to MEM[rn + imm].
    // Uses the 16-bit T1 encoding (halfword-scaled imm5) for low registers, else T2.
    // NOTE(review): the T1 path shifts the offset right by 1 without asserting halfword
    // alignment — presumably callers only pass even offsets here; verify.
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());
        
        if (!((rt | rn) & 8) && imm.isUInt6())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
    }
1626      
1627      // If index is set, this is a regular offset or a pre-indexed store;
1628      // if index is not set then is is a post-index store.
1629      //
1630      // If wback is set rn is updated - this is a pre or post index store,
1631      // if wback is not set this is a regular offset memory access.
1632      //
1633      // (-255 <= offset <= 255)
1634      // _reg = REG[rn]
1635      // _tmp = _reg + offset
1636      // MEM[index ? _tmp : _reg] = REG[rt]
1637      // if (wback) REG[rn] = _tmp
1638      ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1639      {
1640          ASSERT(rt != ARMRegisters::pc);
1641          ASSERT(rn != ARMRegisters::pc);
1642          ASSERT(index || wback);
1643          ASSERT(!wback | (rt != rn));
1644          
1645          bool add = true;
1646          if (offset < 0) {
1647              add = false;
1648              offset = -offset;
1649          }
1650          ASSERT(!(offset & ~0xff));
1651          
1652          offset |= (wback << 8);
1653          offset |= (add   << 9);
1654          offset |= (index << 10);
1655          offset |= (1 << 11);
1656          
1657          m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
1658      }
1659      
    // STRH (register) — store halfword: MEM[rn + (rm << shift)] = rt[15:0].
    // Uses the 16-bit T1 encoding when there is no shift and all registers are low.
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3); // T2 encodes a left shift of 0..3 on the index register.
        
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1672  
    // SUB (immediate) — rd = rn - imm, flags unchanged (outside an IT block the
    // 16-bit T1/T2 forms do set flags; see sub_S for the explicit flag-setting variant).
    // Prefers 16-bit encodings: SP-adjust T1, three-low-register T1, or rd==rn T2;
    // otherwise falls back to the 32-bit T3 (modified immediate) or T4 (plain imm12).
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            ASSERT(!(imm.getUInt16() & 3)); // SP adjustments must be word-aligned.
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
            return;
        } else if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                // T1: the 3-bit immediate travels in the register slot of the encoding.
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
        }
    }
1702  
    // Reverse subtract — rd = imm - rn (note the operand order in the signature).
    // Emits the 16-bit RSB T1 (NEG) when imm is zero and both registers are low,
    // otherwise the 32-bit RSB T2 encoding.
    ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        if (!((rd | rn) & 8) && !imm.getUInt12())
            m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
    }
1715  
    // SUB (register, shifted) — rd = rn - (rm shifted); 32-bit T2 encoding.
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1724  
1725      // NOTE: In an IT block, add doesn't modify the flags register.
1726      ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
1727      {
1728          if (!((rd | rn | rm) & 8))
1729              m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1730          else
1731              sub(rd, rn, rm, ShiftTypeAndAmount());
1732      }
1733  
    // SUBS (immediate) — rd = rn - imm, setting the condition flags.
    // Not allowed in an IT (if then) block.
    // NOTE(review): the SP/T1 path emits OP_SUB_SP_imm_T1, which does not set
    // flags — presumably callers never rely on flags when rd == rn == sp; verify.
    void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            ASSERT(!(imm.getUInt16() & 3)); // SP adjustments must be word-aligned.
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
            return;
        } else if (!((rd | rn) & 8)) {
            // The 16-bit T1/T2 forms set flags outside an IT block, which is the
            // required behavior here.
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
    }
1759  
    // RSBS — rd = imm - rn, setting the condition flags (32-bit T2 encoding).
    ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
    }
1769  
    // SUBS (register, shifted) — rd = rn - (rm shifted), setting flags.
    // Not allowed in an IT (if then) block?
    ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1779  
1780      // Not allowed in an IT (if then) block.
1781      ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1782      {
1783          if (!((rd | rn | rm) & 8))
1784              m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1785          else
1786              sub_S(rd, rn, rm, ShiftTypeAndAmount());
1787      }
1788  
    // TST (immediate) — set flags on rn & imm; no register result
    // (the destination register field is encoded as 0xf).
    ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
    }
1796  
    // TST (register, shifted) — set flags on rn & (rm shifted); 32-bit T2 encoding.
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }
1803  
1804      ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
1805      {
1806          if ((rn | rm) & 8)
1807              tst(rn, rm, ShiftTypeAndAmount());
1808          else
1809              m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
1810      }
1811  
    // UBFX — unsigned bit-field extract: rd = (rn >> lsb) & ((1 << width) - 1).
    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
    {
        ASSERT(lsb < 32);
        ASSERT((width >= 1) && (width <= 32));
        ASSERT((lsb + width) <= 32); // Field must fit entirely within the register.
        // lsb is split across imm3 (bits 4:2) and imm2 (bits 1:0); width is encoded as width - 1.
        m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
    }
1819  
#if HAVE(ARM_IDIV_INSTRUCTIONS)
    // UDIV — unsigned integer divide: rd = rn / rm (only on cores with hardware divide).
    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
    }
#endif
1829  
    // VAND — 64-bit bitwise AND of double registers: rd = rn & rm.
    void vand(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VAND_T1, OP_VAND_T1b, true, rn, rd, rm);
    }

    // VORR — 64-bit bitwise OR of double registers: rd = rn | rm.
    void vorr(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VORR_T1, OP_VORR_T1b, true, rn, rd, rm);
    }

    // VADD.F64 — double-precision add: rd = rn + rm.
    void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
    }

    // VCMP.F64 — compare rd with rm; result lands in the FPSCR flags (read back via vmrs()).
    void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
    }

    // VCMP.F64 against zero — compare rd with +0.0; result lands in the FPSCR flags.
    void vcmpz(FPDoubleRegisterID rd)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
    }

    // VCVT — signed 32-bit integer (in a single register) to double-precision float.
    void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
    }

    // VCVT — double-precision float to signed 32-bit integer, rounding toward zero.
    void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
    }
    
    // VCVT — double-precision float to unsigned 32-bit integer, rounding toward zero.
    void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
    }

    // VDIV.F64 — double-precision divide: rd = rn / rm.
    void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
    }
1877  
    // VLDR — load a double from MEM[rn + imm] into rd.
    void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
    }
    
    // FLDS — load a single-precision value from MEM[rn + imm] into rd.
    void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
    }

    // VMOV — copy a single-precision register to a core register: rd = rn.
    void vmov(RegisterID rd, FPSingleRegisterID rn)
    {
        ASSERT(!BadReg(rd));
        m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
    }

    // VMOV — copy a core register to a single-precision register: rd = rn.
    void vmov(FPSingleRegisterID rd, RegisterID rn)
    {
        ASSERT(!BadReg(rn));
        m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
    }

    // VMOV — split a double register into two core registers (rd1 = low word, rd2 = high word).
    void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
    {
        ASSERT(!BadReg(rd1));
        ASSERT(!BadReg(rd2));
        // The |16 marks the operand as a core register pair for vfpOp's encoding.
        m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
    }

    // VMOV — assemble a double register from two core registers (rn1 = low word, rn2 = high word).
    void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
    {
        ASSERT(!BadReg(rn1));
        ASSERT(!BadReg(rn2));
        m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
    }

    // VMOV.F64 — copy one double register to another: rd = rn.
    void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
    {
        m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
    }

    // VMRS — transfer the FPSCR flags; with the default reg == pc this copies the
    // floating-point condition flags into the APSR (the usual follow-up to vcmp).
    void vmrs(RegisterID reg = ARMRegisters::pc)
    {
        ASSERT(reg != ARMRegisters::sp);
        m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
    }

    // VMUL.F64 — double-precision multiply: rd = rn * rm.
    void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
    }
1929  
    // VSTR — store the double in rd to MEM[rn + imm].
    void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
    }

    // FSTS — store the single-precision value in rd to MEM[rn + imm].
    void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
    }

    // VSUB.F64 — double-precision subtract: rd = rn - rm.
    void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
    }

    // VABS.F64 — rd = |rm|.
    void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
    }

    // VNEG.F64 — rd = -rm.
    void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
    }

    // VSQRT.F64 — rd = sqrt(rm).
    void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
    }
    
    // VCVT.F64.F32 — widen a single-precision value to double precision.
    void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
    }

    // VCVT.F32.F64 — narrow a double-precision value to single precision.
    void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
    }
1969  
    // Emit a 16-bit NOP.
    void nop()
    {
        m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
    }

    // Emit a 32-bit (wide) NOP.
    void nopw()
    {
        m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
    }
    
    // Raw 16-bit NOP encoding, for writing NOPs directly into a buffer (see fillNops()).
    static constexpr int16_t nopPseudo16()
    {
        return OP_NOP_T1;
    }

    // Raw 32-bit NOP encoding; the second halfword (T2b) sits in the high bits so a
    // little-endian 32-bit store lays the halfwords out in instruction-stream order.
    static constexpr int32_t nopPseudo32()
    {
        return OP_NOP_T2a | (OP_NOP_T2b << 16);
    }
1989  
1990      using CopyFunction = void*(&)(void*, const void*, size_t);
1991  
1992      template <CopyFunction copy>
1993      ALWAYS_INLINE static void fillNops(void* base, size_t size)
1994      {
1995          RELEASE_ASSERT(!(size % sizeof(int16_t)));
1996  
1997          char* ptr = static_cast<char*>(base);
1998          const size_t num32s = size / sizeof(int32_t);
1999          for (size_t i = 0; i < num32s; i++) {
2000              const int32_t insn = nopPseudo32();
2001              copy(ptr, &insn, sizeof(int32_t));
2002              ptr += sizeof(int32_t);
2003          }
2004  
2005          const size_t num16s = (size % sizeof(int32_t)) / sizeof(int16_t);
2006          ASSERT(num16s == 0 || num16s == 1);
2007          ASSERT(num16s * sizeof(int16_t) + num32s * sizeof(int32_t) == size);
2008          if (num16s) {
2009              const int16_t insn = nopPseudo16();
2010              copy(ptr, &insn, sizeof(int16_t));
2011          }
2012      }
2013  
    // DMB SY — full-system data memory barrier (loads and stores).
    void dmbSY()
    {
        m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_SY_T1b);
    }

    // DMB ISHST — inner-shareable, store-only data memory barrier.
    void dmbISHST()
    {
        m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_ISHST_T1b);
    }
2023  
    // Current buffer position, without the watchpoint-padding adjustment that label() applies.
    AssemblerLabel labelIgnoringWatchpoints()
    {
        return m_formatter.label();
    }
2028  
    // Label the start of a watchpoint. Records the position and reserves
    // maxJumpReplacementSize() bytes after it (via the bookkeeping consumed by
    // label()) so the site can later be overwritten with a jump.
    AssemblerLabel labelForWatchpoint()
    {
        AssemblerLabel result = m_formatter.label();
        // If we are not exactly at the last watchpoint index, take a padded label instead.
        if (static_cast<int>(result.offset()) != m_indexOfLastWatchpoint)
            result = label();
        m_indexOfLastWatchpoint = result.offset();
        m_indexOfTailOfLastWatchpoint = result.offset() + maxJumpReplacementSize();
        return result;
    }
2038  
    // Return a label at the current position, first emitting NOP padding so the
    // label lands past the patch region reserved for the most recent watchpoint.
    AssemblerLabel label()
    {
        AssemblerLabel result = m_formatter.label();
        while (UNLIKELY(static_cast<int>(result.offset()) < m_indexOfTailOfLastWatchpoint)) {
            // Prefer wide (4-byte) NOPs while they fit, finishing with a narrow one if needed.
            if (UNLIKELY(static_cast<int>(result.offset()) + 4 <= m_indexOfTailOfLastWatchpoint))
                nopw();
            else
                nop();
            result = m_formatter.label();
        }
        return result;
    }
2051      
    // Pad with breakpoint instructions until the buffer is aligned, then return a label.
    // bkpt (rather than nop) is used so that falling into the padding traps.
    AssemblerLabel align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            bkpt();

        return label();
    }
2059      
2060      static void* getRelocatedAddress(void* code, AssemblerLabel label)
2061      {
2062          ASSERT(label.isSet());
2063          return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.offset());
2064      }
2065      
2066      static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
2067      {
2068          return b.offset() - a.offset();
2069      }
2070  
    // Bytes reclaimed when a jump planned as |jumpType| is emitted with the (shorter) |jumpLinkType| encoding.
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
2072      
2073      // Assembler admin methods:
2074  
    // Orders link records by ascending source offset (used to sort jumpsToLink()).
    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }
2079  
2080      static bool canCompact(JumpType jumpType)
2081      {
2082          // The following cannot be compacted:
2083          //   JumpFixed: represents custom jump sequence
2084          //   JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
2085          //   JumpConditionFixedSize: represents conditional jump that must remain a fixed size
2086          return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
2087      }
2088      
    // Choose the shortest branch encoding that can span from |from| to |to|,
    // given that JUMP_ENUM_SIZE(jumpType) bytes were reserved at the jump site.
    // Each candidate location is computed so the instruction ends at |from|.
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        if (jumpType == JumpFixed)
            return LinkInvalid;
        
        // for patchable jump we must leave space for the longest code sequence
        if (jumpType == JumpNoConditionFixedSize)
            return LinkBX;
        if (jumpType == JumpConditionFixedSize)
            return LinkConditionalBX;
        
        const int paddingSize = JUMP_ENUM_SIZE(jumpType);
        
        if (jumpType == JumpCondition) {
            // 2-byte conditional T1
            const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
            if (canBeJumpT1(jumpT1Location, to))
                return LinkJumpT1;
            // 4-byte conditional T3
            const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
            if (canBeJumpT3(jumpT3Location, to))
                return LinkJumpT3;
            // 4-byte conditional T4 with IT
            const uint16_t* conditionalJumpT4Location = 
            reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
            if (canBeJumpT4(conditionalJumpT4Location, to))
                return LinkConditionalJumpT4;
        } else {
            // 2-byte unconditional T2
            const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
            if (canBeJumpT2(jumpT2Location, to))
                return LinkJumpT2;
            // 4-byte unconditional T4
            const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
            if (canBeJumpT4(jumpT4Location, to))
                return LinkJumpT4;
            // use long jump sequence
            return LinkBX;
        }
        
        // No short conditional form reached; fall back to the long conditional sequence.
        ASSERT(jumpType == JumpCondition);
        return LinkConditionalBX;
    }
2132      
2133      static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
2134      {
2135          JumpLinkType linkType = computeJumpType(record.type(), from, to);
2136          record.setLinkType(linkType);
2137          return linkType;
2138      }
2139      
    // Pending jump records, sorted in place by source offset so the linker can
    // resolve them in a single forward pass.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }
2145  
    // Patch one jump according to its precomputed link type.
    // |from| is the writable address of the jump's end, |fromInstruction8| the
    // corresponding executable address (used for displacement computation), and
    // |to| the branch target. All writes go through |copy|.
    template<CopyFunction copy>
    static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to)
    {
        const uint16_t* fromInstruction = reinterpret_cast_ptr<const uint16_t*>(fromInstruction8);
        switch (record.linkType()) {
        case LinkJumpT1:
            linkJumpT1<copy>(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
            break;
        case LinkJumpT2:
            linkJumpT2<copy>(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
            break;
        case LinkJumpT3:
            linkJumpT3<copy>(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
            break;
        case LinkJumpT4:
            linkJumpT4<copy>(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
            break;
        case LinkConditionalJumpT4:
            linkConditionalJumpT4<copy>(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
            break;
        case LinkConditionalBX:
            linkConditionalBX<copy>(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
            break;
        case LinkBX:
            linkBX<copy>(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
            break;
        default:
            // LinkInvalid (and any future types) must never reach the linker.
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
2177  
    // Number of bytes emitted into the assembler buffer so far.
    size_t codeSize() const { return m_formatter.codeSize(); }
2179  
2180      static unsigned getCallReturnOffset(AssemblerLabel call)
2181      {
2182          ASSERT(call.isSet());
2183          return call.offset();
2184      }
2185  
2186      // Linking & patching:
2187      //
2188      // 'link' and 'patch' methods are for use on unprotected code - such as the code
2189      // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
2190      // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
2192      // pool the 'repatch' and 'relink' methods should be used.
2193  
    // Record an intra-buffer jump to be resolved during finalization
    // (see jumpsToLink() / link()); nothing is patched here.
    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.offset(), to.offset(), type, condition));
    }
2200  
    // Patch the jump ending at offset |from| within unprotected |code| to branch to |to|.
    // For already-finalized (protected) code use relinkJump() instead.
    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());
        
        uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.offset());
        linkJumpAbsolute(location, location, to);
    }
2208  
    // Patch the pointer-materializing sequence of a call in unprotected |code|.
    // |from| labels the position just past the call; the immediate being patched
    // ends one halfword earlier. The final |false| skips the cache flush.
    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(code) & 1)); // Code addresses here must not carry the Thumb bit.
        ASSERT(from.isSet());

        setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.offset()) - 1, to, false);
    }
2216  
    // Patch a pointer-sized immediate previously emitted at |where| within unprotected |code|.
    static void linkPointer(void* code, AssemblerLabel where, void* value)
    {
        setPointer(reinterpret_cast<char*>(code) + where.offset(), value, false);
    }
2221  
    // The static relink and replace methods can use |from| for both
2223      // the write and executable address for call and jump patching
2224      // as they're modifying existing (linked) code, so the address being
2225      // provided is correct for relative address computation.
    // Retarget an already-linked jump ending at |from| (live, executable code) to |to|,
    // then flush the icache over the maximum 5-halfword jump sequence.
    static void relinkJump(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1)); // Addresses must not carry the Thumb bit.
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));

        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), reinterpret_cast<uint16_t*>(from), to);

        cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
    }
2235  
    // Neutralize a jump by retargeting it at itself (i.e. fall through to the next instruction).
    static void relinkJumpToNop(void* from)
    {
        relinkJump(from, from);
    }
2240      
    // Retarget an already-linked call ending at |from| to |to|; the final |true|
    // requests the cache flush that live code requires (contrast linkCall()).
    static void relinkCall(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1)); // Address must not carry the Thumb bit.

        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
    }
2247      
    // Read back the target pointer of the call whose sequence ends at |from|.
    static void* readCallTarget(void* from)
    {
        return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
    }
2252  
    // Rewrite the 32-bit immediate at |where| in live code (with cache flush).
    static void repatchInt32(void* where, int32_t value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1)); // Address must not carry the Thumb bit.
        
        setInt32(where, value, true);
    }
2259      
    // Rewrite the offset of a "compact" two-halfword load/store at |where| that
    // uses the imm8 + P/U/W addressing form (see the offset/index/wback store
    // emitters above): only the second halfword's low 12 bits are replaced.
    // NOTE(review): assumes |where| points at such an instruction — callers must
    // guarantee this; nothing here validates the opcode.
    static void repatchCompact(void* where, int32_t offset)
    {
        ASSERT(offset >= -255 && offset <= 255);

        // Split sign from magnitude; U (bit 9) selects add vs subtract.
        bool add = true;
        if (offset < 0) {
            add = false;
            offset = -offset;
        }
        
        // P = 1 (bit 10, offset addressing, no writeback), bit 11 always set.
        offset |= (add << 9);
        offset |= (1 << 10);
        offset |= (1 << 11);

        uint16_t* location = reinterpret_cast<uint16_t*>(where);
        uint16_t instruction = location[1] & ~((1 << 12) - 1); // Keep opcode bits, drop the old offset field.
        instruction |= offset;
        performJITMemcpy(location + 1, &instruction, sizeof(uint16_t));
        cacheFlush(location, sizeof(uint16_t) * 2);
    }
2280  
    // Rewrite the pointer-sized immediate at |where| in live code (with cache flush).
    static void repatchPointer(void* where, void* value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1)); // Address must not carry the Thumb bit.
        
        setPointer(where, value, true);
    }
2287  
2288      static void* readPointer(void* where)
2289      {
2290          return reinterpret_cast<void*>(readInt32(where));
2291      }
2292  
2293      static void replaceWithJump(void* instructionStart, void* to)
2294      {
2295          ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2296          ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
2297  
2298  #if OS(LINUX)
2299          if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
2300              uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2301              linkJumpT4(ptr, ptr, to);
2302              cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2303          } else {
2304              uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
2305              linkBX(ptr, ptr, to);
2306              cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
2307          }
2308  #else
2309          uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2310          linkJumpT4(ptr, ptr, to);
2311          cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2312  #endif
2313      }
2314      
    // Worst-case byte size of the code written by replaceWithJump().
    static ptrdiff_t maxJumpReplacementSize()
    {
#if OS(LINUX)
        // movw + movt + bx: five 16-bit words.
        return 10;
#else
        // A single 32-bit B.W.
        return 4;
#endif
    }
2323  
2324      static constexpr ptrdiff_t patchableJumpSize()
2325      {
2326          return 10;
2327      }
2328      
    // Converts an ADD (immediate, T3) planted at 'instructionStart' into
    // an LDR (immediate, T3) with the same base register, destination
    // register and immediate.  No-op if the instruction is already the
    // load form; anything else is a caller error.
    static void replaceWithLoad(void* instructionStart)
    {
        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
        switch (ptr[0] & 0xFFF0) {
        case OP_LDR_imm_T3:
            break;
        case OP_ADD_imm_T3: {
            // The add's imm3 field (bits 12..14 of the second halfword)
            // must be zero for the immediate to transfer unchanged.
            ASSERT(!(ptr[1] & 0xF000));
            uint16_t instructions[2];
            // Keep Rn (low 4 bits of the first halfword), swap the opcode.
            instructions[0] = ptr[0] & 0x000F;
            instructions[0] |= OP_LDR_imm_T3;
            // Move Rd from bits 8..11 (add encoding) up to bits 12..15
            // (Rt of the load encoding), then clear its old position.
            instructions[1] = ptr[1] | (ptr[1] & 0x0F00) << 4;
            instructions[1] &= 0xF0FF;
            performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
            cacheFlush(ptr, sizeof(uint16_t) * 2);
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
2351  
    // The inverse of replaceWithLoad(): converts an LDR (immediate, T3)
    // at 'instructionStart' into an ADD (immediate, T3) computing the
    // address instead of loading from it.  No-op if already the add form.
    static void replaceWithAddressComputation(void* instructionStart)
    {
        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
        switch (ptr[0] & 0xFFF0) {
        case OP_LDR_imm_T3: {
            // Bits 8..11 of the load's imm12 must be zero, both so the
            // immediate fits the add and so Rd can occupy those bits.
            ASSERT(!(ptr[1] & 0x0F00));
            uint16_t instructions[2];
            // Keep Rn (low 4 bits of the first halfword), swap the opcode.
            instructions[0] = ptr[0] & 0x000F;
            instructions[0] |= OP_ADD_imm_T3;
            // Move Rt from bits 12..15 (load encoding) down to bits 8..11
            // (Rd of the add encoding), then clear its old position.
            instructions[1] = ptr[1] | (ptr[1] & 0xF000) >> 4;
            instructions[1] &= 0x0FFF;
            performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
            cacheFlush(ptr, sizeof(uint16_t) * 2);
            break;
        }
        case OP_ADD_imm_T3:
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
2374  
    // Current offset into the underlying assembler buffer; debugging aid only.
    unsigned debugOffset() { return m_formatter.debugOffset(); }
2376  
2377  #if OS(LINUX)
    // Issues the ARM-private cacheflush syscall for [begin, end).  The
    // syscall number (movw #0x2 / movt #0xf => r7 = 0x0f0002) is
    // __ARM_NR_cacheflush; r2 = 0 is the (unused) flags argument.  r7 is
    // saved and restored around the call because it may be in use as the
    // Thumb frame pointer.
    static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
    {
        asm volatile(
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "movs    r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (begin), "r" (end)
            : "r0", "r1", "r2");
    }
2393  #endif
2394  
    // Synchronizes the instruction cache with freshly written code in
    // [code, code + size).  Must be called after any code patching.
    static void cacheFlush(void* code, size_t size)
    {
#if OS(DARWIN)
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#elif OS(LINUX)
        // Flush at most one page per syscall, so each cacheflush call
        // operates on a range that cannot straddle a page boundary.
        size_t page = pageSize();
        uintptr_t current = reinterpret_cast<uintptr_t>(code);
        uintptr_t end = current + size;
        uintptr_t firstPageEnd = (current & ~(page - 1)) + page;

        // Fast path: the whole range fits within the first page.
        if (end <= firstPageEnd) {
            linuxPageFlush(current, end);
            return;
        }

        // Flush the (possibly partial) first page, then whole pages, then
        // the (possibly partial) tail.
        linuxPageFlush(current, firstPageEnd);

        for (current = firstPageEnd; current + page < end; current += page)
            linuxPageFlush(current, current + page);

        linuxPageFlush(current, end);
#else
#error "The cacheFlush support is missing on this platform."
#endif
    }
2420  
2421  private:
    // VFP operations commonly take one or more 5-bit operands, typically representing a
    // floating point register number.  This will commonly be encoded in the instruction
    // in two parts, with one single bit field, and one 4-bit field.  In the case of
    // double precision operands the high bit of the register number is the separately
    // encoded bit, while for single precision operands it is the low bit of the
    // register number that is encoded separately (see the FPSingleRegisterID
    // constructor below, which rotates it into place).
    // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
    // field to be encoded together in the instruction (the low 4-bits of a double
    // register number, or the high 4-bits of a single register number), and bit 4
    // contains the bit value to be encoded individually.
    struct VFPOperand {
        // 'value' is an already-arranged 5-bit encoding.
        explicit VFPOperand(uint32_t value)
            : m_value(value)
        {
            ASSERT(!(m_value & ~0x1f));
        }

        // Double registers: the number is used as-is (low 4 bits together,
        // high bit separate).
        VFPOperand(FPDoubleRegisterID reg)
            : m_value(reg)
        {
        }

        // Core (integer) registers, for core<->VFP transfer instructions.
        VFPOperand(RegisterID reg)
            : m_value(reg)
        {
        }

        VFPOperand(FPSingleRegisterID reg)
            : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
        {
        }

        // The separately encoded bit (bit 4 of the 5-bit operand).
        uint32_t bits1()
        {
            return m_value >> 4;
        }

        // The 4-bit field encoded together (bits 0..3).
        uint32_t bits4()
        {
            return m_value & 0xf;
        }

        uint32_t m_value;
    };
2466  
    // Builds the 5-bit operand selecting the VCVT variant: direction
    // (to/from integer), signedness, and round-towards-zero mode.
    VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
    {
        // Cannot specify rounding when converting to float.
        ASSERT(toInteger || !isRoundZero);

        uint32_t op = 0x8;
        if (toInteger) {
            // opc2 indicates both toInteger & isUnsigned.
            op |= isUnsigned ? 0x4 : 0x5;
            // 'op' field in instruction is isRoundZero
            if (isRoundZero)
                op |= 0x10;
        } else {
            ASSERT(!isRoundZero);
            // 'op' field in instruction is isUnsigned
            if (!isUnsigned)
                op |= 0x10;
        }
        return VFPOperand(op);
    }
2487  
    // Rewrites the movw/movt pair that ends at 'code' to materialize
    // 'value'.  The destination register is preserved by reading it back
    // out of the existing instructions (bits 8..11 of each second
    // halfword).  'flush' controls whether the icache is flushed; callers
    // batching several writes may defer the flush.
    static void setInt32(void* code, uint32_t value, bool flush)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        // The four halfwords before 'code' must be movw (MOV imm T3)
        // followed by movt.
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
        uint16_t instructions[4];
        instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
        instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);

        performJITMemcpy(location - 4, instructions, 4 * sizeof(uint16_t));
        if (flush)
            cacheFlush(location - 4, 4 * sizeof(uint16_t));
    }
2505      
    // Decodes the 32-bit constant materialized by the movw/movt pair that
    // ends at 'code' (the inverse of setInt32).
    static int32_t readInt32(void* code)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
        
        // Reassemble each 16-bit half from the split immediate fields of
        // its two instruction halfwords.
        ARMThumbImmediate lo16;
        ARMThumbImmediate hi16;
        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
        uint32_t result = hi16.asUInt16();
        result <<= 16;
        result |= lo16.asUInt16();
        return static_cast<int32_t>(result);
    }
2522  
    // Patches the immediate of a 16-bit LDR (immediate, T1) at 'code'.
    // The instruction stores a word-scaled offset (imm >> 2) in its
    // 5-bit field at bits 6..10.
    static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
    {
        // Requires us to have planted a LDR_imm_T1
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt7());
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        uint16_t instruction;
        // Clear the old imm5 field, then insert the new scaled offset.
        instruction = location[0] & ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
        instruction |= (imm.getUInt7() >> 2) << 6;
        performJITMemcpy(location, &instruction, sizeof(uint16_t));
        cacheFlush(location, sizeof(uint16_t));
    }
2535  
    // Materializes a pointer via the movw/movt pair ending at 'code';
    // thin wrapper over setInt32 (32-bit pointers on this target).
    static void setPointer(void* code, void* value, bool flush)
    {
        setInt32(code, reinterpret_cast<uint32_t>(value), flush);
    }
2540  
2541      static bool isB(const void* address)
2542      {
2543          const uint16_t* instruction = static_cast<const uint16_t*>(address);
2544          return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
2545      }
2546  
2547      static bool isBX(const void* address)
2548      {
2549          const uint16_t* instruction = static_cast<const uint16_t*>(address);
2550          return (instruction[0] & 0xff87) == OP_BX;
2551      }
2552  
2553      static bool isMOV_imm_T3(const void* address)
2554      {
2555          const uint16_t* instruction = static_cast<const uint16_t*>(address);
2556          return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
2557      }
2558  
2559      static bool isMOVT(const void* address)
2560      {
2561          const uint16_t* instruction = static_cast<const uint16_t*>(address);
2562          return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
2563      }
2564  
2565      static bool isNOP_T1(const void* address)
2566      {
2567          const uint16_t* instruction = static_cast<const uint16_t*>(address);
2568          return instruction[0] == OP_NOP_T1;
2569      }
2570  
2571      static bool isNOP_T2(const void* address)
2572      {
2573          const uint16_t* instruction = static_cast<const uint16_t*>(address);
2574          return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
2575      }
2576  
2577      static bool canBeJumpT1(const uint16_t* instruction, const void* target)
2578      {
2579          ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2580          ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2581          
2582          intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2583          // It does not appear to be documented in the ARM ARM (big surprise), but
2584          // for OP_B_T1 the branch displacement encoded in the instruction is 2 
2585          // less than the actual displacement.
2586          relative -= 2;
2587          return ((relative << 23) >> 23) == relative;
2588      }
2589      
2590      static bool canBeJumpT2(const uint16_t* instruction, const void* target)
2591      {
2592          ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2593          ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2594          
2595          intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2596          // It does not appear to be documented in the ARM ARM (big surprise), but
2597          // for OP_B_T2 the branch displacement encoded in the instruction is 2 
2598          // less than the actual displacement.
2599          relative -= 2;
2600          return ((relative << 20) >> 20) == relative;
2601      }
2602      
2603      static bool canBeJumpT3(const uint16_t* instruction, const void* target)
2604      {
2605          ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2606          ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2607          
2608          intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2609          return ((relative << 11) >> 11) == relative;
2610      }
2611      
2612      static bool canBeJumpT4(const uint16_t* instruction, const void* target)
2613      {
2614          ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2615          ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2616          
2617          intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2618          return ((relative << 7) >> 7) == relative;
2619      }
2620  
    // Plants a 16-bit conditional branch (encoding T1) at writeTarget[-1].
    // 'instruction' is the branch's address in the final (executable)
    // copy of the code, used to compute the displacement; 'writeTarget'
    // is where the bytes are actually written, which may be a scratch
    // buffer when 'copy' is not the default.
    template<CopyFunction copy = performJITMemcpy>
    static void linkJumpT1(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT1(instruction, target));
        
        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        
        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        // cond in bits 8..11, halved 8-bit displacement in bits 0..7.
        uint16_t newInstruction = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
        copy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
    }
2640  
    // Plants a 16-bit unconditional branch (encoding T2) at
    // writeTarget[-1]; displacement computed relative to 'instruction'
    // (see linkJumpT1 for the writeTarget/instruction distinction).
    template<CopyFunction copy = performJITMemcpy>
    static void linkJumpT2(uint16_t* writeTarget, const uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT2(instruction, target));
        
        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        
        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        // Halved 11-bit displacement in bits 0..10.
        uint16_t newInstruction = OP_B_T2 | ((relative & 0xffe) >> 1);
        copy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
    }
2660      
    // Plants a 32-bit conditional branch (encoding T3) in the two
    // halfwords at writeTarget[-2]; the 21-bit displacement is scattered
    // across both halfwords per the Thumb-2 encoding.
    template<CopyFunction copy = performJITMemcpy>
    static void linkJumpT3(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT3(instruction, target));
        
        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        
        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        uint16_t instructions[2];
        // First halfword: S bit, condition, imm6; second: J1, J2, imm11.
        instructions[0] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
        instructions[1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
        copy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
    }
2678      
    // Plants a 32-bit unconditional branch (B.W, encoding T4) in the two
    // halfwords at writeTarget[-2]; 25-bit signed displacement.
    template<CopyFunction copy = performJITMemcpy>
    static void linkJumpT4(uint16_t* writeTarget, const uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT4(instruction, target));
        
        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // ARM encoding for the top two bits below the sign bit is 'peculiar':
        // the J1/J2 fields hold those bits XORed with the (inverted) sign, so
        // for non-negative displacements bits 22 and 23 must be flipped.
        if (relative >= 0)
            relative ^= 0xC00000;
        
        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        uint16_t instructions[2];
        // First halfword: S bit and imm10; second: J1, J2 and imm11.
        instructions[0] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
        instructions[1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
        copy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
    }
2699  
    // Makes an unconditional B.W conditional by planting an IT
    // instruction in the halfword immediately before it (writeTarget[-3]),
    // then linking the branch itself via linkJumpT4.
    template<CopyFunction copy = performJITMemcpy>
    static void linkConditionalJumpT4(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        
        uint16_t newInstruction = ifThenElse(cond) | OP_IT;
        copy(writeTarget - 3, &newInstruction, sizeof(uint16_t));
        linkJumpT4<copy>(writeTarget, instruction, target);
    }
2711  
    // Plants an absolute, range-unlimited jump: movw/movt loading 'target'
    // into ip, followed by BX ip -- five halfwords written at
    // writeTarget[-5].  Used when the displacement exceeds B.W range.
    template<CopyFunction copy = performJITMemcpy>
    static void linkBX(uint16_t* writeTarget, const uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT_UNUSED(instruction, !(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(writeTarget) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        
        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
        // '+ 1' sets the Thumb bit on the loaded address so BX stays in
        // Thumb state.
        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
        uint16_t instructions[5];
        instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
        instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
        instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);

        copy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
    }
2732  
2733      template<CopyFunction copy = performJITMemcpy>
2734      static void linkConditionalBX(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
2735      {
2736          // FIMXE: this should be up in the MacroAssembler layer. :-(        
2737          ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2738          ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2739          
2740          linkBX(writeTarget, instruction, target);
2741          uint16_t newInstruction = ifThenElse(cond, true, true) | OP_IT;
2742          copy(writeTarget - 6, &newInstruction, sizeof(uint16_t));
2743      }
2744      
    // Links a jump that had maximal (5-halfword) space reserved: uses a
    // NOP-padded B.W when the target is in range, otherwise the full
    // movw/movt/bx sequence.
    static void linkJumpAbsolute(uint16_t* writeTarget, const uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        
        // The reserved slot must currently hold one of the two shapes this
        // function itself plants.
        ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
               || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));

        if (canBeJumpT4(instruction, target)) {
            // There may be a better way to fix this, but right now put the NOPs first, since in the
            // case of an conditional branch this will be coming after an ITTT predicating *three*
            // instructions!  Looking backwards to modify the ITTT to an IT is not easy, due to
            // variable width encoding - the previous instruction might *look* like an ITTT but
            // actually be the second half of a 2-word op.
            uint16_t instructions[3];
            instructions[0] = OP_NOP_T1;
            instructions[1] = OP_NOP_T2a;
            instructions[2] = OP_NOP_T2b;
            performJITMemcpy(writeTarget - 5, instructions, 3 * sizeof(uint16_t));
            linkJumpT4(writeTarget, instruction, target);
        } else {
            const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
            // '+ 1' sets the Thumb bit so BX stays in Thumb state.
            ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
            ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));

            uint16_t instructions[5];
            instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
            instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
            instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
            instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
            instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
            performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
        }
    }
2780      
    // First halfword of a movw/movt-style two-word op: opcode plus the
    // immediate's 'i' bit (bit 10) and imm4 field (bits 0..3).
    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
    {
        return op | (imm.m_value.i << 10) | imm.m_value.imm4;
    }
2785  
    // Extracts the 'i' and imm4 immediate fields from the first halfword
    // (inverse of twoWordOp5i6Imm4Reg4EncodedImmFirst).
    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
    {
        result.m_value.i = (value >> 10) & 1;
        result.m_value.imm4 = value & 15;
    }
2791  
    // Second halfword of a movw/movt-style two-word op: imm3 (bits
    // 12..14), destination register (bits 8..11) and imm8 (bits 0..7).
    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
    {
        return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
    }
2796  
    // Extracts the imm3 and imm8 immediate fields from the second
    // halfword (inverse of twoWordOp5i6Imm4Reg4EncodedImmSecond; the
    // register field is ignored).
    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
    {
        result.m_value.imm3 = (value >> 12) & 7;
        result.m_value.imm8 = value & 255;
    }
2802  
    // Low-level emitter: appends raw 16-bit instruction halfwords to the
    // assembler buffer.  Method names encode the bit-field layout of the
    // instruction being formatted (field widths, left to right).
    class ARMInstructionFormatter {
    public:
        ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
        {
            m_buffer.putShort(op | (rd << 8) | imm);
        }
        
        ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
        }

        ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
        {
            m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
        }

        ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        // reg2 is a full 4-bit register number split as 1+3 bits around reg1
        // (the '143' in the name), as used by e.g. 16-bit MOV/CMP high-reg forms.
        ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
        }

        ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | (reg1 << 3) | reg2);
        }

        ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
        {
            m_buffer.putShort(op | reg);
            m_buffer.putShort(ff.m_u.value);
        }
        
        ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
        {
            m_buffer.putShort(op);
            m_buffer.putShort(ff.m_u.value);
        }
        
        ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
        {
            m_buffer.putShort(op1);
            m_buffer.putShort(op2);
        }

        ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
        {
            m_buffer.putShort(op1);
            m_buffer.putShort(imm);
        }
        
        // movw/movt-style encoding: the immediate's imm4 field is overridden
        // with the caller-supplied value before the two halfwords are emitted.
        ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
        {
            ARMThumbImmediate newImm = imm;
            newImm.m_value.imm4 = imm4;

            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
        }

        ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
        {
            m_buffer.putShort(op | reg1);
            m_buffer.putShort((reg2 << 12) | imm);
        }

        ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
        {
            m_buffer.putShort(op | reg1);
            m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
        }

        // Formats up instructions of the pattern:
        //    111111111B11aaaa:bbbb222SA2C2cccc
        // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
        // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
        ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
        {
            ASSERT(!(op1 & 0x004f));
            ASSERT(!(op2 & 0xf1af));
            m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
            m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
        }

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        // (i.e. +/-(0..255) 32-bit words)
        ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
        {
            // Split the signed offset into magnitude plus the U (up/add) bit.
            bool up = true;
            if (imm < 0) {
                imm = -imm;
                up = false;
            }
            
            // Offset must be word-aligned and fit 8 bits after scaling.
            uint32_t offset = imm;
            ASSERT(!(offset & ~0x3fc));
            offset >>= 2;

            m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
            m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
        }

        // Administrative methods:

        size_t codeSize() const { return m_buffer.codeSize(); }
        AssemblerLabel label() const { return m_buffer.label(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }

        unsigned debugOffset() { return m_buffer.debugOffset(); }

        AssemblerBuffer m_buffer;
    } m_formatter;
2931  
2932      Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
2933      int m_indexOfLastWatchpoint;
2934      int m_indexOfTailOfLastWatchpoint;
2935  };
2936  
2937  } // namespace JSC
2938  
2939  #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)