MacroAssemblerARMv7.h
1 /* 2 * Copyright (C) 2009-2019 Apple Inc. All rights reserved. 3 * Copyright (C) 2010 University of Szeged 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#pragma once

#if ENABLE(ASSEMBLER)

#include "ARMv7Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

using Assembler = TARGET_ASSEMBLER;

// MacroAssembler backend for 32-bit ARM in Thumb-2 mode.
//
// Two GPRs are reserved as scratch registers: ip (dataTempRegister) typically
// holds loaded data or materialized immediates, and r6 (addressTempRegister)
// typically holds computed addresses. Many operations below clobber one or
// both; d7 is similarly reserved as the floating-point scratch register.
class MacroAssemblerARMv7 : public AbstractMacroAssembler<Assembler> {
    static constexpr RegisterID dataTempRegister = ARMRegisters::ip;
    static constexpr RegisterID addressTempRegister = ARMRegisters::r6;

    // The low half of the double scratch register can be addressed as a
    // single-precision register for float operations.
    static constexpr ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
    inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }

public:
    static constexpr unsigned numGPRs = 16;
    static constexpr unsigned numFPRs = 16;

    RegisterID scratchRegister() { return dataTempRegister; }

    MacroAssemblerARMv7()
        : m_makeJumpPatchable(false)
    {
    }

    typedef ARMv7Assembler::LinkRecord LinkRecord;
    typedef ARMv7Assembler::JumpType JumpType;
    typedef ARMv7Assembler::JumpLinkType JumpLinkType;
    typedef ARMv7Assembler::Condition Condition;

    static constexpr ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
    static constexpr ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;

    // Offsets in [-255, 255] fit the compact load encoding emitted by the
    // compact-address-offset patch mechanism.
    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -255 && value <= 255;
    }

    // Branch-compaction plumbing: these all forward straight to the assembler.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }

    template <Assembler::CopyFunction copy>
    ALWAYS_INLINE static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARMv7Assembler::link<copy>(record, from, fromInstruction, to); }

    // Internal address form used by the private load/store helpers: either
    // base + signed 32-bit offset, or base + (index << scale). The union is
    // discriminated by 'type'.
    struct ArmAddress {
        enum AddressType {
            HasOffset,
            HasIndex,
        } type;
        RegisterID base;
        union {
            int32_t offset;
            struct {
                RegisterID index;
                Scale scale;
            };
        } u;

        explicit ArmAddress(RegisterID base, int32_t offset = 0)
            : type(HasOffset)
            , base(base)
        {
            u.offset = offset;
        }

        explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
            : type(HasIndex)
            , base(base)
        {
            u.index = index;
            u.scale = scale;
        }
    };

public:
    // Conditions for integer comparisons; values are the raw ARM condition
    // codes used after a flag-setting compare.
    enum RelationalCondition {
        Equal = ARMv7Assembler::ConditionEQ,
        NotEqual = ARMv7Assembler::ConditionNE,
        Above = ARMv7Assembler::ConditionHI,
        AboveOrEqual = ARMv7Assembler::ConditionHS,
        Below = ARMv7Assembler::ConditionLO,
        BelowOrEqual = ARMv7Assembler::ConditionLS,
        GreaterThan = ARMv7Assembler::ConditionGT,
        GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        LessThan = ARMv7Assembler::ConditionLT,
        LessThanOrEqual = ARMv7Assembler::ConditionLE
    };

    // Conditions read from the status flags after a flag-setting ALU op.
    enum ResultCondition {
        Overflow = ARMv7Assembler::ConditionVS,
        Signed = ARMv7Assembler::ConditionMI,
        PositiveOrZero = ARMv7Assembler::ConditionPL,
        Zero = ARMv7Assembler::ConditionEQ,
        NonZero = ARMv7Assembler::ConditionNE
    };

    // Conditions for double-precision comparisons (flags as transferred from
    // FPSCR after a VFP compare). Two of the values do not map directly onto
    // a single condition code and must be special-cased by the branch code.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqualAndOrdered = ARMv7Assembler::ConditionEQ,
        DoubleNotEqualAndOrdered = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThanAndOrdered = ARMv7Assembler::ConditionGT,
        DoubleGreaterThanOrEqualAndOrdered = ARMv7Assembler::ConditionGE,
        DoubleLessThanAndOrdered = ARMv7Assembler::ConditionLO,
        DoubleLessThanOrEqualAndOrdered = ARMv7Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
    };

    static constexpr RegisterID stackPointerRegister = ARMRegisters::sp;
    static constexpr RegisterID framePointerRegister = ARMRegisters::fp;
    static constexpr RegisterID linkRegister = ARMRegisters::lr;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, dest, src);
    }

    void add32(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.add(dest, left, right);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    // Clobbers dataTempRegister (and addressTempRegister via load32).
    void add32(AbsoluteAddress src, RegisterID dest)
    {
        load32(src.m_ptr, dataTempRegister);
        add32(dataTempRegister, dest);
    }

    // May clobber dataTempRegister when the immediate cannot be encoded.
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        // For adds with stack pointer destination avoid unpredictable instruction
        if (dest == ARMRegisters::sp && src != dest) {
            add32(imm, src, dataTempRegister);
            move(dataTempRegister, dest);
            return;
        }

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);

        if (armImm.isValid())
            m_assembler.add(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.add(dest, src, dataTempRegister);
        }
    }

    // Read-modify-write of a memory word: clobbers both temp registers.
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        add32(dataTempRegister, dest);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }

    // dest = base + (index << scale) + offset. Clobbers addressTempRegister.
    void getEffectiveAddress(BaseIndex address, RegisterID dest)
    {
        m_assembler.lsl(addressTempRegister, address.index, static_cast<int>(address.scale));
        m_assembler.add(dest, address.base, addressTempRegister);
        if (address.offset)
            add32(TrustedImm32(address.offset), dest);
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    // 64-bit add of a sign-extended 32-bit immediate to a memory location.
    // The low word is added with add_S so the carry feeds the adc of the high
    // word; (imm.m_value >> 31) arithmetic-shifts the sign bit so the high
    // word receives 0 or -1 (i.e. the sign extension) plus carry.
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);

        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
        else {
            move(imm, addressTempRegister);
            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        }
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));

        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
        m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
    }

    void and16(Address src, RegisterID dest)
    {
        load16(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.ARM_and(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.ARM_and(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.ARM_and(dest, src, dataTempRegister);
        }
    }

    // Operand order is swapped relative to the three-operand form; AND is
    // commutative so the result is the same.
    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz(dest, src);
    }

    // Register-amount shifts mask the amount to 0..31 first (matching JS
    // shift semantics); this clobbers dataTempRegister.
    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.lsl(dest, src, dataTempRegister);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    // smull writes the low 32 bits to dest and the (discarded) high 32 bits
    // to dataTempRegister.
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, dest, src);
    }

    void mul32(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, left, right);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.neg(srcDest, srcDest);
    }

    void neg32(RegisterID src, RegisterID dest)
    {
        m_assembler.neg(dest, src);
    }

    // Read-modify-write byte OR at an absolute address. When the immediate
    // cannot be encoded, addressTempRegister is reused to hold it and the
    // address has to be re-materialized before the store.
    void or8(TrustedImm32 imm, AbsoluteAddress address)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid()) {
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
            load8(addressTempRegister, dataTempRegister);
            m_assembler.orr(dataTempRegister, dataTempRegister, armImm);
            store8(dataTempRegister, addressTempRegister);
        } else {
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
            load8(addressTempRegister, dataTempRegister);
            move(imm, addressTempRegister);
            m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister);
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
            store8(dataTempRegister, addressTempRegister);
        }
    }

    // Same pattern as or8, for a halfword.
    void or16(TrustedImm32 imm, AbsoluteAddress dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid()) {
            move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
            load16(addressTempRegister, dataTempRegister);
            m_assembler.orr(dataTempRegister, dataTempRegister, armImm);
            store16(dataTempRegister, addressTempRegister);
        } else {
            move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
            load16(addressTempRegister, dataTempRegister);
            move(imm, addressTempRegister);
            m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister);
            move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
            store16(dataTempRegister, addressTempRegister);
        }
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orr(dest, dest, src);
    }

    void or32(RegisterID src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        load32(addressTempRegister, dataTempRegister);
        or32(src, dataTempRegister);
        store32(dataTempRegister, addressTempRegister);
    }

    // Same pattern as or8, for a word.
    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid()) {
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
            load32(addressTempRegister, dataTempRegister);
            m_assembler.orr(dataTempRegister, dataTempRegister, armImm);
            store32(dataTempRegister, addressTempRegister);
        } else {
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
            load32(addressTempRegister, dataTempRegister);
            move(imm, addressTempRegister);
            m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister);
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
            store32(dataTempRegister, addressTempRegister);
        }
    }

    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.orr(dest, src, armImm);
        else {
            // src must survive the immediate materialization below.
            ASSERT(src != dataTempRegister);
            move(imm, dataTempRegister);
            m_assembler.orr(dest, src, dataTempRegister);
        }
    }

    // A rotate of 0 is emitted as a plain move (ror #0 would encode rrx).
    void rotateRight32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (!imm.m_value)
            move(src, dest);
        else
            m_assembler.ror(dest, src, imm.m_value & 0x1f);
    }

    void rotateRight32(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight32(srcDst, imm, srcDst);
    }

    // Arithmetic (sign-propagating) right shift; clobbers dataTempRegister.
    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.asr(dest, src, dataTempRegister);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (!imm.m_value)
            move(src, dest);
        else
            m_assembler.asr(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    // Logical (zero-filling) right shift; clobbers dataTempRegister.
    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.lsr(dest, src, dataTempRegister);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (!imm.m_value)
            move(src, dest);
        else
            m_assembler.lsr(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub(dest, dest, src);
    }

    void sub32(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.sub(dest, left, right);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub(dest, dest, dataTempRegister);
        }
    }

    // Read-modify-write subtract from memory; clobbers both temp registers.
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        sub32(dataTempRegister, dest);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor(dest, op1, op2);
    }

    // XOR with -1 is strength-reduced to a single mvn (bitwise NOT).
    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1) {
            m_assembler.mvn(dest, src);
            return;
        }

        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.eor(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.eor(dest, src, dataTempRegister);
        }
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    void xor32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        xor32(dataTempRegister, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn(dest, dest);
        else
            xor32(imm, dest, dest);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.mvn(srcDest, srcDest);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and store will be implicitly constructed if a
    // register is passed.
private:
    // Loads from the internal ArmAddress form. Non-negative immediate offsets
    // use the 12-bit unsigned encoding; negative offsets use the 8-bit
    // negative-offset form (hence the -255 lower bound asserted below). The
    // trailing bools on the negative-offset path select the indexed,
    // no-writeback variant — presumably; confirm against ARMv7Assembler.
    void load32(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldr(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldr(dest, address.base, address.u.offset, true, false);
        }
    }

    // Zero-extending halfword load; same offset-encoding strategy as load32.
    void load16(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrh(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
        }
    }

    // Sign-extending halfword load; only the base+index form is supported here.
    void load16SignedExtendTo32(ArmAddress address, RegisterID dest)
    {
        ASSERT(address.type == ArmAddress::HasIndex);
        m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
    }

    // Zero-extending byte load; same offset-encoding strategy as load32.
    void load8(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrb(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
        }
    }

    // Sign-extending byte load; only the base+index form is supported here.
    void load8SignedExtendTo32(ArmAddress address, RegisterID dest)
    {
        ASSERT(address.type == ArmAddress::HasIndex);
        m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
    }

protected:
    // Word store to the internal ArmAddress form; offset encodings mirror the
    // private load helpers above (12-bit unsigned, or 8-bit negative).
    void store32(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.str(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.str(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.str(src, address.base, address.u.offset, true, false);
        }
    }

private:
    // Byte store; same encoding strategy as store32.
    void store8(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.strb(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.strb(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.strb(src, address.base, address.u.offset, true, false);
        }
    }

    // Halfword store; same encoding strategy as store32.
    void store16(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.strh(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.strh(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.strh(src, address.base, address.u.offset, true, false);
        }
    }

public:
    // Public load API. setupArmAddress (defined elsewhere in this class) is
    // responsible for reducing the MacroAssembler address forms to ArmAddress,
    // and may clobber the temp registers in the process.
    void load32(ImplicitAddress address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    // ARMv7 ldr tolerates unaligned word access, so this is a plain load32.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(setupArmAddress(address), dest);
    }

    // Absolute-address load: clobbers addressTempRegister.
    void load32(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    // Park the reason code in a scratch register so it is visible in the
    // crash state, then trap.
    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), dataTempRegister);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm32(misc), addressTempRegister);
        abortWithReason(reason);
    }

    // Emits a wide (32-bit) load so the instruction can later be repatched;
    // the offset must fit the wide 8-bit immediate form.
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(address.offset >= 0 && address.offset <= 255);
        m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
        return result;
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    // Not supported for ImplicitAddress on this backend.
    void load8SignedExtendTo32(ImplicitAddress, RegisterID)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        load8SignedExtendTo32(setupArmAddress(address), dest);
    }

    // Uses dest itself to hold the address, leaving the temp registers alone.
    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), dest);
        load8(dest, dest);
    }

    // Emits a patchable 32-bit offset move followed by a base+index load; the
    // returned label lets the offset be rewritten later.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        load32(ArmAddress(address.base, dataTempRegister), dest);
        return label;
    }

    // Compact variant: the offset is baked into a single load instruction and
    // must satisfy isCompactPtrAlignedAddressOffset.
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();

        RegisterID base = address.base;

        DataLabelCompact label(this);
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));

        m_assembler.ldr(dest, base, address.offset, true, false);
        return label;
    }

    void load16(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.ldrh(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
    }

    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        load16SignedExtendTo32(setupArmAddress(address), dest);
    }

    // Falls back to a register-offset ldrh when the offset does not fit the
    // 12-bit immediate form; clobbers dataTempRegister in that case.
    void load16(ImplicitAddress address, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
        if (armImm.isValid())
            m_assembler.ldrh(dest, address.base, armImm);
        else {
            move(TrustedImm32(address.offset), dataTempRegister);
            m_assembler.ldrh(dest, address.base, dataTempRegister);
        }
    }

    // Not supported for ImplicitAddress on this backend.
    void load16SignedExtendTo32(ImplicitAddress, RegisterID)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    // Store counterpart of load32WithAddressOffsetPatch.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        store32(src, ArmAddress(address.base, dataTempRegister));
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        store32(src, setupArmAddress(address));
    }

    void store32(RegisterID src, BaseIndex address)
    {
        store32(src, setupArmAddress(address));
    }

    // Immediate stores materialize the value in dataTempRegister first.
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    void store32(RegisterID src, const void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void store8(RegisterID src, Address address)
    {
        store8(src, setupArmAddress(address));
    }

    void store8(RegisterID src, BaseIndex address)
    {
        store8(src, setupArmAddress(address));
    }

    void store8(RegisterID src, const void *address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        store8(src, ArmAddress(addressTempRegister, 0));
    }

    // Byte-store immediates are truncated to int8_t before materialization.
    void store8(TrustedImm32 imm, const void *address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        move(imm8, dataTempRegister);
        store8(dataTempRegister, address);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
        move(imm8, dataTempRegister);
        store8(dataTempRegister, address);
    }

    void store8(RegisterID src, RegisterID addrreg)
    {
        store8(src, ArmAddress(addrreg, 0));
    }

    void store16(RegisterID src, ImplicitAddress address)
    {
        store16(src, setupArmAddress(address));
    }

    void store16(RegisterID src, BaseIndex address)
    {
        store16(src, setupArmAddress(address));
    }

    void store16(RegisterID src, const void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.strh(src, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
    }

    void store16(TrustedImm32 imm, const void* address)
    {
        move(imm, dataTempRegister);
        store16(dataTempRegister, address);
    }

    // Possibly clobbers src, but not on this architecture.
    // Split a double-precision register into two GPRs (low word to dest1,
    // high word to dest2).
    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.vmov(dest1, dest2, src);
    }

    // Assemble a double from two GPRs; the scratch FPR is unused on ARMv7.
    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
    {
        UNUSED_PARAM(scratch);
        m_assembler.vmov(dest, src1, src2);
    }

    // JIT constant-blinding policy: decide whether an immediate gives an
    // attacker too many controllable instruction bits.
    static bool shouldBlindForSpecificArch(uint32_t value)
    {
        ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);

        // Couldn't be encoded as an immediate, so assume it's untrusted.
        if (!immediate.isValid())
            return true;

        // If we can encode the immediate, we have less than 16 attacker
        // controlled bits.
        if (immediate.isEncodedImm())
            return false;

        // Don't let any more than 12 bits of an instruction word
        // be controlled by an attacker.
        return !immediate.isUInt12();
    }

    // Floating-point operations:

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }
    static bool supportsFloatingPointRounding() { return false; }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        // Out-of-range or unaligned offsets are folded into addressTempRegister first.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vldr(dest, base, offset);
    }

    void loadFloat(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
    }

    // BaseIndex FP accesses compute base + (index << scale) into
    // addressTempRegister, then defer to the ImplicitAddress form.
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        loadDouble(Address(addressTempRegister, address.offset), dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        loadFloat(Address(addressTempRegister, address.offset), dest);
    }

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        if (src != dest)
            m_assembler.vmov(dest, src);
    }

    // Move a double into a GPR pair: dest gets the low word, dest + 1 the high
    // word, so dest must be the lower register of an adjacent pair.
    void moveDouble(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vmov(dest, RegisterID(dest + 1), src);
    }

    // Zero an FP register by loading a process-lifetime +0.0 constant.
    void moveZeroToDouble(FPRegisterID reg)
    {
        static double zeroConstant = 0.;
        loadDouble(TrustedImmPtr(&zeroConstant), reg);
    }

    // Load a double from an absolute address; clobbers addressTempRegister.
    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        move(address, addressTempRegister);
        m_assembler.vldr(dest, addressTempRegister, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vstr(src, base, offset);
    }

    void storeFloat(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
    }

    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        move(address, addressTempRegister);
        storeDouble(src, addressTempRegister);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        storeDouble(src, Address(addressTempRegister, address.offset));
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        storeFloat(src, Address(addressTempRegister, address.offset));
    }

    // Double-precision arithmetic. Two-operand forms accumulate into dest;
    // memory-operand forms go through fpTempRegister.
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vadd(dest, op1, op2);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        m_assembler.vadd(dest, dest, fpTempRegister);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv(dest, dest, src);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vdiv(dest, op1, op2);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vsub(dest, op1, op2);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vmul(dest, op1, op2);
    }

    // Bitwise ops on the raw 64-bit FP register contents.
    void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vand(dest, op1, op2);
    }

    void orDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vorr(dest, op1, op2);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt(dest, src);
    }

    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vabs(dest, src);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vneg(dest, src);
    }

    // Rounding is unsupported on this target (supportsFloatingPointRounding()
    // returns false); callers must not reach these.
    NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
    {
        ASSERT(!supportsFloatingPointRounding());
        CRASH();
    }

    NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
    {
        ASSERT(!supportsFloatingPointRounding());
        CRASH();
    }

    NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
    {
        ASSERT(!supportsFloatingPointRounding());
        CRASH();
    }

    // int32 -> double conversions stage the integer in fpTempRegister's single
    // view before converting; memory forms also clobber dataTempRegister.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov(fpTempRegister, src, src);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        // FIXME: load directly into the fpr!
        load32(address, dataTempRegister);
        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        // FIXME: load directly into the fpr!
        load32(address.m_ptr, dataTempRegister);
        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
    }

    // Compare two doubles and branch. The VFP compare sets the V flag on an
    // unordered (NaN) comparison, which needs explicit handling for the
    // not-equal/equal-or-unordered conditions.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp(left, right);
        m_assembler.vmrs();

        if (cond == DoubleNotEqualAndOrdered) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    // Truncate a double to int32 in dest, branching on success or failure
    // (per branchType). Saturated results are detected as failure; clobbers
    // dataTempRegister and fpTempRegister.
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Convert into dest.
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Calculate 2x dest. If the value potentially underflowed, it will have
        // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
        // overflow the result will be equal to -2.
        Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
        Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));

        // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
        underflow.link(this);
        if (branchType == BranchIfTruncateSuccessful)
            return noOverflow;

        // We'll reach the current point in the code on failure, so plant a
        // jump here & link the success case.
        Jump failure = jump();
        noOverflow.link(this);
        return failure;
    }

    // Result is undefined if the value is outside of the integer range.
    // Truncate toward zero; result undefined for out-of-range inputs.
    // Clobbers fpTempRegister.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());
    }

    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());
    }

    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // Test for negative zero.
        // A zero integer result could have come from -0.0; inspect the sign bit
        // in the high word of src (via dataTempRegister) and fail if set.
        if (negZeroCheck) {
            Jump valueIsNonZero = branchTest32(NonZero, dest);
            m_assembler.vmov(dataTempRegister, ARMRegisters::asSingleUpper(src));
            failureCases.append(branch32(LessThan, dataTempRegister, TrustedImm32(0)));
            valueIsNonZero.link(this);
        }
    }

    // Branch if reg is non-zero (and ordered); an unordered compare (NaN) does
    // not take the branch. The second FPR parameter is an unused scratch.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    // Branch if reg is zero or NaN.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
    void pop(RegisterID dest)
    {
        m_assembler.pop(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push(src);
    }

    // Memory/immediate pushes stage the value in dataTempRegister.
    void push(Address address)
    {
        load32(address, dataTempRegister);
        push(dataTempRegister);
    }

    void push(TrustedImm32 imm)
    {
        move(imm, dataTempRegister);
        push(dataTempRegister);
    }

    // Pair push/pop use a register-list mask (one bit per register).
    void popPair(RegisterID dest1, RegisterID dest2)
    {
        m_assembler.pop(1 << dest1 | 1 << dest2);
    }

    void pushPair(RegisterID src1, RegisterID src2)
    {
        m_assembler.push(1 << src1 | 1 << src2);
    }

    // Register move operations:
    //
    // Move values in registers.

    // Materialize a 32-bit immediate using the shortest encoding available:
    // encoded immediate, inverted (MVN) immediate, or MOV/MOVT halfword pair.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        uint32_t value = imm.m_value;

        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);

        if (armImm.isValid())
            m_assembler.mov(dest, armImm);
        else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
            m_assembler.mvn(dest, armImm);
        else {
            m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
            if (value & 0xffff0000)
                m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
        }
    }

    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov(dest, src);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }

    // Swap via the temporaries; clobbers dataTempRegister / fpTempRegister.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, dataTempRegister);
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    void swap(FPRegisterID fr1, FPRegisterID fr2)
    {
        moveDouble(fr1, fpTempRegister);
        moveDouble(fr2, fr1);
        moveDouble(fpTempRegister, fr2);
    }

    // Pointers are 32-bit on this target, so sign/zero extension to pointer
    // width is a plain move.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToWord(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // Relies on ARM condition codes encoding a condition and its inverse as
    // adjacent values differing in the low bit.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    void nop()
    {
        m_assembler.nop();
    }

    // Full system data memory barrier.
    void memoryFence()
    {
        m_assembler.dmbSY();
    }

    // Store-store barrier, inner-shareable domain.
    void storeFence()
    {
        m_assembler.dmbISHST();
    }

    template<PtrTag startTag, PtrTag destTag>
    static void replaceWithJump(CodeLocationLabel<startTag> instructionStart, CodeLocationLabel<destTag> destination)
    {
        ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return ARMv7Assembler::maxJumpReplacementSize();
    }

    static ptrdiff_t patchableJumpSize()
    {
        return ARMv7Assembler::patchableJumpSize();
    }

    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may linked at a later point, allow forwards jump,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
private:

    // Should we be using TEQ for equal/not-equal?
    // Emit a compare of left against an immediate, choosing CMP, CMN (for
    // negated immediates), or a materialized compare via dataTempRegister.
    void compare32AndSetFlags(RegisterID left, TrustedImm32 right)
    {
        int32_t imm = right.m_value;
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
        if (armImm.isValid())
            m_assembler.cmp(left, armImm);
        else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
            m_assembler.cmn(left, armImm);
        else {
            move(TrustedImm32(imm), dataTempRegister);
            m_assembler.cmp(left, dataTempRegister);
        }
    }

public:
    // Set flags from reg & mask. TST cannot encode sp as an operand, so sp is
    // first copied into addressTempRegister; a -1 mask degenerates to tst reg, reg.
    void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        int32_t imm = mask.m_value;

        if (imm == -1)
            m_assembler.tst(reg, reg);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
            if (armImm.isValid()) {
                if (reg == ARMRegisters::sp) {
                    move(reg, addressTempRegister);
                    m_assembler.tst(addressTempRegister, armImm);
                } else
                    m_assembler.tst(reg, armImm);
            } else {
                move(mask, dataTempRegister);
                if (reg == ARMRegisters::sp) {
                    move(reg, addressTempRegister);
                    m_assembler.tst(addressTempRegister, dataTempRegister);
                } else
                    m_assembler.tst(reg, dataTempRegister);
            }
        }
    }

    // Branch on the currently-set flags.
    Jump branch(ResultCondition cond)
    {
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        compare32AndSetFlags(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32WithUnalignedHalfWords(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left.m_ptr, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    // 8-bit branches: immediates are masked (or sign-extended) to 8 bits
    // according to the signedness of the condition.
    Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
        compare32AndSetFlags(left, right8);
        return Jump(makeBranch(cond));
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
        return branch8(cond, addressTempRegister, right8);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister);
        return branch32(cond, addressTempRegister, right8);
    }

    Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
    {
        // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister);
        return branch32(cond, addressTempRegister, right8);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
        m_assembler.tst(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask8);
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask8);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask8);
    }

    // Unconditional indirect jumps. The PtrTag parameter is accepted for
    // interface compatibility and ignored here.
    void farJump(RegisterID target, PtrTag)
    {
        m_assembler.bx(target);
    }

    void farJump(TrustedImmPtr target, PtrTag)
    {
        move(target, dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }

    // Address is a memory location containing the address to jump to
    void farJump(Address address, PtrTag)
    {
        load32(address, dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }

    void farJump(AbsoluteAddress address, PtrTag)
    {
        move(TrustedImmPtr(address.m_ptr), dataTempRegister);
        load32(Address(dataTempRegister), dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }

    // Register-tag variants discard the tag and forward to the PtrTag forms.
    ALWAYS_INLINE void farJump(RegisterID target, RegisterID jumpTag) { UNUSED_PARAM(jumpTag),
farJump(target, NoPtrTag); } 1743 ALWAYS_INLINE void farJump(Address address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(address, NoPtrTag); } 1744 ALWAYS_INLINE void farJump(AbsoluteAddress address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(address, NoPtrTag); } 1745 1746 // Arithmetic control flow operations: 1747 // 1748 // This set of conditional branch operations branch based 1749 // on the result of an arithmetic operation. The operation 1750 // is performed as normal, storing the result. 1751 // 1752 // * jz operations branch if the result is zero. 1753 // * jo operations branch if the (signed) arithmetic 1754 // operation caused an overflow to occur. 1755 1756 Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) 1757 { 1758 m_assembler.add_S(dest, op1, op2); 1759 return Jump(makeBranch(cond)); 1760 } 1761 1762 Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) 1763 { 1764 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value); 1765 if (armImm.isValid()) 1766 m_assembler.add_S(dest, op1, armImm); 1767 else { 1768 move(imm, dataTempRegister); 1769 m_assembler.add_S(dest, op1, dataTempRegister); 1770 } 1771 return Jump(makeBranch(cond)); 1772 } 1773 1774 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest) 1775 { 1776 return branchAdd32(cond, dest, src, dest); 1777 } 1778 1779 Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest) 1780 { 1781 load32(src, dataTempRegister); 1782 return branchAdd32(cond, dest, dataTempRegister, dest); 1783 } 1784 1785 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) 1786 { 1787 return branchAdd32(cond, dest, imm, dest); 1788 } 1789 1790 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest) 1791 { 1792 // Move the high bits of the address into addressTempRegister, 1793 // and load the value into dataTempRegister. 
1794 move(TrustedImmPtr(dest.m_ptr), addressTempRegister); 1795 m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0)); 1796 1797 // Do the add. 1798 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value); 1799 if (armImm.isValid()) 1800 m_assembler.add_S(dataTempRegister, dataTempRegister, armImm); 1801 else { 1802 // If the operand does not fit into an immediate then load it temporarily 1803 // into addressTempRegister; since we're overwriting addressTempRegister 1804 // we'll need to reload it with the high bits of the address afterwards. 1805 move(imm, addressTempRegister); 1806 m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister); 1807 move(TrustedImmPtr(dest.m_ptr), addressTempRegister); 1808 } 1809 1810 // Store the result. 1811 m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0)); 1812 1813 return Jump(makeBranch(cond)); 1814 } 1815 1816 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) 1817 { 1818 m_assembler.smull(dest, dataTempRegister, src1, src2); 1819 1820 if (cond == Overflow) { 1821 m_assembler.asr(addressTempRegister, dest, 31); 1822 return branch32(NotEqual, addressTempRegister, dataTempRegister); 1823 } 1824 1825 return branchTest32(cond, dest); 1826 } 1827 1828 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest) 1829 { 1830 return branchMul32(cond, src, dest, dest); 1831 } 1832 1833 Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) 1834 { 1835 move(imm, dataTempRegister); 1836 return branchMul32(cond, dataTempRegister, src, dest); 1837 } 1838 1839 Jump branchNeg32(ResultCondition cond, RegisterID srcDest) 1840 { 1841 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0); 1842 m_assembler.sub_S(srcDest, zero, srcDest); 1843 return Jump(makeBranch(cond)); 1844 } 1845 1846 Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID 
dest) 1847 { 1848 m_assembler.orr_S(dest, dest, src); 1849 return Jump(makeBranch(cond)); 1850 } 1851 1852 Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) 1853 { 1854 m_assembler.sub_S(dest, op1, op2); 1855 return Jump(makeBranch(cond)); 1856 } 1857 1858 Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) 1859 { 1860 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value); 1861 if (armImm.isValid()) 1862 m_assembler.sub_S(dest, op1, armImm); 1863 else { 1864 move(imm, dataTempRegister); 1865 m_assembler.sub_S(dest, op1, dataTempRegister); 1866 } 1867 return Jump(makeBranch(cond)); 1868 } 1869 1870 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest) 1871 { 1872 return branchSub32(cond, dest, src, dest); 1873 } 1874 1875 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) 1876 { 1877 return branchSub32(cond, dest, imm, dest); 1878 } 1879 1880 void relativeTableJump(RegisterID index, int scale) 1881 { 1882 ASSERT(scale >= 0 && scale <= 31); 1883 1884 // dataTempRegister will point after the jump if index register contains zero 1885 move(ARMRegisters::pc, dataTempRegister); 1886 m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9)); 1887 1888 ShiftTypeAndAmount shift(SRType_LSL, scale); 1889 m_assembler.add(dataTempRegister, dataTempRegister, index, shift); 1890 farJump(dataTempRegister, NoPtrTag); 1891 } 1892 1893 // Miscellaneous operations: 1894 1895 void breakpoint(uint8_t imm = 0) 1896 { 1897 m_assembler.bkpt(imm); 1898 } 1899 1900 static bool isBreakpoint(void* address) { return ARMv7Assembler::isBkpt(address); } 1901 1902 ALWAYS_INLINE Call nearCall() 1903 { 1904 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister); 1905 return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear); 1906 } 1907 1908 ALWAYS_INLINE Call nearTailCall() 1909 { 1910 
moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister); 1911 return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail); 1912 } 1913 1914 ALWAYS_INLINE Call call(PtrTag) 1915 { 1916 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister); 1917 return Call(m_assembler.blx(dataTempRegister), Call::Linkable); 1918 } 1919 1920 ALWAYS_INLINE Call call(RegisterID target, PtrTag) 1921 { 1922 return Call(m_assembler.blx(target), Call::None); 1923 } 1924 1925 ALWAYS_INLINE Call call(Address address, PtrTag) 1926 { 1927 load32(address, dataTempRegister); 1928 return Call(m_assembler.blx(dataTempRegister), Call::None); 1929 } 1930 1931 ALWAYS_INLINE Call call(RegisterID callTag) { return UNUSED_PARAM(callTag), call(NoPtrTag); } 1932 ALWAYS_INLINE Call call(RegisterID target, RegisterID callTag) { return UNUSED_PARAM(callTag), call(target, NoPtrTag); } 1933 ALWAYS_INLINE Call call(Address address, RegisterID callTag) { return UNUSED_PARAM(callTag), call(address, NoPtrTag); } 1934 1935 ALWAYS_INLINE void ret() 1936 { 1937 m_assembler.bx(linkRegister); 1938 } 1939 1940 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) 1941 { 1942 m_assembler.cmp(left, right); 1943 m_assembler.it(armV7Condition(cond), false); 1944 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1)); 1945 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0)); 1946 } 1947 1948 void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest) 1949 { 1950 load32(left, dataTempRegister); 1951 compare32(cond, dataTempRegister, right, dest); 1952 } 1953 1954 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) 1955 { 1956 TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); 1957 MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister); 1958 compare32(cond, addressTempRegister, right8, dest); 1959 } 1960 1961 void compare32(RelationalCondition 
cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        // Flags from the comparison drive the IT-selected 1/0 move pair.
        compare32AndSetFlags(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    // dest = ((*address & mask) <cond>) ? 1 : 0. Clobbers dataTempRegister.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // 8-bit variant of test32: the helpers choose the byte-load extension and
    // mask form appropriate to the condition. Clobbers dataTempRegister.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister);
        test32(dataTempRegister, mask8);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // Moves an immediate using the fixed-width (patchable) encoding and
    // returns a label to the move so the value can be rewritten later.
    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
    {
        padBeforePatch();
        moveFixedWidthEncoding(imm, dst);
        return DataLabel32(this);
    }

    // Pointer-sized flavor of moveWithPatch (32-bit on this target).
    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
    {
        padBeforePatch();
        moveFixedWidthEncoding(TrustedImm32(imm), dst);
        return DataLabelPtr(this);
    }

    // Compare-and-branch where the right-hand constant is patchable.
    // Clobbers dataTempRegister (holds the patched constant).
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
    {
        dataLabel =
moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    // Memory form: left operand goes to addressTempRegister, the patchable
    // constant to dataTempRegister, so neither clobbers the other.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }

    // 32-bit immediate variant of the patchable compare-and-branch above.
    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }

    // The patchableBranch* family brackets the branch emission with
    // m_makeJumpPatchable so the underlying jump() emits a fixed-size,
    // repatchable jump sequence.
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(nullptr))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, left, TrustedImm32(right));
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        m_makeJumpPatchable = true;
        Jump result = branchTest32(cond, reg, mask);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch8(RelationalCondition cond, Address left, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch8(cond, left, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump
patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, left, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    // Patchable variant of branchPtrWithPatch: both the compared constant and
    // the jump itself can be rewritten after assembly.
    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr))
    {
        m_makeJumpPatchable = true;
        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    // Unconditional patchable jump; padBeforePatch() keeps the patchable
    // sequence properly positioned for later rewriting.
    PatchableJump patchableJump()
    {
        padBeforePatch();
        m_makeJumpPatchable = true;
        Jump result = jump();
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }

    // Stores a patchable pointer constant to memory; the returned label
    // addresses the constant move. Clobbers dataTempRegister.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
        store32(dataTempRegister, address);
        return label;
    }
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(nullptr), address); }

    // Reads back the destination of a previously-emitted call.
    template<PtrTag resultTag, PtrTag locationTag>
    static FunctionPtr<resultTag> readCallTarget(CodeLocationCall<locationTag> call)
    {
        return FunctionPtr<resultTag>(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
    }

    // This port only supports jump replacement on the register form of
    // branchPtrWithPatch; the address-based forms below are unreachable.
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return
false; }

    // Returns the start of the patchable compare sequence: the constant move
    // is two 32-bit Thumb-2 instructions, so step back 2 * 4 bytes from the
    // data label.
    template<PtrTag tag>
    static CodeLocationLabel<tag> startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr<tag> label)
    {
        const unsigned twoWordOpSize = 4;
        return label.labelAtOffset(-twoWordOpSize * 2);
    }

    // Restores the original movT3/movt(/cmp) sequence after a jump
    // replacement. The Linux build reverts the full three-instruction form;
    // other OSes only rewrite the low-half move, so rd is unused there.
    template<PtrTag tag>
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel<tag> instructionStart, RegisterID rd, void* initialValue)
    {
#if OS(LINUX)
        ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
#else
        UNUSED_PARAM(rd);
        ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
#endif
    }

    // Address-based jump replacement is unsupported on this port (see
    // canJumpReplacePatchableBranchPtrWithPatch above), so these four are
    // unreachable stubs.
    template<PtrTag tag>
    static CodeLocationLabel<tag> startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr<tag>)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel<tag>();
    }

    template<PtrTag tag>
    static CodeLocationLabel<tag> startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32<tag>)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel<tag>();
    }

    template<PtrTag tag>
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel<tag>, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    template<PtrTag tag>
    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel<tag>, Address, int32_t)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    // Redirects an already-linked call to a new destination label.
    template<PtrTag callTag, PtrTag destTag>
    static void repatchCall(CodeLocationCall<callTag> call, CodeLocationLabel<destTag> destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    template<PtrTag callTag, PtrTag destTag>
    static void
repatchCall(CodeLocationCall<callTag> call, FunctionPtr<destTag> destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

protected:
    // Emits an unconditional jump: a fixed-width placeholder move into
    // dataTempRegister plus bx, linked to its target later. When
    // m_makeJumpPatchable is set, the fixed-size (repatchable) jump type is
    // recorded instead of the compactable one.
    ALWAYS_INLINE Jump jump()
    {
        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
    }

    // Conditional jump: the IT instruction conditionalizes the placeholder
    // move + bx sequence that follows it.
    ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
    {
        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
    }
    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }

    // Folds a base+index+offset address into a base+index ArmAddress.
    // A nonzero offset is added into addressTempRegister (directly when it
    // fits an encodable immediate, otherwise via a move + add).
    ArmAddress setupArmAddress(BaseIndex address)
    {
        if (address.offset) {
            ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
            if (imm.isValid())
                m_assembler.add(addressTempRegister, address.base, imm);
            else {
                move(TrustedImm32(address.offset), addressTempRegister);
                m_assembler.add(addressTempRegister, addressTempRegister, address.base);
            }

            return ArmAddress(addressTempRegister, address.index, address.scale);
        } else
            return ArmAddress(address.base, address.index, address.scale);
    }

    // Offsets in [-0xff, 0xfff] can be encoded directly in the load/store;
    // larger ones are materialized in addressTempRegister.
    ArmAddress setupArmAddress(Address address)
    {
        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
return ArmAddress(address.base, address.offset);

        move(TrustedImm32(address.offset), addressTempRegister);
        return ArmAddress(address.base, addressTempRegister);
    }

    // NOTE(review): identical logic to setupArmAddress(Address) above,
    // duplicated for the ImplicitAddress overload.
    ArmAddress setupArmAddress(ImplicitAddress address)
    {
        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
            return ArmAddress(address.base, address.offset);

        move(TrustedImm32(address.offset), addressTempRegister);
        return ArmAddress(address.base, addressTempRegister);
    }

    // Collapses base+offset of a BaseIndex into a single register
    // (addressTempRegister) when the offset is nonzero.
    RegisterID makeBaseIndexBase(BaseIndex address)
    {
        if (!address.offset)
            return address.base;

        ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
        if (imm.isValid())
            m_assembler.add(addressTempRegister, address.base, imm);
        else {
            move(TrustedImm32(address.offset), addressTempRegister);
            m_assembler.add(addressTempRegister, addressTempRegister, address.base);
        }

        return addressTempRegister;
    }

    // Loads a 32-bit immediate as movT3 (low halfword) + movt (high
    // halfword). Always two instructions regardless of value, which is what
    // makes the sequence patchable in place.
    void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
    {
        uint32_t value = imm.m_value;
        m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
        m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
    }

    // The macro-assembler condition enums are declared with the assembler's
    // condition values, so these conversions are plain casts.
    ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

private:
    friend class LinkBuffer;

    // Links an emitted call to its final target; tail calls were emitted as
    // jumps (bx) and are linked as such.
    template<PtrTag tag>
    static void linkCall(void* code, Call call, FunctionPtr<tag> function)
    {
        if (call.isFlagSet(Call::Tail))
            ARMv7Assembler::linkJump(code, call.m_label, function.executableAddress());
else
            ARMv7Assembler::linkCall(code, call.m_label, function.executableAddress());
    }

    // When set, jump()/makeBranch() record fixed-size jump types so the
    // emitted sequence can be repatched; see the patchableBranch* methods.
    bool m_makeJumpPatchable;
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)