// test_generator.cpp
1 /* This file is part of the dynarmic project. 2 * Copyright (c) 2022 MerryMage 3 * SPDX-License-Identifier: 0BSD 4 */ 5 6 #include <algorithm> 7 #include <array> 8 #include <cstdio> 9 #include <cstdlib> 10 #include <functional> 11 #include <limits> 12 #include <optional> 13 #include <tuple> 14 #include <vector> 15 16 #include <mcl/bit/swap.hpp> 17 #include <mcl/macro/architecture.hpp> 18 #include <mcl/stdint.hpp> 19 20 #include "./A32/testenv.h" 21 #include "./A64/testenv.h" 22 #include "./fuzz_util.h" 23 #include "./rand_int.h" 24 #include "dynarmic/common/fp/fpcr.h" 25 #include "dynarmic/common/fp/fpsr.h" 26 #include "dynarmic/frontend/A32/ITState.h" 27 #include "dynarmic/frontend/A32/a32_location_descriptor.h" 28 #include "dynarmic/frontend/A32/a32_types.h" 29 #include "dynarmic/frontend/A32/translate/a32_translate.h" 30 #include "dynarmic/frontend/A64/a64_location_descriptor.h" 31 #include "dynarmic/frontend/A64/a64_types.h" 32 #include "dynarmic/frontend/A64/translate/a64_translate.h" 33 #include "dynarmic/interface/A32/a32.h" 34 #include "dynarmic/interface/A64/a64.h" 35 #include "dynarmic/ir/basic_block.h" 36 #include "dynarmic/ir/location_descriptor.h" 37 #include "dynarmic/ir/opcodes.h" 38 39 // Must be declared last for all necessary operator<< to be declared prior to this. 
40 #include <fmt/format.h> 41 #include <fmt/ostream.h> 42 43 constexpr bool mask_fpsr_cum_bits = true; 44 45 namespace { 46 using namespace Dynarmic; 47 48 bool ShouldTestInst(IR::Block& block) { 49 if (auto terminal = block.GetTerminal(); boost::get<IR::Term::Interpret>(&terminal)) { 50 return false; 51 } 52 53 for (const auto& ir_inst : block) { 54 switch (ir_inst.GetOpcode()) { 55 // A32 56 case IR::Opcode::A32GetFpscr: 57 case IR::Opcode::A32ExceptionRaised: 58 case IR::Opcode::A32CallSupervisor: 59 case IR::Opcode::A32CoprocInternalOperation: 60 case IR::Opcode::A32CoprocSendOneWord: 61 case IR::Opcode::A32CoprocSendTwoWords: 62 case IR::Opcode::A32CoprocGetOneWord: 63 case IR::Opcode::A32CoprocGetTwoWords: 64 case IR::Opcode::A32CoprocLoadWords: 65 case IR::Opcode::A32CoprocStoreWords: 66 // A64 67 case IR::Opcode::A64ExceptionRaised: 68 case IR::Opcode::A64CallSupervisor: 69 case IR::Opcode::A64DataCacheOperationRaised: 70 case IR::Opcode::A64GetCNTPCT: 71 // Unimplemented 72 case IR::Opcode::SignedSaturatedAdd8: 73 case IR::Opcode::SignedSaturatedAdd16: 74 case IR::Opcode::SignedSaturatedAdd32: 75 case IR::Opcode::SignedSaturatedAdd64: 76 case IR::Opcode::SignedSaturatedDoublingMultiplyReturnHigh16: 77 case IR::Opcode::SignedSaturatedDoublingMultiplyReturnHigh32: 78 case IR::Opcode::SignedSaturatedSub8: 79 case IR::Opcode::SignedSaturatedSub16: 80 case IR::Opcode::SignedSaturatedSub32: 81 case IR::Opcode::SignedSaturatedSub64: 82 case IR::Opcode::UnsignedSaturatedAdd8: 83 case IR::Opcode::UnsignedSaturatedAdd16: 84 case IR::Opcode::UnsignedSaturatedAdd32: 85 case IR::Opcode::UnsignedSaturatedAdd64: 86 case IR::Opcode::UnsignedSaturatedSub8: 87 case IR::Opcode::UnsignedSaturatedSub16: 88 case IR::Opcode::UnsignedSaturatedSub32: 89 case IR::Opcode::UnsignedSaturatedSub64: 90 case IR::Opcode::VectorMaxS64: 91 case IR::Opcode::VectorMaxU64: 92 case IR::Opcode::VectorMinS64: 93 case IR::Opcode::VectorMinU64: 94 case IR::Opcode::VectorMultiply64: 95 case 
IR::Opcode::SM4AccessSubstitutionBox: 96 // Half-prec conversions 97 case IR::Opcode::FPHalfToFixedS16: 98 case IR::Opcode::FPHalfToFixedS32: 99 case IR::Opcode::FPHalfToFixedS64: 100 case IR::Opcode::FPHalfToFixedU16: 101 case IR::Opcode::FPHalfToFixedU32: 102 case IR::Opcode::FPHalfToFixedU64: 103 // Half-precision 104 case IR::Opcode::FPAbs16: 105 case IR::Opcode::FPMulAdd16: 106 case IR::Opcode::FPMulSub16: 107 case IR::Opcode::FPNeg16: 108 case IR::Opcode::FPRecipEstimate16: 109 case IR::Opcode::FPRecipExponent16: 110 case IR::Opcode::FPRecipStepFused16: 111 case IR::Opcode::FPRoundInt16: 112 case IR::Opcode::FPRSqrtEstimate16: 113 case IR::Opcode::FPRSqrtStepFused16: 114 case IR::Opcode::FPVectorAbs16: 115 case IR::Opcode::FPVectorEqual16: 116 case IR::Opcode::FPVectorMulAdd16: 117 case IR::Opcode::FPVectorNeg16: 118 case IR::Opcode::FPVectorRecipEstimate16: 119 case IR::Opcode::FPVectorRecipStepFused16: 120 case IR::Opcode::FPVectorRoundInt16: 121 case IR::Opcode::FPVectorRSqrtEstimate16: 122 case IR::Opcode::FPVectorRSqrtStepFused16: 123 case IR::Opcode::FPVectorToSignedFixed16: 124 case IR::Opcode::FPVectorToUnsignedFixed16: 125 case IR::Opcode::FPVectorFromHalf32: 126 case IR::Opcode::FPVectorToHalf32: 127 return false; 128 default: 129 continue; 130 } 131 } 132 133 return true; 134 } 135 136 bool ShouldTestA32Inst(u32 instruction, u32 pc, bool is_thumb, bool is_last_inst, A32::ITState it_state = {}) { 137 const A32::LocationDescriptor location = A32::LocationDescriptor{pc, {}, {}}.SetTFlag(is_thumb).SetIT(it_state); 138 IR::Block block{location}; 139 const bool should_continue = A32::TranslateSingleInstruction(block, location, instruction); 140 141 if (!should_continue && !is_last_inst) { 142 return false; 143 } 144 145 return ShouldTestInst(block); 146 } 147 148 bool ShouldTestA64Inst(u32 instruction, u64 pc, bool is_last_inst) { 149 const A64::LocationDescriptor location = A64::LocationDescriptor{pc, {}}; 150 IR::Block block{location}; 151 const bool 
should_continue = A64::TranslateSingleInstruction(block, location, instruction); 152 153 if (!should_continue && !is_last_inst) { 154 return false; 155 } 156 157 return ShouldTestInst(block); 158 } 159 160 u32 GenRandomArmInst(u32 pc, bool is_last_inst) { 161 static const struct InstructionGeneratorInfo { 162 std::vector<InstructionGenerator> generators; 163 std::vector<InstructionGenerator> invalid; 164 } instructions = [] { 165 const std::vector<std::tuple<std::string, const char*>> list{ 166 #define INST(fn, name, bitstring) {#fn, bitstring}, 167 #include "dynarmic/frontend/A32/decoder/arm.inc" 168 #include "dynarmic/frontend/A32/decoder/asimd.inc" 169 #include "dynarmic/frontend/A32/decoder/vfp.inc" 170 #undef INST 171 }; 172 173 std::vector<InstructionGenerator> generators; 174 std::vector<InstructionGenerator> invalid; 175 176 // List of instructions not to test 177 static constexpr std::array do_not_test{ 178 // Translating load/stores 179 "arm_LDRBT", "arm_LDRBT", "arm_LDRHT", "arm_LDRHT", "arm_LDRSBT", "arm_LDRSBT", "arm_LDRSHT", "arm_LDRSHT", "arm_LDRT", "arm_LDRT", 180 "arm_STRBT", "arm_STRBT", "arm_STRHT", "arm_STRHT", "arm_STRT", "arm_STRT", 181 // Exclusive load/stores 182 "arm_LDREXB", "arm_LDREXD", "arm_LDREXH", "arm_LDREX", "arm_LDAEXB", "arm_LDAEXD", "arm_LDAEXH", "arm_LDAEX", 183 "arm_STREXB", "arm_STREXD", "arm_STREXH", "arm_STREX", "arm_STLEXB", "arm_STLEXD", "arm_STLEXH", "arm_STLEX", 184 "arm_SWP", "arm_SWPB", 185 // Elevated load/store multiple instructions. 186 "arm_LDM_eret", "arm_LDM_usr", 187 "arm_STM_usr", 188 // Coprocessor 189 "arm_CDP", "arm_LDC", "arm_MCR", "arm_MCRR", "arm_MRC", "arm_MRRC", "arm_STC", 190 // System 191 "arm_CPS", "arm_RFE", "arm_SRS", 192 // Undefined 193 "arm_UDF", 194 // FPSCR is inaccurate 195 "vfp_VMRS", 196 // Incorrect Unicorn implementations 197 "asimd_VRECPS", // Unicorn does not fuse the multiply and subtraction, resulting in being off by 1ULP. 
198 "asimd_VRSQRTS", // Unicorn does not fuse the multiply and subtraction, resulting in being off by 1ULP. 199 "vfp_VCVT_from_fixed", // Unicorn does not do round-to-nearest-even for this instruction correctly. 200 }; 201 202 for (const auto& [fn, bitstring] : list) { 203 if (std::find(do_not_test.begin(), do_not_test.end(), fn) != do_not_test.end()) { 204 invalid.emplace_back(InstructionGenerator{bitstring}); 205 continue; 206 } 207 generators.emplace_back(InstructionGenerator{bitstring}); 208 } 209 return InstructionGeneratorInfo{generators, invalid}; 210 }(); 211 212 while (true) { 213 const size_t index = RandInt<size_t>(0, instructions.generators.size() - 1); 214 const u32 inst = instructions.generators[index].Generate(); 215 216 if ((instructions.generators[index].Mask() & 0xF0000000) == 0 && (inst & 0xF0000000) == 0xF0000000) { 217 continue; 218 } 219 220 if (ShouldTestA32Inst(inst, pc, false, is_last_inst)) { 221 return inst; 222 } 223 } 224 } 225 226 std::vector<u16> GenRandomThumbInst(u32 pc, bool is_last_inst, A32::ITState it_state = {}) { 227 static const struct InstructionGeneratorInfo { 228 std::vector<InstructionGenerator> generators; 229 std::vector<InstructionGenerator> invalid; 230 } instructions = [] { 231 const std::vector<std::tuple<std::string, const char*>> list{ 232 #define INST(fn, name, bitstring) {#fn, bitstring}, 233 #include "dynarmic/frontend/A32/decoder/thumb16.inc" 234 #include "dynarmic/frontend/A32/decoder/thumb32.inc" 235 #undef INST 236 }; 237 238 const std::vector<std::tuple<std::string, const char*>> vfp_list{ 239 #define INST(fn, name, bitstring) {#fn, bitstring}, 240 #include "dynarmic/frontend/A32/decoder/vfp.inc" 241 #undef INST 242 }; 243 244 const std::vector<std::tuple<std::string, const char*>> asimd_list{ 245 #define INST(fn, name, bitstring) {#fn, bitstring}, 246 #include "dynarmic/frontend/A32/decoder/asimd.inc" 247 #undef INST 248 }; 249 250 std::vector<InstructionGenerator> generators; 251 
std::vector<InstructionGenerator> invalid; 252 253 // List of instructions not to test 254 static constexpr std::array do_not_test{ 255 "thumb16_BKPT", 256 "thumb16_IT", 257 258 // Exclusive load/stores 259 "thumb32_LDREX", 260 "thumb32_LDREXB", 261 "thumb32_LDREXD", 262 "thumb32_LDREXH", 263 "thumb32_STREX", 264 "thumb32_STREXB", 265 "thumb32_STREXD", 266 "thumb32_STREXH", 267 268 // Coprocessor 269 "thumb32_CDP", 270 "thumb32_LDC", 271 "thumb32_MCR", 272 "thumb32_MCRR", 273 "thumb32_MRC", 274 "thumb32_MRRC", 275 "thumb32_STC", 276 }; 277 278 for (const auto& [fn, bitstring] : list) { 279 if (std::find(do_not_test.begin(), do_not_test.end(), fn) != do_not_test.end()) { 280 invalid.emplace_back(InstructionGenerator{bitstring}); 281 continue; 282 } 283 generators.emplace_back(InstructionGenerator{bitstring}); 284 } 285 for (const auto& [fn, bs] : vfp_list) { 286 std::string bitstring = bs; 287 if (bitstring.substr(0, 4) == "cccc" || bitstring.substr(0, 4) == "----") { 288 bitstring.replace(0, 4, "1110"); 289 } 290 if (std::find(do_not_test.begin(), do_not_test.end(), fn) != do_not_test.end()) { 291 invalid.emplace_back(InstructionGenerator{bitstring.c_str()}); 292 continue; 293 } 294 generators.emplace_back(InstructionGenerator{bitstring.c_str()}); 295 } 296 for (const auto& [fn, bs] : asimd_list) { 297 std::string bitstring = bs; 298 if (bitstring.substr(0, 7) == "1111001") { 299 const char U = bitstring[7]; 300 bitstring.replace(0, 8, "111-1111"); 301 bitstring[3] = U; 302 } else if (bitstring.substr(0, 8) == "11110100") { 303 bitstring.replace(0, 8, "11111001"); 304 } else { 305 ASSERT_FALSE("Unhandled ASIMD instruction: {} {}", fn, bs); 306 } 307 if (std::find(do_not_test.begin(), do_not_test.end(), fn) != do_not_test.end()) { 308 invalid.emplace_back(InstructionGenerator{bitstring.c_str()}); 309 continue; 310 } 311 generators.emplace_back(InstructionGenerator{bitstring.c_str()}); 312 } 313 return InstructionGeneratorInfo{generators, invalid}; 314 }(); 315 316 
while (true) { 317 const size_t index = RandInt<size_t>(0, instructions.generators.size() - 1); 318 const u32 inst = instructions.generators[index].Generate(); 319 const bool is_four_bytes = (inst >> 16) != 0; 320 321 if (ShouldTestA32Inst(is_four_bytes ? mcl::bit::swap_halves_32(inst) : inst, pc, true, is_last_inst, it_state)) { 322 if (is_four_bytes) 323 return {static_cast<u16>(inst >> 16), static_cast<u16>(inst)}; 324 return {static_cast<u16>(inst)}; 325 } 326 } 327 } 328 329 u32 GenRandomA64Inst(u64 pc, bool is_last_inst) { 330 static const struct InstructionGeneratorInfo { 331 std::vector<InstructionGenerator> generators; 332 std::vector<InstructionGenerator> invalid; 333 } instructions = [] { 334 const std::vector<std::tuple<std::string, const char*>> list{ 335 #define INST(fn, name, bitstring) {#fn, bitstring}, 336 #include "dynarmic/frontend/A64/decoder/a64.inc" 337 #undef INST 338 }; 339 340 std::vector<InstructionGenerator> generators; 341 std::vector<InstructionGenerator> invalid; 342 343 // List of instructions not to test 344 const std::vector<std::string> do_not_test{ 345 // Dynarmic and QEMU currently differ on how the exclusive monitor's address range works. 
346 "STXR", 347 "STLXR", 348 "STXP", 349 "STLXP", 350 "LDXR", 351 "LDAXR", 352 "LDXP", 353 "LDAXP", 354 // Behaviour differs from QEMU 355 "MSR_reg", 356 "MSR_imm", 357 "MRS", 358 }; 359 360 for (const auto& [fn, bitstring] : list) { 361 if (fn == "UnallocatedEncoding") { 362 continue; 363 } 364 if (std::find(do_not_test.begin(), do_not_test.end(), fn) != do_not_test.end()) { 365 invalid.emplace_back(InstructionGenerator{bitstring}); 366 continue; 367 } 368 generators.emplace_back(InstructionGenerator{bitstring}); 369 } 370 return InstructionGeneratorInfo{generators, invalid}; 371 }(); 372 373 while (true) { 374 const size_t index = RandInt<size_t>(0, instructions.generators.size() - 1); 375 const u32 inst = instructions.generators[index].Generate(); 376 377 if (std::any_of(instructions.invalid.begin(), instructions.invalid.end(), [inst](const auto& invalid) { return invalid.Match(inst); })) { 378 continue; 379 } 380 if (ShouldTestA64Inst(inst, pc, is_last_inst)) { 381 return inst; 382 } 383 } 384 } 385 386 template<typename TestEnv> 387 Dynarmic::A32::UserConfig GetA32UserConfig(TestEnv& testenv, bool noopt) { 388 Dynarmic::A32::UserConfig user_config; 389 user_config.optimizations &= ~OptimizationFlag::FastDispatch; 390 user_config.callbacks = &testenv; 391 if (noopt) { 392 user_config.optimizations = no_optimizations; 393 } 394 return user_config; 395 } 396 397 template<size_t num_jit_reruns = 1, typename TestEnv> 398 void RunTestInstance(Dynarmic::A32::Jit& jit, 399 TestEnv& jit_env, 400 const std::array<u32, 16>& regs, 401 const std::array<u32, 64>& vecs, 402 const std::vector<typename TestEnv::InstructionType>& instructions, 403 const u32 cpsr, 404 const u32 fpscr, 405 const size_t ticks_left) { 406 const u32 initial_pc = regs[15]; 407 const u32 num_words = initial_pc / sizeof(typename TestEnv::InstructionType); 408 const u32 code_mem_size = num_words + static_cast<u32>(instructions.size()); 409 410 fmt::print("instructions:"); 411 for (auto instruction : 
instructions) { 412 if constexpr (sizeof(decltype(instruction)) == 2) { 413 fmt::print(" {:04x}", instruction); 414 } else { 415 fmt::print(" {:08x}", instruction); 416 } 417 } 418 fmt::print("\n"); 419 420 fmt::print("initial_regs:"); 421 for (u32 i : regs) { 422 fmt::print(" {:08x}", i); 423 } 424 fmt::print("\n"); 425 fmt::print("initial_vecs:"); 426 for (u32 i : vecs) { 427 fmt::print(" {:08x}", i); 428 } 429 fmt::print("\n"); 430 fmt::print("initial_cpsr: {:08x}\n", cpsr); 431 fmt::print("initial_fpcr: {:08x}\n", fpscr); 432 433 jit.ClearCache(); 434 435 for (size_t jit_rerun_count = 0; jit_rerun_count < num_jit_reruns; ++jit_rerun_count) { 436 jit_env.code_mem.resize(code_mem_size); 437 std::fill(jit_env.code_mem.begin(), jit_env.code_mem.end(), TestEnv::infinite_loop); 438 439 std::copy(instructions.begin(), instructions.end(), jit_env.code_mem.begin() + num_words); 440 jit_env.PadCodeMem(); 441 jit_env.modified_memory.clear(); 442 jit_env.interrupts.clear(); 443 444 jit.Regs() = regs; 445 jit.ExtRegs() = vecs; 446 jit.SetFpscr(fpscr); 447 jit.SetCpsr(cpsr); 448 449 jit_env.ticks_left = ticks_left; 450 jit.Run(); 451 } 452 453 fmt::print("final_regs:"); 454 for (u32 i : jit.Regs()) { 455 fmt::print(" {:08x}", i); 456 } 457 fmt::print("\n"); 458 fmt::print("final_vecs:"); 459 for (u32 i : jit.ExtRegs()) { 460 fmt::print(" {:08x}", i); 461 } 462 fmt::print("\n"); 463 fmt::print("final_cpsr: {:08x}\n", jit.Cpsr()); 464 fmt::print("final_fpsr: {:08x}\n", mask_fpsr_cum_bits ? 
jit.Fpscr() & 0xffffff00 : jit.Fpscr()); 465 466 fmt::print("mod_mem: "); 467 for (auto [addr, value] : jit_env.modified_memory) { 468 fmt::print("{:08x}:{:02x} ", addr, value); 469 } 470 fmt::print("\n"); 471 472 fmt::print("interrupts:\n"); 473 for (const auto& i : jit_env.interrupts) { 474 std::puts(i.c_str()); 475 } 476 477 fmt::print("===\n"); 478 } 479 480 Dynarmic::A64::UserConfig GetA64UserConfig(A64TestEnv& jit_env, bool noopt) { 481 Dynarmic::A64::UserConfig jit_user_config{&jit_env}; 482 jit_user_config.optimizations &= ~OptimizationFlag::FastDispatch; 483 // The below corresponds to the settings for qemu's aarch64_max_initfn 484 jit_user_config.dczid_el0 = 7; 485 jit_user_config.ctr_el0 = 0x80038003; 486 if (noopt) { 487 jit_user_config.optimizations = no_optimizations; 488 } 489 return jit_user_config; 490 } 491 492 template<size_t num_jit_reruns = 2> 493 void RunTestInstance(Dynarmic::A64::Jit& jit, 494 A64TestEnv& jit_env, 495 const std::array<u64, 31>& regs, 496 const std::array<std::array<u64, 2>, 32>& vecs, 497 const std::vector<u32>& instructions, 498 const u32 pstate, 499 const u32 fpcr, 500 const u64 initial_sp, 501 const u64 start_address, 502 const size_t ticks_left) { 503 jit.ClearCache(); 504 505 for (size_t jit_rerun_count = 0; jit_rerun_count < num_jit_reruns; ++jit_rerun_count) { 506 jit_env.code_mem = instructions; 507 jit_env.code_mem.emplace_back(0x14000000); // B . 
508 jit_env.code_mem_start_address = start_address; 509 jit_env.modified_memory.clear(); 510 jit_env.interrupts.clear(); 511 512 jit.SetRegisters(regs); 513 jit.SetVectors(vecs); 514 jit.SetPC(start_address); 515 jit.SetSP(initial_sp); 516 jit.SetFpcr(fpcr); 517 jit.SetFpsr(0); 518 jit.SetPstate(pstate); 519 jit.ClearCache(); 520 521 jit_env.ticks_left = ticks_left; 522 jit.Run(); 523 } 524 525 fmt::print("instructions:"); 526 for (u32 instruction : instructions) { 527 fmt::print(" {:08x}", instruction); 528 } 529 fmt::print("\n"); 530 531 fmt::print("initial_regs:"); 532 for (u64 i : regs) { 533 fmt::print(" {:016x}", i); 534 } 535 fmt::print("\n"); 536 fmt::print("initial_vecs:"); 537 for (auto i : vecs) { 538 fmt::print(" {:016x}:{:016x}", i[0], i[1]); 539 } 540 fmt::print("\n"); 541 fmt::print("initial_sp: {:016x}\n", initial_sp); 542 fmt::print("initial_pstate: {:08x}\n", pstate); 543 fmt::print("initial_fpcr: {:08x}\n", fpcr); 544 545 fmt::print("final_regs:"); 546 for (u64 i : jit.GetRegisters()) { 547 fmt::print(" {:016x}", i); 548 } 549 fmt::print("\n"); 550 fmt::print("final_vecs:"); 551 for (auto i : jit.GetVectors()) { 552 fmt::print(" {:016x}:{:016x}", i[0], i[1]); 553 } 554 fmt::print("\n"); 555 fmt::print("final_sp: {:016x}\n", jit.GetSP()); 556 fmt::print("final_pc: {:016x}\n", jit.GetPC()); 557 fmt::print("final_pstate: {:08x}\n", jit.GetPstate()); 558 fmt::print("final_fpcr: {:08x}\n", jit.GetFpcr()); 559 fmt::print("final_qc : {}\n", FP::FPSR{jit.GetFpsr()}.QC()); 560 561 fmt::print("mod_mem:"); 562 for (auto [addr, value] : jit_env.modified_memory) { 563 fmt::print(" {:08x}:{:02x}", addr, value); 564 } 565 fmt::print("\n"); 566 567 fmt::print("interrupts:\n"); 568 for (const auto& i : jit_env.interrupts) { 569 std::puts(i.c_str()); 570 } 571 572 fmt::print("===\n"); 573 } 574 575 } // Anonymous namespace 576 577 void TestThumb(size_t num_instructions, size_t num_iterations, bool noopt) { 578 ThumbTestEnv jit_env{}; 579 Dynarmic::A32::Jit 
jit{GetA32UserConfig(jit_env, noopt)}; 580 581 std::array<u32, 16> regs; 582 std::array<u32, 64> ext_reg; 583 std::vector<u16> instructions; 584 585 for (size_t iteration = 0; iteration < num_iterations; ++iteration) { 586 std::generate(regs.begin(), regs.end(), [] { return RandInt<u32>(0, ~u32(0)); }); 587 std::generate(ext_reg.begin(), ext_reg.end(), [] { return RandInt<u32>(0, ~u32(0)); }); 588 589 const u32 start_address = 100; 590 const u32 cpsr = (RandInt<u32>(0, 0xF) << 28) | 0x1F0; 591 const u32 fpcr = RandomFpcr(); 592 593 instructions.clear(); 594 for (size_t i = 0; i < num_instructions; ++i) { 595 const auto inst = GenRandomThumbInst(static_cast<u32>(start_address + 2 * instructions.size()), i == num_instructions - 1); 596 instructions.insert(instructions.end(), inst.begin(), inst.end()); 597 } 598 599 regs[15] = start_address; 600 RunTestInstance(jit, jit_env, regs, ext_reg, instructions, cpsr, fpcr, num_instructions); 601 } 602 } 603 604 void TestArm(size_t num_instructions, size_t num_iterations, bool noopt) { 605 ArmTestEnv jit_env{}; 606 Dynarmic::A32::Jit jit{GetA32UserConfig(jit_env, noopt)}; 607 608 std::array<u32, 16> regs; 609 std::array<u32, 64> ext_reg; 610 std::vector<u32> instructions; 611 612 for (size_t iteration = 0; iteration < num_iterations; ++iteration) { 613 std::generate(regs.begin(), regs.end(), [] { return RandInt<u32>(0, ~u32(0)); }); 614 std::generate(ext_reg.begin(), ext_reg.end(), [] { return RandInt<u32>(0, ~u32(0)); }); 615 616 const u32 start_address = 100; 617 const u32 cpsr = (RandInt<u32>(0, 0xF) << 28); 618 const u32 fpcr = RandomFpcr(); 619 620 instructions.clear(); 621 for (size_t i = 0; i < num_instructions; ++i) { 622 instructions.emplace_back(GenRandomArmInst(static_cast<u32>(start_address + 4 * instructions.size()), i == num_instructions - 1)); 623 } 624 625 regs[15] = start_address; 626 RunTestInstance(jit, jit_env, regs, ext_reg, instructions, cpsr, fpcr, num_instructions); 627 } 628 } 629 630 void 
TestA64(size_t num_instructions, size_t num_iterations, bool noopt) { 631 A64TestEnv jit_env{}; 632 Dynarmic::A64::Jit jit{GetA64UserConfig(jit_env, noopt)}; 633 634 std::array<u64, 31> regs; 635 std::array<std::array<u64, 2>, 32> vecs; 636 std::vector<u32> instructions; 637 638 for (size_t iteration = 0; iteration < num_iterations; ++iteration) { 639 std::generate(regs.begin(), regs.end(), [] { return RandInt<u64>(0, ~u64(0)); }); 640 std::generate(vecs.begin(), vecs.end(), RandomVector); 641 642 const u32 start_address = 100; 643 const u32 pstate = (RandInt<u32>(0, 0xF) << 28); 644 const u32 fpcr = RandomFpcr(); 645 const u64 initial_sp = RandInt<u64>(0x30'0000'0000, 0x40'0000'0000) * 4; 646 647 instructions.clear(); 648 for (size_t i = 0; i < num_instructions; ++i) { 649 instructions.emplace_back(GenRandomA64Inst(static_cast<u32>(start_address + 4 * instructions.size()), i == num_instructions - 1)); 650 } 651 652 RunTestInstance(jit, jit_env, regs, vecs, instructions, pstate, fpcr, initial_sp, start_address, num_instructions); 653 } 654 } 655 656 static std::optional<size_t> str2sz(char const* s) { 657 char* end = nullptr; 658 errno = 0; 659 660 const long l = std::strtol(s, &end, 10); 661 if (errno == ERANGE || l < 0) { 662 return std::nullopt; 663 } 664 if (*s == '\0' || *end != '\0') { 665 return std::nullopt; 666 } 667 return static_cast<size_t>(l); 668 } 669 670 int main(int argc, char* argv[]) { 671 if (argc < 5 || argc > 6) { 672 fmt::print("Usage: {} <thumb|arm|a64> <seed> <instruction_count> <iteration_count> [noopt]\n", argv[0]); 673 return 1; 674 } 675 676 const auto seed = str2sz(argv[2]); 677 const auto instruction_count = str2sz(argv[3]); 678 const auto iterator_count = str2sz(argv[4]); 679 const bool noopt = argc == 6 && (strcmp(argv[5], "noopt") == 0); 680 681 if (!seed || !instruction_count || !iterator_count) { 682 fmt::print("invalid numeric arguments\n"); 683 return 1; 684 } 685 686 
detail::g_rand_int_generator.seed(static_cast<std::mt19937::result_type>(*seed)); 687 688 if (strcmp(argv[1], "thumb") == 0) { 689 TestThumb(*instruction_count, *iterator_count, noopt); 690 } else if (strcmp(argv[1], "arm") == 0) { 691 TestArm(*instruction_count, *iterator_count, noopt); 692 } else if (strcmp(argv[1], "a64") == 0) { 693 TestA64(*instruction_count, *iterator_count, noopt); 694 } else { 695 fmt::print("unrecognized instruction class\n"); 696 return 1; 697 } 698 699 return 0; 700 }