// externals/biscuit/src/assembler_vector.cpp
   1  #include <biscuit/assert.hpp>
   2  #include <biscuit/assembler.hpp>
   3  
   4  namespace biscuit {
namespace {

// mop field (instruction bits 27:26) of vector loads/stores: selects the
// memory addressing mode.
enum class AddressingMode : uint32_t {
    // clang-format off
    UnitStride       = 0b00,
    IndexedUnordered = 0b01,
    Strided          = 0b10,
    IndexedOrdered   = 0b11,
    // clang-format on
};

// lumop field (instruction bits 24:20) sub-opcodes for unit-stride loads.
enum class UnitStrideLoadAddressingMode : uint32_t {
    // clang-format off
    Load               = 0b00000,
    MaskLoad           = 0b01011,
    LoadFaultOnlyFirst = 0b10000,
    // clang-format on
};

// sumop field (instruction bits 24:20) sub-opcodes for unit-stride stores.
enum class UnitStrideStoreAddressingMode : uint32_t {
    // clang-format off
    Store     = 0b00000,
    MaskStore = 0b01011,
    // clang-format on
};

// width field (instruction bits 14:12) of vector loads/stores: the element
// width. Note these are NOT the scalar FP width encodings; E16/E32/E64 use
// the 0b1xx space reserved for vector elements.
enum class WidthEncoding : uint32_t {
    // clang-format off
    E8  = 0b000,
    E16 = 0b101,
    E32 = 0b110,
    E64 = 0b111,
    // clang-format on
};

// Assembles a 32-bit vector load. Field layout, MSB to LSB:
//   nf[31:29] mew[28] mop[27:26] vm[25] lumop/rs2/vs2[24:20]
//   rs1[19:15] width[14:12] vd[11:7] opcode[6:0]
// `lumop` doubles as the rs2/vs2 field for strided/indexed forms (callers
// pass the register index through it). The final OR supplies the LOAD-FP
// major opcode (0b0000111).
void EmitVectorLoadImpl(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                        VecMask vm, uint32_t lumop, GPR rs, WidthEncoding width, Vec vd) noexcept {
    BISCUIT_ASSERT(nf <= 8);

    // Fit to encoding space. Allows for being more explicit about the size in calling functions
    // (e.g. using 8 for 8 elements instead of 7).
    if (nf != 0) {
        nf -= 1;
    }

    // clang-format off
    const auto value = (nf << 29) |
                       (static_cast<uint32_t>(mew) << 28) |
                       (static_cast<uint32_t>(mop) << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (lumop << 20) |
                       (rs.Index() << 15) |
                       (static_cast<uint32_t>(width) << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b111);
}

// Unit-stride load: the 24:20 field is a lumop sub-opcode.
void EmitVectorLoad(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                    VecMask vm, UnitStrideLoadAddressingMode lumop, GPR rs,
                    WidthEncoding width, Vec vd) noexcept {
    EmitVectorLoadImpl(buffer, nf, mew, mop, vm, static_cast<uint32_t>(lumop), rs, width, vd);
}

// Strided load: the 24:20 field holds the stride register rs2.
void EmitVectorLoad(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                    VecMask vm, GPR rs2, GPR rs1, WidthEncoding width, Vec vd) noexcept {
    EmitVectorLoadImpl(buffer, nf, mew, mop, vm, rs2.Index(), rs1, width, vd);
}

// Indexed load: the 24:20 field holds the index vector vs2.
void EmitVectorLoad(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                    VecMask vm, Vec vs2, GPR rs1, WidthEncoding width, Vec vd) noexcept {
    EmitVectorLoadImpl(buffer, nf, mew, mop, vm, vs2.Index(), rs1, width, vd);
}

// Whole-register load (vl<nf>re<ew>.v); lumop is fixed at 0b01000.
void EmitVectorLoadWholeReg(CodeBuffer& buffer, uint32_t nf, bool mew, GPR rs,
                            WidthEncoding width, Vec vd) noexcept {
    // RISC-V V extension spec (as of 1.0RC) only allows these nf values.
    BISCUIT_ASSERT(nf == 1 || nf == 2 || nf == 4 || nf == 8);

    EmitVectorLoadImpl(buffer, nf, mew, AddressingMode::UnitStride,
                       VecMask::No, 0b01000, rs, width, vd);
}

// Assembles a 32-bit vector store. Same field layout as EmitVectorLoadImpl,
// with vd[11:7] holding the store-data register vs3 and the final OR
// supplying the STORE-FP major opcode (0b0100111).
void EmitVectorStoreImpl(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                         VecMask vm, uint32_t sumop, GPR rs, WidthEncoding width, Vec vd) noexcept {
    BISCUIT_ASSERT(nf <= 8);

    // Fit to encoding space. Allows for being more explicit about the size in calling functions
    // (e.g. using 8 for 8 elements instead of 7).
    if (nf != 0) {
        nf -= 1;
    }

    // clang-format off
    const auto value = (nf << 29) |
                       (static_cast<uint32_t>(mew) << 28) |
                       (static_cast<uint32_t>(mop) << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (sumop << 20) |
                       (rs.Index() << 15) |
                       (static_cast<uint32_t>(width) << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b100111);
}

// Unit-stride store: the 24:20 field is a sumop sub-opcode.
void EmitVectorStore(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                     VecMask vm, UnitStrideStoreAddressingMode lumop, GPR rs,
                     WidthEncoding width, Vec vs) noexcept {
    EmitVectorStoreImpl(buffer, nf, mew, mop, vm, static_cast<uint32_t>(lumop), rs, width, vs);
}

// Strided store: the 24:20 field holds the stride register rs2.
void EmitVectorStore(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                     VecMask vm, GPR rs2, GPR rs1, WidthEncoding width, Vec vs3) noexcept {
    EmitVectorStoreImpl(buffer, nf, mew, mop, vm, rs2.Index(), rs1, width, vs3);
}

// Indexed store: the 24:20 field holds the index vector vs2.
void EmitVectorStore(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                     VecMask vm, Vec vs2, GPR rs1, WidthEncoding width, Vec vs3) noexcept {
    EmitVectorStoreImpl(buffer, nf, mew, mop, vm, vs2.Index(), rs1, width, vs3);
}

// Whole-register store (vs<nf>r.v); always mew=0, EEW=8, sumop=0b01000.
void EmitVectorStoreWholeReg(CodeBuffer& buffer, uint32_t nf, GPR rs, Vec vs) noexcept {
    // RISC-V V extension spec (as of 1.0RC) only allows these nf values.
    BISCUIT_ASSERT(nf == 1 || nf == 2 || nf == 4 || nf == 8);

    EmitVectorStoreImpl(buffer, nf, false, AddressingMode::UnitStride, VecMask::No,
                        0b01000, rs, WidthEncoding::E8, vs);
}

// OPIVI-form arithmetic: funct6[31:26] vm[25] vs2[24:20] imm5[19:15]
// funct3=0b011[14:12] vd[11:7], OR'd with the OP-V major opcode (0b1010111).
// The immediate is masked to its 5-bit field here.
void EmitVectorOPIVIImpl(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, uint32_t imm5, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       ((imm5 & 0b11111) << 15) |
                       (0b011U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

// Signed-immediate OPIVI entry point; range-checks before encoding.
void EmitVectorOPIVI(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, int32_t simm5, Vec vd) noexcept {
    BISCUIT_ASSERT(simm5 >= -16 && simm5 <= 15);
    EmitVectorOPIVIImpl(buffer, funct6, vm, vs2, static_cast<uint32_t>(simm5), vd);
}

// Unsigned-immediate OPIVI entry point (shifts, gathers, clips).
void EmitVectorOPIVUI(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, uint32_t uimm5, Vec vd) noexcept {
    BISCUIT_ASSERT(uimm5 <= 31);
    EmitVectorOPIVIImpl(buffer, funct6, vm, vs2, uimm5, vd);
}

// OPIVV form. No explicit funct3 term: OPIVV's funct3 is 0b000, so bits
// 14:12 are already zero.
void EmitVectorOPIVV(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (vs1.Index() << 15) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

// OPIVX form (funct3 = 0b100): scalar operand from GPR rs1.
void EmitVectorOPIVX(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, GPR rs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (rs1.Index() << 15) |
                       (0b100U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

// OPMVV form (funct3 = 0b010), parameterized on the major opcode `op` so it
// can be shared between OP-V and the 0b1110111 variant below.
void EmitVectorOPMVVImpl(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd,
                         uint32_t op) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (vs1.Index() << 15) |
                       (0b010U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | op);
}

// OPMVV with the standard OP-V major opcode.
void EmitVectorOPMVV(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
    EmitVectorOPMVVImpl(buffer, funct6, vm, vs2, vs1, vd, 0b1010111);
}

// OPMVV with major opcode 0b1110111 — presumably for OP-P (vector crypto /
// packed) encodings; confirm against the relevant extension spec.
void EmitVectorOPMVVP(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
    EmitVectorOPMVVImpl(buffer, funct6, vm, vs2, vs1, vd, 0b1110111);
}

// OPMVX form (funct3 = 0b110): scalar operand from GPR rs1.
void EmitVectorOPMVX(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, GPR rs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (rs1.Index() << 15) |
                       (0b110U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

// OPFVV form (funct3 = 0b001): floating-point vector-vector.
void EmitVectorOPFVV(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (vs1.Index() << 15) |
                       (0b001U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

// OPFVF form (funct3 = 0b101): scalar operand from FPR rs1.
void EmitVectorOPFVF(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, FPR rs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (rs1.Index() << 15) |
                       (0b101U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}
} // Anonymous namespace
 246  
 247  // Vector Integer Arithmetic Instructions
 248  
// vaadd.vv / vaadd.vx — averaging signed add (funct6 0b001001, OPMVV/OPMVX).
void Assembler::VAADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001001, mask, vs2, vs1, vd);
}

void Assembler::VAADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001001, mask, vs2, rs1, vd);
}

// vaaddu.vv / vaaddu.vx — unsigned variant (funct6 0b001000).
void Assembler::VAADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001000, mask, vs2, vs1, vd);
}

void Assembler::VAADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001000, mask, vs2, rs1, vd);
}

// vadc.vvm / vadc.vxm / vadc.vim — add with carry-in. vm is hard-wired to
// VecMask::Yes because vadc always reads the mask register as carry input.
void Assembler::VADC(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010000, VecMask::Yes, vs2, vs1, vd);
}

void Assembler::VADC(Vec vd, Vec vs2, GPR rs1) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010000, VecMask::Yes, vs2, rs1, vd);
}

void Assembler::VADC(Vec vd, Vec vs2, int32_t simm) noexcept {
    EmitVectorOPIVI(m_buffer, 0b010000, VecMask::Yes, vs2, simm, vd);
}

// vadd.{vv,vx,vi} — funct6 0b000000 on the OPIVV/OPIVX/OPIVI paths.
void Assembler::VADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000000, mask, vs2, vs1, vd);
}

void Assembler::VADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000000, mask, vs2, rs1, vd);
}

void Assembler::VADD(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b000000, mask, vs2, simm, vd);
}

// vand.{vv,vx,vi} — funct6 0b001001 (OPIV* space; distinct from VAADD,
// which uses the same funct6 in the OPMV* space).
void Assembler::VAND(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b001001, mask, vs2, vs1, vd);
}

void Assembler::VAND(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b001001, mask, vs2, rs1, vd);
}

void Assembler::VAND(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b001001, mask, vs2, simm, vd);
}

// vasub.vv / vasub.vx — averaging signed subtract (funct6 0b001011).
void Assembler::VASUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001011, mask, vs2, vs1, vd);
}

void Assembler::VASUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001011, mask, vs2, rs1, vd);
}

// vasubu.vv / vasubu.vx — unsigned variant (funct6 0b001010).
void Assembler::VASUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001010, mask, vs2, vs1, vd);
}

void Assembler::VASUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001010, mask, vs2, rs1, vd);
}
 316  
// vcompress.vm — pack active elements of vs2 (selected by mask vs1) into vd.
void Assembler::VCOMPRESS(Vec vd, Vec vs2, Vec vs1) noexcept {
    // Note: Destination register may not overlap any of the source registers,
    //       as per the RVV spec (as of 1.0RC; see section 16.5)
    EmitVectorOPMVV(m_buffer, 0b010111, VecMask::No, vs2, vs1, vd);
}

// vdiv.vv / vdiv.vx — signed divide (funct6 0b100001).
void Assembler::VDIV(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100001, mask, vs2, vs1, vd);
}

void Assembler::VDIV(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100001, mask, vs2, rs1, vd);
}

// vdivu.vv / vdivu.vx — unsigned divide (funct6 0b100000).
void Assembler::VDIVU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100000, mask, vs2, vs1, vd);
}

void Assembler::VDIVU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100000, mask, vs2, rs1, vd);
}

// vfirst.m — the vs1 field carries the fixed sub-opcode 0b10001 (spelled as
// v17 here); the GPR destination rides in the vd field.
void Assembler::VFIRST(GPR rd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010000, mask, vs, v17, Vec{rd.Index()});
}

// vid.v — vs2 must be v0 and vs1 carries sub-opcode 0b10001 (v17).
void Assembler::VID(Vec vd, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, v0, v17, vd);
}

// viota.m — vs1 carries sub-opcode 0b10000 (v16).
void Assembler::VIOTA(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v16, vd);
}

// vmacc.vv / vmacc.vx — vd += vs1 * vs2. Note the multiply-add family takes
// (vd, vs1, vs2) parameter order, mirroring the assembly operand order.
void Assembler::VMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b101101, mask, vs2, vs1, vd);
}

void Assembler::VMACC(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b101101, mask, vs2, rs1, vd);
}

// vmadc.{vv,vx,vi}[m] — carry-out of add; mask selects the with-carry form.
void Assembler::VMADC(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010001, mask, vs2, vs1, vd);
}

void Assembler::VMADC(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010001, mask, vs2, rs1, vd);
}

void Assembler::VMADC(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b010001, mask, vs2, simm, vd);
}

// vmadd.vv / vmadd.vx — vd = (vd * vs1) + vs2 (funct6 0b101001).
void Assembler::VMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b101001, mask, vs2, vs1, vd);
}

void Assembler::VMADD(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b101001, mask, vs2, rs1, vd);
}
 378  
// Mask-register logical instructions (vmand.mm etc.). These always operate
// unmasked, so vm is fixed to VecMask::No; only funct6 differs per op.
void Assembler::VMAND(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011001, VecMask::No, vs2, vs1, vd);
}

// vmandnot.mm (vmandn.mm in later spec drafts).
void Assembler::VMANDNOT(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011000, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMNAND(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011101, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMNOR(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011110, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMOR(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011010, VecMask::No, vs2, vs1, vd);
}

// vmornot.mm (vmorn.mm in later spec drafts).
void Assembler::VMORNOT(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011100, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMXNOR(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011111, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMXOR(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011011, VecMask::No, vs2, vs1, vd);
}
 410  
// vmax.vv / vmax.vx — signed maximum (funct6 0b000111).
void Assembler::VMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000111, mask, vs2, vs1, vd);
}

void Assembler::VMAX(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000111, mask, vs2, rs1, vd);
}

// vmaxu.vv / vmaxu.vx — unsigned maximum (funct6 0b000110).
void Assembler::VMAXU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000110, mask, vs2, vs1, vd);
}

void Assembler::VMAXU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000110, mask, vs2, rs1, vd);
}

// vmerge.{vvm,vxm,vim} — mask-driven select. vm is hard-wired to
// VecMask::Yes; with vm=1 this same funct6 would instead encode vmv (below).
void Assembler::VMERGE(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010111, VecMask::Yes, vs2, vs1, vd);
}

void Assembler::VMERGE(Vec vd, Vec vs2, GPR rs1) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010111, VecMask::Yes, vs2, rs1, vd);
}

void Assembler::VMERGE(Vec vd, Vec vs2, int32_t simm) noexcept {
    EmitVectorOPIVI(m_buffer, 0b010111, VecMask::Yes, vs2, simm, vd);
}

// vmin.vv / vmin.vx — signed minimum (funct6 0b000101).
void Assembler::VMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000101, mask, vs2, vs1, vd);
}

void Assembler::VMIN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000101, mask, vs2, rs1, vd);
}

// vminu.vv / vminu.vx — unsigned minimum (funct6 0b000100).
void Assembler::VMINU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000100, mask, vs2, vs1, vd);
}

void Assembler::VMINU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000100, mask, vs2, rs1, vd);
}

// vmsbc.{vv,vx}[m] — borrow-out of subtract (funct6 0b010011).
void Assembler::VMSBC(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010011, mask, vs2, vs1, vd);
}

void Assembler::VMSBC(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010011, mask, vs2, rs1, vd);
}

// vmsbf.m / vmsif.m / vmsof.m — set-before/including/only-first mask ops.
// The vs1 field carries fixed sub-opcodes 0b00001/0b00011/0b00010,
// spelled here as v1/v3/v2.
void Assembler::VMSBF(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v1, vd);
}

void Assembler::VMSIF(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v3, vd);
}

void Assembler::VMSOF(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v2, vd);
}
 474  
// Integer compare instructions (vms*). Each writes a mask to vd.
// Note: there is deliberately no vmsgt.vv / vmsgtu.vv — the spec expects
// operand-swapped vmslt/vmsltu instead, hence no Vec/Vec overload below.

// vmseq.{vv,vx,vi} — set where equal (funct6 0b011000).
void Assembler::VMSEQ(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011000, mask, vs2, vs1, vd);
}

void Assembler::VMSEQ(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011000, mask, vs2, rs1, vd);
}

void Assembler::VMSEQ(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011000, mask, vs2, simm, vd);
}

// vmsgt.{vx,vi} — signed greater-than (funct6 0b011111).
void Assembler::VMSGT(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011111, mask, vs2, rs1, vd);
}

void Assembler::VMSGT(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011111, mask, vs2, simm, vd);
}

// vmsgtu.{vx,vi} — unsigned greater-than (funct6 0b011110).
void Assembler::VMSGTU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011110, mask, vs2, rs1, vd);
}

void Assembler::VMSGTU(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011110, mask, vs2, simm, vd);
}

// vmsle.{vv,vx,vi} — signed less-than-or-equal (funct6 0b011101).
void Assembler::VMSLE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011101, mask, vs2, vs1, vd);
}

void Assembler::VMSLE(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011101, mask, vs2, rs1, vd);
}

void Assembler::VMSLE(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011101, mask, vs2, simm, vd);
}

// vmsleu.{vv,vx,vi} — unsigned less-than-or-equal (funct6 0b011100).
void Assembler::VMSLEU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011100, mask, vs2, vs1, vd);
}

void Assembler::VMSLEU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011100, mask, vs2, rs1, vd);
}

void Assembler::VMSLEU(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011100, mask, vs2, simm, vd);
}

// vmslt.{vv,vx} — signed less-than (funct6 0b011011; no .vi form in spec).
void Assembler::VMSLT(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011011, mask, vs2, vs1, vd);
}

void Assembler::VMSLT(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011011, mask, vs2, rs1, vd);
}

// vmsltu.{vv,vx} — unsigned less-than (funct6 0b011010).
void Assembler::VMSLTU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011010, mask, vs2, vs1, vd);
}

void Assembler::VMSLTU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011010, mask, vs2, rs1, vd);
}

// vmsne.{vv,vx,vi} — set where not equal (funct6 0b011001).
void Assembler::VMSNE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011001, mask, vs2, vs1, vd);
}

void Assembler::VMSNE(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011001, mask, vs2, rs1, vd);
}

void Assembler::VMSNE(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011001, mask, vs2, simm, vd);
}
 554  
// vmul.vv / vmul.vx — low half of signed multiply (funct6 0b100101).
void Assembler::VMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100101, mask, vs2, vs1, vd);
}

void Assembler::VMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100101, mask, vs2, rs1, vd);
}

// vmulh.vv / vmulh.vx — high half, signed x signed (funct6 0b100111).
void Assembler::VMULH(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100111, mask, vs2, vs1, vd);
}

void Assembler::VMULH(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100111, mask, vs2, rs1, vd);
}

// vmulhsu.vv / vmulhsu.vx — high half, signed x unsigned (funct6 0b100110).
void Assembler::VMULHSU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100110, mask, vs2, vs1, vd);
}

void Assembler::VMULHSU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100110, mask, vs2, rs1, vd);
}

// vmulhu.vv / vmulhu.vx — high half, unsigned x unsigned (funct6 0b100100).
void Assembler::VMULHU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100100, mask, vs2, vs1, vd);
}

void Assembler::VMULHU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100100, mask, vs2, rs1, vd);
}

// vmv.v.{v,x,i} — vector move. Same funct6 as vmerge but with vm=1
// (VecMask::No) and vs2 required to be v0.
void Assembler::VMV(Vec vd, Vec vs1) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010111, VecMask::No, v0, vs1, vd);
}

void Assembler::VMV(Vec vd, GPR rs1) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010111, VecMask::No, v0, rs1, vd);
}

void Assembler::VMV(Vec vd, int32_t simm) noexcept {
    EmitVectorOPIVI(m_buffer, 0b010111, VecMask::No, v0, simm, vd);
}

// vmv<nr>r.v — whole-register moves. The simm5 field encodes nr-1
// (0b00000/0b00001/0b00011/0b00111 for 1/2/4/8 registers).
void Assembler::VMV1R(Vec vd, Vec vs) noexcept {
    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00000, vd);
}

void Assembler::VMV2R(Vec vd, Vec vs) noexcept {
    // Registers must be aligned to the register group size, per the
    // RVV spec (as of 1.0RC)
    BISCUIT_ASSERT(vd.Index() % 2 == 0);
    BISCUIT_ASSERT(vs.Index() % 2 == 0);

    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00001, vd);
}

void Assembler::VMV4R(Vec vd, Vec vs) noexcept {
    // Registers must be aligned to the register group size, per the
    // RVV spec (as of 1.0RC)
    BISCUIT_ASSERT(vd.Index() % 4 == 0);
    BISCUIT_ASSERT(vs.Index() % 4 == 0);

    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00011, vd);
}

void Assembler::VMV8R(Vec vd, Vec vs) noexcept {
    // Registers must be aligned to the register group size, per the
    // RVV spec (as of 1.0RC)
    BISCUIT_ASSERT(vd.Index() % 8 == 0);
    BISCUIT_ASSERT(vs.Index() % 8 == 0);

    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00111, vd);
}

// vmv.s.x — write GPR to element 0 of vd (vs2 fixed to v0).
void Assembler::VMV_SX(Vec vd, GPR rs) noexcept {
    EmitVectorOPMVX(m_buffer, 0b010000, VecMask::No, v0, rs, vd);
}

// vmv.x.s — read element 0 into a GPR; the GPR rides in the vd field and
// the vs1 field is the fixed sub-opcode 0b00000 (v0).
void Assembler::VMV_XS(GPR rd, Vec vs) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010000, VecMask::No, vs, v0, Vec{rd.Index()});
}
 637  
// vnclip.{wv,wx,wi} — narrowing fixed-point clip, signed (funct6 0b101111).
void Assembler::VNCLIP(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101111, mask, vs2, vs1, vd);
}

void Assembler::VNCLIP(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101111, mask, vs2, rs1, vd);
}

void Assembler::VNCLIP(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101111, mask, vs2, uimm, vd);
}

// vnclipu.{wv,wx,wi} — unsigned variant (funct6 0b101110).
void Assembler::VNCLIPU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101110, mask, vs2, vs1, vd);
}

void Assembler::VNCLIPU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101110, mask, vs2, rs1, vd);
}

void Assembler::VNCLIPU(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101110, mask, vs2, uimm, vd);
}

// vnmsac.vv / vnmsac.vx — vd -= vs1 * vs2 (funct6 0b101111, OPMV* space;
// distinct from vnclip, which uses the same funct6 in the OPIV* space).
void Assembler::VNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b101111, mask, vs2, vs1, vd);
}

void Assembler::VNMSAC(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b101111, mask, vs2, rs1, vd);
}

// vnmsub.vv / vnmsub.vx — vd = -(vd * vs1) + vs2 (funct6 0b101011).
void Assembler::VNMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b101011, mask, vs2, vs1, vd);
}

void Assembler::VNMSUB(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b101011, mask, vs2, rs1, vd);
}

// vnsra.{wv,wx,wi} — narrowing arithmetic right shift (funct6 0b101101).
void Assembler::VNSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101101, mask, vs2, vs1, vd);
}

void Assembler::VNSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101101, mask, vs2, rs1, vd);
}

void Assembler::VNSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101101, mask, vs2, uimm, vd);
}

// vnsrl.{wv,wx,wi} — narrowing logical right shift (funct6 0b101100).
void Assembler::VNSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101100, mask, vs2, vs1, vd);
}

void Assembler::VNSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101100, mask, vs2, rs1, vd);
}

void Assembler::VNSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101100, mask, vs2, uimm, vd);
}

// vor.{vv,vx,vi} — bitwise OR (funct6 0b001010).
void Assembler::VOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b001010, mask, vs2, vs1, vd);
}

void Assembler::VOR(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b001010, mask, vs2, rs1, vd);
}

void Assembler::VOR(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b001010, mask, vs2, simm, vd);
}

// vpopc.m (vcpop.m in later spec drafts) — count set mask bits into a GPR.
// vs1 carries sub-opcode 0b10000 (v16); the GPR rides in the vd field.
void Assembler::VPOPC(GPR rd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010000, mask, vs, v16, Vec{rd.Index()});
}
 717  
// Single-width integer reductions (vred*.vs): vd[0] = op(vs1[0], vs2[*]).
// Only funct6 differs between them.
void Assembler::VREDAND(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000001, mask, vs2, vs1, vd);
}

void Assembler::VREDMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000111, mask, vs2, vs1, vd);
}

void Assembler::VREDMAXU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000110, mask, vs2, vs1, vd);
}

void Assembler::VREDMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000101, mask, vs2, vs1, vd);
}

void Assembler::VREDMINU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000100, mask, vs2, vs1, vd);
}

void Assembler::VREDOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000010, mask, vs2, vs1, vd);
}

void Assembler::VREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000000, mask, vs2, vs1, vd);
}

void Assembler::VREDXOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000011, mask, vs2, vs1, vd);
}

// vrem.vv / vrem.vx — signed remainder (funct6 0b100011).
void Assembler::VREM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100011, mask, vs2, vs1, vd);
}

void Assembler::VREM(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100011, mask, vs2, rs1, vd);
}

// vremu.vv / vremu.vx — unsigned remainder (funct6 0b100010).
void Assembler::VREMU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100010, mask, vs2, vs1, vd);
}

void Assembler::VREMU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100010, mask, vs2, rs1, vd);
}
 765  
// vrgather.{vv,vx,vi} — register gather: vd[i] = vs2[index] (funct6 0b001100).
void Assembler::VRGATHER(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b001100, mask, vs2, vs1, vd);
}

void Assembler::VRGATHER(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b001100, mask, vs2, rs1, vd);
}

void Assembler::VRGATHER(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b001100, mask, vs2, uimm, vd);
}

// vrgatherei16.vv — gather with 16-bit indices (funct6 0b001110).
void Assembler::VRGATHEREI16(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b001110, mask, vs2, vs1, vd);
}

// vrsub.{vx,vi} — reverse subtract: vd = rs1/imm - vs2 (funct6 0b000011).
// No .vv form exists; plain vsub covers that case.
void Assembler::VRSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000011, mask, vs2, rs1, vd);
}

void Assembler::VRSUB(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b000011, mask, vs2, simm, vd);
}

// vsadd.{vv,vx,vi} — saturating signed add (funct6 0b100001).
void Assembler::VSADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b100001, mask, vs2, vs1, vd);
}

void Assembler::VSADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b100001, mask, vs2, rs1, vd);
}

void Assembler::VSADD(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b100001, mask, vs2, simm, vd);
}

// vsaddu.{vv,vx,vi} — saturating unsigned add (funct6 0b100000). The
// immediate form still takes a sign-extended simm5, matching the spec.
void Assembler::VSADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b100000, mask, vs2, vs1, vd);
}

void Assembler::VSADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b100000, mask, vs2, rs1, vd);
}

void Assembler::VSADDU(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b100000, mask, vs2, simm, vd);
}

// vsbc.{vvm,vxm} — subtract with borrow-in; vm is hard-wired to
// VecMask::Yes because vsbc always reads the mask register as borrow input.
void Assembler::VSBC(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010010, VecMask::Yes, vs2, vs1, vd);
}

void Assembler::VSBC(Vec vd, Vec vs2, GPR rs1) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010010, VecMask::Yes, vs2, rs1, vd);
}

// vsext.vf2 — sign-extend from SEW/2; vs1 carries sub-opcode 0b00111 (v7).
void Assembler::VSEXTVF2(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v7, vd);
}
 825  
 826  void Assembler::VSEXTVF4(Vec vd, Vec vs, VecMask mask) noexcept {
 827      EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v5, vd);
 828  }
 829  
 830  void Assembler::VSEXTVF8(Vec vd, Vec vs, VecMask mask) noexcept {
 831      EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v3, vd);
 832  }
 833  
 834  void Assembler::VSLIDE1DOWN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 835      EmitVectorOPMVX(m_buffer, 0b001111, mask, vs2, rs1, vd);
 836  }
 837  
 838  void Assembler::VSLIDEDOWN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 839      EmitVectorOPIVX(m_buffer, 0b001111, mask, vs2, rs1, vd);
 840  }
 841  
 842  void Assembler::VSLIDEDOWN(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
 843      EmitVectorOPIVUI(m_buffer, 0b001111, mask, vs2, uimm, vd);
 844  }
 845  
 846  void Assembler::VSLIDE1UP(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 847      EmitVectorOPMVX(m_buffer, 0b001110, mask, vs2, rs1, vd);
 848  }
 849  
 850  void Assembler::VSLIDEUP(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 851      EmitVectorOPIVX(m_buffer, 0b001110, mask, vs2, rs1, vd);
 852  }
 853  
 854  void Assembler::VSLIDEUP(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
 855      EmitVectorOPIVUI(m_buffer, 0b001110, mask, vs2, uimm, vd);
 856  }
 857  
 858  void Assembler::VSLL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 859      EmitVectorOPIVV(m_buffer, 0b100101, mask, vs2, vs1, vd);
 860  }
 861  
 862  void Assembler::VSLL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 863      EmitVectorOPIVX(m_buffer, 0b100101, mask, vs2, rs1, vd);
 864  }
 865  
 866  void Assembler::VSLL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
 867      EmitVectorOPIVUI(m_buffer, 0b100101, mask, vs2, uimm, vd);
 868  }
 869  
 870  void Assembler::VSMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 871      EmitVectorOPIVV(m_buffer, 0b100111, mask, vs2, vs1, vd);
 872  }
 873  
 874  void Assembler::VSMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 875      EmitVectorOPIVX(m_buffer, 0b100111, mask, vs2, rs1, vd);
 876  }
 877  
 878  void Assembler::VSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 879      EmitVectorOPIVV(m_buffer, 0b101001, mask, vs2, vs1, vd);
 880  }
 881  
 882  void Assembler::VSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 883      EmitVectorOPIVX(m_buffer, 0b101001, mask, vs2, rs1, vd);
 884  }
 885  
 886  void Assembler::VSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
 887      EmitVectorOPIVUI(m_buffer, 0b101001, mask, vs2, uimm, vd);
 888  }
 889  
 890  void Assembler::VSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 891      EmitVectorOPIVV(m_buffer, 0b101000, mask, vs2, vs1, vd);
 892  }
 893  
 894  void Assembler::VSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 895      EmitVectorOPIVX(m_buffer, 0b101000, mask, vs2, rs1, vd);
 896  }
 897  
 898  void Assembler::VSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
 899      EmitVectorOPIVUI(m_buffer, 0b101000, mask, vs2, uimm, vd);
 900  }
 901  
 902  void Assembler::VSSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 903      EmitVectorOPIVV(m_buffer, 0b101011, mask, vs2, vs1, vd);
 904  }
 905  
 906  void Assembler::VSSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 907      EmitVectorOPIVX(m_buffer, 0b101011, mask, vs2, rs1, vd);
 908  }
 909  
 910  void Assembler::VSSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
 911      EmitVectorOPIVUI(m_buffer, 0b101011, mask, vs2, uimm, vd);
 912  }
 913  
 914  void Assembler::VSSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 915      EmitVectorOPIVV(m_buffer, 0b101010, mask, vs2, vs1, vd);
 916  }
 917  
 918  void Assembler::VSSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 919      EmitVectorOPIVX(m_buffer, 0b101010, mask, vs2, rs1, vd);
 920  }
 921  
 922  void Assembler::VSSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
 923      EmitVectorOPIVUI(m_buffer, 0b101010, mask, vs2, uimm, vd);
 924  }
 925  
 926  void Assembler::VSSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 927      EmitVectorOPIVV(m_buffer, 0b100011, mask, vs2, vs1, vd);
 928  }
 929  
 930  void Assembler::VSSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 931      EmitVectorOPIVX(m_buffer, 0b100011, mask, vs2, rs1, vd);
 932  }
 933  
 934  void Assembler::VSSUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 935      EmitVectorOPIVV(m_buffer, 0b100010, mask, vs2, vs1, vd);
 936  }
 937  
 938  void Assembler::VSSUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 939      EmitVectorOPIVX(m_buffer, 0b100010, mask, vs2, rs1, vd);
 940  }
 941  
 942  void Assembler::VSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 943      EmitVectorOPIVV(m_buffer, 0b000010, mask, vs2, vs1, vd);
 944  }
 945  
 946  void Assembler::VSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 947      EmitVectorOPIVX(m_buffer, 0b000010, mask, vs2, rs1, vd);
 948  }
 949  
 950  void Assembler::VWADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 951      EmitVectorOPMVV(m_buffer, 0b110001, mask, vs2, vs1, vd);
 952  }
 953  
 954  void Assembler::VWADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 955      EmitVectorOPMVX(m_buffer, 0b110001, mask, vs2, rs1, vd);
 956  }
 957  
 958  void Assembler::VWADDW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 959      EmitVectorOPMVV(m_buffer, 0b110101, mask, vs2, vs1, vd);
 960  }
 961  
 962  void Assembler::VWADDW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 963      EmitVectorOPMVX(m_buffer, 0b110101, mask, vs2, rs1, vd);
 964  }
 965  
 966  void Assembler::VWADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 967      EmitVectorOPMVV(m_buffer, 0b110000, mask, vs2, vs1, vd);
 968  }
 969  
 970  void Assembler::VWADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 971      EmitVectorOPMVX(m_buffer, 0b110000, mask, vs2, rs1, vd);
 972  }
 973  
 974  void Assembler::VWADDUW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
 975      EmitVectorOPMVV(m_buffer, 0b110100, mask, vs2, vs1, vd);
 976  }
 977  
 978  void Assembler::VWADDUW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
 979      EmitVectorOPMVX(m_buffer, 0b110100, mask, vs2, rs1, vd);
 980  }
 981  
 982  void Assembler::VWMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
 983      EmitVectorOPMVV(m_buffer, 0b111101, mask, vs2, vs1, vd);
 984  }
 985  
 986  void Assembler::VWMACC(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
 987      EmitVectorOPMVX(m_buffer, 0b111101, mask, vs2, rs1, vd);
 988  }
 989  
 990  void Assembler::VWMACCSU(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
 991      EmitVectorOPMVV(m_buffer, 0b111111, mask, vs2, vs1, vd);
 992  }
 993  
 994  void Assembler::VWMACCSU(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
 995      EmitVectorOPMVX(m_buffer, 0b111111, mask, vs2, rs1, vd);
 996  }
 997  
 998  void Assembler::VWMACCU(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
 999      EmitVectorOPMVV(m_buffer, 0b111100, mask, vs2, vs1, vd);
1000  }
1001  
1002  void Assembler::VWMACCU(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
1003      EmitVectorOPMVX(m_buffer, 0b111100, mask, vs2, rs1, vd);
1004  }
1005  
1006  void Assembler::VWMACCUS(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
1007      EmitVectorOPMVX(m_buffer, 0b111110, mask, vs2, rs1, vd);
1008  }
1009  
1010  void Assembler::VWMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1011      EmitVectorOPMVV(m_buffer, 0b111011, mask, vs2, vs1, vd);
1012  }
1013  
1014  void Assembler::VWMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
1015      EmitVectorOPMVX(m_buffer, 0b111011, mask, vs2, rs1, vd);
1016  }
1017  
1018  void Assembler::VWMULSU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1019      EmitVectorOPMVV(m_buffer, 0b111010, mask, vs2, vs1, vd);
1020  }
1021  
1022  void Assembler::VWMULSU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
1023      EmitVectorOPMVX(m_buffer, 0b111010, mask, vs2, rs1, vd);
1024  }
1025  
1026  void Assembler::VWMULU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1027      EmitVectorOPMVV(m_buffer, 0b111000, mask, vs2, vs1, vd);
1028  }
1029  
1030  void Assembler::VWMULU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
1031      EmitVectorOPMVX(m_buffer, 0b111000, mask, vs2, rs1, vd);
1032  }
1033  
1034  void Assembler::VWREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1035      EmitVectorOPIVV(m_buffer, 0b110001, mask, vs2, vs1, vd);
1036  }
1037  
1038  void Assembler::VWREDSUMU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1039      EmitVectorOPIVV(m_buffer, 0b110000, mask, vs2, vs1, vd);
1040  }
1041  
1042  void Assembler::VWSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1043      EmitVectorOPMVV(m_buffer, 0b110011, mask, vs2, vs1, vd);
1044  }
1045  
1046  void Assembler::VWSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
1047      EmitVectorOPMVX(m_buffer, 0b110011, mask, vs2, rs1, vd);
1048  }
1049  
1050  void Assembler::VWSUBW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1051      EmitVectorOPMVV(m_buffer, 0b110111, mask, vs2, vs1, vd);
1052  }
1053  
1054  void Assembler::VWSUBW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
1055      EmitVectorOPMVX(m_buffer, 0b110111, mask, vs2, rs1, vd);
1056  }
1057  
1058  void Assembler::VWSUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1059      EmitVectorOPMVV(m_buffer, 0b110010, mask, vs2, vs1, vd);
1060  }
1061  
1062  void Assembler::VWSUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
1063      EmitVectorOPMVX(m_buffer, 0b110010, mask, vs2, rs1, vd);
1064  }
1065  
1066  void Assembler::VWSUBUW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1067      EmitVectorOPMVV(m_buffer, 0b110110, mask, vs2, vs1, vd);
1068  }
1069  
1070  void Assembler::VWSUBUW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
1071      EmitVectorOPMVX(m_buffer, 0b110110, mask, vs2, rs1, vd);
1072  }
1073  
1074  void Assembler::VXOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1075      EmitVectorOPIVV(m_buffer, 0b001011, mask, vs2, vs1, vd);
1076  }
1077  
1078  void Assembler::VXOR(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
1079      EmitVectorOPIVX(m_buffer, 0b001011, mask, vs2, rs1, vd);
1080  }
1081  
1082  void Assembler::VXOR(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
1083      EmitVectorOPIVI(m_buffer, 0b001011, mask, vs2, simm, vd);
1084  }
1085  
1086  void Assembler::VZEXTVF2(Vec vd, Vec vs, VecMask mask) noexcept {
1087      EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v6, vd);
1088  }
1089  
1090  void Assembler::VZEXTVF4(Vec vd, Vec vs, VecMask mask) noexcept {
1091      EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v4, vd);
1092  }
1093  
1094  void Assembler::VZEXTVF8(Vec vd, Vec vs, VecMask mask) noexcept {
1095      EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v2, vd);
1096  }
1097  
1098  // Vector Floating-Point Instructions
1099  
1100  void Assembler::VFADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1101      EmitVectorOPFVV(m_buffer, 0b000000, mask, vs2, vs1, vd);
1102  }
1103  
1104  void Assembler::VFADD(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1105      EmitVectorOPFVF(m_buffer, 0b000000, mask, vs2, rs1, vd);
1106  }
1107  
1108  void Assembler::VFCLASS(Vec vd, Vec vs, VecMask mask) noexcept {
1109      EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v16, vd);
1110  }
1111  
1112  void Assembler::VFCVT_F_X(Vec vd, Vec vs, VecMask mask) noexcept {
1113      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v3, vd);
1114  }
1115  
1116  void Assembler::VFCVT_F_XU(Vec vd, Vec vs, VecMask mask) noexcept {
1117      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v2, vd);
1118  }
1119  
1120  void Assembler::VFCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
1121      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v7, vd);
1122  }
1123  
1124  void Assembler::VFCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
1125      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v6, vd);
1126  }
1127  
1128  void Assembler::VFCVT_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
1129      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v1, vd);
1130  }
1131  
1132  void Assembler::VFCVT_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
1133      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v0, vd);
1134  }
1135  
1136  void Assembler::VFNCVT_F_F(Vec vd, Vec vs, VecMask mask) noexcept {
1137      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v20, vd);
1138  }
1139  
1140  void Assembler::VFNCVT_F_X(Vec vd, Vec vs, VecMask mask) noexcept {
1141      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v19, vd);
1142  }
1143  
1144  void Assembler::VFNCVT_F_XU(Vec vd, Vec vs, VecMask mask) noexcept {
1145      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v18, vd);
1146  }
1147  
1148  void Assembler::VFNCVT_ROD_F_F(Vec vd, Vec vs, VecMask mask) noexcept {
1149      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v21, vd);
1150  }
1151  
1152  void Assembler::VFNCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
1153      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v23, vd);
1154  }
1155  
1156  void Assembler::VFNCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
1157      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v22, vd);
1158  }
1159  
1160  void Assembler::VFNCVT_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
1161      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v17, vd);
1162  }
1163  
1164  void Assembler::VFNCVT_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
1165      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v16, vd);
1166  }
1167  
1168  void Assembler::VFWCVT_F_F(Vec vd, Vec vs, VecMask mask) noexcept {
1169      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v12, vd);
1170  }
1171  
1172  void Assembler::VFWCVT_F_X(Vec vd, Vec vs, VecMask mask) noexcept {
1173      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v11, vd);
1174  }
1175  
1176  void Assembler::VFWCVT_F_XU(Vec vd, Vec vs, VecMask mask) noexcept {
1177      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v10, vd);
1178  }
1179  
1180  void Assembler::VFWCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
1181      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v15, vd);
1182  }
1183  
1184  void Assembler::VFWCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
1185      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v14, vd);
1186  }
1187  
1188  void Assembler::VFWCVT_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
1189      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v9, vd);
1190  }
1191  
1192  void Assembler::VFWCVT_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
1193      EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v8, vd);
1194  }
1195  
1196  void Assembler::VFDIV(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1197      EmitVectorOPFVV(m_buffer, 0b100000, mask, vs2, vs1, vd);
1198  }
1199  
1200  void Assembler::VFDIV(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1201      EmitVectorOPFVF(m_buffer, 0b100000, mask, vs2, rs1, vd);
1202  }
1203  
1204  void Assembler::VFRDIV(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1205      EmitVectorOPFVF(m_buffer, 0b100001, mask, vs2, rs1, vd);
1206  }
1207  
1208  void Assembler::VFREDMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1209      EmitVectorOPFVV(m_buffer, 0b000111, mask, vs2, vs1, vd);
1210  }
1211  
1212  void Assembler::VFREDMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1213      EmitVectorOPFVV(m_buffer, 0b000101, mask, vs2, vs1, vd);
1214  }
1215  
1216  void Assembler::VFREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1217      EmitVectorOPFVV(m_buffer, 0b000001, mask, vs2, vs1, vd);
1218  }
1219  
1220  void Assembler::VFREDOSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1221      EmitVectorOPFVV(m_buffer, 0b000011, mask, vs2, vs1, vd);
1222  }
1223  
1224  void Assembler::VFMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1225      EmitVectorOPFVV(m_buffer, 0b101100, mask, vs2, vs1, vd);
1226  }
1227  
1228  void Assembler::VFMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1229      EmitVectorOPFVF(m_buffer, 0b101100, mask, vs2, rs1, vd);
1230  }
1231  
1232  void Assembler::VFMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1233      EmitVectorOPFVV(m_buffer, 0b101000, mask, vs2, vs1, vd);
1234  }
1235  
1236  void Assembler::VFMADD(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1237      EmitVectorOPFVF(m_buffer, 0b101000, mask, vs2, rs1, vd);
1238  }
1239  
1240  void Assembler::VFMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1241      EmitVectorOPFVV(m_buffer, 0b000110, mask, vs2, vs1, vd);
1242  }
1243  
1244  void Assembler::VFMAX(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1245      EmitVectorOPFVF(m_buffer, 0b000110, mask, vs2, rs1, vd);
1246  }
1247  
1248  void Assembler::VFMERGE(Vec vd, Vec vs2, FPR rs1) noexcept {
1249      EmitVectorOPFVF(m_buffer, 0b010111, VecMask::Yes, vs2, rs1, vd);
1250  }
1251  
1252  void Assembler::VFMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1253      EmitVectorOPFVV(m_buffer, 0b000100, mask, vs2, vs1, vd);
1254  }
1255  
1256  void Assembler::VFMIN(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1257      EmitVectorOPFVF(m_buffer, 0b000100, mask, vs2, rs1, vd);
1258  }
1259  
1260  void Assembler::VFMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1261      EmitVectorOPFVV(m_buffer, 0b101110, mask, vs2, vs1, vd);
1262  }
1263  
1264  void Assembler::VFMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1265      EmitVectorOPFVF(m_buffer, 0b101110, mask, vs2, rs1, vd);
1266  }
1267  
1268  void Assembler::VFMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1269      EmitVectorOPFVV(m_buffer, 0b101010, mask, vs2, vs1, vd);
1270  }
1271  
1272  void Assembler::VFMSUB(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1273      EmitVectorOPFVF(m_buffer, 0b101010, mask, vs2, rs1, vd);
1274  }
1275  
1276  void Assembler::VFMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1277      EmitVectorOPFVV(m_buffer, 0b100100, mask, vs2, vs1, vd);
1278  }
1279  
1280  void Assembler::VFMUL(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1281      EmitVectorOPFVF(m_buffer, 0b100100, mask, vs2, rs1, vd);
1282  }
1283  
1284  void Assembler::VFMV(Vec vd, FPR rs) noexcept {
1285      EmitVectorOPFVF(m_buffer, 0b010111, VecMask::No, v0, rs, vd);
1286  }
1287  
1288  void Assembler::VFMV_FS(FPR rd, Vec vs) noexcept {
1289      EmitVectorOPFVV(m_buffer, 0b010000, VecMask::No, vs, v0, Vec{rd.Index()});
1290  }
1291  
1292  void Assembler::VFMV_SF(Vec vd, FPR rs) noexcept {
1293      EmitVectorOPFVF(m_buffer, 0b010000, VecMask::No, v0, rs, vd);
1294  }
1295  
1296  void Assembler::VFNMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1297      EmitVectorOPFVV(m_buffer, 0b101101, mask, vs2, vs1, vd);
1298  }
1299  
1300  void Assembler::VFNMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1301      EmitVectorOPFVF(m_buffer, 0b101101, mask, vs2, rs1, vd);
1302  }
1303  
1304  void Assembler::VFNMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1305      EmitVectorOPFVV(m_buffer, 0b101001, mask, vs2, vs1, vd);
1306  }
1307  
1308  void Assembler::VFNMADD(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1309      EmitVectorOPFVF(m_buffer, 0b101001, mask, vs2, rs1, vd);
1310  }
1311  
1312  void Assembler::VFNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1313      EmitVectorOPFVV(m_buffer, 0b101111, mask, vs2, vs1, vd);
1314  }
1315  
1316  void Assembler::VFNMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1317      EmitVectorOPFVF(m_buffer, 0b101111, mask, vs2, rs1, vd);
1318  }
1319  
1320  void Assembler::VFNMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1321      EmitVectorOPFVV(m_buffer, 0b101011, mask, vs2, vs1, vd);
1322  }
1323  
1324  void Assembler::VFNMSUB(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1325      EmitVectorOPFVF(m_buffer, 0b101011, mask, vs2, rs1, vd);
1326  }
1327  
1328  void Assembler::VFREC7(Vec vd, Vec vs, VecMask mask) noexcept {
1329      EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v5, vd);
1330  }
1331  
1332  void Assembler::VFSGNJ(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1333      EmitVectorOPFVV(m_buffer, 0b001000, mask, vs2, vs1, vd);
1334  }
1335  
1336  void Assembler::VFSGNJ(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1337      EmitVectorOPFVF(m_buffer, 0b001000, mask, vs2, rs1, vd);
1338  }
1339  
1340  void Assembler::VFSGNJN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1341      EmitVectorOPFVV(m_buffer, 0b001001, mask, vs2, vs1, vd);
1342  }
1343  
1344  void Assembler::VFSGNJN(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1345      EmitVectorOPFVF(m_buffer, 0b001001, mask, vs2, rs1, vd);
1346  }
1347  
1348  void Assembler::VFSGNJX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1349      EmitVectorOPFVV(m_buffer, 0b001010, mask, vs2, vs1, vd);
1350  }
1351  
1352  void Assembler::VFSGNJX(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1353      EmitVectorOPFVF(m_buffer, 0b001010, mask, vs2, rs1, vd);
1354  }
1355  
1356  void Assembler::VFSQRT(Vec vd, Vec vs, VecMask mask) noexcept {
1357      EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v0, vd);
1358  }
1359  
1360  void Assembler::VFRSQRT7(Vec vd, Vec vs, VecMask mask) noexcept {
1361      EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v4, vd);
1362  }
1363  
1364  void Assembler::VFSLIDE1DOWN(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1365      EmitVectorOPFVF(m_buffer, 0b001111, mask, vs2, rs1, vd);
1366  }
1367  
1368  void Assembler::VFSLIDE1UP(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1369      EmitVectorOPFVF(m_buffer, 0b001110, mask, vs2, rs1, vd);
1370  }
1371  
1372  void Assembler::VFSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1373      EmitVectorOPFVV(m_buffer, 0b000010, mask, vs2, vs1, vd);
1374  }
1375  
1376  void Assembler::VFSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1377      EmitVectorOPFVF(m_buffer, 0b000010, mask, vs2, rs1, vd);
1378  }
1379  
1380  void Assembler::VFRSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1381      EmitVectorOPFVF(m_buffer, 0b100111, mask, vs2, rs1, vd);
1382  }
1383  
1384  void Assembler::VFWADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1385      EmitVectorOPFVV(m_buffer, 0b110000, mask, vs2, vs1, vd);
1386  }
1387  
1388  void Assembler::VFWADD(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1389      EmitVectorOPFVF(m_buffer, 0b110000, mask, vs2, rs1, vd);
1390  }
1391  
1392  void Assembler::VFWADDW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1393      EmitVectorOPFVV(m_buffer, 0b110100, mask, vs2, vs1, vd);
1394  }
1395  
1396  void Assembler::VFWADDW(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1397      EmitVectorOPFVF(m_buffer, 0b110100, mask, vs2, rs1, vd);
1398  }
1399  
1400  void Assembler::VFWMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1401      EmitVectorOPFVV(m_buffer, 0b111100, mask, vs2, vs1, vd);
1402  }
1403  
1404  void Assembler::VFWMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1405      EmitVectorOPFVF(m_buffer, 0b111100, mask, vs2, rs1, vd);
1406  }
1407  
1408  void Assembler::VFWMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1409      EmitVectorOPFVV(m_buffer, 0b111000, mask, vs2, vs1, vd);
1410  }
1411  
1412  void Assembler::VFWMUL(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1413      EmitVectorOPFVF(m_buffer, 0b111000, mask, vs2, rs1, vd);
1414  }
1415  
1416  void Assembler::VFWNMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1417      EmitVectorOPFVV(m_buffer, 0b111101, mask, vs2, vs1, vd);
1418  }
1419  
1420  void Assembler::VFWNMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1421      EmitVectorOPFVF(m_buffer, 0b111101, mask, vs2, rs1, vd);
1422  }
1423  
1424  void Assembler::VFWNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1425      EmitVectorOPFVV(m_buffer, 0b111111, mask, vs2, vs1, vd);
1426  }
1427  
1428  void Assembler::VFWNMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1429      EmitVectorOPFVF(m_buffer, 0b111111, mask, vs2, rs1, vd);
1430  }
1431  
1432  void Assembler::VFWREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1433      EmitVectorOPFVV(m_buffer, 0b110001, mask, vs2, vs1, vd);
1434  }
1435  
1436  void Assembler::VFWREDOSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1437      EmitVectorOPFVV(m_buffer, 0b110011, mask, vs2, vs1, vd);
1438  }
1439  
1440  void Assembler::VFWMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
1441      EmitVectorOPFVV(m_buffer, 0b111110, mask, vs2, vs1, vd);
1442  }
1443  
1444  void Assembler::VFWMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
1445      EmitVectorOPFVF(m_buffer, 0b111110, mask, vs2, rs1, vd);
1446  }
1447  
1448  void Assembler::VFWSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1449      EmitVectorOPFVV(m_buffer, 0b110010, mask, vs2, vs1, vd);
1450  }
1451  
1452  void Assembler::VFWSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1453      EmitVectorOPFVF(m_buffer, 0b110010, mask, vs2, rs1, vd);
1454  }
1455  
1456  void Assembler::VFWSUBW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1457      EmitVectorOPFVV(m_buffer, 0b110110, mask, vs2, vs1, vd);
1458  }
1459  
1460  void Assembler::VFWSUBW(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1461      EmitVectorOPFVF(m_buffer, 0b110110, mask, vs2, rs1, vd);
1462  }
1463  
1464  void Assembler::VMFEQ(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1465      EmitVectorOPFVV(m_buffer, 0b011000, mask, vs2, vs1, vd);
1466  }
1467  
1468  void Assembler::VMFEQ(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1469      EmitVectorOPFVF(m_buffer, 0b011000, mask, vs2, rs1, vd);
1470  }
1471  
1472  void Assembler::VMFGE(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1473      EmitVectorOPFVF(m_buffer, 0b011111, mask, vs2, rs1, vd);
1474  }
1475  
1476  void Assembler::VMFGT(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1477      EmitVectorOPFVF(m_buffer, 0b011101, mask, vs2, rs1, vd);
1478  }
1479  
1480  void Assembler::VMFLE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1481      EmitVectorOPFVV(m_buffer, 0b011001, mask, vs2, vs1, vd);
1482  }
1483  
1484  void Assembler::VMFLE(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1485      EmitVectorOPFVF(m_buffer, 0b011001, mask, vs2, rs1, vd);
1486  }
1487  
1488  void Assembler::VMFLT(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1489      EmitVectorOPFVV(m_buffer, 0b011011, mask, vs2, vs1, vd);
1490  }
1491  
1492  void Assembler::VMFLT(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1493      EmitVectorOPFVF(m_buffer, 0b011011, mask, vs2, rs1, vd);
1494  }
1495  
1496  void Assembler::VMFNE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
1497      EmitVectorOPFVV(m_buffer, 0b011100, mask, vs2, vs1, vd);
1498  }
1499  
1500  void Assembler::VMFNE(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
1501      EmitVectorOPFVF(m_buffer, 0b011100, mask, vs2, rs1, vd);
1502  }
1503  
1504  // Vector Load/Store Instructions
1505  
// vle8.v: unit-stride load of 8-bit elements. Implemented as the
// single-segment (nf = 1) case of the segmented unit-stride load.
void Assembler::VLE8(Vec vd, GPR rs, VecMask mask) noexcept {
    VLSEGE8(1, vd, rs, mask);
}

// vle16.v: unit-stride load of 16-bit elements (single-segment case).
void Assembler::VLE16(Vec vd, GPR rs, VecMask mask) noexcept {
    VLSEGE16(1, vd, rs, mask);
}

// vle32.v: unit-stride load of 32-bit elements (single-segment case).
void Assembler::VLE32(Vec vd, GPR rs, VecMask mask) noexcept {
    VLSEGE32(1, vd, rs, mask);
}

// vle64.v: unit-stride load of 64-bit elements (single-segment case).
void Assembler::VLE64(Vec vd, GPR rs, VecMask mask) noexcept {
    VLSEGE64(1, vd, rs, mask);
}
1521  
1522  void Assembler::VLM(Vec vd, GPR rs) noexcept {
1523      EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, VecMask::No,
1524                     UnitStrideLoadAddressingMode::MaskLoad, rs, WidthEncoding::E8, vd);
1525  }
1526  
// vlse8.v: strided load of 8-bit elements (rs2 holds the byte stride).
// Implemented as the single-segment (nf = 1) case of the segmented load.
void Assembler::VLSE8(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
    VLSSEGE8(1, vd, rs1, rs2, mask);
}

// vlse16.v: strided load of 16-bit elements (single-segment case).
void Assembler::VLSE16(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
    VLSSEGE16(1, vd, rs1, rs2, mask);
}

// vlse32.v: strided load of 32-bit elements (single-segment case).
void Assembler::VLSE32(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
    VLSSEGE32(1, vd, rs1, rs2, mask);
}

// vlse64.v: strided load of 64-bit elements (single-segment case).
void Assembler::VLSE64(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
    VLSSEGE64(1, vd, rs1, rs2, mask);
}

// vloxei8.v: ordered indexed load, 8-bit indices in vs (single-segment case).
void Assembler::VLOXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VLOXSEGEI8(1, vd, rs, vs, mask);
}

// vloxei16.v: ordered indexed load, 16-bit indices (single-segment case).
void Assembler::VLOXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VLOXSEGEI16(1, vd, rs, vs, mask);
}

// vloxei32.v: ordered indexed load, 32-bit indices (single-segment case).
void Assembler::VLOXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VLOXSEGEI32(1, vd, rs, vs, mask);
}

// vloxei64.v: ordered indexed load, 64-bit indices (single-segment case).
void Assembler::VLOXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VLOXSEGEI64(1, vd, rs, vs, mask);
}

// vluxei8.v: unordered indexed load, 8-bit indices (single-segment case).
void Assembler::VLUXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VLUXSEGEI8(1, vd, rs, vs, mask);
}

// vluxei16.v: unordered indexed load, 16-bit indices (single-segment case).
void Assembler::VLUXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VLUXSEGEI16(1, vd, rs, vs, mask);
}

// vluxei32.v: unordered indexed load, 32-bit indices (single-segment case).
void Assembler::VLUXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VLUXSEGEI32(1, vd, rs, vs, mask);
}

// vluxei64.v: unordered indexed load, 64-bit indices (single-segment case).
void Assembler::VLUXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VLUXSEGEI64(1, vd, rs, vs, mask);
}
1574  
// Unit-stride fault-only-first loads. The nf field of 0b000 encodes a
// single-field (non-segmented) access.
void Assembler::VLE8FF(Vec vd, GPR rs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask,
                   UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E8, vd);
}

void Assembler::VLE16FF(Vec vd, GPR rs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask,
                   UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E16, vd);
}

void Assembler::VLE32FF(Vec vd, GPR rs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask,
                   UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E32, vd);
}

void Assembler::VLE64FF(Vec vd, GPR rs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask,
                   UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E64, vd);
}
1594  
// Unit-stride segmented loads. num_segments is the explicit field count
// (1-8); the emit helper converts it to the encoded nf value.
void Assembler::VLSEGE8(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
                   UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E8, vd);
}

void Assembler::VLSEGE16(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
                   UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E16, vd);
}

void Assembler::VLSEGE32(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
                   UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E32, vd);
}

void Assembler::VLSEGE64(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
                   UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E64, vd);
}
1614  
// Strided segmented loads. rs1 is the base address; rs2 (the stride) is
// routed through the emit helper's rs2/lumop field slot.
void Assembler::VLSSEGE8(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask,
                   rs2, rs1, WidthEncoding::E8, vd);
}

void Assembler::VLSSEGE16(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask,
                   rs2, rs1, WidthEncoding::E16, vd);
}

void Assembler::VLSSEGE32(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask,
                   rs2, rs1, WidthEncoding::E32, vd);
}

void Assembler::VLSSEGE64(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask,
                   rs2, rs1, WidthEncoding::E64, vd);
}
1634  
// Indexed-ordered segmented loads. vs supplies the element indices and is
// routed through the emit helper's vs2/lumop field slot.
void Assembler::VLOXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
                   vs, rs, WidthEncoding::E8, vd);
}

void Assembler::VLOXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
                   vs, rs, WidthEncoding::E16, vd);
}

void Assembler::VLOXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
                   vs, rs, WidthEncoding::E32, vd);
}

void Assembler::VLOXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
                   vs, rs, WidthEncoding::E64, vd);
}
1654  
// Indexed-unordered segmented loads. Same operand routing as the
// VLOXSEGEI* family, but with the unordered addressing mode.
void Assembler::VLUXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
                   vs, rs, WidthEncoding::E8, vd);
}

void Assembler::VLUXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
                   vs, rs, WidthEncoding::E16, vd);
}

void Assembler::VLUXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
                   vs, rs, WidthEncoding::E32, vd);
}

void Assembler::VLUXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
                   vs, rs, WidthEncoding::E64, vd);
}
1674  
// Whole-register loads (8-bit element width). The destination register
// group must be aligned to the group size, hence the modulus assertion.
void Assembler::VLRE8(uint32_t num_registers, Vec vd, GPR rs) noexcept {
    BISCUIT_ASSERT(vd.Index() % num_registers == 0);
    EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E8, vd);
}

void Assembler::VL1RE8(Vec vd, GPR rs) noexcept {
    VLRE8(1, vd, rs);
}

void Assembler::VL2RE8(Vec vd, GPR rs) noexcept {
    VLRE8(2, vd, rs);
}

void Assembler::VL4RE8(Vec vd, GPR rs) noexcept {
    VLRE8(4, vd, rs);
}

void Assembler::VL8RE8(Vec vd, GPR rs) noexcept {
    VLRE8(8, vd, rs);
}
1695  
// Whole-register loads (16-bit element width).
void Assembler::VLRE16(uint32_t num_registers, Vec vd, GPR rs) noexcept {
    BISCUIT_ASSERT(vd.Index() % num_registers == 0);
    EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E16, vd);
}

void Assembler::VL1RE16(Vec vd, GPR rs) noexcept {
    VLRE16(1, vd, rs);
}

void Assembler::VL2RE16(Vec vd, GPR rs) noexcept {
    VLRE16(2, vd, rs);
}

void Assembler::VL4RE16(Vec vd, GPR rs) noexcept {
    VLRE16(4, vd, rs);
}

void Assembler::VL8RE16(Vec vd, GPR rs) noexcept {
    VLRE16(8, vd, rs);
}
1716  
// Whole-register loads (32-bit element width).
void Assembler::VLRE32(uint32_t num_registers, Vec vd, GPR rs) noexcept {
    BISCUIT_ASSERT(vd.Index() % num_registers == 0);
    EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E32, vd);
}

void Assembler::VL1RE32(Vec vd, GPR rs) noexcept {
    VLRE32(1, vd, rs);
}

void Assembler::VL2RE32(Vec vd, GPR rs) noexcept {
    VLRE32(2, vd, rs);
}

void Assembler::VL4RE32(Vec vd, GPR rs) noexcept {
    VLRE32(4, vd, rs);
}

void Assembler::VL8RE32(Vec vd, GPR rs) noexcept {
    VLRE32(8, vd, rs);
}
1737  
// Whole-register loads (64-bit element width).
void Assembler::VLRE64(uint32_t num_registers, Vec vd, GPR rs) noexcept {
    BISCUIT_ASSERT(vd.Index() % num_registers == 0);
    EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E64, vd);
}

void Assembler::VL1RE64(Vec vd, GPR rs) noexcept {
    VLRE64(1, vd, rs);
}

void Assembler::VL2RE64(Vec vd, GPR rs) noexcept {
    VLRE64(2, vd, rs);
}

void Assembler::VL4RE64(Vec vd, GPR rs) noexcept {
    VLRE64(4, vd, rs);
}

void Assembler::VL8RE64(Vec vd, GPR rs) noexcept {
    VLRE64(8, vd, rs);
}
1758  
// Unit-stride element stores (single-segment forms of VSSEGE*).
void Assembler::VSE8(Vec vs, GPR rs, VecMask mask) noexcept {
    VSSEGE8(1, vs, rs, mask);
}

void Assembler::VSE16(Vec vs, GPR rs, VecMask mask) noexcept {
    VSSEGE16(1, vs, rs, mask);
}

void Assembler::VSE32(Vec vs, GPR rs, VecMask mask) noexcept {
    VSSEGE32(1, vs, rs, mask);
}

void Assembler::VSE64(Vec vs, GPR rs, VecMask mask) noexcept {
    VSSEGE64(1, vs, rs, mask);
}
1774  
// Mask store: unit-stride store using the MaskStore sumop encoding.
// Always unmasked (VecMask::No) and uses the E8 width encoding.
void Assembler::VSM(Vec vs, GPR rs) noexcept {
    EmitVectorStore(m_buffer, 0b000, false, AddressingMode::UnitStride, VecMask::No,
                    UnitStrideStoreAddressingMode::MaskStore, rs, WidthEncoding::E8, vs);
}
1779  
// Strided element stores (single-segment forms of VSSSEGE*).
// rs1 holds the base address; rs2 holds the byte stride.
void Assembler::VSSE8(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
    VSSSEGE8(1, vs, rs1, rs2, mask);
}

void Assembler::VSSE16(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
    VSSSEGE16(1, vs, rs1, rs2, mask);
}

void Assembler::VSSE32(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
    VSSSEGE32(1, vs, rs1, rs2, mask);
}

void Assembler::VSSE64(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
    VSSSEGE64(1, vs, rs1, rs2, mask);
}
1795  
// Indexed-ordered stores (single-segment forms of VSOXSEGEI*).
// vd holds the store data, rs the base address, vs the indices.
void Assembler::VSOXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VSOXSEGEI8(1, vd, rs, vs, mask);
}

void Assembler::VSOXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VSOXSEGEI16(1, vd, rs, vs, mask);
}

void Assembler::VSOXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VSOXSEGEI32(1, vd, rs, vs, mask);
}

void Assembler::VSOXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VSOXSEGEI64(1, vd, rs, vs, mask);
}
1811  
// Indexed-unordered stores (single-segment forms of VSUXSEGEI*).
void Assembler::VSUXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VSUXSEGEI8(1, vd, rs, vs, mask);
}

void Assembler::VSUXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VSUXSEGEI16(1, vd, rs, vs, mask);
}

void Assembler::VSUXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VSUXSEGEI32(1, vd, rs, vs, mask);
}

void Assembler::VSUXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    VSUXSEGEI64(1, vd, rs, vs, mask);
}
1827  
// Unit-stride segmented stores. num_segments is the explicit field count.
void Assembler::VSSEGE8(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
                    UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E8, vs);
}

void Assembler::VSSEGE16(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
                    UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E16, vs);
}

void Assembler::VSSEGE32(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
                    UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E32, vs);
}

void Assembler::VSSEGE64(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
                    UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E64, vs);
}
1847  
// Strided segmented stores. rs1 is the base address; rs2 (the stride) is
// routed through the emit helper's rs2/sumop field slot.
void Assembler::VSSSEGE8(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask,
                    rs2, rs1, WidthEncoding::E8, vs);
}

void Assembler::VSSSEGE16(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask,
                    rs2, rs1, WidthEncoding::E16, vs);
}

void Assembler::VSSSEGE32(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask,
                    rs2, rs1, WidthEncoding::E32, vs);
}

void Assembler::VSSSEGE64(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask,
                    rs2, rs1, WidthEncoding::E64, vs);
}
1867  
1868  void Assembler::VSOXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
1869      EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
1870          vs, rs, WidthEncoding::E8, vd);
1871  }
1872  
1873  void Assembler::VSOXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
1874      EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
1875          vs, rs, WidthEncoding::E16, vd);
1876  }
1877  
1878  void Assembler::VSOXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
1879      EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
1880          vs, rs, WidthEncoding::E32, vd);
1881  }
1882  
1883  void Assembler::VSOXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
1884      EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
1885          vs, rs, WidthEncoding::E64, vd);
1886  }
1887  
// Indexed-unordered segmented stores. Same operand routing as the
// VSOXSEGEI* family, but with the unordered addressing mode.
void Assembler::VSUXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
                    vs, rs, WidthEncoding::E8, vd);
}

void Assembler::VSUXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
                    vs, rs, WidthEncoding::E16, vd);
}

void Assembler::VSUXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
                    vs, rs, WidthEncoding::E32, vd);
}

void Assembler::VSUXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
                    vs, rs, WidthEncoding::E64, vd);
}
1907  
1908  void Assembler::VSR(uint32_t num_registers, Vec vs, GPR rs) noexcept {
1909      EmitVectorStoreWholeReg(m_buffer, num_registers, rs, vs);
1910  }
1911  
1912  void Assembler::VS1R(Vec vs, GPR rs) noexcept {
1913      VSR(1, vs, rs);
1914  }
1915  
1916  void Assembler::VS2R(Vec vs, GPR rs) noexcept {
1917      BISCUIT_ASSERT(vs.Index() % 2 == 0);
1918      VSR(2, vs, rs);
1919  }
1920  
1921  void Assembler::VS4R(Vec vs, GPR rs) noexcept {
1922      BISCUIT_ASSERT(vs.Index() % 4 == 0);
1923      VSR(4, vs, rs);
1924  }
1925  
1926  void Assembler::VS8R(Vec vs, GPR rs) noexcept {
1927      BISCUIT_ASSERT(vs.Index() % 8 == 0);
1928      VSR(8, vs, rs);
1929  }
1930  
// vsetivli: sets vl/vtype from a 5-bit immediate AVL plus immediate vtype
// fields. zimm layout: [2:0]=lmul, [5:3]=sew, [6]=vta, [7]=vma.
void Assembler::VSETIVLI(GPR rd, uint32_t imm, SEW sew, LMUL lmul, VTA vta, VMA vma) noexcept {
    // Immediate must be able to fit in 5 bits.
    BISCUIT_ASSERT(imm <= 31);

    // clang-format off
    const auto zimm = static_cast<uint32_t>(lmul) |
                      (static_cast<uint32_t>(sew) << 3) |
                      (static_cast<uint32_t>(vta) << 6) |
                      (static_cast<uint32_t>(vma) << 7);
    // clang-format on

    m_buffer.Emit32(0xC0007057U | (zimm << 20) | (imm << 15) | (rd.Index() << 7));
}
1944  
// vsetvl: sets vl/vtype with the AVL in rs1 and the vtype value in rs2.
void Assembler::VSETVL(GPR rd, GPR rs1, GPR rs2) noexcept {
    m_buffer.Emit32(0x80007057U | (rs2.Index() << 20) | (rs1.Index() << 15) | (rd.Index() << 7));
}
1948  
// vsetvli: sets vl/vtype with the AVL in rs and immediate vtype fields.
// zimm layout matches VSETIVLI: [2:0]=lmul, [5:3]=sew, [6]=vta, [7]=vma.
void Assembler::VSETVLI(GPR rd, GPR rs, SEW sew, LMUL lmul, VTA vta, VMA vma) noexcept {
    // clang-format off
    const auto zimm = static_cast<uint32_t>(lmul) |
                      (static_cast<uint32_t>(sew) << 3) |
                      (static_cast<uint32_t>(vta) << 6) |
                      (static_cast<uint32_t>(vma) << 7);
    // clang-format on

    m_buffer.Emit32(0x00007057U | (zimm << 20) | (rs.Index() << 15) | (rd.Index() << 7));
}
1959  
1960  // Vector Cryptography Instructions
1961  
// vandn: AND of vs2 with the bitwise complement of the vs1/rs1 operand
// (vector-vector and vector-scalar forms).
void Assembler::VANDN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000001, mask, vs2, vs1, vd);
}
void Assembler::VANDN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000001, mask, vs2, rs1, vd);
}
1968  
// Bit/byte reversal unary ops. All share funct6 0b010010; the vs1 field
// selects the specific operation.
void Assembler::VBREV(Vec vd, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs2, Vec{0b01010}, vd);
}
void Assembler::VBREV8(Vec vd, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs2, Vec{0b01000}, vd);
}
void Assembler::VREV8(Vec vd, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs2, Vec{0b01001}, vd);
}
1978  
// Count leading/trailing zeros and population count unary ops; the vs1
// field selects the operation under funct6 0b010010.
void Assembler::VCLZ(Vec vd, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs2, Vec{0b01100}, vd);
}
void Assembler::VCTZ(Vec vd, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs2, Vec{0b01101}, vd);
}
void Assembler::VCPOP(Vec vd, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs2, Vec{0b01110}, vd);
}
1988  
// Bit rotates. The immediate VROR form folds uimm[5] into the low bit of
// funct6 so rotate amounts up to 63 can be encoded with a 5-bit imm field.
void Assembler::VROL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010101, mask, vs2, vs1, vd);
}
void Assembler::VROL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010101, mask, vs2, rs1, vd);
}

void Assembler::VROR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010100, mask, vs2, vs1, vd);
}
void Assembler::VROR(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010100, mask, vs2, rs1, vd);
}
void Assembler::VROR(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    BISCUIT_ASSERT(uimm <= 63);

    // uimm[5] is carried in the low bit of funct6; uimm[4:0] go in the imm field.
    const auto funct6 = 0b010100 | ((uimm & 0b100000) >> 5);
    EmitVectorOPIVIImpl(m_buffer, funct6, mask, vs2, uimm, vd);
}
2008  
// Widening shift-left logical (vector, scalar, and immediate forms).
void Assembler::VWSLL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b110101, mask, vs2, vs1, vd);
}
void Assembler::VWSLL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b110101, mask, vs2, rs1, vd);
}
void Assembler::VWSLL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b110101, mask, vs2, uimm, vd);
}
2018  
// Carry-less multiply: VCLMUL produces the low half of the product,
// VCLMULH the high half.
void Assembler::VCLMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001100, mask, vs2, vs1, vd);
}
void Assembler::VCLMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001100, mask, vs2, rs1, vd);
}

void Assembler::VCLMULH(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001101, mask, vs2, vs1, vd);
}
void Assembler::VCLMULH(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001101, mask, vs2, rs1, vd);
}
2032  
// GHASH ops. Both are always unmasked; VGMUL is encoded as funct6 0b101000
// with a fixed vs1 selector of 0b10001.
void Assembler::VGHSH(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101100, VecMask::No, vs2, vs1, vd);
}
void Assembler::VGMUL(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101000, VecMask::No, vs2, Vec{0b10001}, vd);
}
2039  
// AES single-round ops. The _VV forms use funct6 0b101000 and the _VS
// forms 0b101001; the vs1 field selects the round function
// (final/middle decrypt, final/middle encrypt). All are unmasked.
void Assembler::VAESDF_VV(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101000, VecMask::No, vs2, Vec{0b00001}, vd);
}
void Assembler::VAESDF_VS(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101001, VecMask::No, vs2, Vec{0b00001}, vd);
}

void Assembler::VAESDM_VV(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101000, VecMask::No, vs2, Vec{0}, vd);
}
void Assembler::VAESDM_VS(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101001, VecMask::No, vs2, Vec{0}, vd);
}

void Assembler::VAESEF_VV(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101000, VecMask::No, vs2, Vec{0b00011}, vd);
}
void Assembler::VAESEF_VS(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101001, VecMask::No, vs2, Vec{0b00011}, vd);
}

void Assembler::VAESEM_VV(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101000, VecMask::No, vs2, Vec{0b00010}, vd);
}
void Assembler::VAESEM_VS(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101001, VecMask::No, vs2, Vec{0b00010}, vd);
}
2067  
2068  // Little bit of weirdness (at first glance) for these is that the round
2069  // number immediate has valid ranges:
2070  //
2071  // - [1, 10] for VAESKF1
2072  // - [2, 14] for VAESKF2
2073  //
2074  // Any out of range values (0, 11-15) for VAESKF1, (0-1, 15) for VAESKF2
2075  // will be re-encoded into a valid range by inverting bit uimm[3]
2076  
// AES key-schedule ops. See the comment block above: out-of-range round
// numbers are re-encoded into the valid range by flipping uimm[3].
void Assembler::VAESKF1(Vec vd, Vec vs2, uint32_t uimm) noexcept {
    BISCUIT_ASSERT(uimm <= 15);

    // Valid range is [1, 10]; 0 and 11-15 get uimm[3] inverted.
    if (uimm == 0 || uimm > 10) {
        uimm ^= 0b1000;
    }

    EmitVectorOPMVVP(m_buffer, 0b100010, VecMask::No, vs2, Vec{uimm}, vd);
}
void Assembler::VAESKF2(Vec vd, Vec vs2, uint32_t uimm) noexcept {
    BISCUIT_ASSERT(uimm <= 15);

    // Valid range is [2, 14]; 0-1 and 15 get uimm[3] inverted.
    if (uimm < 2 || uimm > 14) {
        uimm ^= 0b1000;
    }

    EmitVectorOPMVVP(m_buffer, 0b101010, VecMask::No, vs2, Vec{uimm}, vd);
}
2095  
// AES round-zero op (vector-scalar form only; vs1 selector 0b00111).
void Assembler::VAESZ(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101001, VecMask::No, vs2, Vec{0b00111}, vd);
}
2099  
// SHA-2 message schedule and compression ops; all unmasked.
void Assembler::VSHA2MS(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101101, VecMask::No, vs2, vs1, vd);
}
void Assembler::VSHA2CH(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101110, VecMask::No, vs2, vs1, vd);
}
void Assembler::VSHA2CL(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101111, VecMask::No, vs2, vs1, vd);
}
2109  
// SM4 ops. VSM4K takes a 3-bit round-group immediate in the vs1 field;
// VSM4R uses a fixed vs1 selector of 0b10000 (vv vs. vs via funct6).
void Assembler::VSM4K(Vec vd, Vec vs2, uint32_t uimm) noexcept {
    BISCUIT_ASSERT(uimm <= 7);
    EmitVectorOPMVVP(m_buffer, 0b100001, VecMask::No, vs2, Vec{uimm}, vd);
}

void Assembler::VSM4R_VV(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101000, VecMask::No, vs2, Vec{0b10000}, vd);
}
void Assembler::VSM4R_VS(Vec vd, Vec vs2) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b101001, VecMask::No, vs2, Vec{0b10000}, vd);
}
2121  
// SM3 ops. VSM3C carries a 5-bit round immediate in the vs1 field.
void Assembler::VSM3C(Vec vd, Vec vs2, uint32_t uimm) noexcept {
    BISCUIT_ASSERT(uimm <= 31);
    EmitVectorOPMVVP(m_buffer, 0b101011, VecMask::No, vs2, Vec{uimm}, vd);
}
void Assembler::VSM3ME(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVVP(m_buffer, 0b100000, VecMask::No, vs2, vs1, vd);
}
2129  
2130  // Zvfbfmin, Zvfbfwma Extension Instructions
2131  
// BF16 conversions: narrowing (FP32 -> BF16) and widening (BF16 -> FP32).
// Both use funct6 0b010010 with the operation selected by the fixed vs1
// register index (v29 and v13 respectively).
void Assembler::VFNCVTBF16_F_F_W(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v29, vd);
}
void Assembler::VFWCVTBF16_F_F_V(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v13, vd);
}

// BF16 widening fused multiply-accumulate (scalar and vector forms).
void Assembler::VFWMACCBF16(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b111011, mask, vs2, rs1, vd);
}
void Assembler::VFWMACCBF16(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b111011, mask, vs2, vs1, vd);
}
2145  
2146  } // namespace biscuit