diff --git a/README.md b/README.md index 2d93585f64b0b0..1123805d4098a1 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,11 @@ Any feature not listed below but present in the specification should be consider - (Done) `12.2. Vector Widening Integer Add/Subtract` - (Done) `vwadd{u}.{vv,vx,wv,wx}` - (Done) `vwsub{u}.{vv,vx,wv,wx}` - + - (Done) `12.3. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions` + - (Done) `vadc.{vvm,vxm,vim}` + - (Done) `vmadc.{vvm,vxm,vim}` + - (Done) `vsbc.{vvm,vxm}` + - (Done) `vmsbc.{vvm,vxm}` - (WIP) Clang intrinsics related to the `XTHeadVector` extension: - (WIP) `6. Configuration-Setting and Utility` - (Done) `6.1. Set vl and vtype` diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td b/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td index 0485a7a7e83d58..5ff4e383a65a9f 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td @@ -613,6 +613,12 @@ let TargetPrefix = "riscv" in { defm th_vwsub : XVBinaryABX; defm th_vwsubu_w : XVBinaryAAX; defm th_vwsub_w : XVBinaryAAX; + + // 12.3. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions + defm th_vadc : RISCVBinaryWithV0; + defm th_vmadc_carry_in : RISCVBinaryMaskOutWithV0; + defm th_vsbc : RISCVBinaryWithV0; + defm th_vmsbc_borrow_in : RISCVBinaryMaskOutWithV0; } // TargetPrefix = "riscv" let TargetPrefix = "riscv" in { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td index d9cbdbc1060534..63fb6b1aa3c0f5 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td @@ -1723,6 +1723,72 @@ multiclass XVPseudoVWALU_WV_WX { } } +multiclass XVPseudoVCALU_VM_XM_IM { + foreach m = MxListXTHeadV in { + defvar mx = m.MX; + defvar WriteVICALUV_MX = !cast("WriteVICALUV_" # mx); + defvar WriteVICALUX_MX = !cast("WriteVICALUX_" # mx); + defvar WriteVICALUI_MX = !cast("WriteVICALUI_" # mx); + defvar ReadVICALUV_MX = !cast("ReadVICALUV_" # mx); + defvar ReadVICALUX_MX = !cast("ReadVICALUX_" # mx); + + defm "" : VPseudoTiedBinaryV_VM, + Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>; + defm "" : VPseudoTiedBinaryV_XM, + Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>; + defm "" : VPseudoTiedBinaryV_IM, + Sched<[WriteVICALUI_MX, ReadVICALUV_MX, ReadVMask]>; + } +} + +multiclass XVPseudoVCALUM_VM_XM_IM { + foreach m = MxListXTHeadV in { + defvar mx = m.MX; + defvar WriteVICALUV_MX = !cast("WriteVICALUV_" # mx); + defvar WriteVICALUX_MX = !cast("WriteVICALUX_" # mx); + defvar WriteVICALUI_MX = !cast("WriteVICALUI_" # mx); + defvar ReadVICALUV_MX = !cast("ReadVICALUV_" # mx); + defvar ReadVICALUX_MX = !cast("ReadVICALUX_" # mx); + + defm "" : VPseudoBinaryV_VM, + Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>; + defm "" : VPseudoBinaryV_XM, + Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>; + defm "" : VPseudoBinaryV_IM, + Sched<[WriteVICALUI_MX, ReadVICALUV_MX, ReadVMask]>; + } +} + +multiclass XVPseudoVCALU_VM_XM { + foreach m = MxListXTHeadV in { + defvar mx = m.MX; + defvar WriteVICALUV_MX = !cast("WriteVICALUV_" # mx); + defvar WriteVICALUX_MX = !cast("WriteVICALUX_" # mx); + defvar ReadVICALUV_MX = !cast("ReadVICALUV_" # mx); + defvar ReadVICALUX_MX = !cast("ReadVICALUX_" # mx); + + defm "" : VPseudoTiedBinaryV_VM, + Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>; + defm "" : VPseudoTiedBinaryV_XM, 
+ Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>; + } +} + +multiclass XVPseudoVCALUM_VM_XM { + foreach m = MxListXTHeadV in { + defvar mx = m.MX; + defvar WriteVICALUV_MX = !cast("WriteVICALUV_" # mx); + defvar WriteVICALUX_MX = !cast("WriteVICALUX_" # mx); + defvar ReadVICALUV_MX = !cast("ReadVICALUV_" # mx); + defvar ReadVICALUX_MX = !cast("ReadVICALUX_" # mx); + + defm "" : VPseudoBinaryV_VM, + Sched<[WriteVICALUV_MX, ReadVICALUV_MX, ReadVICALUV_MX, ReadVMask]>; + defm "" : VPseudoBinaryV_XM, + Sched<[WriteVICALUX_MX, ReadVICALUV_MX, ReadVICALUX_MX, ReadVMask]>; + } +} + //===----------------------------------------------------------------------===// // Helpers to define the intrinsic patterns for the XTHeadVector extension. //===----------------------------------------------------------------------===// @@ -1903,6 +1969,72 @@ multiclass XVPatBinaryW_WX { + foreach vti = AllIntegerXVectors in + let Predicates = GetXVTypePredicates.Predicates in + defm : VPatBinaryCarryInTAIL; +} + +multiclass XVPatBinaryV_XM_TAIL { + foreach vti = AllIntegerXVectors in + let Predicates = GetXVTypePredicates.Predicates in + defm : VPatBinaryCarryInTAIL; +} + +multiclass XVPatBinaryV_IM_TAIL { + foreach vti = AllIntegerXVectors in + let Predicates = GetXVTypePredicates.Predicates in + defm : VPatBinaryCarryInTAIL; +} +multiclass XVPatBinaryV_VM vtilist = AllIntegerXVectors> { + foreach vti = vtilist in + let Predicates = GetXVTypePredicates.Predicates in + defm : VPatBinaryCarryIn; +} + +multiclass XVPatBinaryV_XM vtilist = AllIntegerXVectors> { + foreach vti = vtilist in + let Predicates = GetXVTypePredicates.Predicates in + defm : VPatBinaryCarryIn; +} + +multiclass XVPatBinaryV_IM { + foreach vti = AllIntegerXVectors in + let Predicates = GetXVTypePredicates.Predicates in + defm : VPatBinaryCarryIn; +} + multiclass XVPatBinaryV_VV_VX_VI vtilist, Operand ImmType = simm5> : XVPatBinaryV_VV, @@ -1929,6 +2061,24 @@ multiclass XVPatBinaryW_WV_WX, XVPatBinaryW_WX; +multiclass XVPatBinaryV_VM_XM_IM + : XVPatBinaryV_VM_TAIL, + XVPatBinaryV_XM_TAIL, + XVPatBinaryV_IM_TAIL; + +multiclass XVPatBinaryM_VM_XM_IM + : XVPatBinaryV_VM, + XVPatBinaryV_XM, + XVPatBinaryV_IM; + +multiclass XVPatBinaryV_VM_XM + : XVPatBinaryV_VM_TAIL, + XVPatBinaryV_XM_TAIL; + +multiclass XVPatBinaryM_VM_XM + : XVPatBinaryV_VM, + XVPatBinaryV_XM; + //===----------------------------------------------------------------------===// // 12.1. Vector Single-Width Saturating Add and Subtract //===----------------------------------------------------------------------===// @@ -2023,6 +2173,25 @@ let Predicates = [HasVendorXTHeadV] in { defm : XVPatBinaryW_WV_WX<"int_riscv_th_vwsub_w", "PseudoTH_VWSUB", AllWidenableIntXVectors>; } +//===----------------------------------------------------------------------===// +// 12.3. 
Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions +//===----------------------------------------------------------------------===// +let Predicates = [HasVendorXTHeadV] in { + defm PseudoTH_VADC : XVPseudoVCALU_VM_XM_IM; + defm PseudoTH_VMADC : XVPseudoVCALUM_VM_XM_IM<"@earlyclobber $rd">; + + defm PseudoTH_VSBC : XVPseudoVCALU_VM_XM; + defm PseudoTH_VMSBC : XVPseudoVCALUM_VM_XM<"@earlyclobber $rd">; +} // Predicates = [HasVendorXTHeadV] + +let Predicates = [HasVendorXTHeadV] in { + defm : XVPatBinaryV_VM_XM_IM<"int_riscv_th_vadc", "PseudoTH_VADC">; + defm : XVPatBinaryM_VM_XM_IM<"int_riscv_th_vmadc_carry_in", "PseudoTH_VMADC">; + + defm : XVPatBinaryV_VM_XM<"int_riscv_th_vsbc", "PseudoTH_VSBC">; + defm : XVPatBinaryM_VM_XM<"int_riscv_th_vmsbc_borrow_in", "PseudoTH_VMSBC">; +} // Predicates = [HasVendorXTHeadV] + //===----------------------------------------------------------------------===// // 12.14. Vector Integer Merge and Move Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vadc.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vadc.ll new file mode 100644 index 00000000000000..22f8aec1cc338b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vadc.ll @@ -0,0 +1,1797 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.th.vadc.nxv8i8.nxv8i8( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i8.nxv8i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv16i8.nxv16i8( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli 
zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv16i8.nxv16i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv32i8.nxv32i8( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv32i8.nxv32i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv64i8.nxv64i8( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv64i8.nxv64i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv4i16.nxv4i16( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv4i16.nxv4i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv8i16.nxv8i16( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i16.nxv8i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv16i16.nxv16i16( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv16i16.nxv16i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv32i16.nxv32i16( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv32i16.nxv32i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv2i32.nxv2i32( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli 
zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv2i32.nxv2i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv4i32.nxv4i32( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv4i32.nxv4i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv8i32.nxv8i32( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i32.nxv8i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv16i32.nxv16i32( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv16i32.nxv16i32( + undef, + %0, + %1, + %2, + iXLen 
%3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv1i64.nxv1i64( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv1i64.nxv1i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv2i64.nxv2i64( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv2i64.nxv2i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv4i64.nxv4i64( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv4i64.nxv4i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv8i64.nxv8i64( + , + , + , + , + iXLen); + +define @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; 
CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vadc.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i64.nxv8i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv8i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv16i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv16i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv32i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv32i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv64i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv64i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv4i16.i16( + , + , + i16, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv4i16.i16( + undef, + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv8i16.i16( + , + , + i16, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i16.i16( + undef, + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv16i16.i16( + , + , + i16, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv16i16.i16( + undef, + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv32i16.i16( + , + , + i16, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv32i16.i16( + undef, + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv2i32.i32( + , + , + i32, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv2i32.i32( + undef, + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv4i32.i32( + , + , + i32, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv4i32.i32( + undef, + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv8i32.i32( + , + , + i32, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i32.i32( + undef, + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv16i32.i32( + , + , + i32, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vadc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv16i32.i32( + undef, + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv1i64.i64( + , + , + i64, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m1, d1 +; RV32-NEXT: th.vlse.v v9, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vadc.vvm v8, v8, v9, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m1, d1 +; RV64-NEXT: th.vadc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv1i64.i64( + undef, + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv2i64.i64( + , + , + i64, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m2, d1 +; RV32-NEXT: th.vlse.v v10, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vadc.vvm v8, v8, v10, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: 
th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m2, d1 +; RV64-NEXT: th.vadc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv2i64.i64( + undef, + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv4i64.i64( + , + , + i64, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m4, d1 +; RV32-NEXT: th.vlse.v v12, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vadc.vvm v8, v8, v12, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m4, d1 +; RV64-NEXT: th.vadc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv4i64.i64( + undef, + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vadc.nxv8i64.i64( + , + , + i64, + , + iXLen); + +define @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m8, d1 +; RV32-NEXT: th.vlse.v v16, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vadc.vvm v8, v8, 
v16, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; RV64-NEXT: th.vadc.vxm v8, v8, a0, v0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i64.i64( + undef, + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i8.i8( + undef, + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv16i8.i8( + undef, + %0, + i8 -9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv32i8.i8( + undef, + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, 
vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv64i8.i8( + undef, + %0, + i8 -9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv4i16.i16( + undef, + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i16.i16( + undef, + %0, + i16 -9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv16i16.i16( + undef, + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, 
vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv32i16.i16( + undef, + %0, + i16 -9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv2i32.i32( + undef, + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv4i32.i32( + undef, + %0, + i32 -9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i32.i32( + undef, + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv16i32.i32( + undef, + %0, + i32 -9, + %1, + 
iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv1i64.i64( + undef, + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv2i64.i64( + undef, + %0, + i64 -9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, 9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv4i64.i64( + undef, + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vadc.vim v8, v8, -9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vadc.nxv8i64.i64( + undef, + %0, + i64 -9, + %1, + iXLen %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmadc.carry.in.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmadc.carry.in.ll new file mode 100644 index 00000000000000..0f93af5ceb0a40 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmadc.carry.in.ll @@ -0,0 +1,1913 @@ +; NOTE: 
Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.th.vmadc.carry.in.nxv8i8.nxv8i8( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv8i1_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vmadc.vvm v10, v8, v9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i8.nxv8i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv16i8.nxv16i8( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv16i1_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vmadc.vvm v12, v8, v10, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv16i8.nxv16i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv32i8.nxv32i8( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv32i1_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: 
th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vmadc.vvm v16, v8, v12, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv32i8.nxv32i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv64i8.nxv64i8( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv64i1_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vmadc.vvm v24, v8, v16, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v24 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv64i8.nxv64i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv4i16.nxv4i16( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv4i1_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmadc.vvm v10, v8, v9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv4i16.nxv4i16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv8i16.nxv8i16( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv8i1_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; 
CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vmadc.vvm v12, v8, v10, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i16.nxv8i16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv16i16.nxv16i16( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv16i1_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vmadc.vvm v16, v8, v12, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv16i16.nxv16i16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv32i16.nxv32i16( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv32i1_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vmadc.vvm v24, v8, v16, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v24 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv32i16.nxv32i16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv2i32.nxv2i32( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv2i1_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: 
th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmadc.vvm v10, v8, v9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv4i32.nxv4i32( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv4i1_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vmadc.vvm v12, v8, v10, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv8i32.nxv8i32( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv8i1_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vmadc.vvm v16, v8, v12, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i32.nxv8i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv16i32.nxv16i32( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv16i1_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; 
CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vmadc.vvm v24, v8, v16, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v24 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv16i32.nxv16i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv1i64.nxv1i64( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv1i1_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmadc.vvm v10, v8, v9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv1i64.nxv1i64( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv2i64.nxv2i64( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv2i1_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vmadc.vvm v12, v8, v10, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv2i64.nxv2i64( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv4i64.nxv4i64( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv4i1_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; 
CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vmadc.vvm v16, v8, v12, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv4i64.nxv4i64( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv8i64.nxv8i64( + , + , + , + iXLen); + +define @intrinsic_vmadc_carry_in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vvm_nxv8i1_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vmadc.vvm v24, v8, v16, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v24 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i64.nxv8i64( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv8i8.i8( + , + i8, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv8i1_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vmadc.vxm v9, v8, a0, v0 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i8.i8( + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv16i8.i8( + , + i8, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv16i1_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: 
th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vmadc.vxm v10, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv16i8.i8( + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv32i8.i8( + , + i8, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv32i1_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vmadc.vxm v12, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv32i8.i8( + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv64i8.i8( + , + i8, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv64i1_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vmadc.vxm v16, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv64i8.i8( + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv4i16.i16( + , + i16, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv4i1_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vmadc.vxm v9, v8, a0, v0 +; 
CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv4i16.i16( + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv8i16.i16( + , + i16, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv8i1_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vmadc.vxm v10, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i16.i16( + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv16i16.i16( + , + i16, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv16i1_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vmadc.vxm v12, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv16i16.i16( + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv32i16.i16( + , + i16, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv32i1_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vmadc.vxm v16, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv32i16.i16( + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv2i32.i32( + , + i32, + , + iXLen); 
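+
+; With the scalable vector types spelled out (a sketch; the <vscale x 2 x ...>
+; operand and result types are implied by the .nxv2i32 name mangling), the
+; declaration above reads:
+;
+;   declare <vscale x 2 x i1> @llvm.riscv.th.vmadc.carry.in.nxv2i32.i32(
+;     <vscale x 2 x i32>,  ; vector addend (vs2)
+;     i32,                 ; scalar addend (rs1)
+;     <vscale x 2 x i1>,   ; carry-in mask held in v0
+;     iXLen)               ; AVL
+;
+; The result is the carry-out mask of vs2 + rs1 + carry-in, as exercised by
+; the vxm test below.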
+ +define @intrinsic_vmadc_carry_in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv2i1_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vmadc.vxm v9, v8, a0, v0 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv2i32.i32( + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv4i32.i32( + , + i32, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv4i1_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vmadc.vxm v10, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv4i32.i32( + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv8i32.i32( + , + i32, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv8i1_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vmadc.vxm v12, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i32.i32( + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv16i32.i32( + , + i32, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vxm_nxv16i1_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype 
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vmadc.vxm v16, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv16i32.i32( + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv1i64.i64( + , + i64, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadc_carry_in_vxm_nxv1i1_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m1, d1 +; RV32-NEXT: th.vlse.v v10, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vmadc.vvm v9, v8, v10, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vmv.v.v v0, v9 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc_carry_in_vxm_nxv1i1_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m1, d1 +; RV64-NEXT: th.vmadc.vxm v9, v8, a0, v0 +; RV64-NEXT: th.vmv.v.v v0, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv1i64.i64( + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv2i64.i64( + , + i64, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadc_carry_in_vxm_nxv2i1_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m2, d1 +; RV32-NEXT: th.vlse.v v12, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vmadc.vvm v10, v8, v12, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: 
th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vmv.v.v v0, v10 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc_carry_in_vxm_nxv2i1_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m2, d1 +; RV64-NEXT: th.vmadc.vxm v10, v8, a0, v0 +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: csrr a1, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vmv.v.v v0, v10 +; RV64-NEXT: th.vsetvl zero, a0, a1 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv2i64.i64( + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv4i64.i64( + , + i64, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadc_carry_in_vxm_nxv4i1_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m4, d1 +; RV32-NEXT: th.vlse.v v16, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vmadc.vvm v12, v8, v16, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vmv.v.v v0, v12 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc_carry_in_vxm_nxv4i1_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m4, d1 +; RV64-NEXT: th.vmadc.vxm v12, v8, a0, v0 +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: csrr a1, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vmv.v.v v0, v12 +; RV64-NEXT: th.vsetvl zero, a0, a1 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv4i64.i64( + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmadc.carry.in.nxv8i64.i64( + , + i64, + , + iXLen); + +define @intrinsic_vmadc_carry_in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmadc_carry_in_vxm_nxv8i1_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, 
a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m8, d1 +; RV32-NEXT: th.vlse.v v24, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vmadc.vvm v16, v8, v24, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vmv.v.v v0, v16 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmadc_carry_in_vxm_nxv8i1_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; RV64-NEXT: th.vmadc.vxm v16, v8, a0, v0 +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: csrr a1, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vmv.v.v v0, v16 +; RV64-NEXT: th.vsetvl zero, a0, a1 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i64.i64( + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv8i1_nxv8i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv8i1_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vmadc.vim v9, v8, 9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i8.i8( + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv16i1_nxv16i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv16i1_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vmadc.vim v10, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv16i8.i8( + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv32i1_nxv32i8_i8( 
%0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv32i1_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vmadc.vim v12, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv32i8.i8( + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv64i1_nxv64i8_i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv64i1_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vmadc.vim v16, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv64i8.i8( + %0, + i8 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv4i1_nxv4i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv4i1_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmadc.vim v9, v8, 9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv4i16.i16( + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv8i1_nxv8i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv8i1_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vmadc.vim v10, v8, 9, v0 +; CHECK-NEXT: csrr a0, 
vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i16.i16( + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv16i1_nxv16i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv16i1_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vmadc.vim v12, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv16i16.i16( + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv32i1_nxv32i16_i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv32i1_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vmadc.vim v16, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv32i16.i16( + %0, + i16 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv2i1_nxv2i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv2i1_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmadc.vim v9, v8, 9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv2i32.i32( + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv4i1_nxv4i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv4i1_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: 
csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vmadc.vim v10, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv4i32.i32( + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv8i1_nxv8i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv8i1_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vmadc.vim v12, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i32.i32( + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv16i1_nxv16i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv16i1_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vmadc.vim v16, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv16i32.i32( + %0, + i32 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv1i1_nxv1i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv1i1_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmadc.vim v9, v8, 9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv1i64.i64( + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} + +define 
@intrinsic_vmadc_carry_in_vim_nxv2i1_nxv2i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv2i1_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vmadc.vim v10, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv2i64.i64( + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv4i1_nxv4i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv4i1_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vmadc.vim v12, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv4i64.i64( + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vmadc_carry_in_vim_nxv8i1_nxv8i64_i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmadc_carry_in_vim_nxv8i1_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vmadc.vim v16, v8, 9, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmadc.carry.in.nxv8i64.i64( + %0, + i64 9, + %1, + iXLen %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmsbc.borrow.in.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmsbc.borrow.in.ll new file mode 100644 index 00000000000000..92d94ef8d11d0e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmsbc.borrow.in.ll @@ -0,0 +1,1401 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | 
llc -mtriple=riscv64 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv8i8.nxv8i8( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv8i1_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vmsbc.vvm v10, v8, v9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv8i8.nxv8i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv16i8.nxv16i8( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv16i1_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vmsbc.vvm v12, v8, v10, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv16i8.nxv16i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv32i8.nxv32i8( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv32i1_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vmsbc.vvm v16, v8, v12, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v 
v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv32i8.nxv32i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv64i8.nxv64i8( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv64i1_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vmsbc.vvm v24, v8, v16, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v24 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv64i8.nxv64i8( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv4i16.nxv4i16( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv4i1_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmsbc.vvm v10, v8, v9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv4i16.nxv4i16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv8i16.nxv8i16( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv8i1_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vmsbc.vvm v12, v8, v10, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; 
CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv8i16.nxv8i16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv16i16.nxv16i16( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv16i1_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vmsbc.vvm v16, v8, v12, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv16i16.nxv16i16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv32i16.nxv32i16( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv32i1_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vmsbc.vvm v24, v8, v16, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v24 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv32i16.nxv32i16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv2i32.nxv2i32( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv2i1_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; 
CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmsbc.vvm v10, v8, v9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv4i32.nxv4i32( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv4i1_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vmsbc.vvm v12, v8, v10, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv8i32.nxv8i32( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv8i1_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vmsbc.vvm v16, v8, v12, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv8i32.nxv8i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv16i32.nxv16i32( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv16i1_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl 
zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vmsbc.vvm v24, v8, v16, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v24 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv16i32.nxv16i32( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv1i64.nxv1i64( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv1i1_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmsbc.vvm v10, v8, v9, v0 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv1i64.nxv1i64( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv2i64.nxv2i64( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv2i1_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vmsbc.vvm v12, v8, v10, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv2i64.nxv2i64( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv4i64.nxv4i64( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv4i1_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; 
CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vmsbc.vvm v16, v8, v12, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv4i64.nxv4i64( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv8i64.nxv8i64( + , + , + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vvm_nxv8i1_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vmsbc.vvm v24, v8, v16, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v24 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv8i64.nxv8i64( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv8i8.i8( + , + i8, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv8i1_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vmsbc.vxm v9, v8, a0, v0 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv8i8.i8( + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv16i8.i8( + , + i8, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv16i1_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: 
th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vmsbc.vxm v10, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv16i8.i8( + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv32i8.i8( + , + i8, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv32i1_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vmsbc.vxm v12, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv32i8.i8( + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv64i8.i8( + , + i8, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv64i1_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vmsbc.vxm v16, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv64i8.i8( + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv4i16.i16( + , + i16, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv4i1_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vmsbc.vxm v9, v8, a0, v0 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv4i16.i16( + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare 
@llvm.riscv.th.vmsbc.borrow.in.nxv8i16.i16( + , + i16, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv8i1_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vmsbc.vxm v10, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv8i16.i16( + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv16i16.i16( + , + i16, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv16i1_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vmsbc.vxm v12, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv16i16.i16( + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv32i16.i16( + , + i16, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv32i1_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vmsbc.vxm v16, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv32i16.i16( + %0, + i16 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv2i32.i32( + , + i32, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vmsbc_borrow_in_vxm_nxv2i1_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vmsbc.vxm v9, v8, a0, v0 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv2i32.i32( + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv4i32.i32( + , + i32, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv4i1_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vmsbc.vxm v10, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv4i32.i32( + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv8i32.i32( + , + i32, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv8i1_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vmsbc.vxm v12, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v12 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv8i32.i32( + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv16i32.i32( + , + i32, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv16i1_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: 
csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vmsbc.vxm v16, v8, a0, v0 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a1, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v16 +; CHECK-NEXT: th.vsetvl zero, a0, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv16i32.i32( + %0, + i32 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv1i64.i64( + , + i64, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv1i1_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m1, d1 +; RV32-NEXT: th.vlse.v v10, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vmsbc.vvm v9, v8, v10, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vmv.v.v v0, v9 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv1i1_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m1, d1 +; RV64-NEXT: th.vmsbc.vxm v9, v8, a0, v0 +; RV64-NEXT: th.vmv.v.v v0, v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv1i64.i64( + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv2i64.i64( + , + i64, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv2i1_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m2, d1 +; RV32-NEXT: th.vlse.v v12, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vmsbc.vvm v10, v8, v12, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vmv.v.v v0, v10 +; RV32-NEXT: th.vsetvl zero, 
a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv2i1_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m2, d1 +; RV64-NEXT: th.vmsbc.vxm v10, v8, a0, v0 +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: csrr a1, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vmv.v.v v0, v10 +; RV64-NEXT: th.vsetvl zero, a0, a1 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv2i64.i64( + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv4i64.i64( + , + i64, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv4i1_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m4, d1 +; RV32-NEXT: th.vlse.v v16, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vmsbc.vvm v12, v8, v16, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vmv.v.v v0, v12 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv4i1_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m4, d1 +; RV64-NEXT: th.vmsbc.vxm v12, v8, a0, v0 +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: csrr a1, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vmv.v.v v0, v12 +; RV64-NEXT: th.vsetvl zero, a0, a1 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv4i64.i64( + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vmsbc.borrow.in.nxv8i64.i64( + , + i64, + , + iXLen); + +define @intrinsic_vmsbc_borrow_in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv8i1_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: csrr a3, vl +; RV32-NEXT: csrr a4, vtype +; RV32-NEXT: th.vsetvli zero, 
zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a3, a4 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: th.vsetvli zero, a2, e64, m8, d1 +; RV32-NEXT: th.vlse.v v24, (a0), zero +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: th.vmsbc.vvm v16, v8, v24, v0 +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: csrr a1, vtype +; RV32-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV32-NEXT: th.vmv.v.v v0, v16 +; RV32-NEXT: th.vsetvl zero, a0, a1 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmsbc_borrow_in_vxm_nxv8i1_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: csrr a2, vl +; RV64-NEXT: csrr a3, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vsetvl zero, a2, a3 +; RV64-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; RV64-NEXT: th.vmsbc.vxm v16, v8, a0, v0 +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: csrr a1, vtype +; RV64-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; RV64-NEXT: th.vmv.v.v v0, v16 +; RV64-NEXT: th.vsetvl zero, a0, a1 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbc.borrow.in.nxv8i64.i64( + %0, + i64 %1, + %2, + iXLen %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vsbc.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vsbc.ll new file mode 100644 index 00000000000000..da171729e03149 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vsbc.ll @@ -0,0 +1,1333 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.th.vsbc.nxv8i8.nxv8i8( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv8i8.nxv8i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv16i8.nxv16i8( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: 
th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv16i8.nxv16i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv32i8.nxv32i8( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv32i8.nxv32i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv64i8.nxv64i8( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv64i8.nxv64i8( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv4i16.nxv4i16( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; 
CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv4i16.nxv4i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv8i16.nxv8i16( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv8i16.nxv8i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv16i16.nxv16i16( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv16i16.nxv16i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv32i16.nxv32i16( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv32i16.nxv32i16( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv2i32.nxv2i32( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv2i32.nxv2i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv4i32.nxv4i32( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv4i32.nxv4i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv8i32.nxv8i32( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv8i32.nxv8i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv16i32.nxv16i32( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; 
CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv16i32.nxv16i32( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv1i64.nxv1i64( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv1i64.nxv1i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv2i64.nxv2i64( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv2i64.nxv2i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv4i64.nxv4i64( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v12, v0 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.th.vsbc.nxv4i64.nxv4i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv8i64.nxv8i64( + , + , + , + , + iXLen); + +define @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vsbc.vvm v8, v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv8i64.nxv8i64( + undef, + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv8i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vsbc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv8i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv16i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vsbc.vxm v8, v8, a0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsbc.nxv16i8.i8( + undef, + %0, + i8 %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.th.vsbc.nxv32i8.i8( + , + , + i8, + , + iXLen); + +define @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: 
th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m4, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.th.vsbc.nxv32i8.i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.th.vsbc.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m8, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.th.vsbc.nxv64i8.i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.th.vsbc.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m1, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.th.vsbc.nxv4i16.i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.th.vsbc.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m2, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.th.vsbc.nxv8i16.i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.th.vsbc.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m4, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.th.vsbc.nxv16i16.i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.th.vsbc.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e16, m8, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.th.vsbc.nxv32i16.i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vsbc.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m1, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vsbc.nxv2i32.i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vsbc.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m2, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vsbc.nxv4i32.i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vsbc.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m4, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vsbc.nxv8i32.i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.th.vsbc.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e32, m8, d1
+; CHECK-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.th.vsbc.nxv16i32.i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.th.vsbc.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    csrr a3, vl
+; RV32-NEXT:    csrr a4, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a3, a4
+; RV32-NEXT:    csrr a3, vl
+; RV32-NEXT:    csrr a4, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a3, a4
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    th.vsetvli zero, a2, e64, m1, d1
+; RV32-NEXT:    th.vlse.v v9, (a0), zero
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    csrr a1, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a0, a1
+; RV32-NEXT:    th.vsbc.vvm v8, v8, v9, v0
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    csrr a1, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a0, a1
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    th.vsetvli zero, a1, e64, m1, d1
+; RV64-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.th.vsbc.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.th.vsbc.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    csrr a3, vl
+; RV32-NEXT:    csrr a4, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a3, a4
+; RV32-NEXT:    csrr a3, vl
+; RV32-NEXT:    csrr a4, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a3, a4
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    th.vsetvli zero, a2, e64, m2, d1
+; RV32-NEXT:    th.vlse.v v10, (a0), zero
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    csrr a1, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a0, a1
+; RV32-NEXT:    th.vsbc.vvm v8, v8, v10, v0
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    csrr a1, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a0, a1
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    th.vsetvli zero, a1, e64, m2, d1
+; RV64-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.th.vsbc.nxv2i64.i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.th.vsbc.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    csrr a3, vl
+; RV32-NEXT:    csrr a4, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a3, a4
+; RV32-NEXT:    csrr a3, vl
+; RV32-NEXT:    csrr a4, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a3, a4
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    th.vsetvli zero, a2, e64, m4, d1
+; RV32-NEXT:    th.vlse.v v12, (a0), zero
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    csrr a1, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a0, a1
+; RV32-NEXT:    th.vsbc.vvm v8, v8, v12, v0
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    csrr a1, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a0, a1
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    th.vsetvli zero, a1, e64, m4, d1
+; RV64-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.th.vsbc.nxv4i64.i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.th.vsbc.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    csrr a3, vl
+; RV32-NEXT:    csrr a4, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a3, a4
+; RV32-NEXT:    csrr a3, vl
+; RV32-NEXT:    csrr a4, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a3, a4
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    th.vsetvli zero, a2, e64, m8, d1
+; RV32-NEXT:    th.vlse.v v16, (a0), zero
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    csrr a1, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a0, a1
+; RV32-NEXT:    th.vsbc.vvm v8, v8, v16, v0
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    csrr a1, vtype
+; RV32-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV32-NEXT:    th.vsetvl zero, a0, a1
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    csrr a2, vl
+; RV64-NEXT:    csrr a3, vtype
+; RV64-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; RV64-NEXT:    th.vsetvl zero, a2, a3
+; RV64-NEXT:    th.vsetvli zero, a1, e64, m8, d1
+; RV64-NEXT:    th.vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.th.vsbc.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 8 x i64> %a
+}
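
For readers unfamiliar with the instruction these tests exercise: `th.vsbc.vxm` (like the standard RVV `vsbc.vxm`) performs an element-wise subtract of a scalar from a vector while also consuming a per-element borrow bit taken from the mask register `v0`. A minimal scalar reference model of that semantics is sketched below; it is illustrative only (the function and variable names are not from the patch), assumes SEW=8, and ignores `vl`/tail and register-grouping details.

```c
#include <stddef.h>
#include <stdint.h>

/* Rough reference model for th.vsbc.vxm (assumption: same semantics as
 * RVV vsbc.vxm): vd[i] = vs2[i] - rs1 - v0.mask[i], where the borrow
 * comes from the mask register and no carry/borrow-out is produced. */
static void vsbc_vxm_ref(uint8_t *vd, const uint8_t *vs2, uint8_t rs1,
                         const uint8_t *borrow_mask, size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    vd[i] = (uint8_t)(vs2[i] - rs1 - (borrow_mask[i] & 1));
}
```

This also explains the RV32 codegen above: since a 64-bit scalar does not fit in a single GPR on RV32, the value is spilled to the stack and broadcast with `th.vlse.v`, and the vector-vector form `th.vsbc.vvm` is used instead of `th.vsbc.vxm`.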