[llvm] [RISCV][MC] Remove register substitutions in `RISCVAsmPrinter`. NFC (PR #102728)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 9 23:47:18 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Brandon Wu (4vtomat)
Changes:
Since these registers have proper names, the `AsmPrinter` and
`AsmParser` can recognize them correctly, so we don't need to
substitute them manually.
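The large test churn below is a side effect of alias printing, not a functional change: `vfabs.v vd, vs` and `vnot.v vd, vs` are assembler pseudoinstructions for `vfsgnjx.vv vd, vs, vs` and `vxor.vi vd, vs, -1`, and with the substitution gone the printer emits the canonical forms instead. A minimal before/after sketch (hypothetical operands modeled on the tests; the alias-matching explanation is an inference from the diff, not stated in the patch):

```asm
# Hypothetical masked fabs at LMUL=2, operands modeled on the tests below.
# Before this patch, the v8m2 group operand was rewritten to its first
# subregister v8, so the alias printer matched the vfabs.v pseudoinstruction:
vfabs.v    v12, v8, v0.t
# After this patch, the group register is passed through unchanged; it still
# prints as "v8", but the printer now emits the canonical instruction that
# vfabs.v expands to:
vfsgnjx.vv v12, v8, v8, v0.t
```

Either spelling assembles to the same encoding, since a register group such as `v8m2` uses the encoding of its first register `v8`, which is presumably why the patch can be NFC.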
---
Patch is 499.81 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/102728.diff
64 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp (+1-13)
- (modified) llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll (+48-48)
- (modified) llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll (+60-60)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll (+60-60)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll (+20-20)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/floor-vp.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll (+34-34)
- (modified) llvm/test/CodeGen/RISCV/rvv/rint-vp.ll (+34-34)
- (modified) llvm/test/CodeGen/RISCV/rvv/round-vp.ll (+34-34)
- (modified) llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll (+34-34)
- (modified) llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll (+34-34)
- (modified) llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll (+14-14)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll (+34-34)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll (+13-13)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll (+14-14)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll (+34-34)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll (+19-19)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll (+26-26)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index 93677433c04405..64e24394d66d9c 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -996,19 +996,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
case MachineOperand::MO_Register: {
Register Reg = MO.getReg();
- if (RISCV::VRM2RegClass.contains(Reg) ||
- RISCV::VRM4RegClass.contains(Reg) ||
- RISCV::VRM8RegClass.contains(Reg)) {
- Reg = TRI->getSubReg(Reg, RISCV::sub_vrm1_0);
- assert(Reg && "Subregister does not exist");
- } else if (RISCV::FPR16RegClass.contains(Reg)) {
- Reg =
- TRI->getMatchingSuperReg(Reg, RISCV::sub_16, &RISCV::FPR32RegClass);
- assert(Reg && "Subregister does not exist");
- } else if (RISCV::FPR64RegClass.contains(Reg)) {
- Reg = TRI->getSubReg(Reg, RISCV::sub_32);
- assert(Reg && "Superregister does not exist");
- } else if (RISCV::VRN2M1RegClass.contains(Reg) ||
+ if (RISCV::VRN2M1RegClass.contains(Reg) ||
RISCV::VRN2M2RegClass.contains(Reg) ||
RISCV::VRN2M4RegClass.contains(Reg) ||
RISCV::VRN3M1RegClass.contains(Reg) ||
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index dec67721514de6..838b86a33f3adb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -139,7 +139,7 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v12, v8, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
@@ -161,7 +161,7 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
; CHECK-NEXT: lui a1, %hi(.LCPI7_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI7_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -183,7 +183,7 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
; CHECK-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v16, v8, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
@@ -205,7 +205,7 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16_unmasked(<vscale x 16 x half> %
; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI9_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -227,7 +227,7 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; CHECK-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v24, v8, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
@@ -249,7 +249,7 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %
; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI11_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
; CHECK-NEXT: vmflt.vf v0, v16, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -353,7 +353,7 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscal
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v12, v8, v8, v0.t
; CHECK-NEXT: lui a0, 307200
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -375,7 +375,7 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32_unmasked(<vscale x 4 x float> %v
; CHECK-LABEL: vp_ceil_vv_nxv4f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
; CHECK-NEXT: lui a0, 307200
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
@@ -397,7 +397,7 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscal
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v16, v8, v8, v0.t
; CHECK-NEXT: lui a0, 307200
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -419,7 +419,7 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32_unmasked(<vscale x 8 x float> %v
; CHECK-LABEL: vp_ceil_vv_nxv8f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
; CHECK-NEXT: lui a0, 307200
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v12, fa5
@@ -441,7 +441,7 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vs
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v24, v8, v8, v0.t
; CHECK-NEXT: lui a0, 307200
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -463,7 +463,7 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32_unmasked(<vscale x 16 x float>
; CHECK-LABEL: vp_ceil_vv_nxv16f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
; CHECK-NEXT: lui a0, 307200
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v16, fa5
@@ -529,7 +529,7 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vsc
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v12, v8, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
@@ -551,7 +551,7 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64_unmasked(<vscale x 2 x double>
; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -573,7 +573,7 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vsc
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v16, v8, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
@@ -595,7 +595,7 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64_unmasked(<vscale x 4 x double>
; CHECK-NEXT: lui a1, %hi(.LCPI27_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -617,7 +617,7 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vsc
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v24, v8, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
@@ -639,7 +639,7 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64_unmasked(<vscale x 7 x double>
; CHECK-NEXT: lui a1, %hi(.LCPI29_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI29_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
; CHECK-NEXT: vmflt.vf v0, v16, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -661,7 +661,7 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vsc
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v24, v8, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
@@ -683,7 +683,7 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64_unmasked(<vscale x 8 x double>
; CHECK-NEXT: lui a1, %hi(.LCPI31_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI31_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
; CHECK-NEXT: vmflt.vf v0, v16, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -721,7 +721,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
+; CHECK-NEXT: vfsgnjx.vv v24, v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 3
@@ -742,7 +742,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: .LBB32_2:
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: vfsgnjx.vv v24, v8, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
@@ -773,7 +773,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64_unmasked(<vscale x 16 x doubl
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
+; CHECK-NEXT: vfsgnjx.vv v24, v16, v16
; CHECK-NEXT: vmflt.vf v0, v24, fa5
; CHECK-NEXT: fsrmi a2, 3
; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
@@ -786,7 +786,7 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64_unmasked(<vscale x 16 x doubl
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB33_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
+; CHECK-NEXT: vfsgnjx.vv v24, v8, v8
; CHECK-NEXT: vmflt.vf v0, v24, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index d51f5eacd7d91a..c87088a65992fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -270,7 +270,7 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 4
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
-; CHECK-ZVE64X-NEXT: vnot.v v8, v8
+; CHECK-ZVE64X-NEXT: vxor.vi v8, v8, -1
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 1
; CHECK-ZVE64X-NEXT: li a0, 85
; CHECK-ZVE64X-NEXT: vand.vx v10, v10, a0
@@ -333,7 +333,7 @@ define <vscale x 32 x i8> @ctlz_nxv32i8(<vscale x 32 x i8> %va) {
; CHECK-NEXT: vor.vv v8, v8, v12
; CHECK-NEXT: vsrl.vi v12, v8, 4
; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: vnot.v v8, v8
+; CHECK-NEXT: vxor.vi v8, v8, -1
; CHECK-NEXT: vsrl.vi v12, v8, 1
; CHECK-NEXT: li a0, 85
; CHECK-NEXT: vand.vx v12, v12, a0
@@ -368,7 +368,7 @@ define <vscale x 64 x i8> @ctlz_nxv64i8(<vscale x 64 x i8> %va) {
; CHECK-NEXT: vor.vv v8, v8, v16
; CHECK-NEXT: vsrl.vi v16, v8, 4
; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vnot.v v8, v8
+; CHECK-NEXT: vxor.vi v8, v8, -1
; CHECK-NEXT: vsrl.vi v16, v8, 1
; CHECK-NEXT: li a0, 85
; CHECK-NEXT: vand.vx v16, v16, a0
@@ -603,7 +603,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 8
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
-; CHECK-ZVE64X-NEXT: vnot.v v8, v8
+; CHECK-ZVE64X-NEXT: vxor.vi v8, v8, -1
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 1
; CHECK-ZVE64X-NEXT: lui a0, 5
; CHECK-ZVE64X-NEXT: addi a0, a0, 1365
@@ -669,7 +669,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v12
; CHECK-ZVE64X-NEXT: vsrl.vi v12, v8, 8
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v12
-; CHECK-ZVE64X-NEXT: vnot.v v8, v8
+; CHECK-ZVE64X-NEXT: vxor.vi v8, v8, -1
; CHECK-ZVE64X-NEXT: vsrl.vi v12, v8, 1
; CHECK-ZVE64X-NEXT: lui a0, 5
; CHECK-ZVE64X-NEXT: addi a0, a0, 1365
@@ -735,7 +735,7 @@ define <vscale x 32 x i16> @ctlz_nxv32i16(<vscale x 32 x i16> %va) {
; CHECK-NEXT: vor.vv v8, v8, v16
; CHECK-NEXT: vsrl.vi v16, v8, 8
; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vnot.v v8, v8
+; CHECK-NEXT: vxor.vi v8, v8, -1
; CHECK-NEXT: vsrl.vi v16, v8, 1
; CHECK-NEXT: lui a0, 5
; CHECK-NEXT: addi a0, a0, 1365
@@ -925,7 +925,7 @@ define <vscale x 4 x i32> @ctlz_nxv4i32(<vscale x 4 x i32> %va) {
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 16
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
-; CHECK-ZVE64X-NEXT: vnot.v v8, v8
+; CHECK-ZVE64X-NEXT: vxor.vi v8, v8, -1
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 1
; CHECK-ZVE64X-NEXT: lui a0, 349525
; CHECK-ZVE64X-NEXT: addi a0, a0, 1365
@@ -997,7 +997,7 @@ define <vscale x 8 x i32> @ctlz_nxv8i32(<vscale x 8 x i32> %va) {
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v12
; CHECK-ZVE64X-NEXT: vsrl.vi v12, v8, 16
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v12
-; CHECK-ZVE64X-NEXT: vnot.v v8, v8
+; CHECK-ZVE64X-NEXT: vxor.vi v8, v8, -1
; CHECK-ZVE64X-NEXT: vsrl.vi v12, v8, 1
; CHECK-ZVE64X-NEXT: lui a0, 349525
; CHECK-ZVE64X-NEXT: addi a0, a0, 1365
@@ -1069,7 +1069,7 @@ define <vscale x 16 x i32> @ctlz_nxv16i32(<vscale x 16 x i32> %va) {
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v16
; CHECK-ZVE64X-NEXT: vsrl.vi v16, v8, 16
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v16
-; CHECK-ZVE64X-NEXT: vnot.v v8, v8
+; CHECK-ZVE64X-NEXT: vxor.vi v8, v8, -1
; CHECK-ZVE64X-NEXT: vsrl.vi v16, v8, 1
; CHECK-ZVE64X-NEXT: lui a0, 349525
; CHECK-ZVE64X-NEXT: addi a0, a0, 1365
@@ -1285,7 +1285,7 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
; RV32I-NEXT: li a0, 32
; RV32I-NEXT: vsrl.vx v10, v8, a0
; RV32I-NEXT: vor.vv v8, v8, v10
-; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vxor.vi v8, v8, -1
; RV32I-NEXT: vsrl.vi v10, v8, 1
; RV32I-NEXT: lui a0, 349525
; RV32I-NEXT: addi a0, a0, 1365
@@ -1337,7 +1337,7 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
; RV64I-NEXT: li a0, 32
; RV64I-NEXT: vsrl.vx v10, v8, a0
; RV64I-NEXT: vor.vv v8, v8, v10
-; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vxor.vi v8, v8, -1
; RV64I-NEXT: vsrl.vi v10, v8, 1
; RV64I-NEXT: lui a0, 349525
; RV64I-NEXT: addiw a0, a0, 1365
@@ -1425,7 +1425,7 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
; RV32I-NEXT: li a0, 32
; RV32I-NEXT: vsrl.vx v12, v8, a0
; RV32I-NEXT: vor.vv v8, v8, v12
-; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vxor.vi v8, v8, -1
; RV32I-NEXT: vsrl.vi v12, v8, 1
; RV32I-NEXT: lui a0, 349525
; RV32I-NEXT: addi a0, a0, 1365
@@ -1477,7 +1477,7 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
; RV64I-NEXT: li a0, 32
; RV64I-NEXT: vsrl.vx v12, v8, a0
; RV64I-NEXT: vor.vv v8, v8, v12
-; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vxor.vi v8, v8, -1
; RV64I-NEXT: vsrl.vi v12, v8, 1
; RV64I-NEXT: lui a0, 349525
; RV64I-NEXT: addiw a0, a0, 1365
@@ -1565,7 +1565,7 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
; RV32I-NEXT: li a0, 32
; RV32I-NEXT: vsrl.vx v16, v8, a0
; RV32I-NEXT: vor.vv v8, v8, v16
-; RV32I-NEXT: vnot.v v8, v8
+; RV32I-NEXT: vxor.vi v8, v8, -1
; RV32I-NEXT: vsrl.vi v16, v8, 1
; RV32I-NEXT: lui a0, 349525
; RV32I-NEXT: addi a0, a0, 1365
@@ -1617,7 +1617,7 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
; RV64I-NEXT: li a0, 32
; RV64I-NEXT: vsrl.vx v16, v8, a0
; RV64I-NEXT: vor.vv v8, v8, v16
-; RV64I-NEXT: vnot.v v8, v8
+; RV64I-NEXT: vxor.vi v8, v8, -1
; RV64I-NEXT: vsrl.vi v16, v8, 1
; RV64I-NEXT: lui a0, 349525
; RV64I-NEXT: addiw a0, a0, 1365
@@ -1930,7 +1930,7 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 4
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
-; CHECK-ZVE64X-NEXT: vnot.v v8, v8
+; CHECK-ZVE64X-NEXT: vxor.vi v8, v8, -1
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 1
; CHECK-ZVE64X-NEXT: li a0, 85
; CHECK-ZVE64X-NEXT: vand.vx v10, v10, a0
@@ -1988,7 +1988,7 @@ define <vscale x 32 x i8> @ctlz_zero_undef_nxv32i8(<vscale x 32 x i8> %va) {
; CHECK-NEXT: vor.vv v8, v8, v12
; CHECK-NEXT: vsrl.vi v12, v8, 4
; CHECK-NEXT: vor.vv v8, v8, v12
-; CHECK-NEXT: vnot.v v8, v8
+; CHECK-NEXT: vxor.vi v8, v8, -1
; CHECK-NEXT: vsrl.vi v12, v8, 1
; CHECK-NEXT: li a0, 85
; CHECK-NEXT: vand.vx v12, v12, a0
@@ -2022,7 +2022,7 @@ define <vscale x 64 x i8> @ctlz_zero_undef_nxv64i8(<vscale x 64 x i8> %va) {
; CHECK-NEXT: vor.vv v8, v8, v16
; CHECK-NEXT: vsrl.vi v16, v8, 4
; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vnot.v v8, v8
+; CHECK-NEXT: vxor.vi v8, v8, -1
; CHECK-NEXT: vsrl.vi v16, v8, 1
; CHECK-NEXT: li a0, 85
; CHECK-NEXT: vand.vx v16, v16, a0
@@ -2241,7 +2241,7 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 8
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10
-; CHECK-ZVE64X-NEXT: vnot.v v8, v8
+; CHECK-ZVE64X-NEXT: vxor.vi v8, v8, -1
; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 1
; CHECK-ZVE64X-NEXT: lui a0, 5
; CHECK-ZVE64X-NEXT: addi a0, a0, 1365
@@ -2302,7 +2302,7 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v12
; CHECK-ZVE64X-NEXT: vsrl.vi v12, v8, 8
; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v12
-; CHECK-ZVE64X-NEXT: vnot.v v8, v8
+; CHECK-ZVE64X-NEXT: vxor.vi v8, v8, -1
; CHECK-ZVE64X-NEXT: vsrl.vi v12, v8, 1
; CHECK-ZVE64X-NEXT: lui a0, 5
; CHECK-ZVE64X-NEXT: addi a0, a0, 1365
@@ -2363,7 +2363,7 @@ define <vscale x 32 x i16> @ctlz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
; CHECK-NEXT: vor.vv v8, v8, v16
; CHECK-NEXT: vsrl.vi v16, v8, 8
; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vnot.v v8, v8
+; CHECK-NEXT: vxor.vi v8, v8, -1
; CHECK-NEXT: vsrl.vi v16, v8, 1
; CHECK-NEXT: lui a0, 5
; CHECK-NEXT: addi a0, a0, 1365
@@ -2542,7 +2542,7 @@ define <vscale x 4 x i32> @ctlz_zero_und...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/102728