[llvm] 8ca4aa5 - [RISCV][VLOPT] Use vadd as user instruction in vl-opt-instrs test in more places
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 10 09:03:41 PST 2024
Author: Michael Maitland
Date: 2024-12-10T09:03:05-08:00
New Revision: 8ca4aa576c0bfd9f7dfaa4768d32ab0ff842f59b
URL: https://github.com/llvm/llvm-project/commit/8ca4aa576c0bfd9f7dfaa4768d32ab0ff842f59b
DIFF: https://github.com/llvm/llvm-project/commit/8ca4aa576c0bfd9f7dfaa4768d32ab0ff842f59b.diff
LOG: [RISCV][VLOPT] Use vadd as user instruction in vl-opt-instrs test in more places
We've decided to use vadd as the user instruction where possible for simplicity
in this file. This patch switches more of the tests over to using vadd as the user.
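For reference, every test in this file follows the same two-instruction shape, sketched below in LLVM IR (a minimal illustration of the pattern; the function name is made up, not copied from the file): a producer intrinsic runs at VLMAX (an AVL of iXLen -1), and a user consumes its result at the smaller VL %vl. Because only the first %vl elements of the producer's result are demanded, the VL optimizer may shrink the producer's VL down to %vl. Keeping the user fixed as vadd means each test varies only the producer under scrutiny.

define <vscale x 4 x i32> @producer_then_user(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
  ; Producer under test: executes at VLMAX (AVL of -1).
  %1 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
  ; Generic vadd user: demands only the first %vl elements of %1, so
  ; VLOPT may reduce the producer's VL from VLMAX down to %vl.
  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
  ret <vscale x 4 x i32> %2
}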
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index a6d0e806228f7a..daea264cdf7d6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -72,17 +72,17 @@ define <vscale x 4 x i32> @vsub_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vsub.vv v8, v8, v10
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vsub.vv v8, v8, v10
+; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vsub_vv:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vsub.vv v8, v8, v10
-; VLOPT-NEXT: vsub.vv v8, v8, v10
+; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -92,17 +92,17 @@ define <vscale x 4 x i32> @vsub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vsub.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vsub.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vsub_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vsub.vx v10, v8, a0
-; VLOPT-NEXT: vsub.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -132,17 +132,17 @@ define <vscale x 4 x i32> @vrsub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vrsub.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vsub.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vrsub_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vrsub.vx v10, v8, a0
-; VLOPT-NEXT: vsub.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -608,17 +608,17 @@ define <vscale x 4 x i32> @vsll_vi(<vscale x 4 x i32> %a, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vsll.vi v10, v8, 5
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vsll.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vsll_vi:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vsll.vi v10, v8, 5
-; VLOPT-NEXT: vsll.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.iXLen(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen 5, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -808,17 +808,17 @@ define <vscale x 4 x i32> @vmul_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vmul.vv v8, v8, v10
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v8, v10
+; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmul_vv:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vmul.vv v8, v8, v10
-; VLOPT-NEXT: vmul.vv v8, v8, v10
+; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -828,17 +828,17 @@ define <vscale x 4 x i32> @vmul_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vmul.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmul_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vmul.vx v10, v8, a0
-; VLOPT-NEXT: vmul.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -848,17 +848,17 @@ define <vscale x 4 x i32> @vmulh_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vmulh.vv v8, v8, v10
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v8, v10
+; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmulh_vv:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vmulh.vv v8, v8, v10
-; VLOPT-NEXT: vmul.vv v8, v8, v10
+; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -868,17 +868,17 @@ define <vscale x 4 x i32> @vmulh_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vmulh.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmulh_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vmulh.vx v10, v8, a0
-; VLOPT-NEXT: vmul.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -888,17 +888,17 @@ define <vscale x 4 x i32> @vmulhu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vmulhu.vv v8, v8, v10
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v8, v10
+; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmulhu_vv:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vmulhu.vv v8, v8, v10
-; VLOPT-NEXT: vmul.vv v8, v8, v10
+; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -908,17 +908,17 @@ define <vscale x 4 x i32> @vmulhu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vmulhu.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmulhu_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vmulhu.vx v10, v8, a0
-; VLOPT-NEXT: vmul.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -928,17 +928,17 @@ define <vscale x 4 x i32> @vmulhsu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32>
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vmulhsu.vv v8, v8, v10
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v8, v10
+; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmulhsu_vv:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vmulhsu.vv v8, v8, v10
-; VLOPT-NEXT: vmul.vv v8, v8, v10
+; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -948,17 +948,17 @@ define <vscale x 4 x i32> @vmulhsu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl)
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vmulhsu.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmulhsu_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vmulhsu.vx v10, v8, a0
-; VLOPT-NEXT: vmul.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -968,17 +968,17 @@ define <vscale x 4 x i32> @vdivu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vdivu.vv v8, v8, v10
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v8, v10
+; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vdivu_vv:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vdivu.vv v8, v8, v10
-; VLOPT-NEXT: vmul.vv v8, v8, v10
+; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -988,17 +988,17 @@ define <vscale x 4 x i32> @vdivu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vdivu.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vdivu_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vdivu.vx v10, v8, a0
-; VLOPT-NEXT: vmul.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -1008,17 +1008,17 @@ define <vscale x 4 x i32> @vdiv_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vdiv.vv v8, v8, v10
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v8, v10
+; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vdiv_vv:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vdiv.vv v8, v8, v10
-; VLOPT-NEXT: vmul.vv v8, v8, v10
+; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -1028,17 +1028,17 @@ define <vscale x 4 x i32> @vdiv_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vdiv.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vdiv_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vdiv.vx v10, v8, a0
-; VLOPT-NEXT: vmul.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -1048,17 +1048,17 @@ define <vscale x 4 x i32> @vremu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vremu.vv v8, v8, v10
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v8, v10
+; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vremu_vv:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vremu.vv v8, v8, v10
-; VLOPT-NEXT: vmul.vv v8, v8, v10
+; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -1068,17 +1068,17 @@ define <vscale x 4 x i32> @vremu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vremu.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vremu_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vremu.vx v10, v8, a0
-; VLOPT-NEXT: vmul.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -1088,17 +1088,17 @@ define <vscale x 4 x i32> @vrem_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vrem.vv v8, v8, v10
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v8, v10
+; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vrem_vv:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vrem.vv v8, v8, v10
-; VLOPT-NEXT: vmul.vv v8, v8, v10
+; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
@@ -1108,17 +1108,17 @@ define <vscale x 4 x i32> @vrem_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vrem.vx v10, v8, a0
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vmul.vv v8, v10, v8
+; NOVLOPT-NEXT: vadd.vv v8, v10, v8
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vrem_vx:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vrem.vx v10, v8, a0
-; VLOPT-NEXT: vmul.vv v8, v10, v8
+; VLOPT-NEXT: vadd.vv v8, v10, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
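The effect under test is visible in each pair of check prefixes above. Without the VL optimizer (NOVLOPT), the producer runs at VLMAX and a second vsetvli is needed to lower VL to %vl for the user; with it (VLOPT), the producer's VL is reduced up front and the extra vsetvli disappears. Condensed from the vsub_vv test above:

# NOVLOPT: producer at VLMAX, then a VL toggle before the user
vsetvli a1, zero, e32, m2, ta, ma    # VL = VLMAX
vsub.vv v8, v8, v10                  # producer
vsetvli zero, a0, e32, m2, ta, ma    # VL = %vl
vadd.vv v8, v8, v10                  # user

# VLOPT: producer's VL shrunk to %vl, a single vsetvli suffices
vsetvli zero, a0, e32, m2, ta, ma    # VL = %vl
vsub.vv v8, v8, v10
vadd.vv v8, v8, v10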