[llvm] 54803f8 - [RISCV] Add test cases for widening add/sub with mismatched extends. NFC (#166700)

via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 6 07:23:27 PST 2025


Author: Craig Topper
Date: 2025-11-06T07:23:23-08:00
New Revision: 54803f8fce5b66fec9a39c6a532a5778083e6e40

URL: https://github.com/llvm/llvm-project/commit/54803f8fce5b66fec9a39c6a532a5778083e6e40
DIFF: https://github.com/llvm/llvm-project/commit/54803f8fce5b66fec9a39c6a532a5778083e6e40.diff

LOG: [RISCV] Add test cases for widening add/sub with mismatched extends. NFC (#166700)

These test cases contain an add and a sub with the same two operands,
where one operand is a sign extend and the other is a zero extend.

The sub can only form vwsub.wv, but because add is commutable it could
form either vwadd.wv or vwaddu.wv, depending on which extend is removed.
We want to form vwadd.wv to match the sub so that the vsext can be removed.

Depending on the order of the instructions and the operand order of the
add, we may form vwaddu.wv instead, in which case no extends are removed.
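
As a minimal sketch of the pattern (mirroring the tests added below), the IR
and the desired lowering look like:

    %a = zext <vscale x 4 x i16> %x to <vscale x 4 x i32>
    %b = sext <vscale x 4 x i16> %y to <vscale x 4 x i32>
    %c = sub <vscale x 4 x i32> %a, %b ; only vwsub.wv: keep %a wide, sign-extend %y internally
    %d = add <vscale x 4 x i32> %a, %b ; want vwadd.wv so the explicit vsext of %y dies too;
                                       ; vwaddu.wv would instead keep both vzext and vsext live
    %e = mul <vscale x 4 x i32> %c, %d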

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
index ad2ed47e67e64..b1f0eee3e9f52 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
@@ -570,7 +570,83 @@ define <vscale x 2 x i32> @vwop_vscale_zext_i8i32_multiple_users(ptr %x, ptr %y,
   ret <vscale x 2 x i32> %i
 }
 
+define <vscale x 4 x i32> @mismatched_extend_sub_add(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; FOLDING-LABEL: mismatched_extend_sub_add:
+; FOLDING:       # %bb.0:
+; FOLDING-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; FOLDING-NEXT:    vzext.vf2 v10, v8
+; FOLDING-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; FOLDING-NEXT:    vwsub.wv v12, v10, v9
+; FOLDING-NEXT:    vwadd.wv v10, v10, v9
+; FOLDING-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; FOLDING-NEXT:    vmul.vv v8, v12, v10
+; FOLDING-NEXT:    ret
+  %a = zext <vscale x 4 x i16> %x to <vscale x 4 x i32>
+  %b = sext <vscale x 4 x i16> %y to <vscale x 4 x i32>
+  %c = sub <vscale x 4 x i32> %a, %b
+  %d = add <vscale x 4 x i32> %a, %b
+  %e = mul <vscale x 4 x i32> %c, %d
+  ret <vscale x 4 x i32> %e
+}
+
+; FIXME: this should remove the vsext
+define <vscale x 4 x i32> @mismatched_extend_sub_add_commuted(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; FOLDING-LABEL: mismatched_extend_sub_add_commuted:
+; FOLDING:       # %bb.0:
+; FOLDING-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; FOLDING-NEXT:    vzext.vf2 v10, v8
+; FOLDING-NEXT:    vsext.vf2 v12, v9
+; FOLDING-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; FOLDING-NEXT:    vwsub.wv v10, v10, v9
+; FOLDING-NEXT:    vwaddu.wv v12, v12, v8
+; FOLDING-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; FOLDING-NEXT:    vmul.vv v8, v10, v12
+; FOLDING-NEXT:    ret
+  %a = zext <vscale x 4 x i16> %x to <vscale x 4 x i32>
+  %b = sext <vscale x 4 x i16> %y to <vscale x 4 x i32>
+  %c = sub <vscale x 4 x i32> %a, %b
+  %d = add <vscale x 4 x i32> %b, %a
+  %e = mul <vscale x 4 x i32> %c, %d
+  ret <vscale x 4 x i32> %e
+}
 
+define <vscale x 4 x i32> @mismatched_extend_add_sub(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; FOLDING-LABEL: mismatched_extend_add_sub:
+; FOLDING:       # %bb.0:
+; FOLDING-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; FOLDING-NEXT:    vzext.vf2 v10, v8
+; FOLDING-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; FOLDING-NEXT:    vwadd.wv v12, v10, v9
+; FOLDING-NEXT:    vwsub.wv v10, v10, v9
+; FOLDING-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; FOLDING-NEXT:    vmul.vv v8, v12, v10
+; FOLDING-NEXT:    ret
+  %a = zext <vscale x 4 x i16> %x to <vscale x 4 x i32>
+  %b = sext <vscale x 4 x i16> %y to <vscale x 4 x i32>
+  %c = add <vscale x 4 x i32> %a, %b
+  %d = sub <vscale x 4 x i32> %a, %b
+  %e = mul <vscale x 4 x i32> %c, %d
+  ret <vscale x 4 x i32> %e
+}
+
+define <vscale x 4 x i32> @mismatched_extend_add_sub_commuted(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
+; FOLDING-LABEL: mismatched_extend_add_sub_commuted:
+; FOLDING:       # %bb.0:
+; FOLDING-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; FOLDING-NEXT:    vzext.vf2 v10, v8
+; FOLDING-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; FOLDING-NEXT:    vwadd.wv v12, v10, v9
+; FOLDING-NEXT:    vwsub.wv v10, v10, v9
+; FOLDING-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; FOLDING-NEXT:    vmul.vv v8, v12, v10
+; FOLDING-NEXT:    ret
+  %a = zext <vscale x 4 x i16> %x to <vscale x 4 x i32>
+  %b = sext <vscale x 4 x i16> %y to <vscale x 4 x i32>
+  %c = add <vscale x 4 x i32> %a, %b
+  %d = sub <vscale x 4 x i32> %a, %b
+  %e = mul <vscale x 4 x i32> %c, %d
+  ret <vscale x 4 x i32> %e
+}
 
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; RV32: {{.*}}

