[llvm] [RISCV] Add tests for combineBinOpOfZExts. NFC (PR #86689)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 26 09:31:31 PDT 2024


================
@@ -0,0 +1,146 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+
+; Check that we perform binary arithmetic in a narrower type where possible, via
+; combineBinOpOfZExt or otherwise.
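+; For example, in @add below the two i8 operands are added with an e8
+; vwaddu.vv (producing an i16 result) and the sum is zero-extended once with
+; vzext.vf2, rather than zero-extending both operands to i32 and adding at e32.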
+
+define <vscale x 8 x i32> @add(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: add:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vwaddu.vv v12, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v8, v12
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %add = add <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %add
+}
+
+define <vscale x 8 x i32> @sub(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sub:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vwsubu.vv v12, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v8, v12
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %sub
+}
+
+define <vscale x 8 x i32> @mul(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: mul:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vwmulu.vv v12, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v8, v12
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %mul = mul <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %mul
+}
+
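+; The zero-extended operands are known non-negative, so sdiv is converted to
+; udiv (vdivu.vv). The division itself is not narrowed and still runs at e32.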
+define <vscale x 8 x i32> @sdiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sdiv:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vzext.vf4 v16, v9
+; CHECK-NEXT:    vdivu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %sdiv = sdiv <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %sdiv
+}
+
+define <vscale x 8 x i32> @udiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: udiv:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vzext.vf4 v16, v9
+; CHECK-NEXT:    vdivu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %udiv = udiv <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %udiv
+}
+
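+; As with sdiv, the zero-extended operands allow srem to be converted to urem
+; (vremu.vv), but the operation still runs at e32.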
+define <vscale x 8 x i32> @srem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: srem:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vzext.vf4 v16, v9
+; CHECK-NEXT:    vremu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %srem = srem <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %srem
+}
+
+define <vscale x 8 x i32> @urem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: urem:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vzext.vf4 v16, v9
+; CHECK-NEXT:    vremu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %urem = urem <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %urem
+}
+
+define <vscale x 8 x i32> @and(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
----------------
topperc wrote:

This was already optimized.
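
(Illustration, not taken from the PR: zext distributes over and, i.e.
and (zext a), (zext b) == zext (and a, b), so the e32 and can already be
rewritten into the narrow type, presumably by generic DAGCombine's fold that
hoists logic ops above matching extends. Roughly:

  %and.narrow = and <vscale x 8 x i8> %a, %b    ; and performed at e8
  %and = zext <vscale x 8 x i8> %and.narrow to <vscale x 8 x i32>

so no new RISC-V-specific combine is needed for the bitwise ops.)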

https://github.com/llvm/llvm-project/pull/86689

