[llvm] [InstCombine][RISCV] Convert VPIntrinsics with splat operands to splats (PR #65706)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 7 20:19:51 PDT 2023


================
@@ -0,0 +1,896 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: opt -S -passes=instcombine,vector-combine %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s --check-prefixes=INST-COMBINE,BOTH
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s | FileCheck %s --check-prefixes=NO-INST-COMBINE,BOTH
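+; The first RUN line runs instcombine and vector-combine before llc so the
+; splat-operand fold can fire; the second runs llc on the unoptimized IR.
+; INST-COMBINE/NO-INST-COMBINE check the two variants, and BOTH checks
+; output that is identical in the two configurations.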
+
+declare <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.sdiv.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x float> @llvm.vp.frem.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)
+
+declare <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
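+; Each intrinsic is exercised twice: a *_allonesmask variant, where the mask
+; is a known all-ones splat and the VP operation with splat operands can be
+; folded to a scalar operation plus a single splat, and a *_anymask variant,
+; where the mask is an arbitrary argument and the fold must not fire.
+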
+define <vscale x 1 x i64> @add_nxv1i64_allonesmask(<vscale x 1 x i64> %x, i64 %y, i32 zeroext %evl) {
+; INST-COMBINE-LABEL: add_nxv1i64_allonesmask:
+; INST-COMBINE:       # %bb.0:
+; INST-COMBINE-NEXT:    addi a0, a0, 42
+; INST-COMBINE-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; INST-COMBINE-NEXT:    vmul.vx v8, v8, a0
+; INST-COMBINE-NEXT:    ret
+;
+; NO-INST-COMBINE-LABEL: add_nxv1i64_allonesmask:
+; NO-INST-COMBINE:       # %bb.0:
+; NO-INST-COMBINE-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; NO-INST-COMBINE-NEXT:    vmv.v.x v9, a0
+; NO-INST-COMBINE-NEXT:    li a0, 42
+; NO-INST-COMBINE-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NO-INST-COMBINE-NEXT:    vadd.vx v9, v9, a0
+; NO-INST-COMBINE-NEXT:    vmul.vv v8, v8, v9
+; NO-INST-COMBINE-NEXT:    ret
+  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
+  %mask = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %1 = insertelement <vscale x 1 x i64> poison, i64 %y, i32 0
+  %2 = shufflevector <vscale x 1 x i64> %1, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %3 = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %2, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 42, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %4 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %3, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %4
+}
+
+define <vscale x 1 x i64> @add_nxv1i64_anymask(<vscale x 1 x i64> %x, i64 %y, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; BOTH-LABEL: add_nxv1i64_anymask:
+; BOTH:       # %bb.0:
+; BOTH-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; BOTH-NEXT:    vmv.v.x v9, a0
+; BOTH-NEXT:    li a0, 42
+; BOTH-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; BOTH-NEXT:    vadd.vx v9, v9, a0, v0.t
+; BOTH-NEXT:    vmul.vv v8, v8, v9, v0.t
+; BOTH-NEXT:    ret
+  %1 = insertelement <vscale x 1 x i64> poison, i64 %y, i32 0
+  %2 = shufflevector <vscale x 1 x i64> %1, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %3 = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %2, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 42, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %4 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %3, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %4
+}
+
+define <vscale x 1 x i64> @sub_nxv1i64_allonesmask(<vscale x 1 x i64> %x, i64 %y, i32 zeroext %evl) {
+; INST-COMBINE-LABEL: sub_nxv1i64_allonesmask:
+; INST-COMBINE:       # %bb.0:
+; INST-COMBINE-NEXT:    addi a0, a0, -42
+; INST-COMBINE-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; INST-COMBINE-NEXT:    vmul.vx v8, v8, a0
+; INST-COMBINE-NEXT:    ret
+;
+; NO-INST-COMBINE-LABEL: sub_nxv1i64_allonesmask:
+; NO-INST-COMBINE:       # %bb.0:
+; NO-INST-COMBINE-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; NO-INST-COMBINE-NEXT:    vmv.v.x v9, a0
+; NO-INST-COMBINE-NEXT:    li a0, 42
+; NO-INST-COMBINE-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NO-INST-COMBINE-NEXT:    vsub.vx v9, v9, a0
+; NO-INST-COMBINE-NEXT:    vmul.vv v8, v8, v9
+; NO-INST-COMBINE-NEXT:    ret
+  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
+  %mask = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %1 = insertelement <vscale x 1 x i64> poison, i64 %y, i32 0
+  %2 = shufflevector <vscale x 1 x i64> %1, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %3 = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %2, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 42, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %4 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %3, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %4
+}
+
+define <vscale x 1 x i64> @sub_nxv1i64_anymask(<vscale x 1 x i64> %x, i64 %y, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; BOTH-LABEL: sub_nxv1i64_anymask:
+; BOTH:       # %bb.0:
+; BOTH-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; BOTH-NEXT:    vmv.v.x v9, a0
+; BOTH-NEXT:    li a0, 42
+; BOTH-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; BOTH-NEXT:    vsub.vx v9, v9, a0, v0.t
+; BOTH-NEXT:    vmul.vv v8, v8, v9, v0.t
+; BOTH-NEXT:    ret
+  %1 = insertelement <vscale x 1 x i64> poison, i64 %y, i32 0
+  %2 = shufflevector <vscale x 1 x i64> %1, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %3 = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %2, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 42, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %4 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %3, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %4
+}
+
+define <vscale x 1 x i64> @mul_nxv1i64_allonesmask(<vscale x 1 x i64> %x, i64 %y, i32 zeroext %evl) {
+; INST-COMBINE-LABEL: mul_nxv1i64_allonesmask:
+; INST-COMBINE:       # %bb.0:
+; INST-COMBINE-NEXT:    addi sp, sp, -32
+; INST-COMBINE-NEXT:    .cfi_def_cfa_offset 32
+; INST-COMBINE-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; INST-COMBINE-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; INST-COMBINE-NEXT:    .cfi_offset ra, -8
+; INST-COMBINE-NEXT:    .cfi_offset s0, -16
+; INST-COMBINE-NEXT:    csrr a2, vlenb
+; INST-COMBINE-NEXT:    slli a2, a2, 1
+; INST-COMBINE-NEXT:    sub sp, sp, a2
+; INST-COMBINE-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
+; INST-COMBINE-NEXT:    mv s0, a1
+; INST-COMBINE-NEXT:    addi a1, sp, 16
+; INST-COMBINE-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
+; INST-COMBINE-NEXT:    li a1, 42
+; INST-COMBINE-NEXT:    call __muldi3@plt
+; INST-COMBINE-NEXT:    vsetvli zero, s0, e64, m1, ta, ma
+; INST-COMBINE-NEXT:    addi a1, sp, 16
+; INST-COMBINE-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; INST-COMBINE-NEXT:    vmul.vx v8, v8, a0
+; INST-COMBINE-NEXT:    csrr a0, vlenb
+; INST-COMBINE-NEXT:    slli a0, a0, 1
+; INST-COMBINE-NEXT:    add sp, sp, a0
+; INST-COMBINE-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; INST-COMBINE-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; INST-COMBINE-NEXT:    addi sp, sp, 32
+; INST-COMBINE-NEXT:    ret
+;
+; NO-INST-COMBINE-LABEL: mul_nxv1i64_allonesmask:
+; NO-INST-COMBINE:       # %bb.0:
+; NO-INST-COMBINE-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; NO-INST-COMBINE-NEXT:    vmv.v.x v9, a0
+; NO-INST-COMBINE-NEXT:    li a0, 42
+; NO-INST-COMBINE-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NO-INST-COMBINE-NEXT:    vmul.vx v9, v9, a0
+; NO-INST-COMBINE-NEXT:    vmul.vv v8, v8, v9
+; NO-INST-COMBINE-NEXT:    ret
+  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
+  %mask = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %1 = insertelement <vscale x 1 x i64> poison, i64 %y, i32 0
+  %2 = shufflevector <vscale x 1 x i64> %1, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %3 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %2, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 42, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %4 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %3, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %4
+}
+
+define <vscale x 1 x i64> @mul_nxv1i64_anymask(<vscale x 1 x i64> %x, i64 %y, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; BOTH-LABEL: mul_nxv1i64_anymask:
+; BOTH:       # %bb.0:
+; BOTH-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; BOTH-NEXT:    vmv.v.x v9, a0
+; BOTH-NEXT:    li a0, 42
+; BOTH-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; BOTH-NEXT:    vmul.vx v9, v9, a0, v0.t
+; BOTH-NEXT:    vmul.vv v8, v8, v9, v0.t
+; BOTH-NEXT:    ret
+  %1 = insertelement <vscale x 1 x i64> poison, i64 %y, i32 0
+  %2 = shufflevector <vscale x 1 x i64> %1, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %3 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %2, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 42, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %4 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %3, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %4
+}
+
+define <vscale x 1 x i64> @sdiv_nxv1i64_allonesmask(<vscale x 1 x i64> %x, i64 %y, i32 zeroext %evl) {
+; INST-COMBINE-LABEL: sdiv_nxv1i64_allonesmask:
+; INST-COMBINE:       # %bb.0:
+; INST-COMBINE-NEXT:    addi sp, sp, -32
+; INST-COMBINE-NEXT:    .cfi_def_cfa_offset 32
+; INST-COMBINE-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; INST-COMBINE-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; INST-COMBINE-NEXT:    .cfi_offset ra, -8
+; INST-COMBINE-NEXT:    .cfi_offset s0, -16
+; INST-COMBINE-NEXT:    csrr a2, vlenb
+; INST-COMBINE-NEXT:    slli a2, a2, 1
+; INST-COMBINE-NEXT:    sub sp, sp, a2
+; INST-COMBINE-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
+; INST-COMBINE-NEXT:    mv s0, a1
+; INST-COMBINE-NEXT:    addi a1, sp, 16
+; INST-COMBINE-NEXT:    vs1r.v v8, (a1) # Unknown-size Folded Spill
+; INST-COMBINE-NEXT:    li a1, 42
+; INST-COMBINE-NEXT:    call __divdi3@plt
+; INST-COMBINE-NEXT:    vsetvli zero, s0, e64, m1, ta, ma
+; INST-COMBINE-NEXT:    addi a1, sp, 16
+; INST-COMBINE-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; INST-COMBINE-NEXT:    vmul.vx v8, v8, a0
+; INST-COMBINE-NEXT:    csrr a0, vlenb
+; INST-COMBINE-NEXT:    slli a0, a0, 1
+; INST-COMBINE-NEXT:    add sp, sp, a0
+; INST-COMBINE-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; INST-COMBINE-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; INST-COMBINE-NEXT:    addi sp, sp, 32
+; INST-COMBINE-NEXT:    ret
+;
+; NO-INST-COMBINE-LABEL: sdiv_nxv1i64_allonesmask:
+; NO-INST-COMBINE:       # %bb.0:
+; NO-INST-COMBINE-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; NO-INST-COMBINE-NEXT:    vmv.v.x v9, a0
+; NO-INST-COMBINE-NEXT:    li a0, 42
+; NO-INST-COMBINE-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NO-INST-COMBINE-NEXT:    vdiv.vx v9, v9, a0
+; NO-INST-COMBINE-NEXT:    vmul.vv v8, v8, v9
+; NO-INST-COMBINE-NEXT:    ret
+  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
+  %mask = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %1 = insertelement <vscale x 1 x i64> poison, i64 %y, i32 0
+  %2 = shufflevector <vscale x 1 x i64> %1, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %3 = call <vscale x 1 x i64> @llvm.vp.sdiv.nxv1i64(<vscale x 1 x i64> %2, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 42, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %4 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %3, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %4
+}
+
+define <vscale x 1 x i64> @sdiv_nxv1i64_anymask(<vscale x 1 x i64> %x, i64 %y, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; BOTH-LABEL: sdiv_nxv1i64_anymask:
+; BOTH:       # %bb.0:
+; BOTH-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; BOTH-NEXT:    vmv.v.x v9, a0
+; BOTH-NEXT:    li a0, 42
+; BOTH-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; BOTH-NEXT:    vdiv.vx v9, v9, a0, v0.t
+; BOTH-NEXT:    vmul.vv v8, v8, v9, v0.t
+; BOTH-NEXT:    ret
+  %1 = insertelement <vscale x 1 x i64> poison, i64 %y, i32 0
+  %2 = shufflevector <vscale x 1 x i64> %1, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %3 = call <vscale x 1 x i64> @llvm.vp.sdiv.nxv1i64(<vscale x 1 x i64> %2, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 42, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %4 = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %3, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %4
+}
+
+define <vscale x 1 x i64> @udiv_nxv1i64_allonesmask(<vscale x 1 x i64> %x, i64 %y, i32 zeroext %evl) {
+; INST-COMBINE-LABEL: udiv_nxv1i64_allonesmask:
----------------
michaelmaitland wrote:

Maybe we don't want to do this for udivs?
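
One common reason to treat division specially (a hedged sketch of the
hazard, using hypothetical IR that is not part of this patch): the VP form
only divides on active lanes, so scalarizing it executes the division
unconditionally and can introduce immediate UB that the predicated code
never had.

  ; Hypothetical: with %evl == 0, or with every lane masked off, this
  ; vp.udiv performs no division at all, even when %y is zero.
  %r = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %splat.x, <vscale x 1 x i64> %splat.y, <vscale x 1 x i1> %m, i32 %evl)
  ; Folding it to a scalar "udiv i64 %x, %y" followed by a splat divides
  ; unconditionally, which is immediate UB when %y is zero.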

https://github.com/llvm/llvm-project/pull/65706

