[llvm] [RISCV] Sink NOT to be folded into ANDN/ORN/XNOR/VANDN (PR #131632)

Piotr Fusik via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 18 00:55:13 PDT 2025


================
@@ -2109,3 +2109,662 @@ define <vscale x 1 x i16> @vand_vadd_vx_imm16(<vscale x 1 x i16> %x) {
   %b = add <vscale x 1 x i16> %a, splat (i16 32767)
   ret <vscale x 1 x i16> %b
 }
+
+define <vscale x 1 x i8> @vand_vx_hoisted_not(<vscale x 1 x i8> %x, i8 %m, i1 zeroext %cond) {
+; CHECK-LABEL: vand_vx_hoisted_not:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    beqz a1, .LBB94_2
+; CHECK-NEXT:  # %bb.1: # %mask
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:  .LBB94_2: # %identity
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVKB-LABEL: vand_vx_hoisted_not:
+; CHECK-ZVKB:       # %bb.0:
+; CHECK-ZVKB-NEXT:    beqz a1, .LBB94_2
+; CHECK-ZVKB-NEXT:  # %bb.1: # %mask
+; CHECK-ZVKB-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0
+; CHECK-ZVKB-NEXT:  .LBB94_2: # %identity
+; CHECK-ZVKB-NEXT:    ret
+  %a = xor i8 %m, -1
+  %head = insertelement <vscale x 1 x i8> poison, i8 %a, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  br i1 %cond, label %mask, label %identity
+
+mask:
+  %masked = and <vscale x 1 x i8> %splat, %x
+  ret <vscale x 1 x i8> %masked
+
+identity:
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vand_vx_hoisted_not_swapped(<vscale x 1 x i8> %x, i8 %m, i1 zeroext %cond) {
+; CHECK-LABEL: vand_vx_hoisted_not_swapped:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    beqz a1, .LBB95_2
+; CHECK-NEXT:  # %bb.1: # %mask
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:  .LBB95_2: # %identity
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVKB-LABEL: vand_vx_hoisted_not_swapped:
+; CHECK-ZVKB:       # %bb.0:
+; CHECK-ZVKB-NEXT:    beqz a1, .LBB95_2
+; CHECK-ZVKB-NEXT:  # %bb.1: # %mask
+; CHECK-ZVKB-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0
+; CHECK-ZVKB-NEXT:  .LBB95_2: # %identity
+; CHECK-ZVKB-NEXT:    ret
+  %a = xor i8 %m, -1
+  %head = insertelement <vscale x 1 x i8> poison, i8 %a, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  br i1 %cond, label %mask, label %identity
+
+mask:
+  %masked = and <vscale x 1 x i8> %x, %splat
+  ret <vscale x 1 x i8> %masked
+
+identity:
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vand_vv_hoisted_not(<vscale x 1 x i8> %x, <vscale x 1 x i8> %m, i1 zeroext %cond) {
+; CHECK-LABEL: vand_vv_hoisted_not:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    beqz a0, .LBB96_2
+; CHECK-NEXT:  # %bb.1: # %mask
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnot.v v9, v9
+; CHECK-NEXT:    vand.vv v8, v9, v8
+; CHECK-NEXT:  .LBB96_2: # %identity
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVKB-LABEL: vand_vv_hoisted_not:
+; CHECK-ZVKB:       # %bb.0:
+; CHECK-ZVKB-NEXT:    beqz a0, .LBB96_2
+; CHECK-ZVKB-NEXT:  # %bb.1: # %mask
+; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-ZVKB-NEXT:    vandn.vv v8, v8, v9
+; CHECK-ZVKB-NEXT:  .LBB96_2: # %identity
+; CHECK-ZVKB-NEXT:    ret
+  %a = xor <vscale x 1 x i8> %m, splat (i8 -1)
+  br i1 %cond, label %mask, label %identity
+
+mask:
+  %masked = and <vscale x 1 x i8> %a, %x
+  ret <vscale x 1 x i8> %masked
+
+identity:
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vand_vv_hoisted_not_swapped(<vscale x 1 x i8> %x, <vscale x 1 x i8> %m, i1 zeroext %cond) {
+; CHECK-LABEL: vand_vv_hoisted_not_swapped:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    beqz a0, .LBB97_2
+; CHECK-NEXT:  # %bb.1: # %mask
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnot.v v9, v9
+; CHECK-NEXT:    vand.vv v8, v8, v9
+; CHECK-NEXT:  .LBB97_2: # %identity
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVKB-LABEL: vand_vv_hoisted_not_swapped:
+; CHECK-ZVKB:       # %bb.0:
+; CHECK-ZVKB-NEXT:    beqz a0, .LBB97_2
+; CHECK-ZVKB-NEXT:  # %bb.1: # %mask
+; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-ZVKB-NEXT:    vandn.vv v8, v8, v9
+; CHECK-ZVKB-NEXT:  .LBB97_2: # %identity
+; CHECK-ZVKB-NEXT:    ret
+  %a = xor <vscale x 1 x i8> %m, splat (i8 -1)
+  br i1 %cond, label %mask, label %identity
+
+mask:
+  %masked = and <vscale x 1 x i8> %x, %a
+  ret <vscale x 1 x i8> %masked
+
+identity:
+  ret <vscale x 1 x i8> %x
+}
+
+declare i64 @llvm.vscale.i64()
+
+define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
+; CHECK-RV32-LABEL: vand_vx_loop_hoisted_not:
+; CHECK-RV32:       # %bb.0: # %entry
+; CHECK-RV32-NEXT:    addi sp, sp, -32
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT:    .cfi_offset ra, -4
+; CHECK-RV32-NEXT:    .cfi_offset s0, -8
+; CHECK-RV32-NEXT:    .cfi_offset s1, -12
+; CHECK-RV32-NEXT:    .cfi_offset s2, -16
+; CHECK-RV32-NEXT:    .cfi_offset s3, -20
+; CHECK-RV32-NEXT:    .cfi_offset s4, -24
+; CHECK-RV32-NEXT:    mv s0, a0
+; CHECK-RV32-NEXT:    not s2, a1
+; CHECK-RV32-NEXT:    csrr s1, vlenb
+; CHECK-RV32-NEXT:    srli s1, s1, 3
+; CHECK-RV32-NEXT:    li a2, 1
+; CHECK-RV32-NEXT:    mv a0, s1
+; CHECK-RV32-NEXT:    li a1, 0
+; CHECK-RV32-NEXT:    li a3, 0
+; CHECK-RV32-NEXT:    call __muldi3
+; CHECK-RV32-NEXT:    sltiu a0, a0, 65
+; CHECK-RV32-NEXT:    seqz a1, a1
+; CHECK-RV32-NEXT:    and a0, a1, a0
+; CHECK-RV32-NEXT:    bnez a0, .LBB98_2
+; CHECK-RV32-NEXT:  # %bb.1:
+; CHECK-RV32-NEXT:    li s3, 0
+; CHECK-RV32-NEXT:    li s4, 0
+; CHECK-RV32-NEXT:    j .LBB98_5
+; CHECK-RV32-NEXT:  .LBB98_2: # %vector.ph
+; CHECK-RV32-NEXT:    li a2, 508
+; CHECK-RV32-NEXT:    mv a0, s1
+; CHECK-RV32-NEXT:    li a1, 0
+; CHECK-RV32-NEXT:    li a3, 0
+; CHECK-RV32-NEXT:    call __muldi3
----------------
pfusik wrote:

Done

https://github.com/llvm/llvm-project/pull/131632


More information about the llvm-commits mailing list