[llvm] [RISCV] Select Zvkb VANDN for shorter constant loading sequences (PR #123345)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 20 08:25:00 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Piotr Fusik (pfusik)
Changes:
This extends the inverted-constant selection from PR #120221 to vector instructions: when the bitwise inverse of a splat constant is cheaper to materialize than the constant itself, select Zvkb `vandn.vx` instead of `vand.vx`.
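
To illustrate the saving (an illustrative sketch taken from the `vandn_vx_imm16` test below, not part of the patch text): splatting the i16 mask 32767 (0x7fff) costs two scalar instructions, while its bitwise inverse sign-extends to a value a single `lui` can produce, and `vandn.vx` then performs the inversion for free:

```asm
# without Zvkb: materialize 0x7fff (two instructions), then vand.vx
lui      a0, 8          # a0 = 0x8000
addi     a0, a0, -1     # a0 = 0x7fff
vand.vx  v8, v8, a0     # v8 &= 0x7fff

# with Zvkb: materialize ~0x7fff = 0xffff8000 (one lui); vandn inverts it
lui      a0, 1048568    # a0 = 0xffff8000
vandn.vx v8, v8, a0     # v8 &= ~a0, i.e. v8 & 0x7fff
```

(The `vsetvli` configuring SEW/LMUL is elided here; see the full checks in the diff.)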
---
Full diff: https://github.com/llvm/llvm-project/pull/123345.diff
4 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (+18-1)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td (+21)
- (modified) llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll (+180-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll (+114)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 36292e3d572cb2..9855028ead9e20 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3224,8 +3224,25 @@ bool RISCVDAGToDAGISel::selectInvLogicImm(SDValue N, SDValue &Val) {
// Abandon this transform if the constant is needed elsewhere.
for (const SDNode *U : N->users()) {
- if (!ISD::isBitwiseLogicOp(U->getOpcode()))
+ switch (U->getOpcode()) {
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR:
+ if (!(Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbkb()))
+ return false;
+ break;
+ case RISCVISD::VMV_V_X_VL:
+ if (!Subtarget->hasStdExtZvkb())
+ return false;
+ if (!all_of(U->users(), [](const SDNode *V) {
+ return V->getOpcode() == ISD::AND ||
+ V->getOpcode() == RISCVISD::AND_VL;
+ }))
+ return false;
+ break;
+ default:
return false;
+ }
}
// For 64-bit constants, the instruction sequences get complex,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index c69d8885175219..430d75e5cec5b2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -624,6 +624,13 @@ foreach vti = AllIntegerVectors in {
vti.RegClass:$rs2,
vti.ScalarRegClass:$rs1,
vti.AVL, vti.Log2SEW, TA_MA)>;
+ def : Pat<(vti.Vector (and (riscv_splat_vector invLogicImm:$rs1),
+ vti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX)
+ (vti.Vector (IMPLICIT_DEF)),
+ vti.RegClass:$rs2,
+ invLogicImm:$rs1,
+ vti.AVL, vti.Log2SEW, TA_MA)>;
}
}
@@ -758,6 +765,20 @@ foreach vti = AllIntegerVectors in {
GPR:$vl,
vti.Log2SEW,
TAIL_AGNOSTIC)>;
+
+ def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector invLogicImm:$rs1),
+ (vti.Vector vti.RegClass:$rs2),
+ (vti.Vector vti.RegClass:$passthru),
+ (vti.Mask V0),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
+ vti.RegClass:$passthru,
+ vti.RegClass:$rs2,
+ invLogicImm:$rs1,
+ (vti.Mask V0),
+ GPR:$vl,
+ vti.Log2SEW,
+ TAIL_AGNOSTIC)>;
}
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
index ea8b166c156cb6..cf73dceaae3064 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -1,8 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-NOZBB,CHECK-ZVKB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-NOZBB,CHECK-ZVKB64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-ZBB,CHECK-ZVKB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-ZBB,CHECK-ZVKB64
define <vscale x 1 x i8> @vandn_vv_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
; CHECK-LABEL: vandn_vv_nxv1i8:
@@ -1931,3 +1933,179 @@ define <vscale x 8 x i64> @vandn_vx_swapped_nxv8i64(i64 %x, <vscale x 8 x i64> %
%b = and <vscale x 8 x i64> %splat, %y
ret <vscale x 8 x i64> %b
}
+
+define <vscale x 1 x i16> @vandn_vx_imm16(<vscale x 1 x i16> %x) {
+; CHECK-LABEL: vandn_vx_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 8
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vandn_vx_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a0, 1048568
+; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: ret
+ %a = and <vscale x 1 x i16> splat (i16 32767), %x
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @vandn_vx_swapped_imm16(<vscale x 1 x i16> %x) {
+; CHECK-LABEL: vandn_vx_swapped_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 8
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vandn_vx_swapped_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a0, 1048568
+; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: ret
+ %a = and <vscale x 1 x i16> %x, splat (i16 32767)
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i64> @vandn_vx_imm64(<vscale x 1 x i64> %x) {
+; CHECK-RV32-LABEL: vandn_vx_imm64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: addi sp, sp, -16
+; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT: lui a0, 1044480
+; CHECK-RV32-NEXT: li a1, 255
+; CHECK-RV32-NEXT: sw a1, 8(sp)
+; CHECK-RV32-NEXT: sw a0, 12(sp)
+; CHECK-RV32-NEXT: addi a0, sp, 8
+; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
+; CHECK-RV32-NEXT: vand.vv v8, v8, v9
+; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: vandn_vx_imm64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: li a0, -1
+; CHECK-RV64-NEXT: slli a0, a0, 56
+; CHECK-RV64-NEXT: addi a0, a0, 255
+; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV64-NEXT: vand.vx v8, v8, a0
+; CHECK-RV64-NEXT: ret
+;
+; CHECK-ZVKB32-LABEL: vandn_vx_imm64:
+; CHECK-ZVKB32: # %bb.0:
+; CHECK-ZVKB32-NEXT: addi sp, sp, -16
+; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-ZVKB32-NEXT: lui a0, 1044480
+; CHECK-ZVKB32-NEXT: li a1, 255
+; CHECK-ZVKB32-NEXT: sw a1, 8(sp)
+; CHECK-ZVKB32-NEXT: sw a0, 12(sp)
+; CHECK-ZVKB32-NEXT: addi a0, sp, 8
+; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVKB32-NEXT: vlse64.v v9, (a0), zero
+; CHECK-ZVKB32-NEXT: vand.vv v8, v8, v9
+; CHECK-ZVKB32-NEXT: addi sp, sp, 16
+; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
+; CHECK-ZVKB32-NEXT: ret
+;
+; CHECK-ZVKB64-LABEL: vandn_vx_imm64:
+; CHECK-ZVKB64: # %bb.0:
+; CHECK-ZVKB64-NEXT: lui a0, 1048560
+; CHECK-ZVKB64-NEXT: srli a0, a0, 8
+; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
+; CHECK-ZVKB64-NEXT: ret
+ %a = and <vscale x 1 x i64> %x, splat (i64 -72057594037927681)
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i16> @vandn_vx_multi_imm16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
+; CHECK-LABEL: vandn_vx_multi_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 4
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vandn_vx_multi_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a0, 1048572
+; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: vandn.vx v9, v9, a0
+; CHECK-ZVKB-NEXT: vadd.vv v8, v8, v9
+; CHECK-ZVKB-NEXT: ret
+ %a = and <vscale x 1 x i16> %x, splat (i16 16383)
+ %b = and <vscale x 1 x i16> %y, splat (i16 16383)
+ %c = add <vscale x 1 x i16> %a, %b
+ ret <vscale x 1 x i16> %c
+}
+
+define <vscale x 1 x i16> @vandn_vx_multi_scalar_imm16(<vscale x 1 x i16> %x, i16 %y) {
+; CHECK-LABEL: vandn_vx_multi_scalar_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: or a0, a0, a1
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-NOZBB-LABEL: vandn_vx_multi_scalar_imm16:
+; CHECK-ZVKB-NOZBB: # %bb.0:
+; CHECK-ZVKB-NOZBB-NEXT: lui a1, 8
+; CHECK-ZVKB-NOZBB-NEXT: addi a1, a1, -1
+; CHECK-ZVKB-NOZBB-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NOZBB-NEXT: vand.vx v8, v8, a1
+; CHECK-ZVKB-NOZBB-NEXT: or a0, a0, a1
+; CHECK-ZVKB-NOZBB-NEXT: vadd.vx v8, v8, a0
+; CHECK-ZVKB-NOZBB-NEXT: ret
+;
+; CHECK-ZVKB-ZBB-LABEL: vandn_vx_multi_scalar_imm16:
+; CHECK-ZVKB-ZBB: # %bb.0:
+; CHECK-ZVKB-ZBB-NEXT: lui a1, 1048568
+; CHECK-ZVKB-ZBB-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-ZBB-NEXT: vandn.vx v8, v8, a1
+; CHECK-ZVKB-ZBB-NEXT: orn a0, a0, a1
+; CHECK-ZVKB-ZBB-NEXT: vadd.vx v8, v8, a0
+; CHECK-ZVKB-ZBB-NEXT: ret
+ %a = and <vscale x 1 x i16> %x, splat (i16 32767)
+ %b = or i16 %y, 32767
+ %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+ %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %c = add <vscale x 1 x i16> %a, %splat
+ ret <vscale x 1 x i16> %c
+}
+
+define <vscale x 1 x i16> @vand_vadd_vx_imm16(<vscale x 1 x i16> %x) {
+; CHECK-LABEL: vand_vadd_vx_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 8
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vand_vadd_vx_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a0, 8
+; CHECK-ZVKB-NEXT: addi a0, a0, -1
+; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vand.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: vadd.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: ret
+ %a = and <vscale x 1 x i16> %x, splat (i16 32767)
+ %b = add <vscale x 1 x i16> %a, splat (i16 32767)
+ ret <vscale x 1 x i16> %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
index 763b2908b10267..5d29b266546f59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
@@ -1429,3 +1429,117 @@ define <vscale x 8 x i64> @vandn_vx_vp_nxv8i64(i64 %a, <vscale x 8 x i64> %b, <v
%x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %b, <vscale x 8 x i64> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
ret <vscale x 8 x i64> %x
}
+
+define <vscale x 1 x i16> @vandn_vx_vp_imm16(<vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vandn_vx_vp_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a1, 1048568
+; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a1, v0.t
+; CHECK-ZVKB-NEXT: ret
+ %a = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> splat (i16 32767), <vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 %evl)
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @vandn_vx_vp_swapped_imm16(<vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_swapped_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vandn_vx_vp_swapped_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a1, 1048568
+; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a1, v0.t
+; CHECK-ZVKB-NEXT: ret
+ %a = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> splat (i16 32767), <vscale x 1 x i1> %mask, i32 %evl)
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i64> @vandn_vx_vp_imm64(<vscale x 1 x i64> %x, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: vandn_vx_vp_imm64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: addi sp, sp, -16
+; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT: lui a1, 1044480
+; CHECK-RV32-NEXT: li a2, 255
+; CHECK-RV32-NEXT: sw a2, 8(sp)
+; CHECK-RV32-NEXT: sw a1, 12(sp)
+; CHECK-RV32-NEXT: addi a1, sp, 8
+; CHECK-RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-RV32-NEXT: vlse64.v v9, (a1), zero
+; CHECK-RV32-NEXT: vand.vv v8, v8, v9, v0.t
+; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: vandn_vx_vp_imm64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: li a1, -1
+; CHECK-RV64-NEXT: slli a1, a1, 56
+; CHECK-RV64-NEXT: addi a1, a1, 255
+; CHECK-RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; CHECK-RV64-NEXT: ret
+;
+; CHECK-ZVKB32-LABEL: vandn_vx_vp_imm64:
+; CHECK-ZVKB32: # %bb.0:
+; CHECK-ZVKB32-NEXT: addi sp, sp, -16
+; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-ZVKB32-NEXT: lui a1, 1044480
+; CHECK-ZVKB32-NEXT: li a2, 255
+; CHECK-ZVKB32-NEXT: sw a2, 8(sp)
+; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
+; CHECK-ZVKB32-NEXT: addi a1, sp, 8
+; CHECK-ZVKB32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-ZVKB32-NEXT: vlse64.v v9, (a1), zero
+; CHECK-ZVKB32-NEXT: vand.vv v8, v8, v9, v0.t
+; CHECK-ZVKB32-NEXT: addi sp, sp, 16
+; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
+; CHECK-ZVKB32-NEXT: ret
+;
+; CHECK-ZVKB64-LABEL: vandn_vx_vp_imm64:
+; CHECK-ZVKB64: # %bb.0:
+; CHECK-ZVKB64-NEXT: lui a1, 1048560
+; CHECK-ZVKB64-NEXT: srli a1, a1, 8
+; CHECK-ZVKB64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a1, v0.t
+; CHECK-ZVKB64-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> splat (i64 -72057594037927681), <vscale x 1 x i1> %mask, i32 %evl)
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i16> @vand_vadd_vx_vp_imm16(<vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vadd_vx_vp_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vadd.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vand_vadd_vx_vp_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a1, 8
+; CHECK-ZVKB-NEXT: addi a1, a1, -1
+; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vand.vx v8, v8, a1, v0.t
+; CHECK-ZVKB-NEXT: vadd.vx v8, v8, a1, v0.t
+; CHECK-ZVKB-NEXT: ret
+ %a = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> splat (i16 32767), <vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 %evl)
+ %b = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> splat (i16 32767), <vscale x 1 x i16> %a, <vscale x 1 x i1> %mask, i32 %evl)
+ ret <vscale x 1 x i16> %b
+}
``````````
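
A supplementary note (a sketch mirroring the `vandn_vx_vp_imm16` checks above, not additional patch content): the second TableGen hunk wires the same rewrite into the masked/VL pseudos, so VP intrinsics benefit as well. Under a mask the inverted-constant form looks like:

```asm
# masked form: mask in v0, EVL in a0 (from the vandn-vp.ll checks)
lui      a1, 1048568                  # a1 = 0xffff8000 = ~0x7fff
vsetvli  zero, a0, e16, mf4, ta, ma
vandn.vx v8, v8, a1, v0.t             # active lanes only: v8 & 0x7fff
```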
https://github.com/llvm/llvm-project/pull/123345
More information about the llvm-commits mailing list