[llvm] c6bb6a7 - [LV] Add `-force-target-supports-masked-memory-ops` option (#184325)

via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 4 05:36:35 PST 2026


Author: Benjamin Maxwell
Date: 2026-03-04T13:36:29Z
New Revision: c6bb6a7e425472d670da80036f39645580668282

URL: https://github.com/llvm/llvm-project/commit/c6bb6a7e425472d670da80036f39645580668282
DIFF: https://github.com/llvm/llvm-project/commit/c6bb6a7e425472d670da80036f39645580668282.diff

LOG: [LV] Add `-force-target-supports-masked-memory-ops` option (#184325)

This can be used to make target-agnostic tail-folding tests much less
verbose, as masked loads/stores can be used rather than scalar
predication.

Added: 
    llvm/test/Transforms/LoopVectorize/tail-folding-masked-mem-opts.ll

Modified: 
    llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
    llvm/test/Transforms/LoopVectorize/VPlan/conditional-scalar-assignment-vplan.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 36c8c0560c9eb..91c7f1680aac2 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -198,6 +198,13 @@ static cl::opt<unsigned> VectorizeMemoryCheckThreshold(
     "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
     cl::desc("The maximum allowed number of runtime memory checks"));
 
+/// Note: This currently only applies to `llvm.masked.load` and
+/// `llvm.masked.store`. TODO: Extend this to cover other operations as needed.
+static cl::opt<bool> ForceTargetSupportsMaskedMemoryOps(
+    "force-target-supports-masked-memory-ops", cl::init(false), cl::Hidden,
+    cl::desc("Assume the target supports masked memory operations (used for "
+             "testing)."));
+
 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
 // that predication is preferred, and this lists all options. I.e., the
 // vectorizer will try to fold the tail-loop (epilogue) into the vector body
@@ -1179,7 +1186,8 @@ class LoopVectorizationCostModel {
   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment,
                           unsigned AddressSpace) const {
     return Legal->isConsecutivePtr(DataType, Ptr) &&
-           TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace);
+           (ForceTargetSupportsMaskedMemoryOps ||
+            TTI.isLegalMaskedStore(DataType, Alignment, AddressSpace));
   }
 
   /// Returns true if the target machine supports masked load operation
@@ -1187,7 +1195,8 @@ class LoopVectorizationCostModel {
   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment,
                          unsigned AddressSpace) const {
     return Legal->isConsecutivePtr(DataType, Ptr) &&
-           TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace);
+           (ForceTargetSupportsMaskedMemoryOps ||
+            TTI.isLegalMaskedLoad(DataType, Alignment, AddressSpace));
   }
 
   /// Returns true if the target machine can represent \p V as a masked gather

diff  --git a/llvm/test/Transforms/LoopVectorize/VPlan/conditional-scalar-assignment-vplan.ll b/llvm/test/Transforms/LoopVectorize/VPlan/conditional-scalar-assignment-vplan.ll
index 4310b7e73b8fb..cb18a22409641 100644
--- a/llvm/test/Transforms/LoopVectorize/VPlan/conditional-scalar-assignment-vplan.ll
+++ b/llvm/test/Transforms/LoopVectorize/VPlan/conditional-scalar-assignment-vplan.ll
@@ -2,7 +2,8 @@
 ; RUN: opt -passes=loop-vectorize -vplan-print-after="optimize$" \
 ; RUN: -force-vector-width=4 -disable-output 2>&1 < %s | FileCheck %s
 ; RUN: opt -passes=loop-vectorize -vplan-print-after="optimize$" \
-; RUN: -force-vector-width=4 -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -force-vector-width=4 -force-target-supports-masked-memory-ops \
+; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
 ; RUN: -disable-output 2>&1 < %s | FileCheck %s --check-prefix=CHECK-TF
 
 ; This function is derived from the following C program:
@@ -91,35 +92,19 @@ define i32 @simple_csa_int_select(i64 %N, ptr %data, i32 %a) {
 ; CHECK-TF-NEXT:  <x1> vector loop: {
 ; CHECK-TF-NEXT:    vector.body:
 ; CHECK-TF-NEXT:      EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
-; CHECK-TF-NEXT:      ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<[[VP0]]>
-; CHECK-TF-NEXT:      WIDEN-REDUCTION-PHI ir<%data.phi> = phi ir<-1>, vp<[[VP12:%[0-9]+]]>
-; CHECK-TF-NEXT:      WIDEN-PHI vp<[[VP5:%[0-9]+]]> = phi [ ir<false>, vector.ph ], [ vp<[[VP11:%[0-9]+]]>, loop.0 ]
-; CHECK-TF-NEXT:      EMIT vp<[[VP6:%[0-9]+]]> = icmp ule ir<%iv>, vp<[[VP3]]>
-; CHECK-TF-NEXT:    Successor(s): pred.load
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:    <xVFxUF> pred.load: {
-; CHECK-TF-NEXT:      pred.load.entry:
-; CHECK-TF-NEXT:        BRANCH-ON-MASK vp<[[VP6]]>
-; CHECK-TF-NEXT:      Successor(s): pred.load.if, pred.load.continue
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:      pred.load.if:
-; CHECK-TF-NEXT:        vp<[[VP7:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]>
-; CHECK-TF-NEXT:        REPLICATE ir<%ld.addr> = getelementptr inbounds ir<%data>, vp<[[VP7]]>
-; CHECK-TF-NEXT:        REPLICATE ir<%ld> = load ir<%ld.addr> (S->V)
-; CHECK-TF-NEXT:      Successor(s): pred.load.continue
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:      pred.load.continue:
-; CHECK-TF-NEXT:        PHI-PREDICATED-INSTRUCTION vp<[[VP8:%[0-9]+]]> = ir<%ld>
-; CHECK-TF-NEXT:      No successors
-; CHECK-TF-NEXT:    }
-; CHECK-TF-NEXT:    Successor(s): loop.0
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:    loop.0:
-; CHECK-TF-NEXT:      WIDEN ir<%select.cmp> = icmp slt ir<%a>, vp<[[VP8]]>
-; CHECK-TF-NEXT:      EMIT vp<[[VP9:%[0-9]+]]> = logical-and vp<[[VP6]]>, ir<%select.cmp>
-; CHECK-TF-NEXT:      EMIT vp<[[VP10:%[0-9]+]]> = any-of vp<[[VP9]]>
-; CHECK-TF-NEXT:      EMIT vp<[[VP11]]> = select vp<[[VP10]]>, vp<[[VP9]]>, vp<[[VP5]]>
-; CHECK-TF-NEXT:      EMIT vp<[[VP12]]> = select vp<[[VP10]]>, vp<[[VP8]]>, ir<%data.phi>
+; CHECK-TF-NEXT:      WIDEN-REDUCTION-PHI ir<%data.phi> = phi ir<-1>, vp<[[VP13:%[0-9]+]]>
+; CHECK-TF-NEXT:      WIDEN-PHI vp<[[VP5:%[0-9]+]]> = phi [ ir<false>, vector.ph ], [ vp<[[VP12:%[0-9]+]]>, vector.body ]
+; CHECK-TF-NEXT:      vp<[[VP6:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]>
+; CHECK-TF-NEXT:      EMIT vp<[[VP7:%[0-9]+]]> = WIDEN-CANONICAL-INDUCTION vp<[[VP4]]>
+; CHECK-TF-NEXT:      EMIT vp<[[VP8:%[0-9]+]]> = icmp ule vp<[[VP7]]>, vp<[[VP3]]>
+; CHECK-TF-NEXT:      CLONE ir<%ld.addr> = getelementptr inbounds ir<%data>, vp<[[VP6]]>
+; CHECK-TF-NEXT:      vp<[[VP9:%[0-9]+]]> = vector-pointer inbounds ir<%ld.addr>
+; CHECK-TF-NEXT:      WIDEN ir<%ld> = load vp<[[VP9]]>, vp<[[VP8]]>
+; CHECK-TF-NEXT:      WIDEN ir<%select.cmp> = icmp slt ir<%a>, ir<%ld>
+; CHECK-TF-NEXT:      EMIT vp<[[VP10:%[0-9]+]]> = logical-and vp<[[VP8]]>, ir<%select.cmp>
+; CHECK-TF-NEXT:      EMIT vp<[[VP11:%[0-9]+]]> = any-of vp<[[VP10]]>
+; CHECK-TF-NEXT:      EMIT vp<[[VP12]]> = select vp<[[VP11]]>, vp<[[VP10]]>, vp<[[VP5]]>
+; CHECK-TF-NEXT:      EMIT vp<[[VP13]]> = select vp<[[VP11]]>, ir<%ld>, ir<%data.phi>
 ; CHECK-TF-NEXT:      EMIT vp<%index.next> = add vp<[[VP4]]>, vp<[[VP1]]>
 ; CHECK-TF-NEXT:      EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
 ; CHECK-TF-NEXT:    No successors
@@ -127,11 +112,11 @@ define i32 @simple_csa_int_select(i64 %N, ptr %data, i32 %a) {
 ; CHECK-TF-NEXT:  Successor(s): middle.block
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  middle.block:
-; CHECK-TF-NEXT:    EMIT vp<[[VP14:%[0-9]+]]> = extract-last-active vp<[[VP12]]>, vp<[[VP11]]>, ir<-1>
+; CHECK-TF-NEXT:    EMIT vp<[[VP15:%[0-9]+]]> = extract-last-active vp<[[VP13]]>, vp<[[VP12]]>, ir<-1>
 ; CHECK-TF-NEXT:  Successor(s): ir-bb<exit>
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  ir-bb<exit>:
-; CHECK-TF-NEXT:    IR   %select.data.lcssa = phi i32 [ %select.data, %loop ] (extra operand: vp<[[VP14]]> from middle.block)
+; CHECK-TF-NEXT:    IR   %select.data.lcssa = phi i32 [ %select.data, %loop ] (extra operand: vp<[[VP15]]> from middle.block)
 ; CHECK-TF-NEXT:  No successors
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  scalar.ph:
@@ -271,55 +256,23 @@ define i32 @simple_csa_int_load(ptr noalias %a, ptr noalias %b, i32 %default_val
 ; CHECK-TF-NEXT:  <x1> vector loop: {
 ; CHECK-TF-NEXT:    vector.body:
 ; CHECK-TF-NEXT:      EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
-; CHECK-TF-NEXT:      ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<[[VP0]]>
-; CHECK-TF-NEXT:      WIDEN-REDUCTION-PHI ir<%data.phi> = phi ir<%default_val>, vp<[[VP14:%[0-9]+]]>
-; CHECK-TF-NEXT:      WIDEN-PHI vp<[[VP5:%[0-9]+]]> = phi [ ir<false>, vector.ph ], [ vp<[[VP13:%[0-9]+]]>, if.then.1 ]
+; CHECK-TF-NEXT:      WIDEN-REDUCTION-PHI ir<%data.phi> = phi ir<%default_val>, vp<[[VP15:%[0-9]+]]>
+; CHECK-TF-NEXT:      WIDEN-PHI vp<[[VP5:%[0-9]+]]> = phi [ ir<false>, vector.ph ], [ vp<[[VP14:%[0-9]+]]>, vector.body ]
 ; CHECK-TF-NEXT:      vp<[[VP6:%[0-9]+]]> = SCALAR-STEPS vp<[[VP4]]>, ir<1>, vp<[[VP0]]>
-; CHECK-TF-NEXT:      EMIT vp<[[VP7:%[0-9]+]]> = icmp ule ir<%iv>, vp<[[VP3]]>
-; CHECK-TF-NEXT:    Successor(s): pred.load
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:    <xVFxUF> pred.load: {
-; CHECK-TF-NEXT:      pred.load.entry:
-; CHECK-TF-NEXT:        BRANCH-ON-MASK vp<[[VP7]]>
-; CHECK-TF-NEXT:      Successor(s): pred.load.if, pred.load.continue
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:      pred.load.if:
-; CHECK-TF-NEXT:        REPLICATE ir<%a.addr> = getelementptr inbounds nuw ir<%a>, vp<[[VP6]]>
-; CHECK-TF-NEXT:        REPLICATE ir<%ld.a> = load ir<%a.addr> (S->V)
-; CHECK-TF-NEXT:      Successor(s): pred.load.continue
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:      pred.load.continue:
-; CHECK-TF-NEXT:        PHI-PREDICATED-INSTRUCTION vp<[[VP8:%[0-9]+]]> = ir<%ld.a>
-; CHECK-TF-NEXT:      No successors
-; CHECK-TF-NEXT:    }
-; CHECK-TF-NEXT:    Successor(s): loop.0
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:    loop.0:
-; CHECK-TF-NEXT:      WIDEN ir<%if.cond> = icmp sgt vp<[[VP8]]>, ir<%threshold>
-; CHECK-TF-NEXT:      EMIT vp<[[VP9:%[0-9]+]]> = logical-and vp<[[VP7]]>, ir<%if.cond>
-; CHECK-TF-NEXT:    Successor(s): pred.load
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:    <xVFxUF> pred.load: {
-; CHECK-TF-NEXT:      pred.load.entry:
-; CHECK-TF-NEXT:        BRANCH-ON-MASK vp<[[VP9]]>
-; CHECK-TF-NEXT:      Successor(s): pred.load.if, pred.load.continue
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:      pred.load.if:
-; CHECK-TF-NEXT:        REPLICATE ir<%b.addr> = getelementptr inbounds nuw ir<%b>, vp<[[VP6]]>
-; CHECK-TF-NEXT:        REPLICATE ir<%ld.b> = load ir<%b.addr> (S->V)
-; CHECK-TF-NEXT:      Successor(s): pred.load.continue
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:      pred.load.continue:
-; CHECK-TF-NEXT:        PHI-PREDICATED-INSTRUCTION vp<[[VP10:%[0-9]+]]> = ir<%ld.b>
-; CHECK-TF-NEXT:      No successors
-; CHECK-TF-NEXT:    }
-; CHECK-TF-NEXT:    Successor(s): if.then.1
-; CHECK-TF-EMPTY:
-; CHECK-TF-NEXT:    if.then.1:
-; CHECK-TF-NEXT:      EMIT vp<[[VP11:%[0-9]+]]> = logical-and vp<[[VP7]]>, vp<[[VP9]]>
-; CHECK-TF-NEXT:      EMIT vp<[[VP12:%[0-9]+]]> = any-of vp<[[VP11]]>
-; CHECK-TF-NEXT:      EMIT vp<[[VP13]]> = select vp<[[VP12]]>, vp<[[VP11]]>, vp<[[VP5]]>
-; CHECK-TF-NEXT:      EMIT vp<[[VP14]]> = select vp<[[VP12]]>, vp<[[VP10]]>, ir<%data.phi>
+; CHECK-TF-NEXT:      EMIT vp<[[VP7:%[0-9]+]]> = WIDEN-CANONICAL-INDUCTION vp<[[VP4]]>
+; CHECK-TF-NEXT:      EMIT vp<[[VP8:%[0-9]+]]> = icmp ule vp<[[VP7]]>, vp<[[VP3]]>
+; CHECK-TF-NEXT:      CLONE ir<%a.addr> = getelementptr inbounds nuw ir<%a>, vp<[[VP6]]>
+; CHECK-TF-NEXT:      vp<[[VP9:%[0-9]+]]> = vector-pointer inbounds nuw ir<%a.addr>
+; CHECK-TF-NEXT:      WIDEN ir<%ld.a> = load vp<[[VP9]]>, vp<[[VP8]]>
+; CHECK-TF-NEXT:      WIDEN ir<%if.cond> = icmp sgt ir<%ld.a>, ir<%threshold>
+; CHECK-TF-NEXT:      EMIT vp<[[VP10:%[0-9]+]]> = logical-and vp<[[VP8]]>, ir<%if.cond>
+; CHECK-TF-NEXT:      CLONE ir<%b.addr> = getelementptr ir<%b>, vp<[[VP6]]>
+; CHECK-TF-NEXT:      vp<[[VP11:%[0-9]+]]> = vector-pointer ir<%b.addr>
+; CHECK-TF-NEXT:      WIDEN ir<%ld.b> = load vp<[[VP11]]>, vp<[[VP10]]>
+; CHECK-TF-NEXT:      EMIT vp<[[VP12:%[0-9]+]]> = logical-and vp<[[VP8]]>, vp<[[VP10]]>
+; CHECK-TF-NEXT:      EMIT vp<[[VP13:%[0-9]+]]> = any-of vp<[[VP12]]>
+; CHECK-TF-NEXT:      EMIT vp<[[VP14]]> = select vp<[[VP13]]>, vp<[[VP12]]>, vp<[[VP5]]>
+; CHECK-TF-NEXT:      EMIT vp<[[VP15]]> = select vp<[[VP13]]>, ir<%ld.b>, ir<%data.phi>
 ; CHECK-TF-NEXT:      EMIT vp<%index.next> = add vp<[[VP4]]>, vp<[[VP1]]>
 ; CHECK-TF-NEXT:      EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
 ; CHECK-TF-NEXT:    No successors
@@ -327,11 +280,11 @@ define i32 @simple_csa_int_load(ptr noalias %a, ptr noalias %b, i32 %default_val
 ; CHECK-TF-NEXT:  Successor(s): middle.block
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  middle.block:
-; CHECK-TF-NEXT:    EMIT vp<[[VP16:%[0-9]+]]> = extract-last-active vp<[[VP14]]>, vp<[[VP13]]>, ir<%default_val>
+; CHECK-TF-NEXT:    EMIT vp<[[VP17:%[0-9]+]]> = extract-last-active vp<[[VP15]]>, vp<[[VP14]]>, ir<%default_val>
 ; CHECK-TF-NEXT:  Successor(s): ir-bb<exit>
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  ir-bb<exit>:
-; CHECK-TF-NEXT:    IR   %select.data.lcssa = phi i32 [ %select.data, %latch ] (extra operand: vp<[[VP16]]> from middle.block)
+; CHECK-TF-NEXT:    IR   %select.data.lcssa = phi i32 [ %select.data, %latch ] (extra operand: vp<[[VP17]]> from middle.block)
 ; CHECK-TF-NEXT:  No successors
 ; CHECK-TF-EMPTY:
 ; CHECK-TF-NEXT:  scalar.ph:

diff  --git a/llvm/test/Transforms/LoopVectorize/tail-folding-masked-mem-opts.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-masked-mem-opts.ll
new file mode 100644
index 0000000000000..4f0e7282025a1
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-masked-mem-opts.ll
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -p loop-vectorize -force-vector-width=2 -force-target-supports-masked-memory-ops=true -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck %s --check-prefixes=CHECK,CHECK-MASKED
+; RUN: opt -p loop-vectorize -force-vector-width=2 -force-target-supports-masked-memory-ops=false -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck %s --check-prefixes=CHECK,CHECK-PREDICATE
+
+; Tests -force-target-supports-masked-memory-ops=false/true.
+; With -force-target-supports-masked-memory-ops=true masked memory operations should be used.
+
+define void @simple_memcpy(ptr noalias %dst, ptr noalias %src, i64 %n)  {
+; CHECK-MASKED-LABEL: define void @simple_memcpy(
+; CHECK-MASKED-SAME: ptr noalias [[DST:%.*]], ptr noalias [[SRC:%.*]], i64 [[N:%.*]]) {
+; CHECK-MASKED-NEXT:  [[ENTRY:.*:]]
+; CHECK-MASKED-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
+; CHECK-MASKED-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK-MASKED:       [[VECTOR_PH]]:
+; CHECK-MASKED-NEXT:    [[N_RND_UP:%.*]] = add i64 [[UMAX]], 1
+; CHECK-MASKED-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2
+; CHECK-MASKED-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-MASKED-NEXT:    [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[UMAX]], 1
+; CHECK-MASKED-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; CHECK-MASKED-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-MASKED-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK-MASKED:       [[VECTOR_BODY]]:
+; CHECK-MASKED-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-MASKED-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[INDEX]], i64 0
+; CHECK-MASKED-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-MASKED-NEXT:    [[VEC_IV:%.*]] = add <2 x i64> [[BROADCAST_SPLAT2]], <i64 0, i64 1>
+; CHECK-MASKED-NEXT:    [[TMP0:%.*]] = icmp ule <2 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]]
+; CHECK-MASKED-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-MASKED-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr align 4 [[TMP1]], <2 x i1> [[TMP0]], <2 x i32> poison)
+; CHECK-MASKED-NEXT:    [[TMP2:%.*]] = getelementptr i32, ptr [[DST]], i64 [[INDEX]]
+; CHECK-MASKED-NEXT:    call void @llvm.masked.store.v2i32.p0(<2 x i32> [[WIDE_MASKED_LOAD]], ptr align 4 [[TMP2]], <2 x i1> [[TMP0]])
+; CHECK-MASKED-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; CHECK-MASKED-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-MASKED-NEXT:    br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-MASKED:       [[MIDDLE_BLOCK]]:
+; CHECK-MASKED-NEXT:    br label %[[EXIT:.*]]
+; CHECK-MASKED:       [[EXIT]]:
+; CHECK-MASKED-NEXT:    ret void
+;
+; CHECK-PREDICATE-LABEL: define void @simple_memcpy(
+; CHECK-PREDICATE-SAME: ptr noalias [[DST:%.*]], ptr noalias [[SRC:%.*]], i64 [[N:%.*]]) {
+; CHECK-PREDICATE-NEXT:  [[ENTRY:.*:]]
+; CHECK-PREDICATE-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
+; CHECK-PREDICATE-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK-PREDICATE:       [[VECTOR_PH]]:
+; CHECK-PREDICATE-NEXT:    [[N_RND_UP:%.*]] = add i64 [[UMAX]], 1
+; CHECK-PREDICATE-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2
+; CHECK-PREDICATE-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-PREDICATE-NEXT:    [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[UMAX]], 1
+; CHECK-PREDICATE-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; CHECK-PREDICATE-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-PREDICATE-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK-PREDICATE:       [[VECTOR_BODY]]:
+; CHECK-PREDICATE-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE2:.*]] ]
+; CHECK-PREDICATE-NEXT:    [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE2]] ]
+; CHECK-PREDICATE-NEXT:    [[TMP0:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-PREDICATE-NEXT:    [[TMP1:%.*]] = extractelement <2 x i1> [[TMP0]], i32 0
+; CHECK-PREDICATE-NEXT:    br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; CHECK-PREDICATE:       [[PRED_STORE_IF]]:
+; CHECK-PREDICATE-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 0
+; CHECK-PREDICATE-NEXT:    [[TMP3:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[TMP2]]
+; CHECK-PREDICATE-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
+; CHECK-PREDICATE-NEXT:    [[TMP5:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP2]]
+; CHECK-PREDICATE-NEXT:    store i32 [[TMP4]], ptr [[TMP5]], align 4
+; CHECK-PREDICATE-NEXT:    br label %[[PRED_STORE_CONTINUE]]
+; CHECK-PREDICATE:       [[PRED_STORE_CONTINUE]]:
+; CHECK-PREDICATE-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[TMP0]], i32 1
+; CHECK-PREDICATE-NEXT:    br i1 [[TMP6]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2]]
+; CHECK-PREDICATE:       [[PRED_STORE_IF1]]:
+; CHECK-PREDICATE-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 1
+; CHECK-PREDICATE-NEXT:    [[TMP8:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[TMP7]]
+; CHECK-PREDICATE-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-PREDICATE-NEXT:    [[TMP10:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP7]]
+; CHECK-PREDICATE-NEXT:    store i32 [[TMP9]], ptr [[TMP10]], align 4
+; CHECK-PREDICATE-NEXT:    br label %[[PRED_STORE_CONTINUE2]]
+; CHECK-PREDICATE:       [[PRED_STORE_CONTINUE2]]:
+; CHECK-PREDICATE-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; CHECK-PREDICATE-NEXT:    [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-PREDICATE-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-PREDICATE-NEXT:    br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-PREDICATE:       [[MIDDLE_BLOCK]]:
+; CHECK-PREDICATE-NEXT:    br label %[[EXIT:.*]]
+; CHECK-PREDICATE:       [[EXIT]]:
+; CHECK-PREDICATE-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
+  %src.ptr = getelementptr i32, ptr %src, i64 %iv
+  %data = load i32, ptr %src.ptr
+  %dest.ptr = getelementptr i32, ptr %dst, i64 %iv
+  store i32 %data, ptr %dest.ptr
+  %iv.next = add nsw i64 %iv, 1
+  %not.exit = icmp ult i64 %iv.next, %n
+  br i1 %not.exit, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+; Negative test: Non-consecutive load/stores cannot be masked.
+define void @non_consecutive_copy(ptr noalias %dst, ptr noalias %src, i64 %n)  {
+; CHECK-LABEL: define void @non_consecutive_copy(
+; CHECK-SAME: ptr noalias [[DST:%.*]], ptr noalias [[SRC:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i64 [[UMAX]], 1
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[UMAX]], 1
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE3:.*]] ]
+; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE3]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i64> [[VEC_IND]], splat (i64 1)
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[TMP0]], i32 0
+; CHECK-NEXT:    br i1 [[TMP2]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; CHECK:       [[PRED_STORE_IF]]:
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP6]]
+; CHECK-NEXT:    store i32 [[TMP5]], ptr [[TMP7]], align 4
+; CHECK-NEXT:    br label %[[PRED_STORE_CONTINUE]]
+; CHECK:       [[PRED_STORE_CONTINUE]]:
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i1> [[TMP0]], i32 1
+; CHECK-NEXT:    br i1 [[TMP8]], label %[[PRED_STORE_IF2:.*]], label %[[PRED_STORE_CONTINUE3]]
+; CHECK:       [[PRED_STORE_IF2]]:
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP12]]
+; CHECK-NEXT:    store i32 [[TMP11]], ptr [[TMP13]], align 4
+; CHECK-NEXT:    br label %[[PRED_STORE_CONTINUE3]]
+; CHECK:       [[PRED_STORE_CONTINUE3]]:
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX1]], 2
+; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
+  %index = mul i64 %iv, 2
+  %src.ptr = getelementptr i32, ptr %src, i64 %index
+  %data = load i32, ptr %src.ptr
+  %dest.ptr = getelementptr i32, ptr %dst, i64 %index
+  store i32 %data, ptr %dest.ptr
+  %iv.next = add nsw i64 %iv, 1
+  %not.exit = icmp ult i64 %iv.next, %n
+  br i1 %not.exit, label %loop, label %exit
+
+exit:
+  ret void
+}


        


More information about the llvm-commits mailing list