[llvm] 0e3a5f1 - [ARM] Some extra gather/scatter tests. NFC
David Green via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 2 03:32:27 PDT 2021
Author: David Green
Date: 2021-11-02T10:32:22Z
New Revision: 0e3a5f1ab3a9e751a7e404cb50cfca1954ef4221
URL: https://github.com/llvm/llvm-project/commit/0e3a5f1ab3a9e751a7e404cb50cfca1954ef4221
DIFF: https://github.com/llvm/llvm-project/commit/0e3a5f1ab3a9e751a7e404cb50cfca1954ef4221.diff
LOG: [ARM] Some extra gather/scatter tests. NFC
Added:
Modified:
llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
index b6a1c7b94f5ef..8122a870e669f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
@@ -1399,6 +1399,145 @@ for.cond.cleanup: ; preds = %for.body, %middle.b
ret void
}
+define void @shl(i32* nocapture %x, i32* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: shl:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: cmp r2, #1
+; CHECK-NEXT: it lt
+; CHECK-NEXT: poplt {r7, pc}
+; CHECK-NEXT: .LBB15_1: @ %vector.ph
+; CHECK-NEXT: adr r3, .LCPI15_0
+; CHECK-NEXT: vldrw.u32 q0, [r3]
+; CHECK-NEXT: vmov.i32 q1, #0x4
+; CHECK-NEXT: dlstp.32 lr, r2
+; CHECK-NEXT: .LBB15_2: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vshl.i32 q2, q0, #2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vldrw.u32 q3, [r1, q2, uxtw #2]
+; CHECK-NEXT: vstrw.32 q3, [r0], #16
+; CHECK-NEXT: letp lr, .LBB15_2
+; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
+; CHECK-NEXT: pop {r7, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.4:
+; CHECK-NEXT: .LCPI15_0:
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 1 @ 0x1
+; CHECK-NEXT: .long 2 @ 0x2
+; CHECK-NEXT: .long 3 @ 0x3
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %vector.ph, label %for.cond.cleanup
+
+vector.ph: ; preds = %entry
+ %n.rnd.up = add i32 %n, 3
+ %n.vec = and i32 %n.rnd.up, -4
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+ %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
+ %0 = shl nsw <4 x i32> %vec.ind, <i32 2, i32 2, i32 2, i32 2>
+ %1 = getelementptr inbounds i32, i32* %y, <4 x i32> %0
+ %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+ %2 = getelementptr inbounds i32, i32* %x, i32 %index
+ %3 = bitcast i32* %2 to <4 x i32>*
+ call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask)
+ %index.next = add i32 %index, 4
+ %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
+ %4 = icmp eq i32 %index.next, %n.vec
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+}
+
+define void @shlor(i32* nocapture %x, i32* noalias nocapture readonly %y, i32 %n) {
+; CHECK-LABEL: shlor:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: cmp r2, #1
+; CHECK-NEXT: blt .LBB16_3
+; CHECK-NEXT: @ %bb.1: @ %vector.ph
+; CHECK-NEXT: adr r3, .LCPI16_0
+; CHECK-NEXT: vldrw.u32 q0, [r3]
+; CHECK-NEXT: vmov.i32 q1, #0x4
+; CHECK-NEXT: dlstp.32 lr, r2
+; CHECK-NEXT: .LBB16_2: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vshl.i32 q2, q0, #3
+; CHECK-NEXT: vmov q4, q2
+; CHECK-NEXT: vldrw.u32 q3, [r1, q2, uxtw #2]
+; CHECK-NEXT: vorr.i32 q4, #0x2
+; CHECK-NEXT: vldrw.u32 q5, [r1, q4, uxtw #2]
+; CHECK-NEXT: vmov q4, q2
+; CHECK-NEXT: vadd.i32 q3, q5, q3
+; CHECK-NEXT: vorr.i32 q4, #0x4
+; CHECK-NEXT: vldrw.u32 q5, [r1, q4, uxtw #2]
+; CHECK-NEXT: vorr.i32 q2, #0x6
+; CHECK-NEXT: vadd.i32 q3, q3, q5
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vldrw.u32 q4, [r1, q2, uxtw #2]
+; CHECK-NEXT: vadd.i32 q2, q3, q4
+; CHECK-NEXT: vstrw.32 q2, [r0], #16
+; CHECK-NEXT: letp lr, .LBB16_2
+; CHECK-NEXT: .LBB16_3: @ %for.cond.cleanup
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: pop {r7, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.4:
+; CHECK-NEXT: .LCPI16_0:
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 1 @ 0x1
+; CHECK-NEXT: .long 2 @ 0x2
+; CHECK-NEXT: .long 3 @ 0x3
+entry:
+ %cmp23 = icmp sgt i32 %n, 0
+ br i1 %cmp23, label %vector.ph, label %for.cond.cleanup
+
+vector.ph: ; preds = %entry
+ %n.rnd.up = add i32 %n, 3
+ %n.vec = and i32 %n.rnd.up, -4
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+ %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
+ %0 = shl nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
+ %1 = getelementptr inbounds i32, i32* %y, <4 x i32> %0
+ %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+ %2 = or <4 x i32> %0, <i32 2, i32 2, i32 2, i32 2>
+ %3 = getelementptr inbounds i32, i32* %y, <4 x i32> %2
+ %wide.masked.gather25 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+ %4 = add nsw <4 x i32> %wide.masked.gather25, %wide.masked.gather
+ %5 = or <4 x i32> %0, <i32 4, i32 4, i32 4, i32 4>
+ %6 = getelementptr inbounds i32, i32* %y, <4 x i32> %5
+ %wide.masked.gather26 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %6, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+ %7 = add nsw <4 x i32> %4, %wide.masked.gather26
+ %8 = or <4 x i32> %0, <i32 6, i32 6, i32 6, i32 6>
+ %9 = getelementptr inbounds i32, i32* %y, <4 x i32> %8
+ %wide.masked.gather27 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %9, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+ %10 = add nsw <4 x i32> %7, %wide.masked.gather27
+ %11 = getelementptr inbounds i32, i32* %x, i32 %index
+ %12 = bitcast i32* %11 to <4 x i32>*
+ call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %10, <4 x i32>* %12, i32 4, <4 x i1> %active.lane.mask)
+ %index.next = add i32 %index, 4
+ %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
+ %13 = icmp eq i32 %index.next, %n.vec
+ br i1 %13, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+}
+
declare <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>)
declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
@@ -1419,3 +1558,4 @@ declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x
declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
declare <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
diff --git a/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
index 5bdf3b929bb3e..6e800eb06b098 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
@@ -225,6 +225,163 @@ for.cond.cleanup: ; preds = %for.body, %middle.b
ret void
}
+define void @shl(i32* nocapture readonly %x, i32* noalias nocapture %y, i32 %n) {
+; CHECK-LABEL: shl:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: cmp r2, #1
+; CHECK-NEXT: it lt
+; CHECK-NEXT: poplt {r7, pc}
+; CHECK-NEXT: .LBB4_1: @ %vector.ph
+; CHECK-NEXT: adr r3, .LCPI4_0
+; CHECK-NEXT: vldrw.u32 q0, [r3]
+; CHECK-NEXT: vmov.i32 q1, #0x4
+; CHECK-NEXT: dlstp.32 lr, r2
+; CHECK-NEXT: .LBB4_2: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vshl.i32 q3, q0, #2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vldrw.u32 q2, [r0], #16
+; CHECK-NEXT: vstrw.32 q2, [r1, q3, uxtw #2]
+; CHECK-NEXT: letp lr, .LBB4_2
+; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
+; CHECK-NEXT: pop {r7, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.4:
+; CHECK-NEXT: .LCPI4_0:
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 1 @ 0x1
+; CHECK-NEXT: .long 2 @ 0x2
+; CHECK-NEXT: .long 3 @ 0x3
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %vector.ph, label %for.cond.cleanup
+
+vector.ph: ; preds = %entry
+ %n.rnd.up = add i32 %n, 3
+ %n.vec = and i32 %n.rnd.up, -4
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+ %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
+ %0 = getelementptr inbounds i32, i32* %x, i32 %index
+ %1 = bitcast i32* %0 to <4 x i32>*
+ %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
+ %2 = shl nsw <4 x i32> %vec.ind, <i32 2, i32 2, i32 2, i32 2>
+ %3 = getelementptr inbounds i32, i32* %y, <4 x i32> %2
+ call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %wide.masked.load, <4 x i32*> %3, i32 4, <4 x i1> %active.lane.mask)
+ %index.next = add i32 %index, 4
+ %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
+ %4 = icmp eq i32 %index.next, %n.vec
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+}
+
+define void @shlor(i32* nocapture readonly %x, i32* noalias nocapture %y, i32 %n) {
+; CHECK-LABEL: shlor:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: .pad #80
+; CHECK-NEXT: sub sp, #80
+; CHECK-NEXT: cmp r2, #1
+; CHECK-NEXT: blt .LBB5_3
+; CHECK-NEXT: @ %bb.1: @ %vector.ph
+; CHECK-NEXT: vmov.i32 q1, #0x1
+; CHECK-NEXT: adr r3, .LCPI5_0
+; CHECK-NEXT: vstrw.32 q1, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT: vmov.i32 q1, #0x3
+; CHECK-NEXT: vldrw.u32 q0, [r3]
+; CHECK-NEXT: vstrw.32 q1, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT: vmov.i32 q1, #0x2
+; CHECK-NEXT: vstrw.32 q1, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vmov.i32 q1, #0x4
+; CHECK-NEXT: vstrw.32 q1, [sp] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q6, [sp] @ 16-byte Reload
+; CHECK-NEXT: dlstp.32 lr, r2
+; CHECK-NEXT: .LBB5_2: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vldrw.u32 q5, [r0], #16
+; CHECK-NEXT: vldrw.u32 q2, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q3, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q4, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT: vshl.i32 q7, q0, #3
+; CHECK-NEXT: vadd.i32 q1, q5, q6
+; CHECK-NEXT: vadd.i32 q2, q5, q2
+; CHECK-NEXT: vadd.i32 q3, q5, q3
+; CHECK-NEXT: vadd.i32 q5, q5, q4
+; CHECK-NEXT: vmov q4, q7
+; CHECK-NEXT: vstrw.32 q1, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT: vmov q1, q7
+; CHECK-NEXT: vstrw.32 q5, [r1, q7, uxtw #2]
+; CHECK-NEXT: vadd.i32 q0, q0, q6
+; CHECK-NEXT: vorr.i32 q4, #0x4
+; CHECK-NEXT: vorr.i32 q7, #0x2
+; CHECK-NEXT: vstrw.32 q3, [r1, q7, uxtw #2]
+; CHECK-NEXT: vstrw.32 q2, [r1, q4, uxtw #2]
+; CHECK-NEXT: vorr.i32 q1, #0x6
+; CHECK-NEXT: vldrw.u32 q2, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT: vstrw.32 q2, [r1, q1, uxtw #2]
+; CHECK-NEXT: letp lr, .LBB5_2
+; CHECK-NEXT: .LBB5_3: @ %for.cond.cleanup
+; CHECK-NEXT: add sp, #80
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: pop {r7, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.4:
+; CHECK-NEXT: .LCPI5_0:
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 1 @ 0x1
+; CHECK-NEXT: .long 2 @ 0x2
+; CHECK-NEXT: .long 3 @ 0x3
+entry:
+ %cmp33 = icmp sgt i32 %n, 0
+ br i1 %cmp33, label %vector.ph, label %for.cond.cleanup
+
+vector.ph: ; preds = %entry
+ %n.rnd.up = add i32 %n, 3
+ %n.vec = and i32 %n.rnd.up, -4
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+ %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
+ %0 = getelementptr inbounds i32, i32* %x, i32 %index
+ %1 = bitcast i32* %0 to <4 x i32>*
+ %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
+ %2 = add nsw <4 x i32> %wide.masked.load, <i32 1, i32 1, i32 1, i32 1>
+ %3 = shl nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
+ %4 = getelementptr inbounds i32, i32* %y, <4 x i32> %3
+ call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %2, <4 x i32*> %4, i32 4, <4 x i1> %active.lane.mask)
+ %5 = add nsw <4 x i32> %wide.masked.load, <i32 2, i32 2, i32 2, i32 2>
+ %6 = or <4 x i32> %3, <i32 2, i32 2, i32 2, i32 2>
+ %7 = getelementptr inbounds i32, i32* %y, <4 x i32> %6
+ call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %5, <4 x i32*> %7, i32 4, <4 x i1> %active.lane.mask)
+ %8 = add nsw <4 x i32> %wide.masked.load, <i32 3, i32 3, i32 3, i32 3>
+ %9 = or <4 x i32> %3, <i32 4, i32 4, i32 4, i32 4>
+ %10 = getelementptr inbounds i32, i32* %y, <4 x i32> %9
+ call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %8, <4 x i32*> %10, i32 4, <4 x i1> %active.lane.mask)
+ %11 = add nsw <4 x i32> %wide.masked.load, <i32 4, i32 4, i32 4, i32 4>
+ %12 = or <4 x i32> %3, <i32 6, i32 6, i32 6, i32 6>
+ %13 = getelementptr inbounds i32, i32* %y, <4 x i32> %12
+ call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %11, <4 x i32*> %13, i32 4, <4 x i1> %active.lane.mask)
+ %index.next = add i32 %index, 4
+ %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
+ %14 = icmp eq i32 %index.next, %n.vec
+ br i1 %14, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+}
+
declare void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8>, <8 x i8*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>)
declare void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half>, <8 x half*>, i32, <8 x i1>)
@@ -234,3 +391,5 @@ declare void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, i32, <4 x
declare void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half>, <4 x half*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
declare void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, i32, <4 x i1>)
+declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
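
For readers skimming the diff, here is a rough scalar C sketch of the loops the new tests vectorize, inferred from the IR above. It is an illustration only and not part of the commit; the function names (gather_shl, gather_shlor, scatter_shl, scatter_shlor) are made up here, while the parameter names x, y, n come from the tests. The gather tests load from shifted/or'd indices of y and store contiguously to x; the scatter tests do the reverse.

/* Hypothetical scalar equivalents of the new tests (editor's reading of the IR). */

/* mve-gather-increment.ll */
void gather_shl(int *x, const int *y, int n) {
  for (int i = 0; i < n; i++)
    x[i] = y[4 * i];                      /* index = i << 2 */
}
void gather_shlor(int *x, const int *y, int n) {
  for (int i = 0; i < n; i++)             /* base index = i << 3, offsets or'd in */
    x[i] = y[8 * i] + y[8 * i + 2] + y[8 * i + 4] + y[8 * i + 6];
}

/* mve-scatter-increment.ll */
void scatter_shl(const int *x, int *y, int n) {
  for (int i = 0; i < n; i++)
    y[4 * i] = x[i];                      /* index = i << 2 */
}
void scatter_shlor(const int *x, int *y, int n) {
  for (int i = 0; i < n; i++) {           /* base index = i << 3, offsets or'd in */
    y[8 * i]     = x[i] + 1;
    y[8 * i + 2] = x[i] + 2;
    y[8 * i + 4] = x[i] + 3;
    y[8 * i + 6] = x[i] + 4;
  }
}

The CHECK lines above show how well these patterns currently map onto MVE's vldrw/vstrw gather/scatter with a vector of offsets incremented each iteration.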