[llvm] 01371d6 - [LLVM][AArch64] Reduce uses of "undef" in SVE CodeGen tests.
Paul Walker via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 26 03:14:43 PST 2025
Author: Paul Walker
Date: 2025-02-26T11:11:03Z
New Revision: 01371d64a91ed65d18670a1ee570058a0678ce0b
URL: https://github.com/llvm/llvm-project/commit/01371d64a91ed65d18670a1ee570058a0678ce0b
DIFF: https://github.com/llvm/llvm-project/commit/01371d64a91ed65d18670a1ee570058a0678ce0b.diff
LOG: [LLVM][AArch64] Reduce uses of "undef" in SVE CodeGen tests.
Using "poison" better reflects realworld generated IR. The main idioms
ported are:
* Inserting into an undefined vector.
* Vector splats.
* Masked load/gather operations with an undefined passthrough.
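As a rough, standalone sketch (not part of the patch; the function name and
types are hypothetical, chosen only for illustration), the splat and
masked-load idioms now read roughly as follows:

  declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)

  define <vscale x 4 x i32> @example_splat_and_masked_load(i32 %x, ptr %p, <vscale x 4 x i1> %pg) {
    ; Splat idiom: insert into a poison vector, then broadcast lane 0.
    %ins = insertelement <vscale x 4 x i32> poison, i32 %x, i64 0
    %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
    ; Masked load idiom: the passthrough operand is poison rather than undef.
    %ld = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %p, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i32> poison)
    %res = add <vscale x 4 x i32> %splat, %ld
    ret <vscale x 4 x i32> %res
  }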
Added:
Modified:
llvm/test/CodeGen/AArch64/aarch64-dup-ext-scalable.ll
llvm/test/CodeGen/AArch64/aarch64-sve-and-combine-crash.ll
llvm/test/CodeGen/AArch64/dag-combine-insert-subvector.ll
llvm/test/CodeGen/AArch64/sub-splat-sub.ll
llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
llvm/test/CodeGen/AArch64/sve-extract-element.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-addressing-modes.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
llvm/test/CodeGen/AArch64/sve-insert-element.ll
llvm/test/CodeGen/AArch64/sve-insert-vector-to-predicate-load.ll
llvm/test/CodeGen/AArch64/sve-insert-vector.ll
llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll
llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll
llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll
llvm/test/CodeGen/AArch64/sve-ld1r.ll
llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll
llvm/test/CodeGen/AArch64/sve-masked-gather.ll
llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
llvm/test/CodeGen/AArch64/sve-nontemporal-masked-ldst.ll
llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
llvm/test/CodeGen/AArch64/sve-pred-selectop.ll
llvm/test/CodeGen/AArch64/sve-split-load.ll
llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll
llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
llvm/test/CodeGen/AArch64/sve-unary-movprfx.ll
llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
llvm/test/CodeGen/AArch64/sve-vector-compress.ll
llvm/test/CodeGen/AArch64/sve-vector-splat.ll
llvm/test/CodeGen/AArch64/sve-vl-arith.ll
llvm/test/CodeGen/AArch64/sve2-unary-movprfx.ll
llvm/test/CodeGen/AArch64/vector-insert-dag-combines.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/aarch64-dup-ext-scalable.ll b/llvm/test/CodeGen/AArch64/aarch64-dup-ext-scalable.ll
index 36b81d8e495ce..9379d78b9220a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-dup-ext-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-dup-ext-scalable.ll
@@ -11,8 +11,8 @@ define <vscale x 2 x i16> @dupsext_v2i8_v2i16(i8 %src, <vscale x 2 x i16> %b) {
; CHECK-NEXT: ret
entry:
%in = sext i8 %src to i16
- %broadcast.splatinsert = insertelement <vscale x 2 x i16> undef, i16 %in, i16 0
- %broadcast.splat = shufflevector <vscale x 2 x i16> %broadcast.splatinsert, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i16> poison, i16 %in, i16 0
+ %broadcast.splat = shufflevector <vscale x 2 x i16> %broadcast.splatinsert, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nsw <vscale x 2 x i16> %broadcast.splat, %b
ret <vscale x 2 x i16> %out
}
@@ -27,8 +27,8 @@ define <vscale x 4 x i16> @dupsext_v4i8_v4i16(i8 %src, <vscale x 4 x i16> %b) {
; CHECK-NEXT: ret
entry:
%in = sext i8 %src to i16
- %broadcast.splatinsert = insertelement <vscale x 4 x i16> undef, i16 %in, i16 0
- %broadcast.splat = shufflevector <vscale x 4 x i16> %broadcast.splatinsert, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 4 x i16> poison, i16 %in, i16 0
+ %broadcast.splat = shufflevector <vscale x 4 x i16> %broadcast.splatinsert, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
%out = mul nsw <vscale x 4 x i16> %broadcast.splat, %b
ret <vscale x 4 x i16> %out
}
@@ -43,8 +43,8 @@ define <vscale x 8 x i16> @dupsext_v8i8_v8i16(i8 %src, <vscale x 8 x i16> %b) {
; CHECK-NEXT: ret
entry:
%in = sext i8 %src to i16
- %broadcast.splatinsert = insertelement <vscale x 8 x i16> undef, i16 %in, i16 0
- %broadcast.splat = shufflevector <vscale x 8 x i16> %broadcast.splatinsert, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 8 x i16> poison, i16 %in, i16 0
+ %broadcast.splat = shufflevector <vscale x 8 x i16> %broadcast.splatinsert, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%out = mul nsw <vscale x 8 x i16> %broadcast.splat, %b
ret <vscale x 8 x i16> %out
}
@@ -59,8 +59,8 @@ define <vscale x 2 x i32> @dupsext_v2i8_v2i32(i8 %src, <vscale x 2 x i32> %b) {
; CHECK-NEXT: ret
entry:
%in = sext i8 %src to i32
- %broadcast.splatinsert = insertelement <vscale x 2 x i32> undef, i32 %in, i32 0
- %broadcast.splat = shufflevector <vscale x 2 x i32> %broadcast.splatinsert, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i32> poison, i32 %in, i32 0
+ %broadcast.splat = shufflevector <vscale x 2 x i32> %broadcast.splatinsert, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nsw <vscale x 2 x i32> %broadcast.splat, %b
ret <vscale x 2 x i32> %out
}
@@ -75,8 +75,8 @@ define <vscale x 4 x i32> @dupsext_v4i8_v4i32(i8 %src, <vscale x 4 x i32> %b) {
; CHECK-NEXT: ret
entry:
%in = sext i8 %src to i32
- %broadcast.splatinsert = insertelement <vscale x 4 x i32> undef, i32 %in, i32 0
- %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %in, i32 0
+ %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%out = mul nsw <vscale x 4 x i32> %broadcast.splat, %b
ret <vscale x 4 x i32> %out
}
@@ -92,8 +92,8 @@ define <vscale x 2 x i64> @dupsext_v2i8_v2i64(i8 %src, <vscale x 2 x i64> %b) {
; CHECK-NEXT: ret
entry:
%in = sext i8 %src to i64
- %broadcast.splatinsert = insertelement <vscale x 2 x i64> undef, i64 %in, i64 0
- %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %in, i64 0
+ %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nsw <vscale x 2 x i64> %broadcast.splat, %b
ret <vscale x 2 x i64> %out
}
@@ -108,8 +108,8 @@ define <vscale x 2 x i32> @dupsext_v2i16_v2i32(i16 %src, <vscale x 2 x i32> %b)
; CHECK-NEXT: ret
entry:
%in = sext i16 %src to i32
- %broadcast.splatinsert = insertelement <vscale x 2 x i32> undef, i32 %in, i32 0
- %broadcast.splat = shufflevector <vscale x 2 x i32> %broadcast.splatinsert, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i32> poison, i32 %in, i32 0
+ %broadcast.splat = shufflevector <vscale x 2 x i32> %broadcast.splatinsert, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nsw <vscale x 2 x i32> %broadcast.splat, %b
ret <vscale x 2 x i32> %out
}
@@ -124,8 +124,8 @@ define <vscale x 4 x i32> @dupsext_v4i16_v4i32(i16 %src, <vscale x 4 x i32> %b)
; CHECK-NEXT: ret
entry:
%in = sext i16 %src to i32
- %broadcast.splatinsert = insertelement <vscale x 4 x i32> undef, i32 %in, i32 0
- %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %in, i32 0
+ %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%out = mul nsw <vscale x 4 x i32> %broadcast.splat, %b
ret <vscale x 4 x i32> %out
}
@@ -141,8 +141,8 @@ define <vscale x 2 x i64> @dupsext_v2i16_v2i64(i16 %src, <vscale x 2 x i64> %b)
; CHECK-NEXT: ret
entry:
%in = sext i16 %src to i64
- %broadcast.splatinsert = insertelement <vscale x 2 x i64> undef, i64 %in, i64 0
- %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %in, i64 0
+ %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nsw <vscale x 2 x i64> %broadcast.splat, %b
ret <vscale x 2 x i64> %out
}
@@ -158,8 +158,8 @@ define <vscale x 2 x i64> @dupsext_v2i32_v2i64(i32 %src, <vscale x 2 x i64> %b)
; CHECK-NEXT: ret
entry:
%in = sext i32 %src to i64
- %broadcast.splatinsert = insertelement <vscale x 2 x i64> undef, i64 %in, i64 0
- %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %in, i64 0
+ %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nsw <vscale x 2 x i64> %broadcast.splat, %b
ret <vscale x 2 x i64> %out
}
@@ -174,8 +174,8 @@ define <vscale x 2 x i16> @dupzext_v2i8_v2i16(i8 %src, <vscale x 2 x i16> %b) {
; CHECK-NEXT: ret
entry:
%in = zext i8 %src to i16
- %broadcast.splatinsert = insertelement <vscale x 2 x i16> undef, i16 %in, i16 0
- %broadcast.splat = shufflevector <vscale x 2 x i16> %broadcast.splatinsert, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i16> poison, i16 %in, i16 0
+ %broadcast.splat = shufflevector <vscale x 2 x i16> %broadcast.splatinsert, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nuw <vscale x 2 x i16> %broadcast.splat, %b
ret <vscale x 2 x i16> %out
}
@@ -190,8 +190,8 @@ define <vscale x 4 x i16> @dupzext_v4i8_v4i16(i8 %src, <vscale x 4 x i16> %b) {
; CHECK-NEXT: ret
entry:
%in = zext i8 %src to i16
- %broadcast.splatinsert = insertelement <vscale x 4 x i16> undef, i16 %in, i16 0
- %broadcast.splat = shufflevector <vscale x 4 x i16> %broadcast.splatinsert, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 4 x i16> poison, i16 %in, i16 0
+ %broadcast.splat = shufflevector <vscale x 4 x i16> %broadcast.splatinsert, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
%out = mul nuw <vscale x 4 x i16> %broadcast.splat, %b
ret <vscale x 4 x i16> %out
}
@@ -206,8 +206,8 @@ define <vscale x 8 x i16> @dupzext_v8i8_v8i16(i8 %src, <vscale x 8 x i16> %b) {
; CHECK-NEXT: ret
entry:
%in = zext i8 %src to i16
- %broadcast.splatinsert = insertelement <vscale x 8 x i16> undef, i16 %in, i16 0
- %broadcast.splat = shufflevector <vscale x 8 x i16> %broadcast.splatinsert, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 8 x i16> poison, i16 %in, i16 0
+ %broadcast.splat = shufflevector <vscale x 8 x i16> %broadcast.splatinsert, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%out = mul nuw <vscale x 8 x i16> %broadcast.splat, %b
ret <vscale x 8 x i16> %out
}
@@ -222,8 +222,8 @@ define <vscale x 2 x i32> @dupzext_v2i8_v2i32(i8 %src, <vscale x 2 x i32> %b) {
; CHECK-NEXT: ret
entry:
%in = zext i8 %src to i32
- %broadcast.splatinsert = insertelement <vscale x 2 x i32> undef, i32 %in, i32 0
- %broadcast.splat = shufflevector <vscale x 2 x i32> %broadcast.splatinsert, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i32> poison, i32 %in, i32 0
+ %broadcast.splat = shufflevector <vscale x 2 x i32> %broadcast.splatinsert, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nuw <vscale x 2 x i32> %broadcast.splat, %b
ret <vscale x 2 x i32> %out
}
@@ -238,8 +238,8 @@ define <vscale x 4 x i32> @dupzext_v4i8_v4i32(i8 %src, <vscale x 4 x i32> %b) {
; CHECK-NEXT: ret
entry:
%in = zext i8 %src to i32
- %broadcast.splatinsert = insertelement <vscale x 4 x i32> undef, i32 %in, i32 0
- %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %in, i32 0
+ %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%out = mul nuw <vscale x 4 x i32> %broadcast.splat, %b
ret <vscale x 4 x i32> %out
}
@@ -255,8 +255,8 @@ define <vscale x 2 x i64> @dupzext_v2i8_v2i64(i8 %src, <vscale x 2 x i64> %b) {
; CHECK-NEXT: ret
entry:
%in = zext i8 %src to i64
- %broadcast.splatinsert = insertelement <vscale x 2 x i64> undef, i64 %in, i64 0
- %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %in, i64 0
+ %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nuw <vscale x 2 x i64> %broadcast.splat, %b
ret <vscale x 2 x i64> %out
}
@@ -271,8 +271,8 @@ define <vscale x 2 x i32> @dupzext_v2i16_v2i32(i16 %src, <vscale x 2 x i32> %b)
; CHECK-NEXT: ret
entry:
%in = zext i16 %src to i32
- %broadcast.splatinsert = insertelement <vscale x 2 x i32> undef, i32 %in, i32 0
- %broadcast.splat = shufflevector <vscale x 2 x i32> %broadcast.splatinsert, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i32> poison, i32 %in, i32 0
+ %broadcast.splat = shufflevector <vscale x 2 x i32> %broadcast.splatinsert, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nuw <vscale x 2 x i32> %broadcast.splat, %b
ret <vscale x 2 x i32> %out
}
@@ -287,8 +287,8 @@ define <vscale x 4 x i32> @dupzext_v4i16_v4i32(i16 %src, <vscale x 4 x i32> %b)
; CHECK-NEXT: ret
entry:
%in = zext i16 %src to i32
- %broadcast.splatinsert = insertelement <vscale x 4 x i32> undef, i32 %in, i32 0
- %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %in, i32 0
+ %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%out = mul nuw <vscale x 4 x i32> %broadcast.splat, %b
ret <vscale x 4 x i32> %out
}
@@ -304,8 +304,8 @@ define <vscale x 2 x i64> @dupzext_v2i16_v2i64(i16 %src, <vscale x 2 x i64> %b)
; CHECK-NEXT: ret
entry:
%in = zext i16 %src to i64
- %broadcast.splatinsert = insertelement <vscale x 2 x i64> undef, i64 %in, i64 0
- %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %in, i64 0
+ %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nuw <vscale x 2 x i64> %broadcast.splat, %b
ret <vscale x 2 x i64> %out
}
@@ -320,8 +320,8 @@ define <vscale x 2 x i64> @dupzext_v2i32_v2i64(i32 %src, <vscale x 2 x i64> %b)
; CHECK-NEXT: ret
entry:
%in = zext i32 %src to i64
- %broadcast.splatinsert = insertelement <vscale x 2 x i64> undef, i64 %in, i64 0
- %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %broadcast.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %in, i64 0
+ %broadcast.splat = shufflevector <vscale x 2 x i64> %broadcast.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%out = mul nuw <vscale x 2 x i64> %broadcast.splat, %b
ret <vscale x 2 x i64> %out
}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-and-combine-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-and-combine-crash.ll
index e612401fcfa29..f169dede4b4a1 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sve-and-combine-crash.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-and-combine-crash.ll
@@ -15,7 +15,7 @@ define <vscale x 4 x i32> @test(<vscale x 8 x i16> %in1, <vscale x 4 x i32> %in2
; CHECK-NEXT: ret
entry:
%i1 = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpkhi.nxv4i32(<vscale x 8 x i16> %in1)
- %i2 = shufflevector <vscale x 4 x i32> %in2, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %i2 = shufflevector <vscale x 4 x i32> %in2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%i3 = and <vscale x 4 x i32> %i1, %i2
ret <vscale x 4 x i32> %i3
}
diff --git a/llvm/test/CodeGen/AArch64/dag-combine-insert-subvector.ll b/llvm/test/CodeGen/AArch64/dag-combine-insert-subvector.ll
index 3573ca08acd33..9e5918cb1b000 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-insert-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-insert-subvector.ll
@@ -14,12 +14,12 @@ declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x do
define <vscale x 2 x double> @reproducer_one(<vscale x 4 x float> %vec_a) #0 {
%a = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> %vec_a, i64 0)
%b = bitcast <16 x float> %a to <8 x double>
- %retval = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> %b, i64 0)
+ %retval = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> poison, <8 x double> %b, i64 0)
ret <vscale x 2 x double> %retval
}
define <vscale x 2 x double> @reproducer_two(<4 x double> %a, <4 x double> %b) #0 {
%concat = shufflevector <4 x double> %a, <4 x double> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
- %retval = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> %concat, i64 0)
+ %retval = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> poison, <8 x double> %concat, i64 0)
ret <vscale x 2 x double> %retval
}
diff --git a/llvm/test/CodeGen/AArch64/sub-splat-sub.ll b/llvm/test/CodeGen/AArch64/sub-splat-sub.ll
index 54022591bb771..69bebe18e7228 100644
--- a/llvm/test/CodeGen/AArch64/sub-splat-sub.ll
+++ b/llvm/test/CodeGen/AArch64/sub-splat-sub.ll
@@ -8,8 +8,8 @@ define <16 x i8> @subsubii8(<16 x i8> %a, i8 %b) {
; CHECK-NEXT: ret
entry:
%sub = sub i8 0, %b
- %0 = insertelement <16 x i8> undef, i8 %sub, i32 0
- %sh_prom = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
+ %0 = insertelement <16 x i8> poison, i8 %sub, i32 0
+ %sh_prom = shufflevector <16 x i8> %0, <16 x i8> poison, <16 x i32> zeroinitializer
%sub2 = sub <16 x i8> zeroinitializer, %sh_prom
ret <16 x i8> %sub2
}
@@ -21,8 +21,8 @@ define <vscale x 16 x i8> @subsubni8(<vscale x 16 x i8> %a, i8 %b) {
; CHECK-NEXT: ret
entry:
%sub = sub i8 0, %b
- %0 = insertelement <vscale x 16 x i8> undef, i8 %sub, i32 0
- %sh_prom = shufflevector <vscale x 16 x i8> %0, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %0 = insertelement <vscale x 16 x i8> poison, i8 %sub, i32 0
+ %sh_prom = shufflevector <vscale x 16 x i8> %0, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%sub2 = sub <vscale x 16 x i8> zeroinitializer, %sh_prom
ret <vscale x 16 x i8> %sub2
}
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
index dfd4417326db0..34532ddd9a2b0 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
@@ -266,10 +266,10 @@ define aarch64_sve_vector_pcs [4 x <vscale x 16 x i1>] @callee_with_svepred_arg_
%r1 = and <vscale x 16 x i1> %p1, %p5
%r2 = and <vscale x 16 x i1> %p2, %p6
%r3 = and <vscale x 16 x i1> %p3, %p7
- %1 = insertvalue [4 x <vscale x 16 x i1>] undef, <vscale x 16 x i1> %r0, 0
- %2 = insertvalue [4 x <vscale x 16 x i1>] %1, <vscale x 16 x i1> %r1, 1
- %3 = insertvalue [4 x <vscale x 16 x i1>] %2, <vscale x 16 x i1> %r2, 2
- %4 = insertvalue [4 x <vscale x 16 x i1>] %3, <vscale x 16 x i1> %r3, 3
+ %1 = insertvalue [4 x <vscale x 16 x i1>] poison, <vscale x 16 x i1> %r0, 0
+ %2 = insertvalue [4 x <vscale x 16 x i1>] %1, <vscale x 16 x i1> %r1, 1
+ %3 = insertvalue [4 x <vscale x 16 x i1>] %2, <vscale x 16 x i1> %r2, 2
+ %4 = insertvalue [4 x <vscale x 16 x i1>] %3, <vscale x 16 x i1> %r3, 3
ret [4 x <vscale x 16 x i1>] %4
}
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index 3b7b03e6ef61f..aa08dc7e21582 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -44,7 +44,7 @@ entry:
%14 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3, 1
%15 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3, 2
%16 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3, 3
- %17 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> undef, <vscale x 2 x double> %13, i64 0)
+ %17 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> poison, <vscale x 2 x double> %13, i64 0)
%18 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %17, <vscale x 2 x double> %14, i64 2)
%19 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %18, <vscale x 2 x double> %15, i64 4)
%20 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %19, <vscale x 2 x double> %16, i64 6)
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-element.ll b/llvm/test/CodeGen/AArch64/sve-extract-element.ll
index 939c7e4310018..9ebeb098c60c0 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-element.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-element.ll
@@ -525,7 +525,7 @@ define i32 @test_undef_lane_4xi32(<vscale x 4 x i32> %a) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
- %b = extractelement <vscale x 4 x i32> %a, i32 undef
+ %b = extractelement <vscale x 4 x i32> %a, i32 poison
ret i32 %b
}
@@ -533,7 +533,7 @@ define i8 @extract_of_insert_undef_16xi8(i8 %a) #0 {
; CHECK-LABEL: extract_of_insert_undef_16xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
- %b = insertelement <vscale x 16 x i8> undef, i8 %a, i32 0
+ %b = insertelement <vscale x 16 x i8> poison, i8 %a, i32 0
%c = extractelement <vscale x 16 x i8> %b, i32 0
ret i8 %c
}
@@ -582,8 +582,8 @@ define i64 @test_lanex_splat_2xi64(i64 %x, i32 %y) #0 {
; CHECK-LABEL: test_lanex_splat_2xi64:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
- %a = insertelement <vscale x 2 x i64> undef, i64 %x, i32 0
- %b = shufflevector <vscale x 2 x i64> %a, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %a = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
+ %b = shufflevector <vscale x 2 x i64> %a, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%c = extractelement <vscale x 2 x i64> %b, i32 %y
ret i64 %c
}
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-addressing-modes.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-addressing-modes.ll
index 28e1412c524a0..7ccf899c70e31 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-addressing-modes.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-addressing-modes.ll
@@ -13,7 +13,7 @@ define void @masked_gather_base_plus_stride_v8f32(ptr %dst, ptr %src) #0 {
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = getelementptr float, ptr %src, <8 x i64> <i64 0, i64 7, i64 14, i64 21, i64 28, i64 35, i64 42, i64 49>
- %data = tail call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> undef)
+ %data = tail call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x float> poison)
store <8 x float> %data, ptr %dst, align 4
ret void
}
@@ -28,7 +28,7 @@ define void @masked_gather_base_plus_stride_v4f64(ptr %dst, ptr %src) #0 {
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = getelementptr double, ptr %src, <4 x i64> <i64 -2, i64 -34, i64 -66, i64 -98>
- %data = tail call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x double> undef)
+ %data = tail call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 true), <4 x double> poison)
store <4 x double> %data, ptr %dst, align 8
ret void
}
@@ -43,7 +43,7 @@ define void @masked_scatter_base_plus_stride_v8f32(ptr %dst, ptr %src) #0 {
; CHECK-NEXT: ret
%data = load <8 x float>, ptr %src, align 4
%ptrs = getelementptr float, ptr %dst, <8 x i64> <i64 0, i64 -7, i64 -14, i64 -21, i64 -28, i64 -35, i64 -42, i64 -49>
- tail call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> %data, <8 x ptr> %ptrs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+ tail call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> %data, <8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true))
ret void
}
@@ -57,7 +57,7 @@ define void @masked_scatter_base_plus_stride_v4f64(ptr %dst, ptr %src) #0 {
; CHECK-NEXT: ret
%data = load <4 x double>, ptr %src, align 8
%ptrs = getelementptr double, ptr %dst, <4 x i64> <i64 -2, i64 1, i64 4, i64 7>
- tail call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %data, <4 x ptr> %ptrs, i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+ tail call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %data, <4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 true))
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
index f7751131005e3..ee82ad8467322 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
@@ -864,10 +864,10 @@ define void @concat_v32i8_undef(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-NEXT: ret
%op1 = load <16 x i8>, ptr %a
- %res = shufflevector <16 x i8> %op1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
- i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
- i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %res = shufflevector <16 x i8> %op1, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+ i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+ i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x i8> %res, ptr %b
ret void
}
@@ -880,8 +880,8 @@ define void @concat_v16i16_undef(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
; CHECK-NEXT: ret
%op1 = load <8 x i16>, ptr %a
- %res = shufflevector <8 x i16> %op1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %res = shufflevector <8 x i16> %op1, <8 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x i16> %res, ptr %b
ret void
}
@@ -894,7 +894,7 @@ define void @concat_v8i32_undef(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
; CHECK-NEXT: ret
%op1 = load <4 x i32>, ptr %a
- %res = shufflevector <4 x i32> %op1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <4 x i32> %op1, <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i32> %res, ptr %b
ret void
}
@@ -907,7 +907,7 @@ define void @concat_v4i64_undef(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
%op1 = load <2 x i64>, ptr %a
- %res = shufflevector <2 x i64> %op1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <2 x i64> %op1, <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x i64> %res, ptr %b
ret void
}
@@ -924,12 +924,12 @@ define void @concat_v32i8_4op(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-NEXT: ret
%op1 = load <8 x i8>, ptr %a
- %shuffle = shufflevector <8 x i8> %op1, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %res = shufflevector <16 x i8> %shuffle, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
- i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
- i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %shuffle = shufflevector <8 x i8> %op1, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %res = shufflevector <16 x i8> %shuffle, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+ i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+ i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x i8> %res, ptr %b
ret void
}
@@ -942,9 +942,9 @@ define void @concat_v16i16_4op(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
; CHECK-NEXT: ret
%op1 = load <4 x i16>, ptr %a
- %shuffle = shufflevector <4 x i16> %op1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %res = shufflevector <8 x i16> %shuffle, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle = shufflevector <4 x i16> %op1, <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x i16> %shuffle, <8 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x i16> %res, ptr %b
ret void
}
@@ -957,8 +957,8 @@ define void @concat_v8i32_4op(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
; CHECK-NEXT: ret
%op1 = load <2 x i32>, ptr %a
- %shuffle = shufflevector <2 x i32> %op1, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %res = shufflevector <4 x i32> %shuffle, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <2 x i32> %op1, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x i32> %shuffle, <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i32> %res, ptr %b
ret void
}
@@ -971,8 +971,8 @@ define void @concat_v4i64_4op(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
%op1 = load <1 x i64>, ptr %a
- %shuffle = shufflevector <1 x i64> %op1, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
- %res = shufflevector <2 x i64> %shuffle, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle = shufflevector <1 x i64> %op1, <1 x i64> poison, <2 x i32> <i32 0, i32 1>
+ %res = shufflevector <2 x i64> %shuffle, <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x i64> %res, ptr %b
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
index f2ad98f8caec9..d2206a74533d8 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
@@ -20,7 +20,7 @@ define void @masked_gather_v2i8(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1b { z0.s }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <2 x ptr>, ptr %b
- %vals = call <2 x i8> @llvm.masked.gather.v2i8(<2 x ptr> %ptrs, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i8> undef)
+ %vals = call <2 x i8> @llvm.masked.gather.v2i8(<2 x ptr> %ptrs, i32 8, <2 x i1> splat (i1 true), <2 x i8> poison)
store <2 x i8> %vals, ptr %a
ret void
}
@@ -34,7 +34,7 @@ define void @masked_gather_v4i8(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1b { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <4 x ptr>, ptr %b
- %vals = call <4 x i8> @llvm.masked.gather.v4i8(<4 x ptr> %ptrs, i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+ %vals = call <4 x i8> @llvm.masked.gather.v4i8(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 true), <4 x i8> poison)
store <4 x i8> %vals, ptr %a
ret void
}
@@ -67,7 +67,7 @@ define void @masked_gather_v8i8(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: str d0, [x0]
; VBITS_GE_512-NEXT: ret
%ptrs = load <8 x ptr>, ptr %b
- %vals = call <8 x i8> @llvm.masked.gather.v8i8(<8 x ptr> %ptrs, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+ %vals = call <8 x i8> @llvm.masked.gather.v8i8(<8 x ptr> %ptrs, i32 8, <8 x i1> splat (i1 true), <8 x i8> poison)
store <8 x i8> %vals, ptr %a
ret void
}
@@ -84,8 +84,7 @@ define void @masked_gather_v16i8(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
%ptrs = load <16 x ptr>, ptr %b
- %vals = call <16 x i8> @llvm.masked.gather.v16i8(<16 x ptr> %ptrs, i32 8, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+ %vals = call <16 x i8> @llvm.masked.gather.v16i8(<16 x ptr> %ptrs, i32 8, <16 x i1> splat (i1 true), <16 x i8> poison)
store <16 x i8> %vals, ptr %a
ret void
}
@@ -99,10 +98,7 @@ define void @masked_gather_v32i8(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: st1b { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <32 x ptr>, ptr %b
- %vals = call <32 x i8> @llvm.masked.gather.v32i8(<32 x ptr> %ptrs, i32 8, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i8> undef)
+ %vals = call <32 x i8> @llvm.masked.gather.v32i8(<32 x ptr> %ptrs, i32 8, <32 x i1> splat (i1 true), <32 x i8> poison)
store <32 x i8> %vals, ptr %a
ret void
}
@@ -122,7 +118,7 @@ define void @masked_gather_v2i16(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1h { z0.s }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <2 x ptr>, ptr %b
- %vals = call <2 x i16> @llvm.masked.gather.v2i16(<2 x ptr> %ptrs, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i16> undef)
+ %vals = call <2 x i16> @llvm.masked.gather.v2i16(<2 x ptr> %ptrs, i32 8, <2 x i1> splat (i1 true), <2 x i16> poison)
store <2 x i16> %vals, ptr %a
ret void
}
@@ -138,7 +134,7 @@ define void @masked_gather_v4i16(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: str d0, [x0]
; CHECK-NEXT: ret
%ptrs = load <4 x ptr>, ptr %b
- %vals = call <4 x i16> @llvm.masked.gather.v4i16(<4 x ptr> %ptrs, i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+ %vals = call <4 x i16> @llvm.masked.gather.v4i16(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 true), <4 x i16> poison)
store <4 x i16> %vals, ptr %a
ret void
}
@@ -170,7 +166,7 @@ define void @masked_gather_v8i16(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: str q0, [x0]
; VBITS_GE_512-NEXT: ret
%ptrs = load <8 x ptr>, ptr %b
- %vals = call <8 x i16> @llvm.masked.gather.v8i16(<8 x ptr> %ptrs, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+ %vals = call <8 x i16> @llvm.masked.gather.v8i16(<8 x ptr> %ptrs, i32 8, <8 x i1> splat (i1 true), <8 x i16> poison)
store <8 x i16> %vals, ptr %a
ret void
}
@@ -184,8 +180,7 @@ define void @masked_gather_v16i16(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: st1h { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <16 x ptr>, ptr %b
- %vals = call <16 x i16> @llvm.masked.gather.v16i16(<16 x ptr> %ptrs, i32 8, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i16> undef)
+ %vals = call <16 x i16> @llvm.masked.gather.v16i16(<16 x ptr> %ptrs, i32 8, <16 x i1> splat (i1 true), <16 x i16> poison)
store <16 x i16> %vals, ptr %a
ret void
}
@@ -199,10 +194,7 @@ define void @masked_gather_v32i16(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: st1h { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <32 x ptr>, ptr %b
- %vals = call <32 x i16> @llvm.masked.gather.v32i16(<32 x ptr> %ptrs, i32 8, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i16> undef)
+ %vals = call <32 x i16> @llvm.masked.gather.v32i16(<32 x ptr> %ptrs, i32 8, <32 x i1> splat (i1 true), <32 x i16> poison)
store <32 x i16> %vals, ptr %a
ret void
}
@@ -221,7 +213,7 @@ define void @masked_gather_v2i32(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: str d0, [x0]
; CHECK-NEXT: ret
%ptrs = load <2 x ptr>, ptr %b
- %vals = call <2 x i32> @llvm.masked.gather.v2i32(<2 x ptr> %ptrs, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i32> undef)
+ %vals = call <2 x i32> @llvm.masked.gather.v2i32(<2 x ptr> %ptrs, i32 8, <2 x i1> splat (i1 true), <2 x i32> poison)
store <2 x i32> %vals, ptr %a
ret void
}
@@ -236,7 +228,7 @@ define void @masked_gather_v4i32(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
%ptrs = load <4 x ptr>, ptr %b
- %vals = call <4 x i32> @llvm.masked.gather.v4i32(<4 x ptr> %ptrs, i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+ %vals = call <4 x i32> @llvm.masked.gather.v4i32(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 true), <4 x i32> poison)
store <4 x i32> %vals, ptr %a
ret void
}
@@ -266,7 +258,7 @@ define void @masked_gather_v8i32(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: st1w { z0.d }, p0, [x0]
; VBITS_GE_512-NEXT: ret
%ptrs = load <8 x ptr>, ptr %b
- %vals = call <8 x i32> @llvm.masked.gather.v8i32(<8 x ptr> %ptrs, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+ %vals = call <8 x i32> @llvm.masked.gather.v8i32(<8 x ptr> %ptrs, i32 8, <8 x i1> splat (i1 true), <8 x i32> poison)
store <8 x i32> %vals, ptr %a
ret void
}
@@ -280,8 +272,7 @@ define void @masked_gather_v16i32(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: st1w { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <16 x ptr>, ptr %b
- %vals = call <16 x i32> @llvm.masked.gather.v16i32(<16 x ptr> %ptrs, i32 8, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i32> undef)
+ %vals = call <16 x i32> @llvm.masked.gather.v16i32(<16 x ptr> %ptrs, i32 8, <16 x i1> splat (i1 true), <16 x i32> poison)
store <16 x i32> %vals, ptr %a
ret void
}
@@ -295,10 +286,7 @@ define void @masked_gather_v32i32(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: st1w { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <32 x ptr>, ptr %b
- %vals = call <32 x i32> @llvm.masked.gather.v32i32(<32 x ptr> %ptrs, i32 8, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i32> undef)
+ %vals = call <32 x i32> @llvm.masked.gather.v32i32(<32 x ptr> %ptrs, i32 8, <32 x i1> splat (i1 true), <32 x i32> poison)
store <32 x i32> %vals, ptr %a
ret void
}
@@ -316,7 +304,7 @@ define void @masked_gather_v2i64(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
%ptrs = load <2 x ptr>, ptr %b
- %vals = call <2 x i64> @llvm.masked.gather.v2i64(<2 x ptr> %ptrs, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> undef)
+ %vals = call <2 x i64> @llvm.masked.gather.v2i64(<2 x ptr> %ptrs, i32 8, <2 x i1> splat (i1 true), <2 x i64> poison)
store <2 x i64> %vals, ptr %a
ret void
}
@@ -330,7 +318,7 @@ define void @masked_gather_v4i64(ptr %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <4 x ptr>, ptr %b
- %vals = call <4 x i64> @llvm.masked.gather.v4i64(<4 x ptr> %ptrs, i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i64> undef)
+ %vals = call <4 x i64> @llvm.masked.gather.v4i64(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 true), <4 x i64> poison)
store <4 x i64> %vals, ptr %a
ret void
}
@@ -356,7 +344,7 @@ define void @masked_gather_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0]
; VBITS_GE_512-NEXT: ret
%ptrs = load <8 x ptr>, ptr %b
- %vals = call <8 x i64> @llvm.masked.gather.v8i64(<8 x ptr> %ptrs, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i64> undef)
+ %vals = call <8 x i64> @llvm.masked.gather.v8i64(<8 x ptr> %ptrs, i32 8, <8 x i1> splat (i1 true), <8 x i64> poison)
store <8 x i64> %vals, ptr %a
ret void
}
@@ -370,8 +358,7 @@ define void @masked_gather_v16i64(ptr %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <16 x ptr>, ptr %b
- %vals = call <16 x i64> @llvm.masked.gather.v16i64(<16 x ptr> %ptrs, i32 8, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i64> undef)
+ %vals = call <16 x i64> @llvm.masked.gather.v16i64(<16 x ptr> %ptrs, i32 8, <16 x i1> splat (i1 true), <16 x i64> poison)
store <16 x i64> %vals, ptr %a
ret void
}
@@ -385,10 +372,7 @@ define void @masked_gather_v32i64(ptr %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%ptrs = load <32 x ptr>, ptr %b
- %vals = call <32 x i64> @llvm.masked.gather.v32i64(<32 x ptr> %ptrs, i32 8, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true,
- i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i64> undef)
+ %vals = call <32 x i64> @llvm.masked.gather.v32i64(<32 x ptr> %ptrs, i32 8, <32 x i1> splat (i1 true), <32 x i64> poison)
store <32 x i64> %vals, ptr %a
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index 27e95489f8ad7..5516a4716d59d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -29,7 +29,7 @@ define void @masked_gather_v2i8(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <2 x i8>, ptr %a
%ptrs = load <2 x ptr>, ptr %b
%mask = icmp eq <2 x i8> %cval, zeroinitializer
- %vals = call <2 x i8> @llvm.masked.gather.v2i8(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x i8> undef)
+ %vals = call <2 x i8> @llvm.masked.gather.v2i8(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x i8> poison)
store <2 x i8> %vals, ptr %a
ret void
}
@@ -51,7 +51,7 @@ define void @masked_gather_v4i8(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <4 x i8>, ptr %a
%ptrs = load <4 x ptr>, ptr %b
%mask = icmp eq <4 x i8> %cval, zeroinitializer
- %vals = call <4 x i8> @llvm.masked.gather.v4i8(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x i8> undef)
+ %vals = call <4 x i8> @llvm.masked.gather.v4i8(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x i8> poison)
store <4 x i8> %vals, ptr %a
ret void
}
@@ -106,7 +106,7 @@ define void @masked_gather_v8i8(ptr %a, ptr %b) #0 {
%cval = load <8 x i8>, ptr %a
%ptrs = load <8 x ptr>, ptr %b
%mask = icmp eq <8 x i8> %cval, zeroinitializer
- %vals = call <8 x i8> @llvm.masked.gather.v8i8(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i8> undef)
+ %vals = call <8 x i8> @llvm.masked.gather.v8i8(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i8> poison)
store <8 x i8> %vals, ptr %a
ret void
}
@@ -131,7 +131,7 @@ define void @masked_gather_v16i8(ptr %a, ptr %b) vscale_range(8,0) #0 {
%cval = load <16 x i8>, ptr %a
%ptrs = load <16 x ptr>, ptr %b
%mask = icmp eq <16 x i8> %cval, zeroinitializer
- %vals = call <16 x i8> @llvm.masked.gather.v16i8(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x i8> undef)
+ %vals = call <16 x i8> @llvm.masked.gather.v16i8(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x i8> poison)
store <16 x i8> %vals, ptr %a
ret void
}
@@ -153,7 +153,7 @@ define void @masked_gather_v32i8(ptr %a, ptr %b) vscale_range(16,0) #0 {
%cval = load <32 x i8>, ptr %a
%ptrs = load <32 x ptr>, ptr %b
%mask = icmp eq <32 x i8> %cval, zeroinitializer
- %vals = call <32 x i8> @llvm.masked.gather.v32i8(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x i8> undef)
+ %vals = call <32 x i8> @llvm.masked.gather.v32i8(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x i8> poison)
store <32 x i8> %vals, ptr %a
ret void
}
@@ -182,7 +182,7 @@ define void @masked_gather_v2i16(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <2 x i16>, ptr %a
%ptrs = load <2 x ptr>, ptr %b
%mask = icmp eq <2 x i16> %cval, zeroinitializer
- %vals = call <2 x i16> @llvm.masked.gather.v2i16(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x i16> undef)
+ %vals = call <2 x i16> @llvm.masked.gather.v2i16(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x i16> poison)
store <2 x i16> %vals, ptr %a
ret void
}
@@ -205,7 +205,7 @@ define void @masked_gather_v4i16(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <4 x i16>, ptr %a
%ptrs = load <4 x ptr>, ptr %b
%mask = icmp eq <4 x i16> %cval, zeroinitializer
- %vals = call <4 x i16> @llvm.masked.gather.v4i16(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x i16> undef)
+ %vals = call <4 x i16> @llvm.masked.gather.v4i16(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x i16> poison)
store <4 x i16> %vals, ptr %a
ret void
}
@@ -253,7 +253,7 @@ define void @masked_gather_v8i16(ptr %a, ptr %b) #0 {
%cval = load <8 x i16>, ptr %a
%ptrs = load <8 x ptr>, ptr %b
%mask = icmp eq <8 x i16> %cval, zeroinitializer
- %vals = call <8 x i16> @llvm.masked.gather.v8i16(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i16> undef)
+ %vals = call <8 x i16> @llvm.masked.gather.v8i16(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i16> poison)
store <8 x i16> %vals, ptr %a
ret void
}
@@ -274,7 +274,7 @@ define void @masked_gather_v16i16(ptr %a, ptr %b) vscale_range(8,0) #0 {
%cval = load <16 x i16>, ptr %a
%ptrs = load <16 x ptr>, ptr %b
%mask = icmp eq <16 x i16> %cval, zeroinitializer
- %vals = call <16 x i16> @llvm.masked.gather.v16i16(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x i16> undef)
+ %vals = call <16 x i16> @llvm.masked.gather.v16i16(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x i16> poison)
store <16 x i16> %vals, ptr %a
ret void
}
@@ -295,7 +295,7 @@ define void @masked_gather_v32i16(ptr %a, ptr %b) vscale_range(16,0) #0 {
%cval = load <32 x i16>, ptr %a
%ptrs = load <32 x ptr>, ptr %b
%mask = icmp eq <32 x i16> %cval, zeroinitializer
- %vals = call <32 x i16> @llvm.masked.gather.v32i16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x i16> undef)
+ %vals = call <32 x i16> @llvm.masked.gather.v32i16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x i16> poison)
store <32 x i16> %vals, ptr %a
ret void
}
@@ -320,7 +320,7 @@ define void @masked_gather_v2i32(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <2 x i32>, ptr %a
%ptrs = load <2 x ptr>, ptr %b
%mask = icmp eq <2 x i32> %cval, zeroinitializer
- %vals = call <2 x i32> @llvm.masked.gather.v2i32(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x i32> undef)
+ %vals = call <2 x i32> @llvm.masked.gather.v2i32(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x i32> poison)
store <2 x i32> %vals, ptr %a
ret void
}
@@ -341,7 +341,7 @@ define void @masked_gather_v4i32(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <4 x i32>, ptr %a
%ptrs = load <4 x ptr>, ptr %b
%mask = icmp eq <4 x i32> %cval, zeroinitializer
- %vals = call <4 x i32> @llvm.masked.gather.v4i32(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x i32> undef)
+ %vals = call <4 x i32> @llvm.masked.gather.v4i32(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x i32> poison)
store <4 x i32> %vals, ptr %a
ret void
}
@@ -385,7 +385,7 @@ define void @masked_gather_v8i32(ptr %a, ptr %b) #0 {
%cval = load <8 x i32>, ptr %a
%ptrs = load <8 x ptr>, ptr %b
%mask = icmp eq <8 x i32> %cval, zeroinitializer
- %vals = call <8 x i32> @llvm.masked.gather.v8i32(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i32> undef)
+ %vals = call <8 x i32> @llvm.masked.gather.v8i32(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i32> poison)
store <8 x i32> %vals, ptr %a
ret void
}
@@ -405,7 +405,7 @@ define void @masked_gather_v16i32(ptr %a, ptr %b) vscale_range(8,0) #0 {
%cval = load <16 x i32>, ptr %a
%ptrs = load <16 x ptr>, ptr %b
%mask = icmp eq <16 x i32> %cval, zeroinitializer
- %vals = call <16 x i32> @llvm.masked.gather.v16i32(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x i32> undef)
+ %vals = call <16 x i32> @llvm.masked.gather.v16i32(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x i32> poison)
store <16 x i32> %vals, ptr %a
ret void
}
@@ -425,7 +425,7 @@ define void @masked_gather_v32i32(ptr %a, ptr %b) vscale_range(16,0) #0 {
%cval = load <32 x i32>, ptr %a
%ptrs = load <32 x ptr>, ptr %b
%mask = icmp eq <32 x i32> %cval, zeroinitializer
- %vals = call <32 x i32> @llvm.masked.gather.v32i32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x i32> undef)
+ %vals = call <32 x i32> @llvm.masked.gather.v32i32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x i32> poison)
store <32 x i32> %vals, ptr %a
ret void
}
@@ -452,7 +452,7 @@ define void @masked_gather_v1i64(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <1 x i64>, ptr %a
%ptrs = load <1 x ptr>, ptr %b
%mask = icmp eq <1 x i64> %cval, zeroinitializer
- %vals = call <1 x i64> @llvm.masked.gather.v1i64(<1 x ptr> %ptrs, i32 8, <1 x i1> %mask, <1 x i64> undef)
+ %vals = call <1 x i64> @llvm.masked.gather.v1i64(<1 x ptr> %ptrs, i32 8, <1 x i1> %mask, <1 x i64> poison)
store <1 x i64> %vals, ptr %a
ret void
}
@@ -471,7 +471,7 @@ define void @masked_gather_v2i64(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <2 x i64>, ptr %a
%ptrs = load <2 x ptr>, ptr %b
%mask = icmp eq <2 x i64> %cval, zeroinitializer
- %vals = call <2 x i64> @llvm.masked.gather.v2i64(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x i64> undef)
+ %vals = call <2 x i64> @llvm.masked.gather.v2i64(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x i64> poison)
store <2 x i64> %vals, ptr %a
ret void
}
@@ -489,7 +489,7 @@ define void @masked_gather_v4i64(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <4 x i64>, ptr %a
%ptrs = load <4 x ptr>, ptr %b
%mask = icmp eq <4 x i64> %cval, zeroinitializer
- %vals = call <4 x i64> @llvm.masked.gather.v4i64(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x i64> undef)
+ %vals = call <4 x i64> @llvm.masked.gather.v4i64(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x i64> poison)
store <4 x i64> %vals, ptr %a
ret void
}
@@ -523,7 +523,7 @@ define void @masked_gather_v8i64(ptr %a, ptr %b) #0 {
%cval = load <8 x i64>, ptr %a
%ptrs = load <8 x ptr>, ptr %b
%mask = icmp eq <8 x i64> %cval, zeroinitializer
- %vals = call <8 x i64> @llvm.masked.gather.v8i64(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i64> undef)
+ %vals = call <8 x i64> @llvm.masked.gather.v8i64(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i64> poison)
store <8 x i64> %vals, ptr %a
ret void
}
@@ -541,7 +541,7 @@ define void @masked_gather_v16i64(ptr %a, ptr %b) vscale_range(8,0) #0 {
%cval = load <16 x i64>, ptr %a
%ptrs = load <16 x ptr>, ptr %b
%mask = icmp eq <16 x i64> %cval, zeroinitializer
- %vals = call <16 x i64> @llvm.masked.gather.v16i64(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x i64> undef)
+ %vals = call <16 x i64> @llvm.masked.gather.v16i64(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x i64> poison)
store <16 x i64> %vals, ptr %a
ret void
}
@@ -559,7 +559,7 @@ define void @masked_gather_v32i64(ptr %a, ptr %b) vscale_range(16,0) #0 {
%cval = load <32 x i64>, ptr %a
%ptrs = load <32 x ptr>, ptr %b
%mask = icmp eq <32 x i64> %cval, zeroinitializer
- %vals = call <32 x i64> @llvm.masked.gather.v32i64(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x i64> undef)
+ %vals = call <32 x i64> @llvm.masked.gather.v32i64(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x i64> poison)
store <32 x i64> %vals, ptr %a
ret void
}
@@ -591,7 +591,7 @@ define void @masked_gather_v2f16(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <2 x half>, ptr %a
%ptrs = load <2 x ptr>, ptr %b
%mask = fcmp oeq <2 x half> %cval, zeroinitializer
- %vals = call <2 x half> @llvm.masked.gather.v2f16(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x half> undef)
+ %vals = call <2 x half> @llvm.masked.gather.v2f16(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x half> poison)
store <2 x half> %vals, ptr %a
ret void
}
@@ -614,7 +614,7 @@ define void @masked_gather_v4f16(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <4 x half>, ptr %a
%ptrs = load <4 x ptr>, ptr %b
%mask = fcmp oeq <4 x half> %cval, zeroinitializer
- %vals = call <4 x half> @llvm.masked.gather.v4f16(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x half> undef)
+ %vals = call <4 x half> @llvm.masked.gather.v4f16(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x half> poison)
store <4 x half> %vals, ptr %a
ret void
}
@@ -662,7 +662,7 @@ define void @masked_gather_v8f16(ptr %a, ptr %b) #0 {
%cval = load <8 x half>, ptr %a
%ptrs = load <8 x ptr>, ptr %b
%mask = fcmp oeq <8 x half> %cval, zeroinitializer
- %vals = call <8 x half> @llvm.masked.gather.v8f16(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x half> undef)
+ %vals = call <8 x half> @llvm.masked.gather.v8f16(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x half> poison)
store <8 x half> %vals, ptr %a
ret void
}
@@ -683,7 +683,7 @@ define void @masked_gather_v16f16(ptr %a, ptr %b) vscale_range(8,0) #0 {
%cval = load <16 x half>, ptr %a
%ptrs = load <16 x ptr>, ptr %b
%mask = fcmp oeq <16 x half> %cval, zeroinitializer
- %vals = call <16 x half> @llvm.masked.gather.v16f16(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x half> undef)
+ %vals = call <16 x half> @llvm.masked.gather.v16f16(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x half> poison)
store <16 x half> %vals, ptr %a
ret void
}
@@ -704,7 +704,7 @@ define void @masked_gather_v32f16(ptr %a, ptr %b) vscale_range(16,0) #0 {
%cval = load <32 x half>, ptr %a
%ptrs = load <32 x ptr>, ptr %b
%mask = fcmp oeq <32 x half> %cval, zeroinitializer
- %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+ %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> poison)
store <32 x half> %vals, ptr %a
ret void
}
@@ -729,7 +729,7 @@ define void @masked_gather_v2f32(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <2 x float>, ptr %a
%ptrs = load <2 x ptr>, ptr %b
%mask = fcmp oeq <2 x float> %cval, zeroinitializer
- %vals = call <2 x float> @llvm.masked.gather.v2f32(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x float> undef)
+ %vals = call <2 x float> @llvm.masked.gather.v2f32(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x float> poison)
store <2 x float> %vals, ptr %a
ret void
}
@@ -750,7 +750,7 @@ define void @masked_gather_v4f32(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <4 x float>, ptr %a
%ptrs = load <4 x ptr>, ptr %b
%mask = fcmp oeq <4 x float> %cval, zeroinitializer
- %vals = call <4 x float> @llvm.masked.gather.v4f32(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x float> undef)
+ %vals = call <4 x float> @llvm.masked.gather.v4f32(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x float> poison)
store <4 x float> %vals, ptr %a
ret void
}
@@ -794,7 +794,7 @@ define void @masked_gather_v8f32(ptr %a, ptr %b) #0 {
%cval = load <8 x float>, ptr %a
%ptrs = load <8 x ptr>, ptr %b
%mask = fcmp oeq <8 x float> %cval, zeroinitializer
- %vals = call <8 x float> @llvm.masked.gather.v8f32(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x float> undef)
+ %vals = call <8 x float> @llvm.masked.gather.v8f32(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x float> poison)
store <8 x float> %vals, ptr %a
ret void
}
@@ -814,7 +814,7 @@ define void @masked_gather_v16f32(ptr %a, ptr %b) vscale_range(8,0) #0 {
%cval = load <16 x float>, ptr %a
%ptrs = load <16 x ptr>, ptr %b
%mask = fcmp oeq <16 x float> %cval, zeroinitializer
- %vals = call <16 x float> @llvm.masked.gather.v16f32(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x float> undef)
+ %vals = call <16 x float> @llvm.masked.gather.v16f32(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x float> poison)
store <16 x float> %vals, ptr %a
ret void
}
@@ -834,7 +834,7 @@ define void @masked_gather_v32f32(ptr %a, ptr %b) vscale_range(16,0) #0 {
%cval = load <32 x float>, ptr %a
%ptrs = load <32 x ptr>, ptr %b
%mask = fcmp oeq <32 x float> %cval, zeroinitializer
- %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef)
+ %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> poison)
store <32 x float> %vals, ptr %a
ret void
}
@@ -861,7 +861,7 @@ define void @masked_gather_v1f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <1 x double>, ptr %a
%ptrs = load <1 x ptr>, ptr %b
%mask = fcmp oeq <1 x double> %cval, zeroinitializer
- %vals = call <1 x double> @llvm.masked.gather.v1f64(<1 x ptr> %ptrs, i32 8, <1 x i1> %mask, <1 x double> undef)
+ %vals = call <1 x double> @llvm.masked.gather.v1f64(<1 x ptr> %ptrs, i32 8, <1 x i1> %mask, <1 x double> poison)
store <1 x double> %vals, ptr %a
ret void
}
@@ -880,7 +880,7 @@ define void @masked_gather_v2f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <2 x double>, ptr %a
%ptrs = load <2 x ptr>, ptr %b
%mask = fcmp oeq <2 x double> %cval, zeroinitializer
- %vals = call <2 x double> @llvm.masked.gather.v2f64(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x double> undef)
+ %vals = call <2 x double> @llvm.masked.gather.v2f64(<2 x ptr> %ptrs, i32 8, <2 x i1> %mask, <2 x double> poison)
store <2 x double> %vals, ptr %a
ret void
}
@@ -898,7 +898,7 @@ define void @masked_gather_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
%cval = load <4 x double>, ptr %a
%ptrs = load <4 x ptr>, ptr %b
%mask = fcmp oeq <4 x double> %cval, zeroinitializer
- %vals = call <4 x double> @llvm.masked.gather.v4f64(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x double> undef)
+ %vals = call <4 x double> @llvm.masked.gather.v4f64(<4 x ptr> %ptrs, i32 8, <4 x i1> %mask, <4 x double> poison)
store <4 x double> %vals, ptr %a
ret void
}
@@ -932,7 +932,7 @@ define void @masked_gather_v8f64(ptr %a, ptr %b) #0 {
%cval = load <8 x double>, ptr %a
%ptrs = load <8 x ptr>, ptr %b
%mask = fcmp oeq <8 x double> %cval, zeroinitializer
- %vals = call <8 x double> @llvm.masked.gather.v8f64(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x double> undef)
+ %vals = call <8 x double> @llvm.masked.gather.v8f64(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x double> poison)
store <8 x double> %vals, ptr %a
ret void
}
@@ -950,7 +950,7 @@ define void @masked_gather_v16f64(ptr %a, ptr %b) vscale_range(8,0) #0 {
%cval = load <16 x double>, ptr %a
%ptrs = load <16 x ptr>, ptr %b
%mask = fcmp oeq <16 x double> %cval, zeroinitializer
- %vals = call <16 x double> @llvm.masked.gather.v16f64(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x double> undef)
+ %vals = call <16 x double> @llvm.masked.gather.v16f64(<16 x ptr> %ptrs, i32 8, <16 x i1> %mask, <16 x double> poison)
store <16 x double> %vals, ptr %a
ret void
}
@@ -968,7 +968,7 @@ define void @masked_gather_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 {
%cval = load <32 x double>, ptr %a
%ptrs = load <32 x ptr>, ptr %b
%mask = fcmp oeq <32 x double> %cval, zeroinitializer
- %vals = call <32 x double> @llvm.masked.gather.v32f64(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x double> undef)
+ %vals = call <32 x double> @llvm.masked.gather.v32f64(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x double> poison)
store <32 x double> %vals, ptr %a
ret void
}
@@ -993,7 +993,7 @@ define void @masked_gather_32b_scaled_sext_f16(ptr %a, ptr %b, ptr %base) vscale
%ext = sext <32 x i32> %idxs to <32 x i64>
%ptrs = getelementptr half, ptr %base, <32 x i64> %ext
%mask = fcmp oeq <32 x half> %cvals, zeroinitializer
- %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+ %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> poison)
store <32 x half> %vals, ptr %a
ret void
}
@@ -1013,7 +1013,7 @@ define void @masked_gather_32b_scaled_sext_f32(ptr %a, ptr %b, ptr %base) vscale
%ext = sext <32 x i32> %idxs to <32 x i64>
%ptrs = getelementptr float, ptr %base, <32 x i64> %ext
%mask = fcmp oeq <32 x float> %cvals, zeroinitializer
- %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef)
+ %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> poison)
store <32 x float> %vals, ptr %a
ret void
}
@@ -1033,7 +1033,7 @@ define void @masked_gather_32b_scaled_sext_f64(ptr %a, ptr %b, ptr %base) vscale
%ext = sext <32 x i32> %idxs to <32 x i64>
%ptrs = getelementptr double, ptr %base, <32 x i64> %ext
%mask = fcmp oeq <32 x double> %cvals, zeroinitializer
- %vals = call <32 x double> @llvm.masked.gather.v32f64(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x double> undef)
+ %vals = call <32 x double> @llvm.masked.gather.v32f64(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x double> poison)
store <32 x double> %vals, ptr %a
ret void
}
@@ -1055,7 +1055,7 @@ define void @masked_gather_32b_scaled_zext(ptr %a, ptr %b, ptr %base) vscale_ran
%ext = zext <32 x i32> %idxs to <32 x i64>
%ptrs = getelementptr half, ptr %base, <32 x i64> %ext
%mask = fcmp oeq <32 x half> %cvals, zeroinitializer
- %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+ %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> poison)
store <32 x half> %vals, ptr %a
ret void
}
@@ -1078,7 +1078,7 @@ define void @masked_gather_32b_unscaled_sext(ptr %a, ptr %b, ptr %base) vscale_r
%byte_ptrs = getelementptr i8, ptr %base, <32 x i64> %ext
%ptrs = bitcast <32 x ptr> %byte_ptrs to <32 x ptr>
%mask = fcmp oeq <32 x half> %cvals, zeroinitializer
- %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+ %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> poison)
store <32 x half> %vals, ptr %a
ret void
}
@@ -1101,7 +1101,7 @@ define void @masked_gather_32b_unscaled_zext(ptr %a, ptr %b, ptr %base) vscale_r
%byte_ptrs = getelementptr i8, ptr %base, <32 x i64> %ext
%ptrs = bitcast <32 x ptr> %byte_ptrs to <32 x ptr>
%mask = fcmp oeq <32 x half> %cvals, zeroinitializer
- %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> undef)
+ %vals = call <32 x half> @llvm.masked.gather.v32f16(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x half> poison)
store <32 x half> %vals, ptr %a
ret void
}
@@ -1122,7 +1122,7 @@ define void @masked_gather_64b_scaled(ptr %a, ptr %b, ptr %base) vscale_range(16
%idxs = load <32 x i64>, ptr %b
%ptrs = getelementptr float, ptr %base, <32 x i64> %idxs
%mask = fcmp oeq <32 x float> %cvals, zeroinitializer
- %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef)
+ %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> poison)
store <32 x float> %vals, ptr %a
ret void
}
@@ -1144,7 +1144,7 @@ define void @masked_gather_64b_unscaled(ptr %a, ptr %b, ptr %base) vscale_range(
%byte_ptrs = getelementptr i8, ptr %base, <32 x i64> %idxs
%ptrs = bitcast <32 x ptr> %byte_ptrs to <32 x ptr>
%mask = fcmp oeq <32 x float> %cvals, zeroinitializer
- %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef)
+ %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> poison)
store <32 x float> %vals, ptr %a
ret void
}
@@ -1166,7 +1166,7 @@ define void @masked_gather_vec_plus_reg(ptr %a, ptr %b, i64 %off) vscale_range(1
%byte_ptrs = getelementptr i8, <32 x ptr> %bases, i64 %off
%ptrs = bitcast <32 x ptr> %byte_ptrs to <32 x ptr>
%mask = fcmp oeq <32 x float> %cvals, zeroinitializer
- %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef)
+ %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> poison)
store <32 x float> %vals, ptr %a
ret void
}
@@ -1188,7 +1188,7 @@ define void @masked_gather_vec_plus_imm(ptr %a, ptr %b) vscale_range(16,0) #0 {
%byte_ptrs = getelementptr i8, <32 x ptr> %bases, i64 4
%ptrs = bitcast <32 x ptr> %byte_ptrs to <32 x ptr>
%mask = fcmp oeq <32 x float> %cvals, zeroinitializer
- %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> undef)
+ %vals = call <32 x float> @llvm.masked.gather.v32f32(<32 x ptr> %ptrs, i32 8, <32 x i1> %mask, <32 x float> poison)
store <32 x float> %vals, ptr %a
ret void
}
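For reference, the gather idiom ported in the hunks above can be reduced to a
minimal standalone function (illustrative sketch only, not part of the patch;
the function name is made up). The passthrough operand is poison, so inactive
lanes carry no defined value, which better matches the IR real frontends and
vectorizers emit:

; Masked gather with a poison passthrough: lanes whose mask bit is false
; yield poison.
define <4 x i32> @gather_sketch(<4 x ptr> %ptrs, <4 x i1> %mask) {
  %vals = call <4 x i32> @llvm.masked.gather.v4i32(<4 x ptr> %ptrs, i32 4, <4 x i1> %mask, <4 x i32> poison)
  ret <4 x i32> %vals
}
declare <4 x i32> @llvm.masked.gather.v4i32(<4 x ptr>, i32, <4 x i1>, <4 x i32>)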
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
index c22d9e71c51a9..6513b01d00922 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
@@ -185,7 +185,7 @@ define void @masked_load_v64i8(ptr %ap, ptr %bp, ptr %c) #0 {
%a = load <64 x i8>, ptr %ap
%b = load <64 x i8>, ptr %bp
%mask = icmp eq <64 x i8> %a, %b
- %load = call <64 x i8> @llvm.masked.load.v64i8(ptr %ap, i32 8, <64 x i1> %mask, <64 x i8> undef)
+ %load = call <64 x i8> @llvm.masked.load.v64i8(ptr %ap, i32 8, <64 x i1> %mask, <64 x i8> poison)
store <64 x i8> %load, ptr %c
ret void
}
@@ -219,7 +219,7 @@ define void @masked_load_v32i16(ptr %ap, ptr %bp, ptr %c) #0 {
%a = load <32 x i16>, ptr %ap
%b = load <32 x i16>, ptr %bp
%mask = icmp eq <32 x i16> %a, %b
- %load = call <32 x i16> @llvm.masked.load.v32i16(ptr %ap, i32 8, <32 x i1> %mask, <32 x i16> undef)
+ %load = call <32 x i16> @llvm.masked.load.v32i16(ptr %ap, i32 8, <32 x i1> %mask, <32 x i16> poison)
store <32 x i16> %load, ptr %c
ret void
}
@@ -253,7 +253,7 @@ define void @masked_load_v16i32(ptr %ap, ptr %bp, ptr %c) #0 {
%a = load <16 x i32>, ptr %ap
%b = load <16 x i32>, ptr %bp
%mask = icmp eq <16 x i32> %a, %b
- %load = call <16 x i32> @llvm.masked.load.v16i32(ptr %ap, i32 8, <16 x i1> %mask, <16 x i32> undef)
+ %load = call <16 x i32> @llvm.masked.load.v16i32(ptr %ap, i32 8, <16 x i1> %mask, <16 x i32> poison)
store <16 x i32> %load, ptr %c
ret void
}
@@ -287,7 +287,7 @@ define void @masked_load_v8i64(ptr %ap, ptr %bp, ptr %c) #0 {
%a = load <8 x i64>, ptr %ap
%b = load <8 x i64>, ptr %bp
%mask = icmp eq <8 x i64> %a, %b
- %load = call <8 x i64> @llvm.masked.load.v8i64(ptr %ap, i32 8, <8 x i1> %mask, <8 x i64> undef)
+ %load = call <8 x i64> @llvm.masked.load.v8i64(ptr %ap, i32 8, <8 x i1> %mask, <8 x i64> poison)
store <8 x i64> %load, ptr %c
ret void
}
@@ -392,7 +392,7 @@ define void @masked_load_sext_v32i8i16(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <32 x i8>, ptr %bp
%mask = icmp eq <32 x i8> %b, zeroinitializer
- %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+ %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> poison)
%ext = sext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, ptr %c
ret void
@@ -427,7 +427,7 @@ define void @masked_load_sext_v16i8i32(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <16 x i8>, ptr %bp
%mask = icmp eq <16 x i8> %b, zeroinitializer
- %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
+ %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %ap, i32 8, <16 x i1> %mask, <16 x i8> poison)
%ext = sext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, ptr %c
ret void
@@ -463,7 +463,7 @@ define void @masked_load_sext_v8i8i64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i8>, ptr %bp
%mask = icmp eq <8 x i8> %b, zeroinitializer
- %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
+ %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %ap, i32 8, <8 x i1> %mask, <8 x i8> poison)
%ext = sext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -495,7 +495,7 @@ define void @masked_load_sext_v16i16i32(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <16 x i16>, ptr %bp
%mask = icmp eq <16 x i16> %b, zeroinitializer
- %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
+ %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %ap, i32 8, <16 x i1> %mask, <16 x i16> poison)
%ext = sext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, ptr %c
ret void
@@ -530,7 +530,7 @@ define void @masked_load_sext_v8i16i64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i16>, ptr %bp
%mask = icmp eq <8 x i16> %b, zeroinitializer
- %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
+ %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %ap, i32 8, <8 x i1> %mask, <8 x i16> poison)
%ext = sext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -562,7 +562,7 @@ define void @masked_load_sext_v8i32i64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i32>, ptr %bp
%mask = icmp eq <8 x i32> %b, zeroinitializer
- %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
+ %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> poison)
%ext = sext <8 x i32> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -594,7 +594,7 @@ define void @masked_load_zext_v32i8i16(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <32 x i8>, ptr %bp
%mask = icmp eq <32 x i8> %b, zeroinitializer
- %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+ %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> poison)
%ext = zext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, ptr %c
ret void
@@ -629,7 +629,7 @@ define void @masked_load_zext_v16i8i32(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <16 x i8>, ptr %bp
%mask = icmp eq <16 x i8> %b, zeroinitializer
- %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
+ %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %ap, i32 8, <16 x i1> %mask, <16 x i8> poison)
%ext = zext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, ptr %c
ret void
@@ -665,7 +665,7 @@ define void @masked_load_zext_v8i8i64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i8>, ptr %bp
%mask = icmp eq <8 x i8> %b, zeroinitializer
- %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
+ %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %ap, i32 8, <8 x i1> %mask, <8 x i8> poison)
%ext = zext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -697,7 +697,7 @@ define void @masked_load_zext_v16i16i32(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <16 x i16>, ptr %bp
%mask = icmp eq <16 x i16> %b, zeroinitializer
- %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
+ %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %ap, i32 8, <16 x i1> %mask, <16 x i16> poison)
%ext = zext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, ptr %c
ret void
@@ -732,7 +732,7 @@ define void @masked_load_zext_v8i16i64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i16>, ptr %bp
%mask = icmp eq <8 x i16> %b, zeroinitializer
- %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
+ %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %ap, i32 8, <8 x i1> %mask, <8 x i16> poison)
%ext = zext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -764,7 +764,7 @@ define void @masked_load_zext_v8i32i64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i32>, ptr %bp
%mask = icmp eq <8 x i32> %b, zeroinitializer
- %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
+ %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> poison)
%ext = zext <8 x i32> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -805,7 +805,7 @@ define void @masked_load_sext_v32i8i16_m16(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <32 x i16>, ptr %bp
%mask = icmp eq <32 x i16> %b, zeroinitializer
- %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+ %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> poison)
%ext = sext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, ptr %c
ret void
@@ -849,7 +849,7 @@ define void @masked_load_sext_v16i8i32_m32(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <16 x i32>, ptr %bp
%mask = icmp eq <16 x i32> %b, zeroinitializer
- %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
+ %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %ap, i32 8, <16 x i1> %mask, <16 x i8> poison)
%ext = sext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, ptr %c
ret void
@@ -895,7 +895,7 @@ define void @masked_load_sext_v8i8i64_m64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i64>, ptr %bp
%mask = icmp eq <8 x i64> %b, zeroinitializer
- %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
+ %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %ap, i32 8, <8 x i1> %mask, <8 x i8> poison)
%ext = sext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -938,7 +938,7 @@ define void @masked_load_sext_v16i16i32_m32(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <16 x i32>, ptr %bp
%mask = icmp eq <16 x i32> %b, zeroinitializer
- %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
+ %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %ap, i32 8, <16 x i1> %mask, <16 x i16> poison)
%ext = sext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, ptr %c
ret void
@@ -982,7 +982,7 @@ define void @masked_load_sext_v8i16i64_m64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i64>, ptr %bp
%mask = icmp eq <8 x i64> %b, zeroinitializer
- %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
+ %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %ap, i32 8, <8 x i1> %mask, <8 x i16> poison)
%ext = sext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -1023,7 +1023,7 @@ define void @masked_load_sext_v8i32i64_m64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i64>, ptr %bp
%mask = icmp eq <8 x i64> %b, zeroinitializer
- %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
+ %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> poison)
%ext = sext <8 x i32> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -1064,7 +1064,7 @@ define void @masked_load_zext_v32i8i16_m16(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <32 x i16>, ptr %bp
%mask = icmp eq <32 x i16> %b, zeroinitializer
- %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+ %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> poison)
%ext = zext <32 x i8> %load to <32 x i16>
store <32 x i16> %ext, ptr %c
ret void
@@ -1108,7 +1108,7 @@ define void @masked_load_zext_v16i8i32_m32(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <16 x i32>, ptr %bp
%mask = icmp eq <16 x i32> %b, zeroinitializer
- %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
+ %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %ap, i32 8, <16 x i1> %mask, <16 x i8> poison)
%ext = zext <16 x i8> %load to <16 x i32>
store <16 x i32> %ext, ptr %c
ret void
@@ -1154,7 +1154,7 @@ define void @masked_load_zext_v8i8i64_m64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i64>, ptr %bp
%mask = icmp eq <8 x i64> %b, zeroinitializer
- %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
+ %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %ap, i32 8, <8 x i1> %mask, <8 x i8> poison)
%ext = zext <8 x i8> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -1197,7 +1197,7 @@ define void @masked_load_zext_v16i16i32_m32(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <16 x i32>, ptr %bp
%mask = icmp eq <16 x i32> %b, zeroinitializer
- %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
+ %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %ap, i32 8, <16 x i1> %mask, <16 x i16> poison)
%ext = zext <16 x i16> %load to <16 x i32>
store <16 x i32> %ext, ptr %c
ret void
@@ -1241,7 +1241,7 @@ define void @masked_load_zext_v8i16i64_m64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i64>, ptr %bp
%mask = icmp eq <8 x i64> %b, zeroinitializer
- %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
+ %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %ap, i32 8, <8 x i1> %mask, <8 x i16> poison)
%ext = zext <8 x i16> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -1282,7 +1282,7 @@ define void @masked_load_zext_v8i32i64_m64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i64>, ptr %bp
%mask = icmp eq <8 x i64> %b, zeroinitializer
- %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
+ %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> poison)
%ext = zext <8 x i32> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -1299,7 +1299,7 @@ define void @masked_load_sext_v128i8i16(ptr %ap, ptr %bp, ptr %c) vscale_range(1
; CHECK-NEXT: ret
%b = load <128 x i8>, ptr %bp
%mask = icmp eq <128 x i8> %b, zeroinitializer
- %load = call <128 x i8> @llvm.masked.load.v128i8(ptr %ap, i32 8, <128 x i1> %mask, <128 x i8> undef)
+ %load = call <128 x i8> @llvm.masked.load.v128i8(ptr %ap, i32 8, <128 x i1> %mask, <128 x i8> poison)
%ext = sext <128 x i8> %load to <128 x i16>
store <128 x i16> %ext, ptr %c
ret void
@@ -1316,7 +1316,7 @@ define void @masked_load_sext_v64i8i32(ptr %ap, ptr %bp, ptr %c) vscale_range(16
; CHECK-NEXT: ret
%b = load <64 x i8>, ptr %bp
%mask = icmp eq <64 x i8> %b, zeroinitializer
- %load = call <64 x i8> @llvm.masked.load.v64i8(ptr %ap, i32 8, <64 x i1> %mask, <64 x i8> undef)
+ %load = call <64 x i8> @llvm.masked.load.v64i8(ptr %ap, i32 8, <64 x i1> %mask, <64 x i8> poison)
%ext = sext <64 x i8> %load to <64 x i32>
store <64 x i32> %ext, ptr %c
ret void
@@ -1333,7 +1333,7 @@ define void @masked_load_sext_v32i8i64(ptr %ap, ptr %bp, ptr %c) vscale_range(16
; CHECK-NEXT: ret
%b = load <32 x i8>, ptr %bp
%mask = icmp eq <32 x i8> %b, zeroinitializer
- %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+ %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> poison)
%ext = sext <32 x i8> %load to <32 x i64>
store <32 x i64> %ext, ptr %c
ret void
@@ -1350,7 +1350,7 @@ define void @masked_load_sext_v64i16i32(ptr %ap, ptr %bp, ptr %c) vscale_range(1
; CHECK-NEXT: ret
%b = load <64 x i16>, ptr %bp
%mask = icmp eq <64 x i16> %b, zeroinitializer
- %load = call <64 x i16> @llvm.masked.load.v64i16(ptr %ap, i32 8, <64 x i1> %mask, <64 x i16> undef)
+ %load = call <64 x i16> @llvm.masked.load.v64i16(ptr %ap, i32 8, <64 x i1> %mask, <64 x i16> poison)
%ext = sext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, ptr %c
ret void
@@ -1367,7 +1367,7 @@ define void @masked_load_sext_v32i16i64(ptr %ap, ptr %bp, ptr %c) vscale_range(1
; CHECK-NEXT: ret
%b = load <32 x i16>, ptr %bp
%mask = icmp eq <32 x i16> %b, zeroinitializer
- %load = call <32 x i16> @llvm.masked.load.v32i16(ptr %ap, i32 8, <32 x i1> %mask, <32 x i16> undef)
+ %load = call <32 x i16> @llvm.masked.load.v32i16(ptr %ap, i32 8, <32 x i1> %mask, <32 x i16> poison)
%ext = sext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, ptr %c
ret void
@@ -1384,7 +1384,7 @@ define void @masked_load_sext_v32i32i64(ptr %ap, ptr %bp, ptr %c) vscale_range(1
; CHECK-NEXT: ret
%b = load <32 x i32>, ptr %bp
%mask = icmp eq <32 x i32> %b, zeroinitializer
- %load = call <32 x i32> @llvm.masked.load.v32i32(ptr %ap, i32 8, <32 x i1> %mask, <32 x i32> undef)
+ %load = call <32 x i32> @llvm.masked.load.v32i32(ptr %ap, i32 8, <32 x i1> %mask, <32 x i32> poison)
%ext = sext <32 x i32> %load to <32 x i64>
store <32 x i64> %ext, ptr %c
ret void
@@ -1401,7 +1401,7 @@ define void @masked_load_zext_v128i8i16(ptr %ap, ptr %bp, ptr %c) vscale_range(1
; CHECK-NEXT: ret
%b = load <128 x i8>, ptr %bp
%mask = icmp eq <128 x i8> %b, zeroinitializer
- %load = call <128 x i8> @llvm.masked.load.v128i8(ptr %ap, i32 8, <128 x i1> %mask, <128 x i8> undef)
+ %load = call <128 x i8> @llvm.masked.load.v128i8(ptr %ap, i32 8, <128 x i1> %mask, <128 x i8> poison)
%ext = zext <128 x i8> %load to <128 x i16>
store <128 x i16> %ext, ptr %c
ret void
@@ -1418,7 +1418,7 @@ define void @masked_load_zext_v64i8i32(ptr %ap, ptr %bp, ptr %c) vscale_range(16
; CHECK-NEXT: ret
%b = load <64 x i8>, ptr %bp
%mask = icmp eq <64 x i8> %b, zeroinitializer
- %load = call <64 x i8> @llvm.masked.load.v64i8(ptr %ap, i32 8, <64 x i1> %mask, <64 x i8> undef)
+ %load = call <64 x i8> @llvm.masked.load.v64i8(ptr %ap, i32 8, <64 x i1> %mask, <64 x i8> poison)
%ext = zext <64 x i8> %load to <64 x i32>
store <64 x i32> %ext, ptr %c
ret void
@@ -1435,7 +1435,7 @@ define void @masked_load_zext_v32i8i64(ptr %ap, ptr %bp, ptr %c) vscale_range(16
; CHECK-NEXT: ret
%b = load <32 x i8>, ptr %bp
%mask = icmp eq <32 x i8> %b, zeroinitializer
- %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+ %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %ap, i32 8, <32 x i1> %mask, <32 x i8> poison)
%ext = zext <32 x i8> %load to <32 x i64>
store <32 x i64> %ext, ptr %c
ret void
@@ -1452,7 +1452,7 @@ define void @masked_load_zext_v64i16i32(ptr %ap, ptr %bp, ptr %c) vscale_range(1
; CHECK-NEXT: ret
%b = load <64 x i16>, ptr %bp
%mask = icmp eq <64 x i16> %b, zeroinitializer
- %load = call <64 x i16> @llvm.masked.load.v64i16(ptr %ap, i32 8, <64 x i1> %mask, <64 x i16> undef)
+ %load = call <64 x i16> @llvm.masked.load.v64i16(ptr %ap, i32 8, <64 x i1> %mask, <64 x i16> poison)
%ext = zext <64 x i16> %load to <64 x i32>
store <64 x i32> %ext, ptr %c
ret void
@@ -1469,7 +1469,7 @@ define void @masked_load_zext_v32i16i64(ptr %ap, ptr %bp, ptr %c) vscale_range(1
; CHECK-NEXT: ret
%b = load <32 x i16>, ptr %bp
%mask = icmp eq <32 x i16> %b, zeroinitializer
- %load = call <32 x i16> @llvm.masked.load.v32i16(ptr %ap, i32 8, <32 x i1> %mask, <32 x i16> undef)
+ %load = call <32 x i16> @llvm.masked.load.v32i16(ptr %ap, i32 8, <32 x i1> %mask, <32 x i16> poison)
%ext = zext <32 x i16> %load to <32 x i64>
store <32 x i64> %ext, ptr %c
ret void
@@ -1486,7 +1486,7 @@ define void @masked_load_zext_v32i32i64(ptr %ap, ptr %bp, ptr %c) vscale_range(1
; CHECK-NEXT: ret
%b = load <32 x i32>, ptr %bp
%mask = icmp eq <32 x i32> %b, zeroinitializer
- %load = call <32 x i32> @llvm.masked.load.v32i32(ptr %ap, i32 8, <32 x i1> %mask, <32 x i32> undef)
+ %load = call <32 x i32> @llvm.masked.load.v32i32(ptr %ap, i32 8, <32 x i1> %mask, <32 x i32> poison)
%ext = zext <32 x i32> %load to <32 x i64>
store <32 x i64> %ext, ptr %c
ret void
@@ -1518,7 +1518,7 @@ define void @masked_load_sext_ugt_v8i32i64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i32>, ptr %bp
%mask = icmp ugt <8 x i32> %b, zeroinitializer
- %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
+ %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> poison)
%ext = sext <8 x i32> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
@@ -1550,7 +1550,7 @@ define void @masked_load_zext_sgt_v8i32i64(ptr %ap, ptr %bp, ptr %c) #0 {
; VBITS_GE_512-NEXT: ret
%b = load <8 x i32>, ptr %bp
%mask = icmp sgt <8 x i32> %b, zeroinitializer
- %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
+ %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %ap, i32 8, <8 x i1> %mask, <8 x i32> poison)
%ext = zext <8 x i32> %load to <8 x i64>
store <8 x i64> %ext, ptr %c
ret void
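The extending-load tests above all follow the same pattern. A minimal
standalone sketch of it (illustrative only, not part of the patch; names are
made up):

; Masked load with a poison passthrough, followed by a sign extension of the
; loaded lanes.
define <8 x i32> @masked_load_sext_sketch(ptr %p, <8 x i1> %mask) {
  %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %p, i32 2, <8 x i1> %mask, <8 x i16> poison)
  %ext = sext <8 x i16> %load to <8 x i32>
  ret <8 x i32> %ext
}
declare <8 x i16> @llvm.masked.load.v8i16(ptr, i32, <8 x i1>, <8 x i16>)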
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
index 6871fd53fa6ad..797f953591b11 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll
@@ -15,7 +15,7 @@ define void @test_revbv16i16(ptr %a) #0 {
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <32 x i8>, ptr %a
- %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14, i32 17, i32 16, i32 19, i32 18, i32 21, i32 20, i32 23, i32 22, i32 undef, i32 24, i32 27, i32 undef, i32 29, i32 28, i32 undef, i32 undef>
+ %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> poison, <32 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14, i32 17, i32 16, i32 19, i32 18, i32 21, i32 20, i32 23, i32 22, i32 poison, i32 24, i32 27, i32 poison, i32 29, i32 28, i32 poison, i32 poison>
store <32 x i8> %tmp2, ptr %a
ret void
}
@@ -31,7 +31,7 @@ define void @test_revbv8i32(ptr %a) #0 {
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <32 x i8>, ptr %a
- %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20, i32 27, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 29, i32 undef>
+ %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> poison, <32 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20, i32 27, i32 poison, i32 poison, i32 poison, i32 31, i32 30, i32 29, i32 poison>
store <32 x i8> %tmp2, ptr %a
ret void
}
@@ -47,7 +47,7 @@ define void @test_revbv4i64(ptr %a) #0 {
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <32 x i8>, ptr %a
- %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 31, i32 30, i32 29, i32 undef, i32 27, i32 undef, i32 undef, i32 undef>
+ %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> poison, <32 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 31, i32 30, i32 29, i32 poison, i32 27, i32 poison, i32 poison, i32 poison>
store <32 x i8> %tmp2, ptr %a
ret void
}
@@ -63,7 +63,7 @@ define void @test_revhv8i32(ptr %a) #0 {
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <16 x i16>, ptr %a
- %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+ %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
store <16 x i16> %tmp2, ptr %a
ret void
}
@@ -79,7 +79,7 @@ define void @test_revhv8f32(ptr %a) #0 {
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <16 x half>, ptr %a
- %tmp2 = shufflevector <16 x half> %tmp1, <16 x half> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+ %tmp2 = shufflevector <16 x half> %tmp1, <16 x half> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
store <16 x half> %tmp2, ptr %a
ret void
}
@@ -95,7 +95,7 @@ define void @test_revhv4i64(ptr %a) #0 {
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <16 x i16>, ptr %a
- %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
+ %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
store <16 x i16> %tmp2, ptr %a
ret void
}
@@ -111,7 +111,7 @@ define void @test_revwv4i64(ptr %a) #0 {
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
- %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
store <8 x i32> %tmp2, ptr %a
ret void
}
@@ -127,7 +127,7 @@ define void @test_revwv4f64(ptr %a) #0 {
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <8 x float>, ptr %a
- %tmp2 = shufflevector <8 x float> %tmp1, <8 x float> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ %tmp2 = shufflevector <8 x float> %tmp1, <8 x float> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
store <8 x float> %tmp2, ptr %a
ret void
}
@@ -140,7 +140,7 @@ define <16 x i8> @test_revv16i8(ptr %a) #0 {
; CHECK-NEXT: rev64 v0.16b, v0.16b
; CHECK-NEXT: ret
%tmp1 = load <16 x i8>, ptr %a
- %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
+ %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> poison, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
ret <16 x i8> %tmp2
}
@@ -185,7 +185,7 @@ define void @test_revhv32i16(ptr %a) #0 {
; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0]
; VBITS_GE_512-NEXT: ret
%tmp1 = load <32 x i16>, ptr %a
- %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <32 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20, i32 27, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 29, i32 undef>
+ %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> poison, <32 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20, i32 27, i32 poison, i32 poison, i32 poison, i32 31, i32 30, i32 29, i32 poison>
store <32 x i16> %tmp2, ptr %a
ret void
}
@@ -203,7 +203,7 @@ define void @test_rev_elts_fail(ptr %a) #1 {
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <4 x i64>, ptr %a
- %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
store <4 x i64> %tmp2, ptr %a
ret void
}
@@ -219,7 +219,7 @@ define void @test_revdv4i64_sve2p1(ptr %a) #2 {
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <4 x i64>, ptr %a
- %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
store <4 x i64> %tmp2, ptr %a
ret void
}
@@ -234,7 +234,7 @@ define void @test_revdv4f64_sve2p1(ptr %a) #2 {
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <4 x double>, ptr %a
- %tmp2 = shufflevector <4 x double> %tmp1, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ %tmp2 = shufflevector <4 x double> %tmp1, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
store <4 x double> %tmp2, ptr %a
ret void
}
@@ -266,7 +266,7 @@ define void @test_revv8i32(ptr %a) #0 {
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0]
; VBITS_GE_512-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
- %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
store <8 x i32> %tmp2, ptr %a
ret void
}
@@ -281,7 +281,7 @@ define void @test_revv32i8_vl256(ptr %a) #1 {
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <32 x i8>, ptr %a
- %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> poison, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
store <32 x i8> %tmp2, ptr %a
ret void
}
@@ -296,7 +296,7 @@ define void @test_revv16i16_vl256(ptr %a) #1 {
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <16 x i16>, ptr %a
- %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> <i32 undef, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> poison, <16 x i32> <i32 poison, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
store <16 x i16> %tmp2, ptr %a
ret void
}
@@ -311,7 +311,7 @@ define void @test_revv8f32_vl256(ptr %a) #1 {
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <8 x float>, ptr %a
- %tmp2 = shufflevector <8 x float> %tmp1, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ %tmp2 = shufflevector <8 x float> %tmp1, <8 x float> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
store <8 x float> %tmp2, ptr %a
ret void
}
@@ -326,7 +326,7 @@ define void @test_revv4f64_vl256(ptr %a) #1 {
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <4 x double>, ptr %a
- %tmp2 = shufflevector <4 x double> %tmp1, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %tmp2 = shufflevector <4 x double> %tmp1, <4 x double> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
store <4 x double> %tmp2, ptr %a
ret void
}
@@ -360,7 +360,7 @@ define void @test_rev_fail(ptr %a) #1 {
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <16 x i16>, ptr %a
- %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
+ %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> poison, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
store <16 x i16> %tmp2, ptr %a
ret void
}
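The REV tests above are single-input shuffles, so both the unused second
vector operand and the don't-care mask elements become poison. A minimal
sketch (illustrative only, not part of the patch):

; Reverse an 8-element vector; the second shufflevector operand is unused and
; is therefore poison.
define <8 x i32> @reverse_sketch(<8 x i32> %v) {
  %rev = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  ret <8 x i32> %rev
}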
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
index fba324cfd74ac..52a4a5ff7cc4a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
@@ -201,7 +201,7 @@ define void @zip1_v8i32_undef(ptr %a) #0 {
; VBITS_EQ_512-NEXT: st1w { z0.s }, p0, [x0]
; VBITS_EQ_512-NEXT: ret
%tmp1 = load volatile <8 x i32>, ptr %a
- %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
+ %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
store volatile <8 x i32> %tmp2, ptr %a
ret void
}
@@ -231,7 +231,7 @@ define void @trn_v32i8(ptr %a, ptr %b) #0 {
%tmp1 = load <32 x i8>, ptr %a
%tmp2 = load <32 x i8>, ptr %b
%tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 0, i32 32, i32 2, i32 34, i32 4, i32 36, i32 6, i32 38, i32 8, i32 40, i32 10, i32 42, i32 12, i32 44, i32 14, i32 46, i32 16, i32 48, i32 18, i32 50, i32 20, i32 52, i32 22, i32 54, i32 24, i32 56, i32 26, i32 58, i32 28, i32 60, i32 30, i32 62>
- %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 33, i32 3, i32 35, i32 undef, i32 37, i32 7, i32 undef, i32 undef, i32 41, i32 11, i32 43, i32 13, i32 45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63>
+ %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 33, i32 3, i32 35, i32 poison, i32 37, i32 7, i32 poison, i32 poison, i32 41, i32 11, i32 43, i32 13, i32 45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63>
%tmp5 = add <32 x i8> %tmp3, %tmp4
store <32 x i8> %tmp5, ptr %a
ret void
@@ -269,7 +269,7 @@ define void @trn_v32i16(ptr %a, ptr %b) #0 {
%tmp1 = load <32 x i16>, ptr %a
%tmp2 = load <32 x i16>, ptr %b
%tmp3 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 0, i32 32, i32 2, i32 34, i32 4, i32 36, i32 6, i32 38, i32 8, i32 40, i32 10, i32 42, i32 12, i32 44, i32 14, i32 46, i32 16, i32 48, i32 18, i32 50, i32 20, i32 52, i32 22, i32 54, i32 24, i32 56, i32 26, i32 58, i32 28, i32 60, i32 30, i32 62>
- %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 1, i32 33, i32 3, i32 35, i32 undef, i32 37, i32 7, i32 undef, i32 undef, i32 41, i32 11, i32 43, i32 13, i32 45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63>
+ %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 1, i32 33, i32 3, i32 35, i32 poison, i32 37, i32 7, i32 poison, i32 poison, i32 41, i32 11, i32 43, i32 13, i32 45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63>
%tmp5 = add <32 x i16> %tmp3, %tmp4
store <32 x i16> %tmp5, ptr %a
ret void
@@ -330,8 +330,8 @@ define void @trn_v8i32(ptr %a, ptr %b) #0 {
; VBITS_EQ_512-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
%tmp2 = load <8 x i32>, ptr %b
- %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
+ %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 0, i32 8, i32 poison, i32 poison, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 1, i32 poison, i32 3, i32 11, i32 5, i32 13, i32 poison, i32 poison>
%tmp5 = add <8 x i32> %tmp3, %tmp4
store <8 x i32> %tmp5, ptr %a
ret void
@@ -409,8 +409,8 @@ define void @trn_v8i32_undef(ptr %a) #0 {
; VBITS_EQ_512-NEXT: st1w { z0.s }, p0, [x0]
; VBITS_EQ_512-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
- %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
- %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
+ %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
%tmp5 = add <8 x i32> %tmp3, %tmp4
store <8 x i32> %tmp5, ptr %a
ret void
@@ -480,7 +480,7 @@ define void @zip2_v8i32_undef(ptr %a) #1 {
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load volatile <8 x i32>, ptr %a
- %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+ %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
store volatile <8 x i32> %tmp2, ptr %a
ret void
}
@@ -501,7 +501,7 @@ define void @uzp_v32i8(ptr %a, ptr %b) #1 {
%tmp1 = load <32 x i8>, ptr %a
%tmp2 = load <32 x i8>, ptr %b
%tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
- %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 9, i32 11, i32 13, i32 undef, i32 undef, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
+ %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 3, i32 5, i32 poison, i32 9, i32 11, i32 13, i32 poison, i32 poison, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
%tmp5 = add <32 x i8> %tmp3, %tmp4
store <32 x i8> %tmp5, ptr %a
ret void
@@ -531,7 +531,7 @@ define void @uzp_v32i16(ptr %a, ptr %b) #1 {
%tmp1 = load <32 x i16>, ptr %a
%tmp2 = load <32 x i16>, ptr %b
%tmp3 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
- %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 9, i32 11, i32 13, i32 undef, i32 undef, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
+ %tmp4 = shufflevector <32 x i16> %tmp1, <32 x i16> %tmp2, <32 x i32> <i32 1, i32 3, i32 5, i32 poison, i32 9, i32 11, i32 13, i32 poison, i32 poison, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
%tmp5 = add <32 x i16> %tmp3, %tmp4
store <32 x i16> %tmp5, ptr %a
ret void
@@ -574,8 +574,8 @@ define void @uzp_v8f32(ptr %a, ptr %b) #1 {
; CHECK-NEXT: ret
%tmp1 = load <8 x float>, ptr %a
%tmp2 = load <8 x float>, ptr %b
- %tmp3 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 0, i32 undef, i32 4, i32 6, i32 undef, i32 10, i32 12, i32 14>
- %tmp4 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 1, i32 undef, i32 5, i32 7, i32 9, i32 11, i32 undef, i32 undef>
+ %tmp3 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 0, i32 poison, i32 4, i32 6, i32 poison, i32 10, i32 12, i32 14>
+ %tmp4 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 1, i32 poison, i32 5, i32 7, i32 9, i32 11, i32 poison, i32 poison>
%tmp5 = fadd <8 x float> %tmp3, %tmp4
store <8 x float> %tmp5, ptr %a
ret void
@@ -635,8 +635,8 @@ define void @uzp_v8i32_undef(ptr %a) #1 {
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
- %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 0, i32 2, i32 4, i32 6>
- %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 1, i32 3, i32 5, i32 7>
+ %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 0, i32 2, i32 4, i32 6>
+ %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 1, i32 3, i32 5, i32 7>
%tmp5 = add <8 x i32> %tmp3, %tmp4
store <8 x i32> %tmp5, ptr %a
ret void
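For the two-input ZIP/UZP/TRN tests above only the shuffle mask changes:
elements whose value does not matter are written as poison. A minimal
TRN1-style sketch (illustrative only, not part of the patch):

; Interleave even lanes of %a and %b; two lanes are marked poison in the mask
; to show a partially defined shuffle.
define <8 x i32> @trn1_sketch(<8 x i32> %a, <8 x i32> %b) {
  %trn = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 poison, i32 12, i32 6, i32 poison>
  ret <8 x i32> %trn
}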
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
index 4ba34407ff184..41e4a38fad90b 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
@@ -22,7 +22,7 @@ define <4 x i1> @reshuffle_v4i1_nxv4i1(<vscale x 4 x i1> %a) #0 {
%el1 = extractelement <vscale x 4 x i1> %a, i32 1
%el2 = extractelement <vscale x 4 x i1> %a, i32 2
%el3 = extractelement <vscale x 4 x i1> %a, i32 3
- %v0 = insertelement <4 x i1> undef, i1 %el0, i32 0
+ %v0 = insertelement <4 x i1> poison, i1 %el0, i32 0
%v1 = insertelement <4 x i1> %v0, i1 %el1, i32 1
%v2 = insertelement <4 x i1> %v1, i1 %el2, i32 2
%v3 = insertelement <4 x i1> %v2, i1 %el3, i32 3
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index 749a1866e7192..e33bc8da97c05 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -16,8 +16,8 @@ define void @hang_when_merging_stores_after_legalisation(ptr %a, <2 x i32> %b) v
; CHECK-NEXT: ext z1.b, z1.b, z1.b, #16
; CHECK-NEXT: st2 { v0.4s, v1.4s }, [x0]
; CHECK-NEXT: ret
- %splat = shufflevector <2 x i32> %b, <2 x i32> undef, <8 x i32> zeroinitializer
- %interleaved.vec = shufflevector <8 x i32> %splat, <8 x i32> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ %splat = shufflevector <2 x i32> %b, <2 x i32> poison, <8 x i32> zeroinitializer
+ %interleaved.vec = shufflevector <8 x i32> %splat, <8 x i32> poison, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
store <8 x i32> %interleaved.vec, ptr %a, align 4
ret void
}
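The splat tests that follow use the usual two-instruction splat idiom in its
poison form: insert the scalar into a poison vector, then broadcast lane 0
with a zero shuffle mask. A minimal sketch (illustrative only, not part of
the patch):

; Splat a scalar across all lanes of an 8 x i32 vector.
define <8 x i32> @splat_sketch(i32 %a) {
  %insert = insertelement <8 x i32> poison, i32 %a, i64 0
  %splat = shufflevector <8 x i32> %insert, <8 x i32> poison, <8 x i32> zeroinitializer
  ret <8 x i32> %splat
}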
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
index 2b72d46adb8d4..a0dd0408025a6 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
@@ -15,8 +15,8 @@ define <8 x i8> @splat_v8i8(i8 %a) vscale_range(2,0) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: dup v0.8b, w0
; CHECK-NEXT: ret
- %insert = insertelement <8 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <8 x i8> %insert, <8 x i8> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <8 x i8> %insert, <8 x i8> poison, <8 x i32> zeroinitializer
ret <8 x i8> %splat
}
@@ -26,8 +26,8 @@ define <16 x i8> @splat_v16i8(i8 %a) vscale_range(2,0) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: dup v0.16b, w0
; CHECK-NEXT: ret
- %insert = insertelement <16 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <16 x i8> %insert, <16 x i8> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <16 x i8> %insert, <16 x i8> poison, <16 x i32> zeroinitializer
ret <16 x i8> %splat
}
@@ -38,8 +38,8 @@ define void @splat_v32i8(i8 %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ptrue p0.b, vl32
; CHECK-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <32 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <32 x i8> %insert, <32 x i8> poison, <32 x i32> zeroinitializer
store <32 x i8> %splat, ptr %b
ret void
}
@@ -60,8 +60,8 @@ define void @splat_v64i8(i8 %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ptrue p0.b, vl64
; VBITS_GE_512-NEXT: st1b { z0.b }, p0, [x1]
; VBITS_GE_512-NEXT: ret
- %insert = insertelement <64 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <64 x i8> %insert, <64 x i8> undef, <64 x i32> zeroinitializer
+ %insert = insertelement <64 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <64 x i8> %insert, <64 x i8> poison, <64 x i32> zeroinitializer
store <64 x i8> %splat, ptr %b
ret void
}
@@ -73,8 +73,8 @@ define void @splat_v128i8(i8 %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ptrue p0.b, vl128
; CHECK-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <128 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <128 x i8> %insert, <128 x i8> undef, <128 x i32> zeroinitializer
+ %insert = insertelement <128 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <128 x i8> %insert, <128 x i8> poison, <128 x i32> zeroinitializer
store <128 x i8> %splat, ptr %b
ret void
}
@@ -86,8 +86,8 @@ define void @splat_v256i8(i8 %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ptrue p0.b, vl256
; CHECK-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <256 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <256 x i8> %insert, <256 x i8> undef, <256 x i32> zeroinitializer
+ %insert = insertelement <256 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <256 x i8> %insert, <256 x i8> poison, <256 x i32> zeroinitializer
store <256 x i8> %splat, ptr %b
ret void
}
@@ -98,8 +98,8 @@ define <4 x i16> @splat_v4i16(i16 %a) vscale_range(2,0) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: dup v0.4h, w0
; CHECK-NEXT: ret
- %insert = insertelement <4 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <4 x i16> %insert, <4 x i16> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <4 x i16> %insert, <4 x i16> poison, <4 x i32> zeroinitializer
ret <4 x i16> %splat
}
@@ -109,8 +109,8 @@ define <8 x i16> @splat_v8i16(i16 %a) vscale_range(2,0) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: dup v0.8h, w0
; CHECK-NEXT: ret
- %insert = insertelement <8 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <8 x i16> %insert, <8 x i16> poison, <8 x i32> zeroinitializer
ret <8 x i16> %splat
}
@@ -121,8 +121,8 @@ define void @splat_v16i16(i16 %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ptrue p0.h, vl16
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <16 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <16 x i16> %insert, <16 x i16> poison, <16 x i32> zeroinitializer
store <16 x i16> %splat, ptr %b
ret void
}
@@ -143,8 +143,8 @@ define void @splat_v32i16(i16 %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ptrue p0.h, vl32
; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x1]
; VBITS_GE_512-NEXT: ret
- %insert = insertelement <32 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <32 x i16> %insert, <32 x i16> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <32 x i16> %insert, <32 x i16> poison, <32 x i32> zeroinitializer
store <32 x i16> %splat, ptr %b
ret void
}
@@ -156,8 +156,8 @@ define void @splat_v64i16(i16 %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ptrue p0.h, vl64
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <64 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <64 x i16> %insert, <64 x i16> undef, <64 x i32> zeroinitializer
+ %insert = insertelement <64 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <64 x i16> %insert, <64 x i16> poison, <64 x i32> zeroinitializer
store <64 x i16> %splat, ptr %b
ret void
}
@@ -169,8 +169,8 @@ define void @splat_v128i16(i16 %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ptrue p0.h, vl128
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <128 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <128 x i16> %insert, <128 x i16> undef, <128 x i32> zeroinitializer
+ %insert = insertelement <128 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <128 x i16> %insert, <128 x i16> poison, <128 x i32> zeroinitializer
store <128 x i16> %splat, ptr %b
ret void
}
@@ -181,8 +181,8 @@ define <2 x i32> @splat_v2i32(i32 %a) vscale_range(2,0) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: dup v0.2s, w0
; CHECK-NEXT: ret
- %insert = insertelement <2 x i32> undef, i32 %a, i64 0
- %splat = shufflevector <2 x i32> %insert, <2 x i32> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x i32> poison, i32 %a, i64 0
+ %splat = shufflevector <2 x i32> %insert, <2 x i32> poison, <2 x i32> zeroinitializer
ret <2 x i32> %splat
}
@@ -192,8 +192,8 @@ define <4 x i32> @splat_v4i32(i32 %a) vscale_range(2,0) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: dup v0.4s, w0
; CHECK-NEXT: ret
- %insert = insertelement <4 x i32> undef, i32 %a, i64 0
- %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x i32> poison, i32 %a, i64 0
+ %splat = shufflevector <4 x i32> %insert, <4 x i32> poison, <4 x i32> zeroinitializer
ret <4 x i32> %splat
}
@@ -204,8 +204,8 @@ define void @splat_v8i32(i32 %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ptrue p0.s, vl8
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <8 x i32> undef, i32 %a, i64 0
- %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i32> poison, i32 %a, i64 0
+ %splat = shufflevector <8 x i32> %insert, <8 x i32> poison, <8 x i32> zeroinitializer
store <8 x i32> %splat, ptr %b
ret void
}
@@ -226,8 +226,8 @@ define void @splat_v16i32(i32 %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ptrue p0.s, vl16
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x1]
; VBITS_GE_512-NEXT: ret
- %insert = insertelement <16 x i32> undef, i32 %a, i64 0
- %splat = shufflevector <16 x i32> %insert, <16 x i32> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i32> poison, i32 %a, i64 0
+ %splat = shufflevector <16 x i32> %insert, <16 x i32> poison, <16 x i32> zeroinitializer
store <16 x i32> %splat, ptr %b
ret void
}
@@ -239,8 +239,8 @@ define void @splat_v32i32(i32 %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ptrue p0.s, vl32
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <32 x i32> undef, i32 %a, i64 0
- %splat = shufflevector <32 x i32> %insert, <32 x i32> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x i32> poison, i32 %a, i64 0
+ %splat = shufflevector <32 x i32> %insert, <32 x i32> poison, <32 x i32> zeroinitializer
store <32 x i32> %splat, ptr %b
ret void
}
@@ -252,8 +252,8 @@ define void @splat_v64i32(i32 %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ptrue p0.s, vl64
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <64 x i32> undef, i32 %a, i64 0
- %splat = shufflevector <64 x i32> %insert, <64 x i32> undef, <64 x i32> zeroinitializer
+ %insert = insertelement <64 x i32> poison, i32 %a, i64 0
+ %splat = shufflevector <64 x i32> %insert, <64 x i32> poison, <64 x i32> zeroinitializer
store <64 x i32> %splat, ptr %b
ret void
}
@@ -264,8 +264,8 @@ define <1 x i64> @splat_v1i64(i64 %a) vscale_range(2,0) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: fmov d0, x0
; CHECK-NEXT: ret
- %insert = insertelement <1 x i64> undef, i64 %a, i64 0
- %splat = shufflevector <1 x i64> %insert, <1 x i64> undef, <1 x i32> zeroinitializer
+ %insert = insertelement <1 x i64> poison, i64 %a, i64 0
+ %splat = shufflevector <1 x i64> %insert, <1 x i64> poison, <1 x i32> zeroinitializer
ret <1 x i64> %splat
}
@@ -275,8 +275,8 @@ define <2 x i64> @splat_v2i64(i64 %a) vscale_range(2,0) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: dup v0.2d, x0
; CHECK-NEXT: ret
- %insert = insertelement <2 x i64> undef, i64 %a, i64 0
- %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x i64> poison, i64 %a, i64 0
+ %splat = shufflevector <2 x i64> %insert, <2 x i64> poison, <2 x i32> zeroinitializer
ret <2 x i64> %splat
}
@@ -287,8 +287,8 @@ define void @splat_v4i64(i64 %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: ptrue p0.d, vl4
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <4 x i64> undef, i64 %a, i64 0
- %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x i64> poison, i64 %a, i64 0
+ %splat = shufflevector <4 x i64> %insert, <4 x i64> poison, <4 x i32> zeroinitializer
store <4 x i64> %splat, ptr %b
ret void
}
@@ -309,8 +309,8 @@ define void @splat_v8i64(i64 %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: ptrue p0.d, vl8
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_512-NEXT: ret
- %insert = insertelement <8 x i64> undef, i64 %a, i64 0
- %splat = shufflevector <8 x i64> %insert, <8 x i64> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i64> poison, i64 %a, i64 0
+ %splat = shufflevector <8 x i64> %insert, <8 x i64> poison, <8 x i32> zeroinitializer
store <8 x i64> %splat, ptr %b
ret void
}
@@ -322,8 +322,8 @@ define void @splat_v16i64(i64 %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: ptrue p0.d, vl16
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <16 x i64> undef, i64 %a, i64 0
- %splat = shufflevector <16 x i64> %insert, <16 x i64> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i64> poison, i64 %a, i64 0
+ %splat = shufflevector <16 x i64> %insert, <16 x i64> poison, <16 x i32> zeroinitializer
store <16 x i64> %splat, ptr %b
ret void
}
@@ -335,8 +335,8 @@ define void @splat_v32i64(i64 %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: ptrue p0.d, vl32
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
- %insert = insertelement <32 x i64> undef, i64 %a, i64 0
- %splat = shufflevector <32 x i64> %insert, <32 x i64> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x i64> poison, i64 %a, i64 0
+ %splat = shufflevector <32 x i64> %insert, <32 x i64> poison, <32 x i32> zeroinitializer
store <32 x i64> %splat, ptr %b
ret void
}
@@ -352,8 +352,8 @@ define <4 x half> @splat_v4f16(half %a) vscale_range(2,0) #0 {
; CHECK-NEXT: // kill: def $h0 killed $h0 def $q0
; CHECK-NEXT: dup v0.4h, v0.h[0]
; CHECK-NEXT: ret
- %insert = insertelement <4 x half> undef, half %a, i64 0
- %splat = shufflevector <4 x half> %insert, <4 x half> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x half> poison, half %a, i64 0
+ %splat = shufflevector <4 x half> %insert, <4 x half> poison, <4 x i32> zeroinitializer
ret <4 x half> %splat
}
@@ -364,8 +364,8 @@ define <8 x half> @splat_v8f16(half %a) vscale_range(2,0) #0 {
; CHECK-NEXT: // kill: def $h0 killed $h0 def $q0
; CHECK-NEXT: dup v0.8h, v0.h[0]
; CHECK-NEXT: ret
- %insert = insertelement <8 x half> undef, half %a, i64 0
- %splat = shufflevector <8 x half> %insert, <8 x half> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x half> poison, half %a, i64 0
+ %splat = shufflevector <8 x half> %insert, <8 x half> poison, <8 x i32> zeroinitializer
ret <8 x half> %splat
}
@@ -377,8 +377,8 @@ define void @splat_v16f16(half %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
- %insert = insertelement <16 x half> undef, half %a, i64 0
- %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x half> poison, half %a, i64 0
+ %splat = shufflevector <16 x half> %insert, <16 x half> poison, <16 x i32> zeroinitializer
store <16 x half> %splat, ptr %b
ret void
}
@@ -401,8 +401,8 @@ define void @splat_v32f16(half %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: mov z0.h, h0
; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0]
; VBITS_GE_512-NEXT: ret
- %insert = insertelement <32 x half> undef, half %a, i64 0
- %splat = shufflevector <32 x half> %insert, <32 x half> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x half> poison, half %a, i64 0
+ %splat = shufflevector <32 x half> %insert, <32 x half> poison, <32 x i32> zeroinitializer
store <32 x half> %splat, ptr %b
ret void
}
@@ -415,8 +415,8 @@ define void @splat_v64f16(half %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
- %insert = insertelement <64 x half> undef, half %a, i64 0
- %splat = shufflevector <64 x half> %insert, <64 x half> undef, <64 x i32> zeroinitializer
+ %insert = insertelement <64 x half> poison, half %a, i64 0
+ %splat = shufflevector <64 x half> %insert, <64 x half> poison, <64 x i32> zeroinitializer
store <64 x half> %splat, ptr %b
ret void
}
@@ -429,8 +429,8 @@ define void @splat_v128f16(half %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
- %insert = insertelement <128 x half> undef, half %a, i64 0
- %splat = shufflevector <128 x half> %insert, <128 x half> undef, <128 x i32> zeroinitializer
+ %insert = insertelement <128 x half> poison, half %a, i64 0
+ %splat = shufflevector <128 x half> %insert, <128 x half> poison, <128 x i32> zeroinitializer
store <128 x half> %splat, ptr %b
ret void
}
@@ -442,8 +442,8 @@ define <2 x float> @splat_v2f32(float %a, <2 x float> %op2) vscale_range(2,0) #0
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: dup v0.2s, v0.s[0]
; CHECK-NEXT: ret
- %insert = insertelement <2 x float> undef, float %a, i64 0
- %splat = shufflevector <2 x float> %insert, <2 x float> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x float> poison, float %a, i64 0
+ %splat = shufflevector <2 x float> %insert, <2 x float> poison, <2 x i32> zeroinitializer
ret <2 x float> %splat
}
@@ -454,8 +454,8 @@ define <4 x float> @splat_v4f32(float %a, <4 x float> %op2) vscale_range(2,0) #0
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: dup v0.4s, v0.s[0]
; CHECK-NEXT: ret
- %insert = insertelement <4 x float> undef, float %a, i64 0
- %splat = shufflevector <4 x float> %insert, <4 x float> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x float> poison, float %a, i64 0
+ %splat = shufflevector <4 x float> %insert, <4 x float> poison, <4 x i32> zeroinitializer
ret <4 x float> %splat
}
@@ -467,8 +467,8 @@ define void @splat_v8f32(float %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: mov z0.s, s0
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
- %insert = insertelement <8 x float> undef, float %a, i64 0
- %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x float> poison, float %a, i64 0
+ %splat = shufflevector <8 x float> %insert, <8 x float> poison, <8 x i32> zeroinitializer
store <8 x float> %splat, ptr %b
ret void
}
@@ -491,8 +491,8 @@ define void @splat_v16f32(float %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: mov z0.s, s0
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0]
; VBITS_GE_512-NEXT: ret
- %insert = insertelement <16 x float> undef, float %a, i64 0
- %splat = shufflevector <16 x float> %insert, <16 x float> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x float> poison, float %a, i64 0
+ %splat = shufflevector <16 x float> %insert, <16 x float> poison, <16 x i32> zeroinitializer
store <16 x float> %splat, ptr %b
ret void
}
@@ -505,8 +505,8 @@ define void @splat_v32f32(float %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: mov z0.s, s0
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
- %insert = insertelement <32 x float> undef, float %a, i64 0
- %splat = shufflevector <32 x float> %insert, <32 x float> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x float> poison, float %a, i64 0
+ %splat = shufflevector <32 x float> %insert, <32 x float> poison, <32 x i32> zeroinitializer
store <32 x float> %splat, ptr %b
ret void
}
@@ -519,8 +519,8 @@ define void @splat_v64f32(float %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: mov z0.s, s0
; CHECK-NEXT: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
- %insert = insertelement <64 x float> undef, float %a, i64 0
- %splat = shufflevector <64 x float> %insert, <64 x float> undef, <64 x i32> zeroinitializer
+ %insert = insertelement <64 x float> poison, float %a, i64 0
+ %splat = shufflevector <64 x float> %insert, <64 x float> poison, <64 x i32> zeroinitializer
store <64 x float> %splat, ptr %b
ret void
}
@@ -530,8 +530,8 @@ define <1 x double> @splat_v1f64(double %a, <1 x double> %op2) vscale_range(2,0)
; CHECK-LABEL: splat_v1f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
- %insert = insertelement <1 x double> undef, double %a, i64 0
- %splat = shufflevector <1 x double> %insert, <1 x double> undef, <1 x i32> zeroinitializer
+ %insert = insertelement <1 x double> poison, double %a, i64 0
+ %splat = shufflevector <1 x double> %insert, <1 x double> poison, <1 x i32> zeroinitializer
ret <1 x double> %splat
}
@@ -542,8 +542,8 @@ define <2 x double> @splat_v2f64(double %a, <2 x double> %op2) vscale_range(2,0)
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: dup v0.2d, v0.d[0]
; CHECK-NEXT: ret
- %insert = insertelement <2 x double> undef, double %a, i64 0
- %splat = shufflevector <2 x double> %insert, <2 x double> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x double> poison, double %a, i64 0
+ %splat = shufflevector <2 x double> %insert, <2 x double> poison, <2 x i32> zeroinitializer
ret <2 x double> %splat
}
@@ -555,8 +555,8 @@ define void @splat_v4f64(double %a, ptr %b) vscale_range(2,0) #0 {
; CHECK-NEXT: mov z0.d, d0
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
- %insert = insertelement <4 x double> undef, double %a, i64 0
- %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x double> poison, double %a, i64 0
+ %splat = shufflevector <4 x double> %insert, <4 x double> poison, <4 x i32> zeroinitializer
store <4 x double> %splat, ptr %b
ret void
}
@@ -579,8 +579,8 @@ define void @splat_v8f64(double %a, ptr %b) #0 {
; VBITS_GE_512-NEXT: mov z0.d, d0
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0]
; VBITS_GE_512-NEXT: ret
- %insert = insertelement <8 x double> undef, double %a, i64 0
- %splat = shufflevector <8 x double> %insert, <8 x double> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x double> poison, double %a, i64 0
+ %splat = shufflevector <8 x double> %insert, <8 x double> poison, <8 x i32> zeroinitializer
store <8 x double> %splat, ptr %b
ret void
}
@@ -593,8 +593,8 @@ define void @splat_v16f64(double %a, ptr %b) vscale_range(8,0) #0 {
; CHECK-NEXT: mov z0.d, d0
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
- %insert = insertelement <16 x double> undef, double %a, i64 0
- %splat = shufflevector <16 x double> %insert, <16 x double> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x double> poison, double %a, i64 0
+ %splat = shufflevector <16 x double> %insert, <16 x double> poison, <16 x i32> zeroinitializer
store <16 x double> %splat, ptr %b
ret void
}
@@ -607,8 +607,8 @@ define void @splat_v32f64(double %a, ptr %b) vscale_range(16,0) #0 {
; CHECK-NEXT: mov z0.d, d0
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
- %insert = insertelement <32 x double> undef, double %a, i64 0
- %splat = shufflevector <32 x double> %insert, <32 x double> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x double> poison, double %a, i64 0
+ %splat = shufflevector <32 x double> %insert, <32 x double> poison, <32 x i32> zeroinitializer
store <32 x double> %splat, ptr %b
ret void
}
@@ -707,7 +707,7 @@ define void @load_splat_v8f32(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %a
- %splat = shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> zeroinitializer
+ %splat = shufflevector <8 x float> %v, <8 x float> poison, <8 x i32> zeroinitializer
store <8 x float> %splat, ptr %b
ret void
}
@@ -721,7 +721,7 @@ define void @load_splat_v4f64(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %a
- %splat = shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> zeroinitializer
+ %splat = shufflevector <4 x double> %v, <4 x double> poison, <4 x i32> zeroinitializer
store <4 x double> %splat, ptr %b
ret void
}
@@ -735,7 +735,7 @@ define void @load_splat_v32i8(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-NEXT: ret
%v = load <32 x i8>, ptr %a
- %splat = shufflevector <32 x i8> %v, <32 x i8> undef, <32 x i32> zeroinitializer
+ %splat = shufflevector <32 x i8> %v, <32 x i8> poison, <32 x i32> zeroinitializer
store <32 x i8> %splat, ptr %b
ret void
}
@@ -749,7 +749,7 @@ define void @load_splat_v16i16(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-NEXT: st1h { z0.h }, p0, [x1]
; CHECK-NEXT: ret
%v = load <16 x i16>, ptr %a
- %splat = shufflevector <16 x i16> %v, <16 x i16> undef, <16 x i32> zeroinitializer
+ %splat = shufflevector <16 x i16> %v, <16 x i16> poison, <16 x i32> zeroinitializer
store <16 x i16> %splat, ptr %b
ret void
}
@@ -763,7 +763,7 @@ define void @load_splat_v8i32(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
; CHECK-NEXT: ret
%v = load <8 x i32>, ptr %a
- %splat = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> zeroinitializer
+ %splat = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> zeroinitializer
store <8 x i32> %splat, ptr %b
ret void
}
@@ -777,7 +777,7 @@ define void @load_splat_v4i64(ptr %a, ptr %b) vscale_range(2,2) #0 {
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
%v = load <4 x i64>, ptr %a
- %splat = shufflevector <4 x i64> %v, <4 x i64> undef, <4 x i32> zeroinitializer
+ %splat = shufflevector <4 x i64> %v, <4 x i64> poison, <4 x i32> zeroinitializer
store <4 x i64> %splat, ptr %b
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
index 047716292ac3b..9f5e0eb9878c2 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
@@ -11,8 +11,8 @@ define void @scatter_i8_index_offset_maximum(ptr %base, i64 %offset, <vscale x 4
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: st1b { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t4 = mul <vscale x 4 x i64> splat(i64 33554431), %step
%t5 = add <vscale x 4 x i64> %t1, %t4
@@ -30,8 +30,8 @@ define void @scatter_i16_index_offset_minimum(ptr %base, i64 %offset, <vscale x
; CHECK-NEXT: add x8, x0, x1, lsl #1
; CHECK-NEXT: st1h { z0.s }, p0, [x8, z1.s, sxtw #1]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t4 = mul <vscale x 4 x i64> splat(i64 -33554432), %step
%t5 = add <vscale x 4 x i64> %t1, %t4
@@ -48,13 +48,13 @@ define <vscale x 4 x i8> @gather_i8_index_offset_8(ptr %base, i64 %offset, <vsca
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
- %splat.insert0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %splat0 = shufflevector <vscale x 4 x i64> %splat.insert0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %splat.insert0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %splat0 = shufflevector <vscale x 4 x i64> %splat.insert0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t1 = mul <vscale x 4 x i64> splat(i64 1), %step
%t2 = add <vscale x 4 x i64> %splat0, %t1
%t3 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t2
- %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t3, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
+ %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t3, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> poison)
ret <vscale x 4 x i8> %load
}
@@ -79,10 +79,10 @@ define void @scatter_f16_index_offset_var(ptr %base, i64 %offset, i64 %scale, <v
; CHECK-NEXT: st1h { z3.d }, p2, [x0, z4.d, lsl #1]
; CHECK-NEXT: st1h { z0.d }, p0, [x0, z1.d, lsl #1]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
- %t2 = insertelement <vscale x 4 x i64> undef, i64 %scale, i32 0
- %t3 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %t2 = insertelement <vscale x 4 x i64> poison, i64 %scale, i32 0
+ %t3 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t4 = mul <vscale x 4 x i64> %t3, %step
%t5 = add <vscale x 4 x i64> %t1, %t4
@@ -109,8 +109,8 @@ define void @scatter_i8_index_offset_maximum_plus_one(ptr %base, i64 %offset, <v
; CHECK-NEXT: madd x8, x9, x10, x8
; CHECK-NEXT: st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t4 = mul <vscale x 4 x i64> splat(i64 33554432), %step
%t5 = add <vscale x 4 x i64> %t1, %t4
@@ -138,8 +138,8 @@ define void @scatter_i8_index_offset_minimum_minus_one(ptr %base, i64 %offset, <
; CHECK-NEXT: madd x8, x9, x10, x8
; CHECK-NEXT: st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t4 = mul <vscale x 4 x i64> splat(i64 -33554433), %step
%t5 = add <vscale x 4 x i64> %t1, %t4
@@ -166,8 +166,8 @@ define void @scatter_i8_index_stride_too_big(ptr %base, i64 %offset, <vscale x 4
; CHECK-NEXT: madd x8, x9, x10, x8
; CHECK-NEXT: st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t4 = mul <vscale x 4 x i64> splat(i64 4611686018427387904), %step
%t5 = add <vscale x 4 x i64> %t1, %t4
@@ -186,13 +186,13 @@ define <vscale x 4 x i8> @gather_8i8_index_offset_8(ptr %base, i64 %offset, <vsc
; CHECK-NEXT: add x8, x0, x1, lsl #3
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t2 = add <vscale x 4 x i64> %t1, %step
%t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
%t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
- %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
+ %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> poison)
ret <vscale x 4 x i8> %load
}
@@ -207,13 +207,13 @@ define <vscale x 4 x float> @gather_f32_index_offset_8(ptr %base, i64 %offset, <
; CHECK-NEXT: add x8, x0, x1, lsl #5
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t2 = add <vscale x 4 x i64> %t1, %step
%t3 = getelementptr [8 x float], ptr %base, <vscale x 4 x i64> %t2
%t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
- %load = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x float> undef)
+ %load = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x float> poison)
ret <vscale x 4 x float> %load
}
@@ -227,8 +227,8 @@ define void @scatter_i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1>
; CHECK-NEXT: add x8, x0, x1, lsl #3
; CHECK-NEXT: st1b { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t2 = add <vscale x 4 x i64> %t1, %step
%t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
@@ -248,8 +248,8 @@ define void @scatter_f16_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1
; CHECK-NEXT: add x8, x0, x1, lsl #4
; CHECK-NEXT: st1h { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
- %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %t0 = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%t2 = add <vscale x 4 x i64> %t1, %step
%t3 = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %t2
@@ -268,10 +268,10 @@ define void @scatter_f16_index_add_add(ptr %base, i64 %offset, i64 %offset2, <vs
; CHECK-NEXT: add x8, x9, x1, lsl #4
; CHECK-NEXT: st1h { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
- %splat.offset.ins = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
- %splat.offset2.ins = insertelement <vscale x 4 x i64> undef, i64 %offset2, i32 0
- %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %splat.offset.ins = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %splat.offset2.ins = insertelement <vscale x 4 x i64> poison, i64 %offset2, i32 0
+ %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%add1 = add <vscale x 4 x i64> %splat.offset, %step
%add2 = add <vscale x 4 x i64> %add1, %splat.offset2
@@ -291,10 +291,10 @@ define void @scatter_f16_index_add_add_mul(ptr %base, i64 %offset, i64 %offset2,
; CHECK-NEXT: add x8, x9, x1, lsl #7
; CHECK-NEXT: st1h { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT: ret
- %splat.offset.ins = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
- %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
- %splat.offset2.ins = insertelement <vscale x 4 x i64> undef, i64 %offset2, i32 0
- %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %splat.offset.ins = insertelement <vscale x 4 x i64> poison, i64 %offset, i32 0
+ %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %splat.offset2.ins = insertelement <vscale x 4 x i64> poison, i64 %offset2, i32 0
+ %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
%add1 = add <vscale x 4 x i64> %splat.offset, %step
%add2 = add <vscale x 4 x i64> %add1, %splat.offset2
@@ -312,7 +312,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64_const_with_vec_offsets(<vscale
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
; CHECK-NEXT: ret
%ptrs = getelementptr i64, ptr inttoptr (i64 8 to ptr), <vscale x 2 x i64> %vector_offsets
- %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
+ %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %data
}
@@ -322,11 +322,11 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with_vec_plus_scalar_offse
; CHECK-NEXT: lsl x8, x0, #3
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
; CHECK-NEXT: ret
- %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 %scalar_offset, i64 0
- %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %scalar_offset.ins = insertelement <vscale x 2 x i64> poison, i64 %scalar_offset, i64 0
+ %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
%ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
- %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
+ %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %data
}
@@ -338,7 +338,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with__vec_plus_imm_offsets
; CHECK-NEXT: ret
%offsets = add <vscale x 2 x i64> %vector_offsets, splat(i64 1)
%ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
- %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
+ %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %data
}
@@ -351,7 +351,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32_s8_offsets(ptr %base, <vscale x
; CHECK-NEXT: ret
%offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
%ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.sext
- %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %data
}
@@ -363,7 +363,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32_u8_offsets(ptr %base, <vscale x
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
%ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.zext
- %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %data
}
@@ -377,7 +377,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32_u32s8_offsets(ptr %base, <vscal
%offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
%offsets.sext.zext = zext <vscale x 4 x i32> %offsets.sext to <vscale x 4 x i64>
%ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.sext.zext
- %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %data
}
@@ -398,8 +398,8 @@ define void @masked_scatter_nxv2i64_null_with_vec_plus_scalar_offsets(<vscale x
; CHECK-NEXT: lsl x8, x0, #3
; CHECK-NEXT: st1d { z1.d }, p0, [x8, z0.d, lsl #3]
; CHECK-NEXT: ret
- %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 %scalar_offset, i64 0
- %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %scalar_offset.ins = insertelement <vscale x 2 x i64> poison, i64 %scalar_offset, i64 0
+ %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
%ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
index 4d2bc4bde13f5..73d043b411696 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
@@ -95,7 +95,7 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_zext(ptr %out, ptr %in, <v
%wide.load = load <vscale x 16 x i8>, ptr %2, align 1
%3 = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
%4 = getelementptr inbounds i8, ptr %in, <vscale x 16 x i64> %3
- %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> undef)
+ %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> poison)
ret <vscale x 16 x i8> %wide.masked.gather
}
@@ -121,7 +121,7 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_sext(ptr %out, ptr %in, <v
%wide.load = load <vscale x 16 x i8>, ptr %2, align 1
%3 = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
%4 = getelementptr inbounds i8, ptr %in, <vscale x 16 x i64> %3
- %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> undef)
+ %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> poison)
ret <vscale x 16 x i8> %wide.masked.gather
}
@@ -141,7 +141,7 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_zext(ptr %out, ptr %in, <
%wide.load = load <vscale x 8 x i16>, ptr %2, align 1
%3 = zext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
%4 = getelementptr inbounds i16, ptr %in, <vscale x 8 x i64> %3
- %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> undef)
+ %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> poison)
ret <vscale x 8 x i16> %wide.masked.gather
}
@@ -161,7 +161,7 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_sext(ptr %out, ptr %in, <
%wide.load = load <vscale x 8 x i16>, ptr %2, align 1
%3 = sext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
%4 = getelementptr inbounds i16, ptr %in, <vscale x 8 x i64> %3
- %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> undef)
+ %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> poison)
ret <vscale x 8 x i16> %wide.masked.gather
}
@@ -177,7 +177,7 @@ define <vscale x 4 x i32> @no_narrow_i64_gather_index_i32(ptr %out, ptr %in, <vs
%wide.load = load <vscale x 4 x i32>, ptr %2, align 1
%3 = zext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
%4 = getelementptr inbounds i32, ptr %in, <vscale x 4 x i64> %3
- %wide.masked.gather = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %4, i32 1, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> undef)
+ %wide.masked.gather = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %4, i32 1, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %wide.masked.gather
}
@@ -192,7 +192,7 @@ define <vscale x 2 x i64> @no_narrow_i64_gather_index_i64(ptr %out, ptr %in, <vs
%2 = bitcast ptr %1 to ptr
%wide.load = load <vscale x 2 x i64>, ptr %2, align 1
%3 = getelementptr inbounds i64, ptr %in, <vscale x 2 x i64> %wide.load
- %wide.masked.gather = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %3, i32 1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> undef)
+ %wide.masked.gather = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %3, i32 1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %wide.masked.gather
}
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
index 7344964f13bba..7cd85ab506172 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
@@ -186,7 +186,7 @@ define <vscale x 8 x i16> @test_lane6_undef_8xi16(i16 %a) {
; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT: mov z0.h, p0/m, w0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 8 x i16> undef, i16 %a, i32 6
+ %b = insertelement <vscale x 8 x i16> poison, i16 %a, i32 6
ret <vscale x 8 x i16> %b
}
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @test_lane0_undef_16xi8(i8 %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: fmov s0, w0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 16 x i8> undef, i8 %a, i32 0
+ %b = insertelement <vscale x 16 x i8> poison, i8 %a, i32 0
ret <vscale x 16 x i8> %b
}
@@ -249,7 +249,7 @@ define <vscale x 8 x half> @test_insert_into_undef_nxv8f16(half %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 8 x half> undef, half %a, i32 0
+ %b = insertelement <vscale x 8 x half> poison, half %a, i32 0
ret <vscale x 8 x half> %b
}
@@ -258,7 +258,7 @@ define <vscale x 4 x half> @test_insert_into_undef_nxv4f16(half %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 4 x half> undef, half %a, i32 0
+ %b = insertelement <vscale x 4 x half> poison, half %a, i32 0
ret <vscale x 4 x half> %b
}
@@ -267,7 +267,7 @@ define <vscale x 2 x half> @test_insert_into_undef_nxv2f16(half %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 2 x half> undef, half %a, i32 0
+ %b = insertelement <vscale x 2 x half> poison, half %a, i32 0
ret <vscale x 2 x half> %b
}
@@ -276,7 +276,7 @@ define <vscale x 8 x bfloat> @test_insert_into_undef_nxv8bf16(bfloat %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 8 x bfloat> undef, bfloat %a, i32 0
+ %b = insertelement <vscale x 8 x bfloat> poison, bfloat %a, i32 0
ret <vscale x 8 x bfloat> %b
}
@@ -285,7 +285,7 @@ define <vscale x 4 x bfloat> @test_insert_into_undef_nxv4bf16(bfloat %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 4 x bfloat> undef, bfloat %a, i32 0
+ %b = insertelement <vscale x 4 x bfloat> poison, bfloat %a, i32 0
ret <vscale x 4 x bfloat> %b
}
@@ -294,7 +294,7 @@ define <vscale x 2 x bfloat> @test_insert_into_undef_nxv2bf16(bfloat %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 2 x bfloat> undef, bfloat %a, i32 0
+ %b = insertelement <vscale x 2 x bfloat> poison, bfloat %a, i32 0
ret <vscale x 2 x bfloat> %b
}
@@ -303,7 +303,7 @@ define <vscale x 4 x float> @test_insert_into_undef_nxv4f32(float %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 4 x float> undef, float %a, i32 0
+ %b = insertelement <vscale x 4 x float> poison, float %a, i32 0
ret <vscale x 4 x float> %b
}
@@ -312,7 +312,7 @@ define <vscale x 2 x float> @test_insert_into_undef_nxv2f32(float %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 2 x float> undef, float %a, i32 0
+ %b = insertelement <vscale x 2 x float> poison, float %a, i32 0
ret <vscale x 2 x float> %b
}
@@ -321,7 +321,7 @@ define <vscale x 2 x double> @test_insert_into_undef_nxv2f64(double %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: ret
- %b = insertelement <vscale x 2 x double> undef, double %a, i32 0
+ %b = insertelement <vscale x 2 x double> poison, double %a, i32 0
ret <vscale x 2 x double> %b
}
@@ -335,7 +335,7 @@ define <vscale x 2 x half> @test_insert_with_index_nxv2f16(half %h, i64 %idx) {
; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z2.d
; CHECK-NEXT: mov z0.h, p0/m, h0
; CHECK-NEXT: ret
- %res = insertelement <vscale x 2 x half> undef, half %h, i64 %idx
+ %res = insertelement <vscale x 2 x half> poison, half %h, i64 %idx
ret <vscale x 2 x half> %res
}
@@ -348,7 +348,7 @@ define <vscale x 4 x half> @test_insert_with_index_nxv4f16(half %h, i64 %idx) {
; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z2.s
; CHECK-NEXT: mov z0.h, p0/m, h0
; CHECK-NEXT: ret
- %res = insertelement <vscale x 4 x half> undef, half %h, i64 %idx
+ %res = insertelement <vscale x 4 x half> poison, half %h, i64 %idx
ret <vscale x 4 x half> %res
}
@@ -361,7 +361,7 @@ define <vscale x 8 x half> @test_insert_with_index_nxv8f16(half %h, i64 %idx) {
; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z2.h
; CHECK-NEXT: mov z0.h, p0/m, h0
; CHECK-NEXT: ret
- %res = insertelement <vscale x 8 x half> undef, half %h, i64 %idx
+ %res = insertelement <vscale x 8 x half> poison, half %h, i64 %idx
ret <vscale x 8 x half> %res
}
@@ -374,7 +374,7 @@ define <vscale x 2 x bfloat> @test_insert_with_index_nxv2bf16(bfloat %h, i64 %id
; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z2.d
; CHECK-NEXT: mov z0.h, p0/m, h0
; CHECK-NEXT: ret
- %res = insertelement <vscale x 2 x bfloat> undef, bfloat %h, i64 %idx
+ %res = insertelement <vscale x 2 x bfloat> poison, bfloat %h, i64 %idx
ret <vscale x 2 x bfloat> %res
}
@@ -387,7 +387,7 @@ define <vscale x 4 x bfloat> @test_insert_with_index_nxv4bf16(bfloat %h, i64 %id
; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z2.s
; CHECK-NEXT: mov z0.h, p0/m, h0
; CHECK-NEXT: ret
- %res = insertelement <vscale x 4 x bfloat> undef, bfloat %h, i64 %idx
+ %res = insertelement <vscale x 4 x bfloat> poison, bfloat %h, i64 %idx
ret <vscale x 4 x bfloat> %res
}
@@ -400,7 +400,7 @@ define <vscale x 8 x bfloat> @test_insert_with_index_nxv8bf16(bfloat %h, i64 %id
; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z2.h
; CHECK-NEXT: mov z0.h, p0/m, h0
; CHECK-NEXT: ret
- %res = insertelement <vscale x 8 x bfloat> undef, bfloat %h, i64 %idx
+ %res = insertelement <vscale x 8 x bfloat> poison, bfloat %h, i64 %idx
ret <vscale x 8 x bfloat> %res
}
@@ -413,7 +413,7 @@ define <vscale x 2 x float> @test_insert_with_index_nxv2f32(float %f, i64 %idx)
; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z2.d
; CHECK-NEXT: mov z0.s, p0/m, s0
; CHECK-NEXT: ret
- %res = insertelement <vscale x 2 x float> undef, float %f, i64 %idx
+ %res = insertelement <vscale x 2 x float> poison, float %f, i64 %idx
ret <vscale x 2 x float> %res
}
@@ -426,7 +426,7 @@ define <vscale x 4 x float> @test_insert_with_index_nxv4f32(float %f, i64 %idx)
; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z2.s
; CHECK-NEXT: mov z0.s, p0/m, s0
; CHECK-NEXT: ret
- %res = insertelement <vscale x 4 x float> undef, float %f, i64 %idx
+ %res = insertelement <vscale x 4 x float> poison, float %f, i64 %idx
ret <vscale x 4 x float> %res
}
@@ -439,7 +439,7 @@ define <vscale x 2 x double> @test_insert_with_index_nxv2f64(double %d, i64 %idx
; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z2.d
; CHECK-NEXT: mov z0.d, p0/m, d0
; CHECK-NEXT: ret
- %res = insertelement <vscale x 2 x double> undef, double %d, i64 %idx
+ %res = insertelement <vscale x 2 x double> poison, double %d, i64 %idx
ret <vscale x 2 x double> %res
}
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector-to-predicate-load.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector-to-predicate-load.ll
index b906de7ed9f74..1df66dc84af6d 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector-to-predicate-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector-to-predicate-load.ll
@@ -7,7 +7,7 @@ define <vscale x 16 x i1> @pred_load_v2i8(ptr %addr) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, ptr %addr
; CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
%load = load <2 x i8>, ptr %addr, align 4
- %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %load, i64 0)
+ %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> poison, <2 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}
@@ -17,7 +17,7 @@ define <vscale x 16 x i1> @pred_load_v4i8(ptr %addr) #1 {
; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, ptr %addr
; CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
%load = load <4 x i8>, ptr %addr, align 4
- %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
+ %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> poison, <4 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}
@@ -27,7 +27,7 @@ define <vscale x 16 x i1> @pred_load_v8i8(ptr %addr) #2 {
; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, ptr %addr
; CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
%load = load <8 x i8>, ptr %addr, align 4
- %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> %load, i64 0)
+ %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> poison, <8 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}
@@ -45,7 +45,7 @@ entry:
br label %bb1
bb1:
- %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> %load, i64 0)
+ %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> poison, <2 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}
@@ -55,7 +55,7 @@ define <vscale x 16 x i1> @pred_load_neg1(ptr %addr) #0 {
; CHECK-LABEL: @pred_load_neg1(
; CHECK: call <vscale x 2 x i8> @llvm.vector.insert
%load = load <4 x i8>, ptr %addr, align 4
- %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
+ %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> poison, <4 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}
@@ -65,7 +65,7 @@ define <vscale x 16 x i1> @pred_load_neg2(ptr %addr) #2 {
; CHECK-LABEL: @pred_load_neg2(
; CHECK: call <vscale x 2 x i8> @llvm.vector.insert
%load = load <4 x i8>, ptr %addr, align 4
- %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
+ %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> poison, <4 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}
@@ -75,7 +75,7 @@ define <vscale x 16 x i1> @pred_load_neg3(ptr %addr) #1 {
; CHECK-LABEL: @pred_load_neg3(
; CHECK: call <vscale x 2 x i8> @llvm.vector.insert
%load = load <4 x i8>, ptr %addr, align 4
- %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 4)
+ %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> poison, <4 x i8> %load, i64 4)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}
@@ -85,7 +85,7 @@ define <vscale x 16 x i1> @pred_load_neg4(ptr %addr) #3 {
; CHECK-LABEL: @pred_load_neg4(
; CHECK: call <vscale x 2 x i8> @llvm.vector.insert
%load = load <4 x i8>, ptr %addr, align 4
- %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> undef, <4 x i8> %load, i64 0)
+ %insert = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v4i8(<vscale x 2 x i8> poison, <4 x i8> %load, i64 0)
%ret = bitcast <vscale x 2 x i8> %insert to <vscale x 16 x i1>
ret <vscale x 16 x i1> %ret
}
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index 5d1d7cf65c09e..581c163388985 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -152,7 +152,7 @@ define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64>
; CHECK-NEXT: st1d { z1.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
- %v0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
+ %v0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> poison, <vscale x 8 x i64> %sv0, i64 0)
%v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %v0, <vscale x 8 x i64> %sv1, i64 8)
store <vscale x 16 x i64> %v, ptr %out
ret void
@@ -167,7 +167,7 @@ define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, ptr %out) {
; CHECK-NEXT: st1d { z1.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
- %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
+ %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> poison, <vscale x 8 x i64> %sv0, i64 0)
store <vscale x 16 x i64> %v, ptr %out
ret void
}
@@ -181,7 +181,7 @@ define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, ptr %out) {
; CHECK-NEXT: st1d { z1.d }, p0, [x0, #5, mul vl]
; CHECK-NEXT: st1d { z0.d }, p0, [x0, #4, mul vl]
; CHECK-NEXT: ret
- %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 8)
+ %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> poison, <vscale x 8 x i64> %sv0, i64 8)
store <vscale x 16 x i64> %v, ptr %out
ret void
}
@@ -212,7 +212,7 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, ptr %out) uwt
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
- %v0 = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
+ %v0 = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> poison, <2 x i64> %sv0, i64 0)
%v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
store <vscale x 16 x i64> %v, ptr %out
ret void
@@ -226,7 +226,7 @@ define void @insert_v2i64_nxv16i64_lo0(ptr %psv, ptr %out) {
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
%sv = load <2 x i64>, ptr %psv
- %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
+ %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> poison, <2 x i64> %sv, i64 0)
store <vscale x 16 x i64> %v, ptr %out
ret void
}
@@ -253,7 +253,7 @@ define void @insert_v2i64_nxv16i64_lo2(ptr %psv, ptr %out) uwtable {
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
%sv = load <2 x i64>, ptr %psv
- %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
+ %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> poison, <2 x i64> %sv, i64 2)
store <vscale x 16 x i64> %v, ptr %out
ret void
}
@@ -267,7 +267,7 @@ define <vscale x 4 x i32> @insert_nxv1i32_nxv4i32_undef() nounwind {
; CHECK-NEXT: mov z0.s, #1 // =0x1
; CHECK-NEXT: ret
entry:
- %retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> splat(i32 1), i64 0)
+ %retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> poison, <vscale x 1 x i32> splat(i32 1), i64 0)
ret <vscale x 4 x i32> %retval
}
@@ -277,7 +277,7 @@ define <vscale x 6 x i16> @insert_nxv1i16_nxv6i16_undef() nounwind {
; CHECK-NEXT: mov z0.h, #1 // =0x1
; CHECK-NEXT: ret
entry:
- %retval = call <vscale x 6 x i16> @llvm.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16> undef, <vscale x 1 x i16> splat(i16 1), i64 0)
+ %retval = call <vscale x 6 x i16> @llvm.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16> poison, <vscale x 1 x i16> splat(i16 1), i64 0)
ret <vscale x 6 x i16> %retval
}
@@ -287,7 +287,7 @@ define <vscale x 4 x float> @insert_nxv1f32_nxv4f32_undef(<vscale x 1 x float> %
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: ret
entry:
- %retval = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> undef, <vscale x 1 x float> %subvec, i64 0)
+ %retval = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> poison, <vscale x 1 x float> %subvec, i64 0)
ret <vscale x 4 x float> %retval
}
@@ -408,7 +408,7 @@ define <vscale x 3 x i32> @insert_nxv3i32_nxv2i32(<vscale x 2 x i32> %sv0) {
; CHECK: // %bb.0:
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: ret
- %v0 = call <vscale x 3 x i32> @llvm.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32> undef, <vscale x 2 x i32> %sv0, i64 0)
+ %v0 = call <vscale x 3 x i32> @llvm.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32> poison, <vscale x 2 x i32> %sv0, i64 0)
ret <vscale x 3 x i32> %v0
}
@@ -428,7 +428,7 @@ define <vscale x 3 x float> @insert_nxv3f32_nxv2f32(<vscale x 2 x float> %sv0) n
; CHECK: // %bb.0:
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: ret
- %v0 = call <vscale x 3 x float> @llvm.vector.insert.nxv3f32.nxv2f32(<vscale x 3 x float> undef, <vscale x 2 x float> %sv0, i64 0)
+ %v0 = call <vscale x 3 x float> @llvm.vector.insert.nxv3f32.nxv2f32(<vscale x 3 x float> poison, <vscale x 2 x float> %sv0, i64 0)
ret <vscale x 3 x float> %v0
}
@@ -464,7 +464,7 @@ define <vscale x 6 x i32> @insert_nxv6i32_nxv2i32(<vscale x 2 x i32> %sv0, <vsc
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
- %v0 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32> undef, <vscale x 2 x i32> %sv0, i64 0)
+ %v0 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32> poison, <vscale x 2 x i32> %sv0, i64 0)
%v1 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32> %v0, <vscale x 2 x i32> %sv1, i64 2)
ret <vscale x 6 x i32> %v1
}
@@ -474,7 +474,7 @@ define <vscale x 6 x i32> @insert_nxv6i32_nxv3i32(<vscale x 3 x i32> %sv0) {
; CHECK-LABEL: insert_nxv6i32_nxv3i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
- %v0 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv3i32(<vscale x 6 x i32> undef, <vscale x 3 x i32> %sv0, i64 0)
+ %v0 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv3i32(<vscale x 6 x i32> poison, <vscale x 3 x i32> %sv0, i64 0)
ret <vscale x 6 x i32> %v0
}
@@ -482,7 +482,7 @@ define <vscale x 12 x i32> @insert_nxv12i32_nxv4i32(<vscale x 4 x i32> %sv0, <vs
; CHECK-LABEL: insert_nxv12i32_nxv4i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
- %v0 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> undef, <vscale x 4 x i32> %sv0, i64 0)
+ %v0 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> poison, <vscale x 4 x i32> %sv0, i64 0)
%v1 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> %v0, <vscale x 4 x i32> %sv1, i64 4)
%v2 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> %v1, <vscale x 4 x i32> %sv2, i64 8)
ret <vscale x 12 x i32> %v2
@@ -659,7 +659,7 @@ define <vscale x 2 x i1> @insert_nxv2i1_v8i1_const_true_into_undef() vscale_rang
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ret
- %v0 = call <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.v8i1 (<vscale x 2 x i1> undef, <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
+ %v0 = call <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.v8i1 (<vscale x 2 x i1> poison, <8 x i1> splat (i1 true), i64 0)
ret <vscale x 2 x i1> %v0
}
@@ -668,7 +668,7 @@ define <vscale x 4 x i1> @insert_nxv4i1_v16i1_const_true_into_undef() vscale_ran
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ret
- %v0 = call <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.v16i1 (<vscale x 4 x i1> undef, <16 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
+ %v0 = call <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.v16i1 (<vscale x 4 x i1> poison, <16 x i1> splat (i1 true), i64 0)
ret <vscale x 4 x i1> %v0
}
@@ -677,7 +677,7 @@ define <vscale x 8 x i1> @insert_nxv8i1_v32i1_const_true_into_undef() vscale_ran
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: ret
- %v0 = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.v32i1 (<vscale x 8 x i1> undef, <32 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
+ %v0 = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.v32i1 (<vscale x 8 x i1> poison, <32 x i1> splat (i1 true), i64 0)
ret <vscale x 8 x i1> %v0
}
@@ -686,7 +686,7 @@ define <vscale x 16 x i1> @insert_nxv16i1_v64i1_const_true_into_undef() vscale_r
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ret
- %v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.v64i1 (<vscale x 16 x i1> undef, <64 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
+ %v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.v64i1 (<vscale x 16 x i1> poison, <64 x i1> splat (i1 true), i64 0)
ret <vscale x 16 x i1> %v0
}
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll
index 4ebe57bad6891..e953bdff20913 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll
@@ -91,7 +91,7 @@ define <vscale x 2 x i64> @gld1h_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <v
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
@@ -106,7 +106,7 @@ define <vscale x 2 x i64> @gld1w_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <v
; CHECK: // %bb.0:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
@@ -121,7 +121,7 @@ define <vscale x 2 x i64> @gld1d_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <v
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1> %pg,
@@ -135,7 +135,7 @@ define <vscale x 2 x double> @gld1d_index_double_sxtw(<vscale x 2 x i1> %pg, ptr
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.index.nxv2f64(<vscale x 2 x i1> %pg,
@@ -154,7 +154,7 @@ define <vscale x 2 x i64> @gld1sh_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw #1]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
@@ -169,7 +169,7 @@ define <vscale x 2 x i64> @gld1sw_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw #2]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
@@ -189,7 +189,7 @@ define <vscale x 2 x i64> @gld1h_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <v
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
@@ -204,7 +204,7 @@ define <vscale x 2 x i64> @gld1w_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <v
; CHECK: // %bb.0:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
@@ -219,7 +219,7 @@ define <vscale x 2 x i64> @gld1d_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <v
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1> %pg,
@@ -233,7 +233,7 @@ define <vscale x 2 x double> @gld1d_index_double_uxtw(<vscale x 2 x i1> %pg, ptr
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.index.nxv2f64(<vscale x 2 x i1> %pg,
@@ -252,7 +252,7 @@ define <vscale x 2 x i64> @gld1sh_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
@@ -267,7 +267,7 @@ define <vscale x 2 x i64> @gld1sw_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw #2]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
index e96411596613c..65d80c86b4de5 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
@@ -115,7 +115,7 @@ define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
@@ -130,7 +130,7 @@ define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
@@ -145,7 +145,7 @@ define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %offsets)
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
@@ -160,7 +160,7 @@ define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %pg,
@@ -174,7 +174,7 @@ define <vscale x 2 x double> @gld1d_d_double_sxtw(<vscale x 2 x i1> %pg, ptr %ba
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %pg,
@@ -193,7 +193,7 @@ define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
@@ -208,7 +208,7 @@ define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
@@ -223,7 +223,7 @@ define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
- %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %sxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %offsets)
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
@@ -243,7 +243,7 @@ define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
@@ -258,7 +258,7 @@ define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
@@ -273,7 +273,7 @@ define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %offsets)
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
@@ -288,7 +288,7 @@ define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %pg,
@@ -302,7 +302,7 @@ define <vscale x 2 x double> @gld1d_d_double_uxtw(<vscale x 2 x i1> %pg, ptr %ba
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %pg,
@@ -321,7 +321,7 @@ define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
@@ -336,7 +336,7 @@ define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %b)
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
@@ -351,7 +351,7 @@ define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
- %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
+ %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> poison,
<vscale x 2 x i1> %pg,
<vscale x 2 x i64> %offsets)
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
index 309742fe8282b..723b217cf15a3 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
@@ -84,7 +84,7 @@ define <vscale x 16 x i8> @ld1rqb_i8_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %
; CHECK-NEXT: ret
%ptr = getelementptr inbounds <16 x i8>, ptr %addr, i16 -1
%load = load <16 x i8>, ptr %ptr
- %1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %load, i64 0)
+ %1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> %load, i64 0)
%2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %1, i64 0)
ret <vscale x 16 x i8> %2
}
@@ -97,7 +97,7 @@ define <vscale x 16 x i8> @ld1rqb_i8_scalar_dupqlane(<vscale x 8 x i1> %pred, pt
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i8, ptr %addr, i64 %idx
%load = load <16 x i8>, ptr %ptr
- %1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %load, i64 0)
+ %1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> %load, i64 0)
%2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %1, i64 0)
ret <vscale x 16 x i8> %2
}
@@ -201,7 +201,7 @@ define <vscale x 8 x i16> @ld1rqh_i16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr
; CHECK-NEXT: ret
%ptr = getelementptr inbounds <8 x i16>, ptr %addr, i16 -1
%load = load <8 x i16>, ptr %ptr
- %1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %load, i64 0)
+ %1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> %load, i64 0)
%2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %1, i64 0)
ret <vscale x 8 x i16> %2
}
@@ -214,7 +214,7 @@ define <vscale x 8 x i16> @ld1rqh_i16_scalar_dupqlane(<vscale x 8 x i1> %pred, p
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i16, ptr %addr, i64 %idx
%load = load <8 x i16>, ptr %ptr
- %1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %load, i64 0)
+ %1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> %load, i64 0)
%2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %1, i64 0)
ret <vscale x 8 x i16> %2
}
@@ -227,7 +227,7 @@ define <vscale x 8 x half> @ld1rqh_f16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr
; CHECK-NEXT: ret
%ptr = getelementptr inbounds <8 x half>, ptr %addr, i16 -1
%load = load <8 x half>, ptr %ptr
- %1 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %load, i64 0)
+ %1 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %load, i64 0)
%2 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %1, i64 0)
ret <vscale x 8 x half> %2
}
@@ -240,7 +240,7 @@ define <vscale x 8 x half> @ld1rqh_f16_scalar_dupqlane(<vscale x 8 x i1> %pred,
; CHECK-NEXT: ret
%ptr = getelementptr inbounds half, ptr %addr, i64 %idx
%load = load <8 x half>, ptr %ptr
- %1 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %load, i64 0)
+ %1 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %load, i64 0)
%2 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %1, i64 0)
ret <vscale x 8 x half> %2
}
@@ -253,7 +253,7 @@ define <vscale x 8 x bfloat> @ld1rqh_bf16_imm_dupqlane(<vscale x 8 x i1> %pred,
; CHECK-NEXT: ret
%ptr = getelementptr inbounds <8 x bfloat>, ptr %addr, i16 -1
%load = load <8 x bfloat>, ptr %ptr
- %1 = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %load, i64 0)
+ %1 = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> poison, <8 x bfloat> %load, i64 0)
%2 = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %1, i64 0)
ret <vscale x 8 x bfloat> %2
}
@@ -266,7 +266,7 @@ define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar_dupqlane(<vscale x 8 x i1> %pre
; CHECK-NEXT: ret
%ptr = getelementptr inbounds bfloat, ptr %addr, i64 %idx
%load = load <8 x bfloat>, ptr %ptr
- %1 = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %load, i64 0)
+ %1 = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> poison, <8 x bfloat> %load, i64 0)
%2 = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %1, i64 0)
ret <vscale x 8 x bfloat> %2
}
@@ -341,7 +341,7 @@ define <vscale x 4 x i32> @ld1rqw_i32_imm_dupqlane(<vscale x 4 x i1> %pred, ptr
; CHECK-NEXT: ret
%ptr = getelementptr inbounds <4 x i32>, ptr %addr, i32 1
%load = load <4 x i32>, ptr %ptr
- %1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %load, i64 0)
+ %1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> %load, i64 0)
%2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %1, i64 0)
ret <vscale x 4 x i32> %2
}
@@ -354,7 +354,7 @@ define <vscale x 4 x i32> @ld1rqw_i32_scalar_dupqlane(<vscale x 4 x i1> %pred, p
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i32, ptr %addr, i64 %idx
%load = load <4 x i32>, ptr %ptr
- %1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %load, i64 0)
+ %1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> %load, i64 0)
%2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %1, i64 0)
ret <vscale x 4 x i32> %2
}
@@ -367,7 +367,7 @@ define <vscale x 4 x float> @ld1rqw_f32_imm_dupqlane(<vscale x 4 x i1> %pred, pt
; CHECK-NEXT: ret
%ptr = getelementptr inbounds <4 x float>, ptr %addr, i32 1
%load = load <4 x float>, ptr %ptr
- %1 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %load, i64 0)
+ %1 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %load, i64 0)
%2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %1, i64 0)
ret <vscale x 4 x float> %2
}
@@ -380,7 +380,7 @@ define <vscale x 4 x float> @ld1rqw_f32_scalar_dupqlane(<vscale x 4 x i1> %pred,
; CHECK-NEXT: ret
%ptr = getelementptr inbounds float, ptr %addr, i64 %idx
%load = load <4 x float>, ptr %ptr
- %1 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %load, i64 0)
+ %1 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %load, i64 0)
%2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %1, i64 0)
ret <vscale x 4 x float> %2
}
@@ -455,7 +455,7 @@ define <vscale x 2 x i64> @ld1rqd_i64_imm_dupqlane(<vscale x 2 x i1> %pred, ptr
; CHECK-NEXT: ret
%ptr = getelementptr inbounds <2 x i64>, ptr %addr, i64 1
%load = load <2 x i64>, ptr %ptr
- %1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %load, i64 0)
+ %1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> %load, i64 0)
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1, i64 0)
ret <vscale x 2 x i64> %2
}
@@ -468,7 +468,7 @@ define <vscale x 2 x i64> @ld1rqd_i64_scalar_dupqlane(<vscale x 2 x i1> %pred, p
; CHECK-NEXT: ret
%ptr = getelementptr inbounds i64, ptr %addr, i64 %idx
%load = load <2 x i64>, ptr %ptr
- %1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %load, i64 0)
+ %1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> %load, i64 0)
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1, i64 0)
ret <vscale x 2 x i64> %2
}
@@ -481,7 +481,7 @@ define <vscale x 2 x double> @ld1rqd_f64_imm_dupqlane(<vscale x 2 x i1> %pred, p
; CHECK-NEXT: ret
%ptr = getelementptr inbounds <2 x double>, ptr %addr, i64 1
%load = load <2 x double>, ptr %ptr
- %1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %load, i64 0)
+ %1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> poison, <2 x double> %load, i64 0)
%2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %1, i64 0)
ret <vscale x 2 x double> %2
}
@@ -494,7 +494,7 @@ define <vscale x 2 x double> @ld1rqd_f64_scalar_dupqlane(<vscale x 2 x i1> %pred
; CHECK-NEXT: ret
%ptr = getelementptr inbounds double, ptr %addr, i64 %idx
%load = load <2 x double>, ptr %ptr
- %1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %load, i64 0)
+ %1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> poison, <2 x double> %load, i64 0)
%2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %1, i64 0)
ret <vscale x 2 x double> %2
}
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
index 5d8ee3c8abcf3..a5e9fb7d9a6e6 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
@@ -16,7 +16,7 @@ define <vscale x 16 x i32> @masked_ld1b_i8_sext_i32(ptr %base, <vscale x 16 x i1
; CHECK-NEXT: sunpklo z2.s, z3.h
; CHECK-NEXT: sunpkhi z3.s, z3.h
; CHECK-NEXT: ret
- %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+ %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> poison)
%res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
ret <vscale x 16 x i32> %res
}
@@ -44,7 +44,7 @@ define <vscale x 16 x i32> @masked_ld1b_i8_zext_i32(ptr %base, <vscale x 16 x i1
; CHECK-NEXT: uunpklo z2.s, z3.h
; CHECK-NEXT: uunpkhi z3.s, z3.h
; CHECK-NEXT: ret
- %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+ %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> poison)
%res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
ret <vscale x 16 x i32> %res
}
@@ -80,7 +80,7 @@ define <vscale x 16 x i64> @masked_ld1b_i8_sext(ptr %base, <vscale x 16 x i1> %m
; CHECK-NEXT: sunpklo z6.d, z7.s
; CHECK-NEXT: sunpkhi z7.d, z7.s
; CHECK-NEXT: ret
- %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+ %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> poison)
%res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
ret <vscale x 16 x i64> %res
}
@@ -116,7 +116,7 @@ define <vscale x 16 x i64> @masked_ld1b_i8_zext(ptr %base, <vscale x 16 x i1> %m
; CHECK-NEXT: uunpklo z6.d, z7.s
; CHECK-NEXT: uunpkhi z7.d, z7.s
; CHECK-NEXT: ret
- %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+ %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> poison)
%res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
ret <vscale x 16 x i64> %res
}
@@ -148,7 +148,7 @@ define <vscale x 8 x i64> @masked_ld1h_i16_sext(ptr %base, <vscale x 8 x i1> %ma
; CHECK-NEXT: sunpklo z2.d, z3.s
; CHECK-NEXT: sunpkhi z3.d, z3.s
; CHECK-NEXT: ret
- %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+ %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> poison)
%res = sext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
ret <vscale x 8 x i64> %res
}
@@ -176,7 +176,7 @@ define <vscale x 8 x i64> @masked_ld1h_i16_zext(ptr %base, <vscale x 8 x i1> %ma
; CHECK-NEXT: uunpklo z2.d, z3.s
; CHECK-NEXT: uunpkhi z3.d, z3.s
; CHECK-NEXT: ret
- %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+ %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> poison)
%res = zext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
ret <vscale x 8 x i64> %res
}
@@ -204,7 +204,7 @@ define <vscale x 4 x i64> @masked_ld1w_i32_sext(ptr %base, <vscale x 4 x i1> %ma
; CHECK-NEXT: sunpklo z0.d, z1.s
; CHECK-NEXT: sunpkhi z1.d, z1.s
; CHECK-NEXT: ret
- %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
%res = sext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
ret <vscale x 4 x i64> %res
}
@@ -216,7 +216,7 @@ define <vscale x 4 x i64> @masked_ld1w_i32_zext(ptr %base, <vscale x 4 x i1> %ma
; CHECK-NEXT: uunpklo z0.d, z1.s
; CHECK-NEXT: uunpkhi z1.d, z1.s
; CHECK-NEXT: ret
- %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
%res = zext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
ret <vscale x 4 x i64> %res
}
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll
index 3096b50988476..ef31badb5e1f5 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll
@@ -601,9 +601,9 @@ define dso_local <vscale x 4 x float> @dupq_f32_repeat_complex(float %x, float %
; CHECK-NEXT: mov v0.s[1], v1.s[0]
; CHECK-NEXT: mov z0.d, d0
; CHECK-NEXT: ret
- %1 = insertelement <4 x float> undef, float %x, i64 0
+ %1 = insertelement <4 x float> poison, float %x, i64 0
%2 = insertelement <4 x float> %1, float %y, i64 1
- %3 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %2, i64 0)
+ %3 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %2, i64 0)
%4 = bitcast <vscale x 4 x float> %3 to <vscale x 2 x double>
%5 = shufflevector <vscale x 2 x double> %4, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
%6 = bitcast <vscale x 2 x double> %5 to <vscale x 4 x float>
@@ -618,9 +618,9 @@ define dso_local <vscale x 8 x half> @dupq_f16_repeat_complex(half %x, half %y)
; CHECK-NEXT: mov v0.h[1], v1.h[0]
; CHECK-NEXT: mov z0.s, s0
; CHECK-NEXT: ret
- %1 = insertelement <8 x half> undef, half %x, i64 0
+ %1 = insertelement <8 x half> poison, half %x, i64 0
%2 = insertelement <8 x half> %1, half %y, i64 1
- %3 = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %2, i64 0)
+ %3 = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %2, i64 0)
%4 = bitcast <vscale x 8 x half> %3 to <vscale x 4 x float>
%5 = shufflevector <vscale x 4 x float> %4, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%6 = bitcast <vscale x 4 x float> %5 to <vscale x 8 x half>
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll
index 2d644688a5359..238f188f93815 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll
@@ -119,7 +119,7 @@ define <vscale x 8 x bfloat> @test_svdup_n_bf16_x(<vscale x 8 x i1> %pg, bfloat
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.h, p0/m, h0
; CHECK-NEXT: ret
- %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dup.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x i1> %pg, bfloat %op)
+ %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dup.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x i1> %pg, bfloat %op)
ret <vscale x 8 x bfloat> %out
}
diff --git a/llvm/test/CodeGen/AArch64/sve-ld1r.ll b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
index fbe82e8591fd0..0a3f734661502 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1r.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
@@ -33,8 +33,8 @@ define <vscale x 16 x i8> @ld1r_stack() {
store volatile i8 %valp2, ptr %valp
%valp3 = getelementptr i8, ptr %valp, i32 2
%val = load i8, ptr %valp3
- %1 = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
- %2 = shufflevector <vscale x 16 x i8> %1, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %1 = insertelement <vscale x 16 x i8> poison, i8 %val, i32 0
+ %2 = shufflevector <vscale x 16 x i8> %1, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i8> %2
}
@@ -45,8 +45,8 @@ define <vscale x 16 x i8> @ld1rb(ptr %valp) {
; CHECK-NEXT: ld1rb { z0.b }, p0/z, [x0]
; CHECK-NEXT: ret
%val = load i8, ptr %valp
- %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
- %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %ins = insertelement <vscale x 16 x i8> poison, i8 %val, i32 0
+ %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i8> %shf
}
@@ -58,8 +58,8 @@ define <vscale x 16 x i8> @ld1rb_gep(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i8, ptr %valp, i32 63
%val = load i8, ptr %valp2
- %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
- %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %ins = insertelement <vscale x 16 x i8> poison, i8 %val, i32 0
+ %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i8> %shf
}
@@ -72,8 +72,8 @@ define <vscale x 16 x i8> @ld1rb_gep_out_of_range_up(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i8, ptr %valp, i32 64
%val = load i8, ptr %valp2
- %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
- %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %ins = insertelement <vscale x 16 x i8> poison, i8 %val, i32 0
+ %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i8> %shf
}
@@ -86,8 +86,8 @@ define <vscale x 16 x i8> @ld1rb_gep_out_of_range_down(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i8, ptr %valp, i32 -1
%val = load i8, ptr %valp2
- %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
- %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %ins = insertelement <vscale x 16 x i8> poison, i8 %val, i32 0
+ %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i8> %shf
}
@@ -99,8 +99,8 @@ define <vscale x 8 x i16> @ld1rb_i8_i16_zext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i8, ptr %valp
%ext = zext i8 %val to i16
- %ins = insertelement <vscale x 8 x i16> undef, i16 %ext, i32 0
- %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x i16> poison, i16 %ext, i32 0
+ %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i16> %shf
}
@@ -112,8 +112,8 @@ define <vscale x 8 x i16> @ld1rb_i8_i16_sext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i8, ptr %valp
%ext = sext i8 %val to i16
- %ins = insertelement <vscale x 8 x i16> undef, i16 %ext, i32 0
- %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x i16> poison, i16 %ext, i32 0
+ %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i16> %shf
}
@@ -125,8 +125,8 @@ define <vscale x 4 x i32> @ld1rb_i8_i32_zext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i8, ptr %valp
%ext = zext i8 %val to i32
- %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
- %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i32> poison, i32 %ext, i32 0
+ %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %shf
}
@@ -138,8 +138,8 @@ define <vscale x 4 x i32> @ld1rb_i8_i32_sext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i8, ptr %valp
%ext = sext i8 %val to i32
- %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
- %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i32> poison, i32 %ext, i32 0
+ %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %shf
}
@@ -151,8 +151,8 @@ define <vscale x 2 x i64> @ld1rb_i8_i64_zext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i8, ptr %valp
%ext = zext i8 %val to i64
- %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %ext, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -164,8 +164,8 @@ define <vscale x 2 x i64> @ld1rb_i8_i64_sext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i8, ptr %valp
%ext = sext i8 %val to i64
- %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %ext, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -176,8 +176,8 @@ define <vscale x 8 x i16> @ld1rh(ptr %valp) {
; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%val = load i16, ptr %valp
- %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
- %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x i16> poison, i16 %val, i32 0
+ %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i16> %shf
}
@@ -189,8 +189,8 @@ define <vscale x 8 x i16> @ld1rh_gep(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i16, ptr %valp, i32 63
%val = load i16, ptr %valp2
- %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
- %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x i16> poison, i16 %val, i32 0
+ %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i16> %shf
}
@@ -203,8 +203,8 @@ define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i16, ptr %valp, i32 64
%val = load i16, ptr %valp2
- %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
- %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x i16> poison, i16 %val, i32 0
+ %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i16> %shf
}
@@ -217,8 +217,8 @@ define <vscale x 8 x i16> @ld1rh_gep_out_of_range_down(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i16, ptr %valp, i32 -1
%val = load i16, ptr %valp2
- %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
- %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x i16> poison, i16 %val, i32 0
+ %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i16> %shf
}
@@ -230,8 +230,8 @@ define <vscale x 4 x i32> @ld1rh_i16_i32_zext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i16, ptr %valp
%ext = zext i16 %val to i32
- %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
- %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i32> poison, i32 %ext, i32 0
+ %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %shf
}
@@ -243,8 +243,8 @@ define <vscale x 4 x i32> @ld1rh_i16_i32_sext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i16, ptr %valp
%ext = sext i16 %val to i32
- %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
- %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i32> poison, i32 %ext, i32 0
+ %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %shf
}
@@ -256,8 +256,8 @@ define <vscale x 2 x i64> @ld1rh_i16_i64_zext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i16, ptr %valp
%ext = zext i16 %val to i64
- %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %ext, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -269,8 +269,8 @@ define <vscale x 2 x i64> @ld1rh_i16_i64_sext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i16, ptr %valp
%ext = sext i16 %val to i64
- %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %ext, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -281,8 +281,8 @@ define <vscale x 4 x i32> @ld1rw(ptr %valp) {
; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%val = load i32, ptr %valp
- %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
- %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i32> poison, i32 %val, i32 0
+ %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %shf
}
@@ -294,8 +294,8 @@ define <vscale x 4 x i32> @ld1rw_gep(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i32, ptr %valp, i32 63
%val = load i32, ptr %valp2
- %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
- %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i32> poison, i32 %val, i32 0
+ %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %shf
}
@@ -308,8 +308,8 @@ define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i32, ptr %valp, i32 64
%val = load i32, ptr %valp2
- %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
- %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i32> poison, i32 %val, i32 0
+ %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %shf
}
@@ -322,8 +322,8 @@ define <vscale x 4 x i32> @ld1rw_gep_out_of_range_down(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i32, ptr %valp, i32 -1
%val = load i32, ptr %valp2
- %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
- %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i32> poison, i32 %val, i32 0
+ %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %shf
}
@@ -335,8 +335,8 @@ define <vscale x 2 x i64> @ld1rw_i32_i64_zext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i32, ptr %valp
%ext = zext i32 %val to i64
- %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %ext, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -348,8 +348,8 @@ define <vscale x 2 x i64> @ld1rw_i32_i64_sext(ptr %valp) {
; CHECK-NEXT: ret
%val = load i32, ptr %valp
%ext = sext i32 %val to i64
- %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %ext, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -360,8 +360,8 @@ define <vscale x 2 x i64> @ld1rd(ptr %valp) {
; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%val = load i64, ptr %valp
- %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %val, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -373,8 +373,8 @@ define <vscale x 2 x i64> @ld1rd_gep(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i64, ptr %valp, i32 63
%val = load i64, ptr %valp2
- %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %val, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -387,8 +387,8 @@ define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i64, ptr %valp, i32 64
%val = load i64, ptr %valp2
- %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %val, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -401,8 +401,8 @@ define <vscale x 2 x i64> @ld1rd_gep_out_of_range_down(ptr %valp) {
; CHECK-NEXT: ret
%valp2 = getelementptr i64, ptr %valp, i32 -1
%val = load i64, ptr %valp2
- %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
- %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %val, i32 0
+ %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %shf
}
@@ -419,8 +419,8 @@ define <vscale x 8 x half> @ld1rh_half(ptr %valp) {
; CHECK-NO-LD1R-NEXT: mov z0.h, h0
; CHECK-NO-LD1R-NEXT: ret
%val = load half, ptr %valp
- %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x half> %shf
}
@@ -431,8 +431,8 @@ define <vscale x 8 x half> @ld1rh_half_neoverse(ptr %valp) #1 {
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: ret
%val = load half, ptr %valp
- %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x half> %shf
}
@@ -450,8 +450,8 @@ define <vscale x 8 x half> @ld1rh_half_gep(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr half, ptr %valp, i32 63
%val = load half, ptr %valp2
- %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x half> %shf
}
@@ -470,8 +470,8 @@ define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr half, ptr %valp, i32 64
%val = load half, ptr %valp2
- %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x half> %shf
}
@@ -490,8 +490,8 @@ define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_down(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr half, ptr %valp, i32 -1
%val = load half, ptr %valp2
- %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x half> %shf
}
@@ -508,8 +508,8 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4(ptr %valp) {
; CHECK-NO-LD1R-NEXT: mov z0.h, h0
; CHECK-NO-LD1R-NEXT: ret
%val = load half, ptr %valp
- %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x half> %shf
}
@@ -527,8 +527,8 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr half, ptr %valp, i32 63
%val = load half, ptr %valp2
- %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x half> %shf
}
@@ -547,8 +547,8 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(ptr %valp)
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr half, ptr %valp, i32 64
%val = load half, ptr %valp2
- %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x half> %shf
}
@@ -567,8 +567,8 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_down(ptr %valp
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr half, ptr %valp, i32 -1
%val = load half, ptr %valp2
- %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x half> %shf
}
@@ -585,8 +585,8 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2(ptr %valp) {
; CHECK-NO-LD1R-NEXT: mov z0.h, h0
; CHECK-NO-LD1R-NEXT: ret
%val = load half, ptr %valp
- %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x half> %shf
}
@@ -604,8 +604,8 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr half, ptr %valp, i32 63
%val = load half, ptr %valp2
- %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x half> %shf
}
@@ -624,8 +624,8 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(ptr %valp)
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr half, ptr %valp, i32 64
%val = load half, ptr %valp2
- %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x half> %shf
}
@@ -644,8 +644,8 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_down(ptr %valp
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr half, ptr %valp, i32 -1
%val = load half, ptr %valp2
- %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
- %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x half> poison, half %val, i32 0
+ %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x half> %shf
}
@@ -662,8 +662,8 @@ define <vscale x 4 x float> @ld1rw_float(ptr %valp) {
; CHECK-NO-LD1R-NEXT: mov z0.s, s0
; CHECK-NO-LD1R-NEXT: ret
%val = load float, ptr %valp
- %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
- %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x float> poison, float %val, i32 0
+ %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x float> %shf
}
@@ -681,8 +681,8 @@ define <vscale x 4 x float> @ld1rw_float_gep(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr float, ptr %valp, i32 63
%val = load float, ptr %valp2
- %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
- %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x float> poison, float %val, i32 0
+ %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x float> %shf
}
@@ -701,8 +701,8 @@ define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr float, ptr %valp, i32 64
%val = load float, ptr %valp2
- %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
- %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x float> poison, float %val, i32 0
+ %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x float> %shf
}
@@ -721,8 +721,8 @@ define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_down(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr float, ptr %valp, i32 -1
%val = load float, ptr %valp2
- %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
- %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x float> poison, float %val, i32 0
+ %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x float> %shf
}
@@ -739,8 +739,8 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2(ptr %valp) {
; CHECK-NO-LD1R-NEXT: mov z0.s, s0
; CHECK-NO-LD1R-NEXT: ret
%val = load float, ptr %valp
- %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
- %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x float> poison, float %val, i32 0
+ %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x float> %shf
}
@@ -758,8 +758,8 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr float, ptr %valp, i32 63
%val = load float, ptr %valp2
- %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
- %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x float> poison, float %val, i32 0
+ %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x float> %shf
}
@@ -778,8 +778,8 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(ptr %valp
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr float, ptr %valp, i32 64
%val = load float, ptr %valp2
- %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
- %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x float> poison, float %val, i32 0
+ %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x float> %shf
}
@@ -798,8 +798,8 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_down(ptr %va
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr float, ptr %valp, i32 -1
%val = load float, ptr %valp2
- %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
- %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x float> poison, float %val, i32 0
+ %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x float> %shf
}
@@ -816,8 +816,8 @@ define <vscale x 2 x double> @ld1rd_double(ptr %valp) {
; CHECK-NO-LD1R-NEXT: mov z0.d, d0
; CHECK-NO-LD1R-NEXT: ret
%val = load double, ptr %valp
- %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
- %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x double> poison, double %val, i32 0
+ %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x double> %shf
}
@@ -835,8 +835,8 @@ define <vscale x 2 x double> @ld1rd_double_gep(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr double, ptr %valp, i32 63
%val = load double, ptr %valp2
- %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
- %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x double> poison, double %val, i32 0
+ %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x double> %shf
}
@@ -855,8 +855,8 @@ define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr double, ptr %valp, i32 64
%val = load double, ptr %valp2
- %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
- %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x double> poison, double %val, i32 0
+ %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x double> %shf
}
@@ -875,8 +875,8 @@ define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(ptr %valp) {
; CHECK-NO-LD1R-NEXT: ret
%valp2 = getelementptr double, ptr %valp, i32 -1
%val = load double, ptr %valp2
- %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
- %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x double> poison, double %val, i32 0
+ %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x double> %shf
}
@@ -887,7 +887,7 @@ define <vscale x 2 x double> @dupq_ld1rqd_f64(ptr %a) {
; CHECK-NEXT: ld1rqd { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <2 x double>, ptr %a
- %2 = tail call fast <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %1, i64 0)
+ %2 = tail call fast <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> poison, <2 x double> %1, i64 0)
%3 = tail call fast <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %2, i64 0)
ret <vscale x 2 x double> %3
}
@@ -899,7 +899,7 @@ define <vscale x 4 x float> @dupq_ld1rqw_f32(ptr %a) {
; CHECK-NEXT: ld1rqw { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <4 x float>, ptr %a
- %2 = tail call fast <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %1, i64 0)
+ %2 = tail call fast <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %1, i64 0)
%3 = tail call fast <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %2, i64 0)
ret <vscale x 4 x float> %3
}
@@ -911,7 +911,7 @@ define <vscale x 8 x half> @dupq_ld1rqh_f16(ptr %a) {
; CHECK-NEXT: ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <8 x half>, ptr %a
- %2 = tail call fast <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %1, i64 0)
+ %2 = tail call fast <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %1, i64 0)
%3 = tail call fast <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %2, i64 0)
ret <vscale x 8 x half> %3
}
@@ -923,7 +923,7 @@ define <vscale x 8 x bfloat> @dupq_ld1rqh_bf16(ptr %a) #0 {
; CHECK-NEXT: ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <8 x bfloat>, ptr %a
- %2 = tail call fast <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %1, i64 0)
+ %2 = tail call fast <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> poison, <8 x bfloat> %1, i64 0)
%3 = tail call fast <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %2, i64 0)
ret <vscale x 8 x bfloat> %3
}
@@ -935,7 +935,7 @@ define <vscale x 2 x i64> @dupq_ld1rqd_i64(ptr %a) #0 {
; CHECK-NEXT: ld1rqd { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <2 x i64>, ptr %a
- %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %1, i64 0)
+ %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> %1, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2, i64 0)
ret <vscale x 2 x i64> %3
}
@@ -947,7 +947,7 @@ define <vscale x 4 x i32> @dupq_ld1rqw_i32(ptr %a) #0 {
; CHECK-NEXT: ld1rqw { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <4 x i32>, ptr %a
- %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %1, i64 0)
+ %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> %1, i64 0)
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2, i64 0)
ret <vscale x 4 x i32> %3
}
@@ -959,7 +959,7 @@ define <vscale x 8 x i16> @dupq_ld1rqw_i16(ptr %a) #0 {
; CHECK-NEXT: ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <8 x i16>, ptr %a
- %2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %1, i64 0)
+ %2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> %1, i64 0)
%3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2, i64 0)
ret <vscale x 8 x i16> %3
}
@@ -971,7 +971,7 @@ define <vscale x 16 x i8> @dupq_ld1rqw_i8(ptr %a) #0 {
; CHECK-NEXT: ld1rqb { z0.b }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <16 x i8>, ptr %a
- %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %1, i64 0)
+ %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> %1, i64 0)
%3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2, i64 0)
ret <vscale x 16 x i8> %3
}
@@ -981,7 +981,7 @@ define <vscale x 16 x i8> @dupq_ld1rqw_i8(ptr %a) #0 {
; Tests for dup:
;
; Positive tests:
-; * dup with passthru=undef or passthrue=zero.
+; * dup with passthru=poison or passthru=zero.
; * sign/zero extending.
; * unpacked types.
;
@@ -996,7 +996,7 @@ define <vscale x 16 x i8> @dup_ld1rb_i8_passthruundef_nxv16i8(<vscale x 16 x i1>
; CHECK-NEXT: ld1rb { z0.b }, p0/z, [x0]
; CHECK-NEXT: ret
%ld = load i8, ptr %addr
- %res = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, i8 %ld)
+ %res = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> poison, <vscale x 16 x i1> %pg, i8 %ld)
ret <vscale x 16 x i8> %res
}
define <vscale x 8 x i16> @dup_ld1rh_i16_passthruundef_nxv8i16(<vscale x 8 x i1> %pg, ptr %addr) {
@@ -1005,7 +1005,7 @@ define <vscale x 8 x i16> @dup_ld1rh_i16_passthruundef_nxv8i16(<vscale x 8 x i1>
; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%ld = load i16, ptr %addr
- %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, i16 %ld)
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, i16 %ld)
ret <vscale x 8 x i16> %res
}
define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_sext(<vscale x 8 x i1> %pg, ptr %addr) {
@@ -1015,7 +1015,7 @@ define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_sext(<vscale x 8 x
; CHECK-NEXT: ret
%ld = load i8, ptr %addr
%ext = sext i8 %ld to i16
- %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, i16 %ext)
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, i16 %ext)
ret <vscale x 8 x i16> %res
}
define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_zext(<vscale x 8 x i1> %pg, ptr %addr) {
@@ -1025,7 +1025,7 @@ define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_zext(<vscale x 8 x
; CHECK-NEXT: ret
%ld = load i8, ptr %addr
%ext = zext i8 %ld to i16
- %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, i16 %ext)
+ %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, i16 %ext)
ret <vscale x 8 x i16> %res
}
define <vscale x 4 x i32> @dup_ld1rs_i32_passthruundef_nxv4i32(<vscale x 4 x i1> %pg, ptr %addr) {
@@ -1034,7 +1034,7 @@ define <vscale x 4 x i32> @dup_ld1rs_i32_passthruundef_nxv4i32(<vscale x 4 x i1>
; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%ld = load i32, ptr %addr
- %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ld)
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, i32 %ld)
ret <vscale x 4 x i32> %res
}
define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_sext(<vscale x 4 x i1> %pg, ptr %addr) {
@@ -1044,7 +1044,7 @@ define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_sext(<vscale x 4 x
; CHECK-NEXT: ret
%ld = load i8, ptr %addr
%ext = sext i8 %ld to i32
- %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, i32 %ext)
ret <vscale x 4 x i32> %res
}
define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_zext(<vscale x 4 x i1> %pg, ptr %addr) {
@@ -1054,7 +1054,7 @@ define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_zext(<vscale x 4 x
; CHECK-NEXT: ret
%ld = load i8, ptr %addr
%ext = zext i8 %ld to i32
- %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, i32 %ext)
ret <vscale x 4 x i32> %res
}
define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_sext(<vscale x 4 x i1> %pg, ptr %addr) {
@@ -1064,7 +1064,7 @@ define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_sext(<vscale x 4
; CHECK-NEXT: ret
%ld = load i16, ptr %addr
%ext = sext i16 %ld to i32
- %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, i32 %ext)
ret <vscale x 4 x i32> %res
}
define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_zext(<vscale x 4 x i1> %pg, ptr %addr) {
@@ -1074,7 +1074,7 @@ define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_zext(<vscale x 4
; CHECK-NEXT: ret
%ld = load i16, ptr %addr
%ext = zext i16 %ld to i32
- %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
+ %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, i32 %ext)
ret <vscale x 4 x i32> %res
}
define <vscale x 2 x i64> @dup_ld1rd_i64_passthruundef_nxv2i64(<vscale x 2 x i1> %pg, ptr %addr) {
@@ -1083,7 +1083,7 @@ define <vscale x 2 x i64> @dup_ld1rd_i64_passthruundef_nxv2i64(<vscale x 2 x i1>
; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%ld = load i64, ptr %addr
- %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ld)
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, i64 %ld)
ret <vscale x 2 x i64> %res
}
define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, ptr %addr) {
@@ -1093,7 +1093,7 @@ define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_sext(<vscale x 2 x
; CHECK-NEXT: ret
%ld = load i8, ptr %addr
%ext = sext i8 %ld to i64
- %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, i64 %ext)
ret <vscale x 2 x i64> %res
}
define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, ptr %addr) {
@@ -1103,7 +1103,7 @@ define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_zext(<vscale x 2 x
; CHECK-NEXT: ret
%ld = load i8, ptr %addr
%ext = zext i8 %ld to i64
- %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, i64 %ext)
ret <vscale x 2 x i64> %res
}
define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, ptr %addr) {
@@ -1113,7 +1113,7 @@ define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_sext(<vscale x 2
; CHECK-NEXT: ret
%ld = load i16, ptr %addr
%ext = sext i16 %ld to i64
- %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, i64 %ext)
ret <vscale x 2 x i64> %res
}
define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, ptr %addr) {
@@ -1123,7 +1123,7 @@ define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_zext(<vscale x 2
; CHECK-NEXT: ret
%ld = load i16, ptr %addr
%ext = zext i16 %ld to i64
- %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, i64 %ext)
ret <vscale x 2 x i64> %res
}
define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, ptr %addr) {
@@ -1133,7 +1133,7 @@ define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_sext(<vscale x 2
; CHECK-NEXT: ret
%ld = load i32, ptr %addr
%ext = sext i32 %ld to i64
- %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, i64 %ext)
ret <vscale x 2 x i64> %res
}
define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, ptr %addr) {
@@ -1143,7 +1143,7 @@ define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_zext(<vscale x 2
; CHECK-NEXT: ret
%ld = load i32, ptr %addr
%ext = zext i32 %ld to i64
- %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
+ %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, i64 %ext)
ret <vscale x 2 x i64> %res
}
define <vscale x 8 x half> @dup_ld1rh_half_passthruundef_nxv8f16(<vscale x 8 x i1> %pg, ptr %addr) {
@@ -1158,7 +1158,7 @@ define <vscale x 8 x half> @dup_ld1rh_half_passthruundef_nxv8f16(<vscale x 8 x i
; CHECK-NO-LD1R-NEXT: mov z0.h, p0/m, h0
; CHECK-NO-LD1R-NEXT: ret
%ld = load half, ptr %addr
- %res = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %pg, half %ld)
+ %res = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x i1> %pg, half %ld)
ret <vscale x 8 x half> %res
}
define <vscale x 4 x float> @dup_ld1rs_float_passthruundef_nxv4f32(<vscale x 4 x i1> %pg, ptr %addr) {
@@ -1173,7 +1173,7 @@ define <vscale x 4 x float> @dup_ld1rs_float_passthruundef_nxv4f32(<vscale x 4 x
; CHECK-NO-LD1R-NEXT: mov z0.s, p0/m, s0
; CHECK-NO-LD1R-NEXT: ret
%ld = load float, ptr %addr
- %res = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %pg, float %ld)
+ %res = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x i1> %pg, float %ld)
ret <vscale x 4 x float> %res
}
define <vscale x 2 x double> @dup_ld1rd_double_passthruundef_nxv2f64(<vscale x 2 x i1> %pg, ptr %addr) {
@@ -1188,7 +1188,7 @@ define <vscale x 2 x double> @dup_ld1rd_double_passthruundef_nxv2f64(<vscale x 2
; CHECK-NO-LD1R-NEXT: mov z0.d, p0/m, d0
; CHECK-NO-LD1R-NEXT: ret
%ld = load double, ptr %addr
- %res = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %pg, double %ld)
+ %res = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x i1> %pg, double %ld)
ret <vscale x 2 x double> %res
}
define <vscale x 4 x half> @dup_ld1rh_half_passthruundef_nxv4f16(<vscale x 4 x i1> %pg, ptr %addr) {
@@ -1203,7 +1203,7 @@ define <vscale x 4 x half> @dup_ld1rh_half_passthruundef_nxv4f16(<vscale x 4 x i
; CHECK-NO-LD1R-NEXT: mov z0.h, p0/m, h0
; CHECK-NO-LD1R-NEXT: ret
%ld = load half, ptr %addr
- %res = call <vscale x 4 x half> @llvm.aarch64.sve.dup.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> %pg, half %ld)
+ %res = call <vscale x 4 x half> @llvm.aarch64.sve.dup.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x i1> %pg, half %ld)
ret <vscale x 4 x half> %res
}
define <vscale x 16 x i8> @dup_ld1rb_i8_passthruzero_nxv16i8(<vscale x 16 x i1> %pg, ptr %addr) {
@@ -1422,8 +1422,8 @@ define ptr @avoid_preindex_load(ptr %src, ptr %out) {
%ptr = getelementptr inbounds i8, ptr %src, i64 1
%tmp = load i8, ptr %ptr, align 4
%ext = sext i8 %tmp to i64
- %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
- %dup = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %ext, i32 0
+ %dup = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
store <vscale x 2 x i64> %dup, ptr %out
ret ptr %ptr
}
@@ -1441,7 +1441,7 @@ define ptr @avoid_preindex_load_dup(ptr %src, <vscale x 2 x i1> %pg, ptr %out) {
%ptr = getelementptr inbounds i8, ptr %src, i64 1
%tmp = load i8, ptr %ptr, align 4
%ext = sext i8 %tmp to i64
- %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
+ %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, i64 %ext)
store <vscale x 2 x i64> %dup, ptr %out
ret ptr %ptr
}
@@ -1494,8 +1494,8 @@ define ptr @preidx8sext64_instead_of_ld1r(ptr %src, ptr %out, ptr %dst) {
%ptr = getelementptr inbounds i8, ptr %src, i64 1
%tmp = load i8, ptr %ptr, align 4
%ext = sext i8 %tmp to i64
- %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
- %dup = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %ext, i32 0
+ %dup = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
store <vscale x 2 x i64> %dup, ptr %out
store i64 %ext, ptr %dst
ret ptr %ptr
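As context for the idiom ported throughout sve-ld1r.ll, here is a minimal sketch of the splat pattern after this change (the function and value names are illustrative, not taken from the test):
; Names below are illustrative. Only lane 0 of %ins is written, so the
; remaining lanes and the unused shuffle operand can be poison.
define <vscale x 2 x i64> @example_splat_of_load(ptr %p) {
  %v = load i64, ptr %p
  %ins = insertelement <vscale x 2 x i64> poison, i64 %v, i32 0
  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %splat
}
With SVE this is expected to select to an ld1rd broadcast load, as the CHECK lines above show for the equivalent tests.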
diff --git a/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll b/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
index 7edfe2e20e94c..c680f8942f9a8 100644
--- a/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
@@ -89,7 +89,7 @@ define void @masked_ld_st_nxv8i16(ptr %in, ptr %out, i64 %n) {
; IR-NEXT: [[UGLYGEP1:%.*]] = getelementptr i8, ptr [[IN:%.*]], i64 [[TMP0]]
; IR-NEXT: [[TMP1:%.*]] = shl i64 [[INDVAR]], 1
; IR-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[OUT:%.*]], i64 [[TMP1]]
-; IR-NEXT: [[VAL:%.*]] = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[UGLYGEP1]], i32 4, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> undef)
+; IR-NEXT: [[VAL:%.*]] = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[UGLYGEP1]], i32 4, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> poison)
; IR-NEXT: [[ADDP_VEC:%.*]] = add <vscale x 8 x i16> [[VAL]], splat (i16 3)
; IR-NEXT: call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[ADDP_VEC]], ptr [[UGLYGEP]], i32 4, <vscale x 8 x i1> splat (i1 true))
; IR-NEXT: [[INDVAR_NEXT]] = add nsw i64 [[INDVAR]], [[SCALED_VF]]
@@ -127,7 +127,7 @@ loop: ; preds = %loop, %loop.ph
%indvar = phi i64 [ 0, %loop.ph ], [ %indvar.next, %loop ]
%ptr.in = getelementptr inbounds i16, ptr %in, i64 %indvar
%ptr.out = getelementptr inbounds i16, ptr %out, i64 %indvar
- %val = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %ptr.in, i32 4, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> undef)
+ %val = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %ptr.in, i32 4, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> poison)
%addp_vec = add <vscale x 8 x i16> %val, splat (i16 3)
call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %addp_vec, ptr %ptr.out, i32 4, <vscale x 8 x i1> splat (i1 true))
%indvar.next = add nsw i64 %indvar, %scaled_vf
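The masked load/store idiom in this test follows the same pattern; a condensed, illustrative sketch with a poison passthrough is shown here (the function name and the declare are assumptions, not copied from the test):
; Illustrative only. With an all-true mask no lane reads the passthrough,
; so poison is the natural choice for that operand.
define <vscale x 8 x i16> @example_masked_load(ptr %p) {
  %v = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %p, i32 4, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> poison)
  ret <vscale x 8 x i16> %v
}
declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)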
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
index ef77a62aa0e8d..d90cb8fb28542 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
@@ -12,7 +12,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
; CHECK-NEXT: ret
%ptrs = getelementptr i16, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -23,7 +23,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
; CHECK-NEXT: ret
%ptrs = getelementptr i32, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -34,7 +34,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret
%ptrs = getelementptr i64, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
@@ -44,7 +44,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
; CHECK-NEXT: ret
%ptrs = getelementptr half, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -54,7 +54,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i3
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
; CHECK-NEXT: ret
%ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -64,7 +64,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
; CHECK-NEXT: ret
%ptrs = getelementptr float, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -74,7 +74,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i32
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret
%ptrs = getelementptr double, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals
}
@@ -84,7 +84,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw #1]
; CHECK-NEXT: ret
%ptrs = getelementptr i16, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -95,7 +95,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw #2]
; CHECK-NEXT: ret
%ptrs = getelementptr i32, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -110,7 +110,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i16(ptr %base, <vscale x 4 x i32> %
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
; CHECK-NEXT: ret
%ptrs = getelementptr i16, ptr %base, <vscale x 4 x i32> %offsets
- %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%vals.zext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.zext
}
@@ -121,7 +121,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32(ptr %base, <vscale x 4 x i32> %
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret
%ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets
- %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %vals
}
@@ -131,7 +131,7 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(ptr %base, <vscale x 4 x i32>
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
; CHECK-NEXT: ret
%ptrs = getelementptr half, ptr %base, <vscale x 4 x i32> %offsets
- %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+ %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> poison)
ret <vscale x 4 x half> %vals
}
@@ -141,7 +141,7 @@ define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(ptr %base, <vscale x 4 x i3
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
; CHECK-NEXT: ret
%ptrs = getelementptr bfloat, ptr %base, <vscale x 4 x i32> %offsets
- %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+ %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> poison)
ret <vscale x 4 x bfloat> %vals
}
@@ -151,7 +151,7 @@ define <vscale x 4 x float> @masked_gather_nxv4f32(ptr %base, <vscale x 4 x i32>
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret
%ptrs = getelementptr float, ptr %base, <vscale x 4 x i32> %offsets
- %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+ %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> poison)
ret <vscale x 4 x float> %vals
}
@@ -161,7 +161,7 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i16(ptr %base, <vscale x 4 x i32>
; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw #1]
; CHECK-NEXT: ret
%ptrs = getelementptr i16, ptr %base, <vscale x 4 x i32> %offsets
- %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%vals.sext = sext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.sext
}
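The gather tests above all share one shape; an illustrative reduction of it with the poison passthrough follows (identifiers are placeholders, not from the file):
; Illustrative only. Inactive lanes take the passthrough; with poison there
; the backend is free to zero them, which is what the predicated SVE loads do.
define <vscale x 2 x i64> @example_gather_zext(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i32> %offsets
  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
  %ext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}
declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)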
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
index 5e7c79b92dabc..1d903e07bef56 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
@@ -12,7 +12,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i8(ptr %base, <vscale x 2 x i32> %o
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -24,7 +24,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -36,7 +36,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -48,7 +48,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
@@ -59,7 +59,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -70,7 +70,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i3
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -81,7 +81,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -92,7 +92,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i32
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals
}
@@ -102,7 +102,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i8(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -114,7 +114,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -126,7 +126,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -141,7 +141,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i8(ptr %base, <vscale x 4 x i32> %o
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
- %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+ %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
%vals.zext = zext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.zext
}
@@ -153,7 +153,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i16(ptr %base, <vscale x 4 x i32> %
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%vals.zext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.zext
}
@@ -165,7 +165,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32(ptr %base, <vscale x 4 x i32> %
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %vals
}
@@ -176,7 +176,7 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(ptr %base, <vscale x 4 x i32>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+ %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> poison)
ret <vscale x 4 x half> %vals
}
@@ -187,7 +187,7 @@ define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(ptr %base, <vscale x 4 x i3
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+ %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> poison)
ret <vscale x 4 x bfloat> %vals
}
@@ -198,7 +198,7 @@ define <vscale x 4 x float> @masked_gather_nxv4f32(ptr %base, <vscale x 4 x i32>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+ %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> poison)
ret <vscale x 4 x float> %vals
}
@@ -208,7 +208,7 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i8(ptr %base, <vscale x 4 x i32> %
; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
- %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+ %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
%vals.sext = sext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.sext
}
@@ -220,7 +220,7 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i16(ptr %base, <vscale x 4 x i32>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%vals.sext = sext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.sext
}
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
index 895fda758748b..f239af3c14194 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
@@ -13,7 +13,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -25,7 +25,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -37,7 +37,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
@@ -48,7 +48,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -59,7 +59,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i3
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -70,7 +70,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr float, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -81,7 +81,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i32
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr double, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals
}
@@ -92,7 +92,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -104,7 +104,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i32>
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -120,7 +120,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i16(ptr %base, <vscale x 4 x i32> %
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%ptrs = getelementptr i16, ptr %base, <vscale x 4 x i64> %offsets.zext
- %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%vals.zext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.zext
}
@@ -132,7 +132,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32(ptr %base, <vscale x 4 x i32> %
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.zext
- %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %vals
}
@@ -143,7 +143,7 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(ptr %base, <vscale x 4 x i32>
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%ptrs = getelementptr half, ptr %base, <vscale x 4 x i64> %offsets.zext
- %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+ %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> poison)
ret <vscale x 4 x half> %vals
}
@@ -154,7 +154,7 @@ define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(ptr %base, <vscale x 4 x i3
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%ptrs = getelementptr bfloat, ptr %base, <vscale x 4 x i64> %offsets.zext
- %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+ %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> poison)
ret <vscale x 4 x bfloat> %vals
}
@@ -165,7 +165,7 @@ define <vscale x 4 x float> @masked_gather_nxv4f32(ptr %base, <vscale x 4 x i32>
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%ptrs = getelementptr float, ptr %base, <vscale x 4 x i64> %offsets.zext
- %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+ %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> poison)
ret <vscale x 4 x float> %vals
}
@@ -176,7 +176,7 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i16(ptr %base, <vscale x 4 x i32>
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%ptrs = getelementptr i16, ptr %base, <vscale x 4 x i64> %offsets.zext
- %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%vals.sext = sext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.sext
}
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
index f5e31c0cedd1e..5a031f016b057 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
@@ -13,7 +13,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i8(ptr %base, <vscale x 2 x i32> %o
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -26,7 +26,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -39,7 +39,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -52,7 +52,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
@@ -64,7 +64,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32>
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -76,7 +76,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i3
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -88,7 +88,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i32>
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -100,7 +100,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i32
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals
}
@@ -111,7 +111,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i8(ptr %base, <vscale x 2 x i32> %
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -124,7 +124,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i32>
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -137,7 +137,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i32>
%offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -153,7 +153,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i8(ptr %base, <vscale x 4 x i32> %o
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
- %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+ %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
%vals.zext = zext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.zext
}
@@ -166,7 +166,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i16(ptr %base, <vscale x 4 x i32> %
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%vals.zext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.zext
}
@@ -179,7 +179,7 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32(ptr %base, <vscale x 4 x i32> %
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %vals
}
@@ -191,7 +191,7 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(ptr %base, <vscale x 4 x i32>
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+ %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> poison)
ret <vscale x 4 x half> %vals
}
@@ -203,7 +203,7 @@ define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(ptr %base, <vscale x 4 x i3
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+ %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> poison)
ret <vscale x 4 x bfloat> %vals
}
@@ -215,7 +215,7 @@ define <vscale x 4 x float> @masked_gather_nxv4f32(ptr %base, <vscale x 4 x i32>
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+ %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> poison)
ret <vscale x 4 x float> %vals
}
@@ -226,7 +226,7 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i8(ptr %base, <vscale x 4 x i32> %
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
- %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+ %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
%vals.sext = sext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.sext
}
@@ -239,7 +239,7 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i16(ptr %base, <vscale x 4 x i32>
%offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
%ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
- %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%vals.sext = sext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vals.sext
}
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
index ce25b689c8d0e..624541766c957 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
@@ -8,7 +8,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i64> %
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
; CHECK-NEXT: ret
%ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -19,7 +19,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i64> %
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
; CHECK-NEXT: ret
%ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -30,7 +30,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i64> %
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
; CHECK-NEXT: ret
%ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
@@ -40,7 +40,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i64>
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
; CHECK-NEXT: ret
%ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -50,7 +50,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i6
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
; CHECK-NEXT: ret
%ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -60,7 +60,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i64>
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
; CHECK-NEXT: ret
%ptrs = getelementptr float, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -70,7 +70,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i64
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
; CHECK-NEXT: ret
%ptrs = getelementptr double, ptr %base, <vscale x 2 x i64> %offsets
- %vals.sext = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals.sext = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals.sext
}
@@ -80,7 +80,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i64>
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, z0.d, lsl #1]
; CHECK-NEXT: ret
%ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -91,7 +91,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i64>
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, z0.d, lsl #2]
; CHECK-NEXT: ret
%ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
index ba9be548660d4..0ed4dd1e4136e 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
@@ -8,7 +8,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i8(ptr %base, <vscale x 2 x i64> %o
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -20,7 +20,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i64> %
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -32,7 +32,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i64> %
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -44,7 +44,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i64> %
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
@@ -55,7 +55,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i64>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -66,7 +66,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i6
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -77,7 +77,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i64>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -88,7 +88,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i64
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals
}
@@ -98,7 +98,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i8(ptr %base, <vscale x 2 x i64> %
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -110,7 +110,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i64>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -122,7 +122,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i64>
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
index dfdfc456ccdba..5d58d8992694a 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
@@ -18,7 +18,7 @@ define <vscale x 2 x i64> @masked_sgather_sext(ptr %base, <vscale x 2 x i64> %of
; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d
; CHECK-NEXT: ret
%ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
- %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%data.sext = sext <vscale x 2 x i8> %data to <vscale x 2 x i64>
%add = add <vscale x 2 x i8> %data, %vals
%add.sext = sext <vscale x 2 x i8> %add to <vscale x 2 x i64>
@@ -37,7 +37,7 @@ define <vscale x 2 x i64> @masked_sgather_zext(ptr %base, <vscale x 2 x i64> %of
; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
- %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%data.zext = zext <vscale x 2 x i8> %data to <vscale x 2 x i64>
%add = add <vscale x 2 x i8> %data, %vals
%add.zext = zext <vscale x 2 x i8> %add to <vscale x 2 x i64>
@@ -53,7 +53,7 @@ define <vscale x 2 x i8> @masked_gather_nxv2i8(<vscale x 2 x ptr> %ptrs, <vscale
; CHECK: // %bb.0:
; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
ret <vscale x 2 x i8> %data
}
@@ -63,7 +63,7 @@ define <vscale x 2 x i16> @masked_gather_nxv2i16(<vscale x 2 x ptr> %ptrs, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %data = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %data = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
ret <vscale x 2 x i16> %data
}
@@ -73,7 +73,7 @@ define <vscale x 2 x i32> @masked_gather_nxv2i32(<vscale x 2 x ptr> %ptrs, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %data = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %data = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
ret <vscale x 2 x i32> %data
}
@@ -86,7 +86,7 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(<vscale x 4 x ptr> %ptrs, <vsc
; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: ret
- %data = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+ %data = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %mask, <vscale x 4 x half> poison)
ret <vscale x 4 x half> %data
}
@@ -98,7 +98,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i16>
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
; CHECK-NEXT: ret
%ptrs = getelementptr float, ptr %base, <vscale x 2 x i16> %indices
- %data = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %data = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %data
}
@@ -119,7 +119,7 @@ define <vscale x 8 x half> @masked_gather_nxv8f16(<vscale x 8 x ptr> %ptrs, <vsc
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h
; CHECK-NEXT: ret
- %data = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
+ %data = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> poison)
ret <vscale x 8 x half> %data
}
@@ -135,7 +135,7 @@ define <vscale x 8 x bfloat> @masked_gather_nxv8bf16(ptr %base, <vscale x 8 x i1
; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
; CHECK-NEXT: ret
%ptrs = getelementptr bfloat, ptr %base, <vscale x 8 x i16> %indices
- %data = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16(<vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
+ %data = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16(<vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> poison)
ret <vscale x 8 x bfloat> %data
}
@@ -153,7 +153,7 @@ define <vscale x 4 x double> @masked_gather_nxv4f64(ptr %base, <vscale x 4 x i16
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, z1.d, lsl #3]
; CHECK-NEXT: ret
%ptrs = getelementptr double, ptr %base, <vscale x 4 x i16> %indices
- %data = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
+ %data = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x double> poison)
ret <vscale x 4 x double> %data
}
@@ -167,7 +167,7 @@ define <vscale x 8 x float> @masked_gather_nxv8f32(ptr %base, <vscale x 8 x i32>
; CHECK-NEXT: ret
%offsets.zext = zext <vscale x 8 x i32> %offsets to <vscale x 8 x i64>
%ptrs = getelementptr float, ptr %base, <vscale x 8 x i64> %offsets.zext
- %vals = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
+ %vals = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> poison)
ret <vscale x 8 x float> %vals
}
@@ -196,7 +196,7 @@ define <vscale x 16 x i8> @masked_gather_nxv16i8(ptr %base, <vscale x 16 x i8> %
; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
; CHECK-NEXT: ret
%ptrs = getelementptr i8, ptr %base, <vscale x 16 x i8> %indices
- %data = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8(<vscale x 16 x ptr> %ptrs, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+ %data = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8(<vscale x 16 x ptr> %ptrs, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> poison)
ret <vscale x 16 x i8> %data
}
@@ -226,7 +226,7 @@ define <vscale x 32 x i32> @masked_gather_nxv32i32(ptr %base, <vscale x 32 x i32
; CHECK-NEXT: ld1w { z7.s }, p0/z, [x0, z7.s, sxtw #2]
; CHECK-NEXT: ret
%ptrs = getelementptr i32, ptr %base, <vscale x 32 x i32> %indices
- %data = call <vscale x 32 x i32> @llvm.masked.gather.nxv32i32(<vscale x 32 x ptr> %ptrs, i32 4, <vscale x 32 x i1> %mask, <vscale x 32 x i32> undef)
+ %data = call <vscale x 32 x i32> @llvm.masked.gather.nxv32i32(<vscale x 32 x ptr> %ptrs, i32 4, <vscale x 32 x i1> %mask, <vscale x 32 x i32> poison)
ret <vscale x 32 x i32> %data
}
@@ -245,7 +245,7 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x ptr> %ptrs, <vsca
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: sxtb z0.s, p0/m, z0.s
; CHECK-NEXT: ret
- %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+ %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
%svals = sext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
ret <vscale x 4 x i32> %svals
}
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
index 9e34beedf5458..14e96e32664a6 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
@@ -7,7 +7,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i8(<vscale x 2 x ptr> %bases, <vsca
; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d, #1]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i32 1
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -18,7 +18,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i16(<vscale x 2 x ptr> %bases, <vsc
; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d, #2]
; CHECK-NEXT: ret
%ptrs = getelementptr i16, <vscale x 2 x ptr> %bases, i32 1
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -29,7 +29,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i32(<vscale x 2 x ptr> %bases, <vsc
; CHECK-NEXT: ld1w { z0.d }, p0/z, [z0.d, #4]
; CHECK-NEXT: ret
%ptrs = getelementptr i32, <vscale x 2 x ptr> %bases, i32 1
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -40,7 +40,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64(<vscale x 2 x ptr> %bases, <vsc
; CHECK-NEXT: ld1d { z0.d }, p0/z, [z0.d, #8]
; CHECK-NEXT: ret
%ptrs = getelementptr i64, <vscale x 2 x ptr> %bases, i32 1
- %vals.zext = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals.zext = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals.zext
}
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x ptr> %bases, <vs
; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d, #4]
; CHECK-NEXT: ret
%ptrs = getelementptr half, <vscale x 2 x ptr> %bases, i32 2
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -60,7 +60,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(<vscale x 2 x ptr> %bases,
; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d, #4]
; CHECK-NEXT: ret
%ptrs = getelementptr bfloat, <vscale x 2 x ptr> %bases, i32 2
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -70,7 +70,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(<vscale x 2 x ptr> %bases, <v
; CHECK-NEXT: ld1w { z0.d }, p0/z, [z0.d, #12]
; CHECK-NEXT: ret
%ptrs = getelementptr float, <vscale x 2 x ptr> %bases, i32 3
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -80,7 +80,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x ptr> %bases, <
; CHECK-NEXT: ld1d { z0.d }, p0/z, [z0.d, #32]
; CHECK-NEXT: ret
%ptrs = getelementptr double, <vscale x 2 x ptr> %bases, i32 4
- %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals
}
@@ -90,7 +90,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i8(<vscale x 2 x ptr> %bases, <vsc
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z0.d, #5]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i32 5
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -101,7 +101,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i16(<vscale x 2 x ptr> %bases, <vs
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [z0.d, #12]
; CHECK-NEXT: ret
%ptrs = getelementptr i16, <vscale x 2 x ptr> %bases, i32 6
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -112,7 +112,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x ptr> %bases, <vs
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [z0.d, #28]
; CHECK-NEXT: ret
%ptrs = getelementptr i32, <vscale x 2 x ptr> %bases, i32 7
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -126,7 +126,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i8_range(<vscale x 2 x ptr> %bases,
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i32 32
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -138,7 +138,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16_range(<vscale x 2 x ptr> %base
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr half, <vscale x 2 x ptr> %bases, i32 32
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -149,7 +149,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16_range(<vscale x 2 x ptr> %b
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr bfloat, <vscale x 2 x ptr> %bases, i32 32
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -160,7 +160,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32_range(<vscale x 2 x ptr> %bas
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr float, <vscale x 2 x ptr> %bases, i32 32
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -171,7 +171,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64_range(<vscale x 2 x ptr> %ba
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr double, <vscale x 2 x ptr> %bases, i32 32
- %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals
}
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll
index 88afc09d6f75b..699afa7786bea 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll
@@ -7,7 +7,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i8(<vscale x 2 x ptr> %bases, i64 %
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -19,7 +19,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i16(<vscale x 2 x ptr> %bases, i64
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -31,7 +31,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i32(<vscale x 2 x ptr> %bases, i64
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -43,7 +43,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64(<vscale x 2 x ptr> %bases, i64
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
@@ -54,7 +54,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x ptr> %bases, i64
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -65,7 +65,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(<vscale x 2 x ptr> %bases,
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -76,7 +76,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(<vscale x 2 x ptr> %bases, i6
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -87,7 +87,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x ptr> %bases, i
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals
}
@@ -97,7 +97,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i8(<vscale x 2 x ptr> %bases, i64
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -109,7 +109,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i16(<vscale x 2 x ptr> %bases, i64
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -121,7 +121,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x ptr> %bases, i64
; CHECK-NEXT: ret
%byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
%ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather.ll
index 9f884d0022679..5a7865e92415f 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather.ll
@@ -6,7 +6,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i8(<vscale x 2 x ptr> %ptrs, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -16,7 +16,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i16(<vscale x 2 x ptr> %ptrs, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -26,7 +26,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i32(<vscale x 2 x ptr> %ptrs, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.zext
}
@@ -36,7 +36,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64(<vscale x 2 x ptr> %ptrs, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
@@ -45,7 +45,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x ptr> %ptrs, <vsc
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %vals
}
@@ -54,7 +54,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(<vscale x 2 x ptr> %ptrs, <
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %vals
}
@@ -63,7 +63,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(<vscale x 2 x ptr> %ptrs, <vs
; CHECK: // %bb.0:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %vals
}
@@ -72,7 +72,7 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x ptr> %ptrs, <v
; CHECK: // %bb.0:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %vals
}
@@ -81,7 +81,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i8(<vscale x 2 x ptr> %ptrs, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -91,7 +91,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i16(<vscale x 2 x ptr> %ptrs, <vsc
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -101,7 +101,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x ptr> %ptrs, <vsc
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
- %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
ret <vscale x 2 x i64> %vals.sext
}
@@ -135,7 +135,7 @@ define <vscale x 2 x i64> @masked_gather_non_power_of_two_based_scaling(ptr %bas
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr inbounds %i64_x3, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
@@ -147,7 +147,7 @@ define <vscale x 2 x i64> @masked_gather_non_element_type_based_scaling(ptr %bas
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ret
%ptrs = getelementptr inbounds %i64_x4, ptr %base, <vscale x 2 x i64> %offsets
- %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %vals
}
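
For reference, the gather idiom touched throughout this file is small enough to show in isolation. A minimal sketch with a hypothetical function name; only the passthrough operand differs between the old and new form:

define <vscale x 2 x i64> @gather_poison_passthru(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
  ; Inactive lanes take the passthrough value, so poison documents that no
  ; defined result is expected for them.
  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
  %ext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}

declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)

The zext of the narrow gather result is what lets instruction selection pick the extending ld1w form seen in the CHECK lines above.
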
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
index 14bb93a867e96..800bcc1231460 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
@@ -8,7 +8,7 @@ define <vscale x 2 x i64> @masked_load_nxv2i64(ptr %a, <vscale x 2 x i1> %mask)
; CHECK-LABEL: masked_load_nxv2i64:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %load
}
@@ -16,7 +16,7 @@ define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask)
; CHECK-LABEL: masked_load_nxv4i32:
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %load
}
@@ -24,7 +24,7 @@ define <vscale x 8 x i16> @masked_load_nxv8i16(ptr %a, <vscale x 8 x i1> %mask)
; CHECK-LABEL: masked_load_nxv8i16:
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+ %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> poison)
ret <vscale x 8 x i16> %load
}
@@ -32,7 +32,7 @@ define <vscale x 16 x i8> @masked_load_nxv16i8(ptr %a, <vscale x 16 x i1> %mask)
; CHECK-LABEL: masked_load_nxv16i8:
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+ %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> poison)
ret <vscale x 16 x i8> %load
}
@@ -40,7 +40,7 @@ define <vscale x 2 x double> @masked_load_nxv2f64(ptr %a, <vscale x 2 x i1> %mas
; CHECK-LABEL: masked_load_nxv2f64:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %load
}
@@ -48,7 +48,7 @@ define <vscale x 2 x float> @masked_load_nxv2f32(ptr %a, <vscale x 2 x i1> %mask
; CHECK-LABEL: masked_load_nxv2f32:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %load
}
@@ -56,7 +56,7 @@ define <vscale x 2 x half> @masked_load_nxv2f16(ptr %a, <vscale x 2 x i1> %mask)
; CHECK-LABEL: masked_load_nxv2f16:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+ %load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> poison)
ret <vscale x 2 x half> %load
}
@@ -64,7 +64,7 @@ define <vscale x 2 x bfloat> @masked_load_nxv2bf16(ptr %a, <vscale x 2 x i1> %ma
; CHECK-LABEL: masked_load_nxv2bf16:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x bfloat> @llvm.masked.load.nxv2bf16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+ %load = call <vscale x 2 x bfloat> @llvm.masked.load.nxv2bf16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> poison)
ret <vscale x 2 x bfloat> %load
}
@@ -72,7 +72,7 @@ define <vscale x 4 x float> @masked_load_nxv4f32(ptr %a, <vscale x 4 x i1> %mask
; CHECK-LABEL: masked_load_nxv4f32:
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+ %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> poison)
ret <vscale x 4 x float> %load
}
@@ -80,7 +80,7 @@ define <vscale x 4 x half> @masked_load_nxv4f16(ptr %a, <vscale x 4 x i1> %mask)
; CHECK-LABEL: masked_load_nxv4f16:
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+ %load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> poison)
ret <vscale x 4 x half> %load
}
@@ -88,7 +88,7 @@ define <vscale x 4 x bfloat> @masked_load_nxv4bf16(ptr %a, <vscale x 4 x i1> %ma
; CHECK-LABEL: masked_load_nxv4bf16:
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 4 x bfloat> @llvm.masked.load.nxv4bf16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+ %load = call <vscale x 4 x bfloat> @llvm.masked.load.nxv4bf16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> poison)
ret <vscale x 4 x bfloat> %load
}
@@ -96,7 +96,7 @@ define <vscale x 8 x half> @masked_load_nxv8f16(ptr %a, <vscale x 8 x i1> %mask)
; CHECK-LABEL: masked_load_nxv8f16:
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
+ %load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> poison)
ret <vscale x 8 x half> %load
}
@@ -104,7 +104,7 @@ define <vscale x 8 x bfloat> @masked_load_nxv8bf16(ptr %a, <vscale x 8 x i1> %ma
; CHECK-LABEL: masked_load_nxv8bf16:
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
+ %load = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> poison)
ret <vscale x 8 x bfloat> %load
}
@@ -122,7 +122,7 @@ define <vscale x 2 x i16> @masked_load_nxv2i16(ptr noalias %in, <vscale x 2 x i1
; CHECK-LABEL: masked_load_nxv2i16
; CHECK: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
ret <vscale x 2 x i16> %wide.load
}
@@ -244,28 +244,28 @@ define <vscale x 2 x ptr> @masked.load.nxv2p0i8(ptr %vector_ptr, <vscale x 2 x i
; CHECK-LABEL: masked.load.nxv2p0i8:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+ %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> poison)
ret <vscale x 2 x ptr> %v
}
define <vscale x 2 x ptr> @masked.load.nxv2p0i16(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked.load.nxv2p0i16:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+ %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> poison)
ret <vscale x 2 x ptr> %v
}
define <vscale x 2 x ptr> @masked.load.nxv2p0i32(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked.load.nxv2p0i32:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+ %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> poison)
ret <vscale x 2 x ptr> %v
}
define <vscale x 2 x ptr> @masked.load.nxv2p0i64(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked.load.nxv2p0i64:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+ %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> poison)
ret <vscale x 2 x ptr> %v
}
@@ -275,28 +275,28 @@ define <vscale x 2 x ptr> @masked.load.nxv2p0bf16(ptr %vector_ptr, <vscale x 2 x
; CHECK-LABEL: masked.load.nxv2p0bf16:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+ %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> poison)
ret <vscale x 2 x ptr> %v
}
define <vscale x 2 x ptr> @masked.load.nxv2p0f16(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked.load.nxv2p0f16:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+ %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> poison)
ret <vscale x 2 x ptr> %v
}
define <vscale x 2 x ptr> @masked.load.nxv2p0f32(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked.load.nxv2p0f32:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+ %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> poison)
ret <vscale x 2 x ptr> %v
}
define <vscale x 2 x ptr> @masked.load.nxv2p0f64(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked.load.nxv2p0f64:
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+ %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> poison)
ret <vscale x 2 x ptr> %v
}
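
The contiguous masked-load cases follow the same pattern. A minimal sketch with a hypothetical function name, assuming the predicated ld1w lowering shown in the CHECK lines of this file:

define <vscale x 4 x i32> @load_poison_passthru(ptr %a, <vscale x 4 x i1> %mask) {
  ; Passthrough is poison: the test only cares about the active lanes.
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
  ret <vscale x 4 x i32> %load
}

declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
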
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
index d397424cb162f..5277c2efab85d 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
@@ -10,7 +10,7 @@ define <vscale x 2 x i64> @masked_sload_nxv2i8(ptr %a, <vscale x 2 x i1> %mask)
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%ext = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -20,7 +20,7 @@ define <vscale x 2 x i64> @masked_sload_nxv2i16(ptr %a, <vscale x 2 x i1> %mask)
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%ext = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -30,7 +30,7 @@ define <vscale x 2 x i64> @masked_sload_nxv2i32(ptr %a, <vscale x 2 x i1> %mask)
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -40,7 +40,7 @@ define <vscale x 4 x i32> @masked_sload_nxv4i8(ptr %a, <vscale x 4 x i1> %mask)
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+ %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
%ext = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -50,7 +50,7 @@ define <vscale x 4 x i32> @masked_sload_nxv4i16(ptr %a, <vscale x 4 x i1> %mask)
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%ext = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -60,7 +60,7 @@ define <vscale x 8 x i16> @masked_sload_nxv8i8(ptr %a, <vscale x 8 x i1> %mask)
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
+ %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> poison)
%ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
ret <vscale x 8 x i16> %ext
}
@@ -90,7 +90,7 @@ define <vscale x 16 x i32> @masked_sload_nxv16i8(ptr %a, <vscale x 16 x i1> %mas
; CHECK-NEXT: sunpklo z2.s, z3.h
; CHECK-NEXT: sunpkhi z3.s, z3.h
; CHECK-NEXT: ret
- %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+ %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> poison)
%ext = sext <vscale x 16 x i8> %load to <vscale x 16 x i32>
ret <vscale x 16 x i32> %ext
}
@@ -106,7 +106,7 @@ define <vscale x 4 x double> @masked_sload_4i8_4f32(ptr noalias %in, <vscale x 4
; CHECK-NEXT: scvtf z0.d, p1/m, z0.d
; CHECK-NEXT: scvtf z1.d, p1/m, z1.d
; CHECK-NEXT: ret
- %wide.load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %in, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+ %wide.load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %in, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
%sext = sext <vscale x 4 x i8> %wide.load to <vscale x 4 x i64>
%res = sitofp <vscale x 4 x i64> %sext to <vscale x 4 x double>
ret <vscale x 4 x double> %res
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
index 17fa13e4e8a8b..f69ab0de06e08 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
@@ -10,7 +10,7 @@ define <vscale x 2 x i64> @masked_zload_nxv2i8(ptr %src, <vscale x 2 x i1> %mask
; CHECK: // %bb.0:
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
%ext = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -20,7 +20,7 @@ define <vscale x 2 x i64> @masked_zload_nxv2i16(ptr %src, <vscale x 2 x i1> %mas
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%ext = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -30,7 +30,7 @@ define <vscale x 2 x i64> @masked_zload_nxv2i32(ptr %src, <vscale x 2 x i1> %mas
; CHECK: // %bb.0:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
%ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -40,7 +40,7 @@ define <vscale x 4 x i32> @masked_zload_nxv4i8(ptr %src, <vscale x 4 x i1> %mask
; CHECK: // %bb.0:
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+ %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
%ext = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -50,7 +50,7 @@ define <vscale x 4 x i32> @masked_zload_nxv4i16(ptr %src, <vscale x 4 x i1> %mas
; CHECK: // %bb.0:
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
%ext = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -60,7 +60,7 @@ define <vscale x 8 x i16> @masked_zload_nxv8i8(ptr %src, <vscale x 8 x i1> %mask
; CHECK: // %bb.0:
; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %src, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
+ %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %src, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> poison)
%ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
ret <vscale x 8 x i16> %ext
}
@@ -89,7 +89,7 @@ define <vscale x 8 x i64> @masked_zload_nxv8i16(ptr %a, <vscale x 8 x i1> %mask)
; CHECK-NEXT: uunpklo z2.d, z3.s
; CHECK-NEXT: uunpkhi z3.d, z3.s
; CHECK-NEXT: ret
- %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+ %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> poison)
%ext = zext <vscale x 8 x i16> %load to <vscale x 8 x i64>
ret <vscale x 8 x i64> %ext
}
@@ -102,7 +102,7 @@ define <vscale x 2 x double> @masked_zload_2i16_2f64(ptr noalias %in, <vscale x
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ucvtf z0.d, p0/m, z0.d
; CHECK-NEXT: ret
- %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
%zext = zext <vscale x 2 x i16> %wide.load to <vscale x 2 x i32>
%res = uitofp <vscale x 2 x i32> %zext to <vscale x 2 x double>
ret <vscale x 2 x double> %res
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
index 94e525d22b825..da541158276d3 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
@@ -83,10 +83,7 @@ define void @masked_scatter_splat_constant_pointer (<vscale x 4 x i1> %pg) {
; CHECK-NEXT: st1w { z0.d }, p0, [z0.d]
; CHECK-NEXT: ret
vector.body:
- call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> undef,
- <vscale x 4 x ptr> shufflevector (<vscale x 4 x ptr> insertelement (<vscale x 4 x ptr> poison, ptr null, i32 0), <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer),
- i32 4,
- <vscale x 4 x i1> %pg)
+ call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> undef, <vscale x 4 x ptr> zeroinitializer, i32 4, <vscale x 4 x i1> %pg)
ret void
}
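
The scalable-vector splat idiom ported elsewhere in this patch (see the sve-pred-selectop.ll hunks below) can also be shown standalone. A minimal sketch, hypothetical function name:

define <vscale x 4 x i32> @splat_from_poison(i32 %v) {
  ; Insert the scalar into lane 0 of a poison vector, then broadcast lane 0.
  %ins = insertelement <vscale x 4 x i32> poison, i32 %v, i32 0
  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x i32> %splat
}

Both the vector the scalar is inserted into and the second shufflevector operand are never read apart from providing a type, so poison is the natural choice for them.
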
diff --git a/llvm/test/CodeGen/AArch64/sve-nontemporal-masked-ldst.ll b/llvm/test/CodeGen/AArch64/sve-nontemporal-masked-ldst.ll
index bcb878ad744bb..390f5c21f7b44 100644
--- a/llvm/test/CodeGen/AArch64/sve-nontemporal-masked-ldst.ll
+++ b/llvm/test/CodeGen/AArch64/sve-nontemporal-masked-ldst.ll
@@ -12,7 +12,7 @@ define <4 x i32> @masked_load_v4i32(ptr %a, <4 x i1> %mask) nounwind {
; CHECK-NEXT: ldnt1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
- %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 1, <4 x i1> %mask, <4 x i32> undef), !nontemporal !0
+ %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 1, <4 x i1> %mask, <4 x i32> poison), !nontemporal !0
ret <4 x i32> %load
}
@@ -36,7 +36,7 @@ define <4 x i32> @load_v4i32(ptr %a) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ret
- %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 1, <4 x i1> <i1 1, i1 1, i1 1, i1 1>, <4 x i32> undef), !nontemporal !0
+ %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 1, <4 x i1> <i1 1, i1 1, i1 1, i1 1>, <4 x i32> poison), !nontemporal !0
ret <4 x i32> %load
}
@@ -55,7 +55,7 @@ define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask)
; CHECK: // %bb.0:
; CHECK-NEXT: ldnt1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef), !nontemporal !0
+ %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison), !nontemporal !0
ret <vscale x 4 x i32> %load
}
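
The nontemporal variants only differ in the metadata attached to the call; the passthrough change itself is identical. A minimal sketch, hypothetical function name:

define <vscale x 4 x i32> @nt_load_poison_passthru(ptr %a, <vscale x 4 x i1> %mask) {
  ; !nontemporal steers selection towards ldnt1w; the poison passthrough is
  ; unrelated to the metadata.
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison), !nontemporal !0
  ret <vscale x 4 x i32> %load
}

declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)

!0 = !{i32 1}
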
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
index 0ac8a946ec859..0c11cbc500640 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
@@ -20,7 +20,7 @@ define void @imm_out_of_range(ptr %base, <vscale x 2 x i1> %mask) nounwind {
%data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i64> undef)
+ <vscale x 2 x i64> poison)
%base_store = getelementptr <vscale x 2 x i64>, ptr %base, i64 -9
call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %data,
ptr %base_store,
@@ -41,7 +41,7 @@ define void @test_masked_ldst_sv2i8(ptr %base, <vscale x 2 x i1> %mask) nounwind
%data = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i8> undef)
+ <vscale x 2 x i8> poison)
%base_store = getelementptr <vscale x 2 x i8>, ptr %base, i64 -7
call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data,
ptr %base_store,
@@ -60,7 +60,7 @@ define void @test_masked_ldst_sv2i16(ptr %base, <vscale x 2 x i1> %mask) nounwin
%data = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i16> undef)
+ <vscale x 2 x i16> poison)
%base_store = getelementptr <vscale x 2 x i16>, ptr %base, i64 -7
call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %data,
ptr %base_store,
@@ -80,7 +80,7 @@ define void @test_masked_ldst_sv2i32(ptr %base, <vscale x 2 x i1> %mask) nounwin
%data = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i32> undef)
+ <vscale x 2 x i32> poison)
%base_store = getelementptr <vscale x 2 x i32>, ptr %base, i64 -7
call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %data,
ptr %base_store,
@@ -99,7 +99,7 @@ define void @test_masked_ldst_sv2i64(ptr %base, <vscale x 2 x i1> %mask) nounwin
%data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i64> undef)
+ <vscale x 2 x i64> poison)
%base_store = getelementptr <vscale x 2 x i64>, ptr %base, i64 -7
call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %data,
ptr %base_store,
@@ -118,7 +118,7 @@ define void @test_masked_ldst_sv2f16(ptr %base, <vscale x 2 x i1> %mask) nounwin
%data = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x half> undef)
+ <vscale x 2 x half> poison)
%base_store = getelementptr <vscale x 2 x half>, ptr %base, i64 -7
call void @llvm.masked.store.nxv2f16(<vscale x 2 x half> %data,
ptr %base_store,
@@ -138,7 +138,7 @@ define void @test_masked_ldst_sv2f32(ptr %base, <vscale x 2 x i1> %mask) nounwin
%data = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x float> undef)
+ <vscale x 2 x float> poison)
%base_store = getelementptr <vscale x 2 x float>, ptr %base, i64 -7
call void @llvm.masked.store.nxv2f32(<vscale x 2 x float> %data,
ptr %base_store,
@@ -157,7 +157,7 @@ define void @test_masked_ldst_sv2f64(ptr %base, <vscale x 2 x i1> %mask) nounwin
%data = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x double> undef)
+ <vscale x 2 x double> poison)
%base_store = getelementptr <vscale x 2 x double>, ptr %base, i64 -5
call void @llvm.masked.store.nxv2f64(<vscale x 2 x double> %data,
ptr %base_store,
@@ -177,7 +177,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x
%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i8> undef)
+ <vscale x 2 x i8> poison)
%ext = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -191,7 +191,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x
%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i8> undef)
+ <vscale x 2 x i8> poison)
%ext = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -205,7 +205,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(ptr %base, <vscale x 2
%load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i16> undef)
+ <vscale x 2 x i16> poison)
%ext = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -219,7 +219,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(ptr %base, <vscale x 2
%load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i16> undef)
+ <vscale x 2 x i16> poison)
%ext = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -233,7 +233,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(ptr %base, <vscale x 2
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i32> undef)
+ <vscale x 2 x i32> poison)
%ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -247,7 +247,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(ptr %base, <vscale x 2
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_load,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i32> undef)
+ <vscale x 2 x i32> poison)
%ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -309,7 +309,7 @@ define void @test_masked_ldst_sv4i8(ptr %base, <vscale x 4 x i1> %mask) nounwind
%data = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_load,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i8> undef)
+ <vscale x 4 x i8> poison)
%base_store = getelementptr <vscale x 4 x i8>, ptr %base, i64 2
call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %data,
ptr %base_store,
@@ -328,7 +328,7 @@ define void @test_masked_ldst_sv4i16(ptr %base, <vscale x 4 x i1> %mask) nounwin
%data = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_load,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i16> undef)
+ <vscale x 4 x i16> poison)
%base_store = getelementptr <vscale x 4 x i16>, ptr %base, i64 2
call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %data,
ptr %base_store,
@@ -347,7 +347,7 @@ define void @test_masked_ldst_sv4i32(ptr %base, <vscale x 4 x i1> %mask) nounwin
%data = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %base_load,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i32> undef)
+ <vscale x 4 x i32> poison)
%base_store = getelementptr <vscale x 4 x i32>, ptr %base, i64 7
call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %data,
ptr %base_store,
@@ -366,7 +366,7 @@ define void @test_masked_ldst_sv4f16(ptr %base, <vscale x 4 x i1> %mask) nounwin
%data = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %base_load,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x half> undef)
+ <vscale x 4 x half> poison)
%base_store = getelementptr <vscale x 4 x half>, ptr %base, i64 2
call void @llvm.masked.store.nxv4f16(<vscale x 4 x half> %data,
ptr %base_store,
@@ -385,7 +385,7 @@ define void @test_masked_ldst_sv4f32(ptr %base, <vscale x 4 x i1> %mask) nounwin
%data = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %base_load,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x float> undef)
+ <vscale x 4 x float> poison)
%base_store = getelementptr <vscale x 4 x float>, ptr %base, i64 2
call void @llvm.masked.store.nxv4f32(<vscale x 4 x float> %data,
ptr %base_store,
@@ -405,7 +405,7 @@ define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x
%load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_load,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i8> undef)
+ <vscale x 4 x i8> poison)
%ext = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -419,7 +419,7 @@ define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x
%load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_load,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i8> undef)
+ <vscale x 4 x i8> poison)
%ext = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -433,7 +433,7 @@ define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(ptr %base, <vscale x 4
%load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_load,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i16> undef)
+ <vscale x 4 x i16> poison)
%ext = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -447,7 +447,7 @@ define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(ptr %base, <vscale x 4
%load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_load,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i16> undef)
+ <vscale x 4 x i16> poison)
%ext = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -495,7 +495,7 @@ define void @test_masked_ldst_sv8i8(ptr %base, <vscale x 8 x i1> %mask) nounwind
%data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_load,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x i8> undef)
+ <vscale x 8 x i8> poison)
%base_store = getelementptr <vscale x 8 x i8>, ptr %base, i64 7
call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %data,
ptr %base_store,
@@ -514,7 +514,7 @@ define void @test_masked_ldst_sv8i16(ptr %base, <vscale x 8 x i1> %mask) nounwin
%data = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %base_load,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x i16> undef)
+ <vscale x 8 x i16> poison)
%base_store = getelementptr <vscale x 8 x i16>, ptr %base, i64 7
call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %data,
ptr %base_store,
@@ -533,7 +533,7 @@ define void @test_masked_ldst_sv8f16(ptr %base, <vscale x 8 x i1> %mask) nounwin
%data = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %base_load,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x half> undef)
+ <vscale x 8 x half> poison)
%base_store = getelementptr <vscale x 8 x half>, ptr %base, i64 2
call void @llvm.masked.store.nxv8f16(<vscale x 8 x half> %data,
ptr %base_store,
@@ -552,7 +552,7 @@ define void @test_masked_ldst_sv8bf16(ptr %base, <vscale x 8 x i1> %mask) nounwi
%data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr %base_load,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x bfloat> undef)
+ <vscale x 8 x bfloat> poison)
%base_store = getelementptr <vscale x 8 x bfloat>, ptr %base, i64 2
call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %data,
ptr %base_store,
@@ -572,7 +572,7 @@ define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x
%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_load,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x i8> undef)
+ <vscale x 8 x i8> poison)
%ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
ret <vscale x 8 x i16> %ext
}
@@ -586,7 +586,7 @@ define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x
%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_load,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x i8> undef)
+ <vscale x 8 x i8> poison)
%ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
ret <vscale x 8 x i16> %ext
}
@@ -619,7 +619,7 @@ define void @test_masked_ldst_sv16i8(ptr %base, <vscale x 16 x i1> %mask) nounwi
%data = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %base_load,
i32 1,
<vscale x 16 x i1> %mask,
- <vscale x 16 x i8> undef)
+ <vscale x 16 x i8> poison)
%base_store = getelementptr <vscale x 16 x i8>, ptr %base, i64 7
call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %data,
ptr %base_store,
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
index 32f43a8c9ff79..bc59e3b641cb8 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
@@ -11,7 +11,7 @@ define void @test_masked_ldst_sv2i8(ptr %base, <vscale x 2 x i1> %mask, i64 %off
%data = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_i8,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i8> undef)
+ <vscale x 2 x i8> poison)
call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data,
ptr %base_i8,
i32 1,
@@ -28,7 +28,7 @@ define void @test_masked_ldst_sv2i16(ptr %base, <vscale x 2 x i1> %mask, i64 %of
%data = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_i16,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i16> undef)
+ <vscale x 2 x i16> poison)
call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %data,
ptr %base_i16,
i32 1,
@@ -45,7 +45,7 @@ define void @test_masked_ldst_sv2i32(ptr %base, <vscale x 2 x i1> %mask, i64 %of
%data = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_i32,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i32> undef)
+ <vscale x 2 x i32> poison)
call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %data,
ptr %base_i32,
i32 1,
@@ -62,7 +62,7 @@ define void @test_masked_ldst_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %of
%data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %base_i64,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i64> undef)
+ <vscale x 2 x i64> poison)
call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %data,
ptr %base_i64,
i32 1,
@@ -79,7 +79,7 @@ define void @test_masked_ldst_sv2f16(ptr %base, <vscale x 2 x i1> %mask, i64 %of
%data = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %base_half,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x half> undef)
+ <vscale x 2 x half> poison)
call void @llvm.masked.store.nxv2f16(<vscale x 2 x half> %data,
ptr %base_half,
i32 1,
@@ -96,7 +96,7 @@ define void @test_masked_ldst_sv2f32(ptr %base, <vscale x 2 x i1> %mask, i64 %of
%data = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %base_float,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x float> undef)
+ <vscale x 2 x float> poison)
call void @llvm.masked.store.nxv2f32(<vscale x 2 x float> %data,
ptr %base_float,
i32 1,
@@ -113,7 +113,7 @@ define void @test_masked_ldst_sv2f64(ptr %base, <vscale x 2 x i1> %mask, i64 %of
%data = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %base_double,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x double> undef)
+ <vscale x 2 x double> poison)
call void @llvm.masked.store.nxv2f64(<vscale x 2 x double> %data,
ptr %base_double,
i32 1,
@@ -131,7 +131,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x
%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_i8,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i8> undef)
+ <vscale x 2 x i8> poison)
%ext = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -144,7 +144,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x
%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_i8,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i8> undef)
+ <vscale x 2 x i8> poison)
%ext = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -157,7 +157,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(ptr %base, <vscale x 2
%load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_i16,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i16> undef)
+ <vscale x 2 x i16> poison)
%ext = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -170,7 +170,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(ptr %base, <vscale x 2
%load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_i16,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i16> undef)
+ <vscale x 2 x i16> poison)
%ext = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -184,7 +184,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(ptr %base, <vscale x 2
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_i32,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i32> undef)
+ <vscale x 2 x i32> poison)
%ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -197,7 +197,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(ptr %base, <vscale x 2
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_i32,
i32 1,
<vscale x 2 x i1> %mask,
- <vscale x 2 x i32> undef)
+ <vscale x 2 x i32> poison)
%ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}
@@ -254,7 +254,7 @@ define void @test_masked_ldst_sv4i8(ptr %base, <vscale x 4 x i1> %mask, i64 %off
%data = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_i8,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i8> undef)
+ <vscale x 4 x i8> poison)
call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %data,
ptr %base_i8,
i32 1,
@@ -271,7 +271,7 @@ define void @test_masked_ldst_sv4i16(ptr %base, <vscale x 4 x i1> %mask, i64 %of
%data = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_i16,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i16> undef)
+ <vscale x 4 x i16> poison)
call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %data,
ptr %base_i16,
i32 1,
@@ -288,7 +288,7 @@ define void @test_masked_ldst_sv4i32(ptr %base, <vscale x 4 x i1> %mask, i64 %of
%data = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %base_i32,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i32> undef)
+ <vscale x 4 x i32> poison)
call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %data,
ptr %base_i32,
i32 1,
@@ -305,7 +305,7 @@ define void @test_masked_ldst_sv4f16(ptr %base, <vscale x 4 x i1> %mask, i64 %of
%data = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %base_f16,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x half> undef)
+ <vscale x 4 x half> poison)
call void @llvm.masked.store.nxv4f16(<vscale x 4 x half> %data,
ptr %base_f16,
i32 1,
@@ -322,7 +322,7 @@ define void @test_masked_ldst_sv4f32(ptr %base, <vscale x 4 x i1> %mask, i64 %of
%data = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %base_f32,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x float> undef)
+ <vscale x 4 x float> poison)
call void @llvm.masked.store.nxv4f32(<vscale x 4 x float> %data,
ptr %base_f32,
i32 1,
@@ -340,7 +340,7 @@ define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x
%load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_i8,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i8> undef)
+ <vscale x 4 x i8> poison)
%ext = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -353,7 +353,7 @@ define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x
%load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_i8,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i8> undef)
+ <vscale x 4 x i8> poison)
%ext = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -366,7 +366,7 @@ define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(ptr %base, <vscale x 4
%load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_i16,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i16> undef)
+ <vscale x 4 x i16> poison)
%ext = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -379,7 +379,7 @@ define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(ptr %base, <vscale x 4
%load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_i16,
i32 1,
<vscale x 4 x i1> %mask,
- <vscale x 4 x i16> undef)
+ <vscale x 4 x i16> poison)
%ext = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}
@@ -423,7 +423,7 @@ define void @test_masked_ldst_sv8i8(ptr %base, <vscale x 8 x i1> %mask, i64 %off
%data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_i8,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x i8> undef)
+ <vscale x 8 x i8> poison)
call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %data,
ptr %base_i8,
i32 1,
@@ -440,7 +440,7 @@ define void @test_masked_ldst_sv8i16(ptr %base, <vscale x 8 x i1> %mask, i64 %of
%data = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %base_i16,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x i16> undef)
+ <vscale x 8 x i16> poison)
call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %data,
ptr %base_i16,
i32 1,
@@ -457,7 +457,7 @@ define void @test_masked_ldst_sv8f16(ptr %base, <vscale x 8 x i1> %mask, i64 %of
%data = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %base_f16,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x half> undef)
+ <vscale x 8 x half> poison)
call void @llvm.masked.store.nxv8f16(<vscale x 8 x half> %data,
ptr %base_f16,
i32 1,
@@ -474,7 +474,7 @@ define void @test_masked_ldst_sv8bf16(ptr %base, <vscale x 8 x i1> %mask, i64 %o
%data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr %base_f16,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x bfloat> undef)
+ <vscale x 8 x bfloat> poison)
call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %data,
ptr %base_f16,
i32 1,
@@ -492,7 +492,7 @@ define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x
%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_i8,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x i8> undef)
+ <vscale x 8 x i8> poison)
%ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
ret <vscale x 8 x i16> %ext
}
@@ -505,7 +505,7 @@ define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x
%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_i8,
i32 1,
<vscale x 8 x i1> %mask,
- <vscale x 8 x i8> undef)
+ <vscale x 8 x i8> poison)
%ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
ret <vscale x 8 x i16> %ext
}
@@ -536,7 +536,7 @@ define void @test_masked_ldst_sv16i8(ptr %base, <vscale x 16 x i1> %mask, i64 %o
%data = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %base_i8,
i32 1,
<vscale x 16 x i1> %mask,
- <vscale x 16 x i8> undef)
+ <vscale x 16 x i8> poison)
call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %data,
ptr %base_i8,
i32 1,
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-selectop.ll b/llvm/test/CodeGen/AArch64/sve-pred-selectop.ll
index 8438e9d88f5de..30ec2de2bd9cc 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-selectop.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-selectop.ll
@@ -910,8 +910,8 @@ define <vscale x 4 x i32> @addqr_v4i32(<vscale x 4 x i32> %z, <vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 4 x i32> %z, zeroinitializer
- %i = insertelement <vscale x 4 x i32> undef, i32 %y, i32 0
- %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x i32> poison, i32 %y, i32 0
+ %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%a = add <vscale x 4 x i32> %x, %ys
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %z
ret <vscale x 4 x i32> %b
@@ -928,8 +928,8 @@ define <vscale x 8 x i16> @addqr_v8i16(<vscale x 8 x i16> %z, <vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 8 x i16> %z, zeroinitializer
- %i = insertelement <vscale x 8 x i16> undef, i16 %y, i32 0
- %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x i16> poison, i16 %y, i32 0
+ %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%a = add <vscale x 8 x i16> %x, %ys
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %z
ret <vscale x 8 x i16> %b
@@ -946,8 +946,8 @@ define <vscale x 16 x i8> @addqr_v16i8(<vscale x 16 x i8> %z, <vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 16 x i8> %z, zeroinitializer
- %i = insertelement <vscale x 16 x i8> undef, i8 %y, i32 0
- %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %i = insertelement <vscale x 16 x i8> poison, i8 %y, i32 0
+ %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%a = add <vscale x 16 x i8> %x, %ys
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %z
ret <vscale x 16 x i8> %b
@@ -964,8 +964,8 @@ define <vscale x 4 x i32> @subqr_v4i32(<vscale x 4 x i32> %z, <vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 4 x i32> %z, zeroinitializer
- %i = insertelement <vscale x 4 x i32> undef, i32 %y, i32 0
- %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x i32> poison, i32 %y, i32 0
+ %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%a = sub <vscale x 4 x i32> %x, %ys
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %z
ret <vscale x 4 x i32> %b
@@ -982,8 +982,8 @@ define <vscale x 8 x i16> @subqr_v8i16(<vscale x 8 x i16> %z, <vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 8 x i16> %z, zeroinitializer
- %i = insertelement <vscale x 8 x i16> undef, i16 %y, i32 0
- %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x i16> poison, i16 %y, i32 0
+ %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%a = sub <vscale x 8 x i16> %x, %ys
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %z
ret <vscale x 8 x i16> %b
@@ -1000,8 +1000,8 @@ define <vscale x 16 x i8> @subqr_v16i8(<vscale x 16 x i8> %z, <vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 16 x i8> %z, zeroinitializer
- %i = insertelement <vscale x 16 x i8> undef, i8 %y, i32 0
- %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %i = insertelement <vscale x 16 x i8> poison, i8 %y, i32 0
+ %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%a = sub <vscale x 16 x i8> %x, %ys
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %z
ret <vscale x 16 x i8> %b
@@ -1018,8 +1018,8 @@ define <vscale x 4 x i32> @mulqr_v4i32(<vscale x 4 x i32> %z, <vscale x 4 x i32>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 4 x i32> %z, zeroinitializer
- %i = insertelement <vscale x 4 x i32> undef, i32 %y, i32 0
- %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x i32> poison, i32 %y, i32 0
+ %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%a = mul <vscale x 4 x i32> %x, %ys
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %z
ret <vscale x 4 x i32> %b
@@ -1036,8 +1036,8 @@ define <vscale x 8 x i16> @mulqr_v8i16(<vscale x 8 x i16> %z, <vscale x 8 x i16>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 8 x i16> %z, zeroinitializer
- %i = insertelement <vscale x 8 x i16> undef, i16 %y, i32 0
- %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x i16> poison, i16 %y, i32 0
+ %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%a = mul <vscale x 8 x i16> %x, %ys
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %z
ret <vscale x 8 x i16> %b
@@ -1054,8 +1054,8 @@ define <vscale x 16 x i8> @mulqr_v16i8(<vscale x 16 x i8> %z, <vscale x 16 x i8>
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 16 x i8> %z, zeroinitializer
- %i = insertelement <vscale x 16 x i8> undef, i8 %y, i32 0
- %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %i = insertelement <vscale x 16 x i8> poison, i8 %y, i32 0
+ %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%a = mul <vscale x 16 x i8> %x, %ys
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %z
ret <vscale x 16 x i8> %b
@@ -1073,8 +1073,8 @@ define <vscale x 4 x float> @faddqr_v4f32(<vscale x 4 x float> %z, <vscale x 4 x
; CHECK-NEXT: ret
entry:
%c = fcmp oeq <vscale x 4 x float> %z, zeroinitializer
- %i = insertelement <vscale x 4 x float> undef, float %y, i32 0
- %ys = shufflevector <vscale x 4 x float> %i, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x float> poison, float %y, i32 0
+ %ys = shufflevector <vscale x 4 x float> %i, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%a = fadd <vscale x 4 x float> %x, %ys
%b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %z
ret <vscale x 4 x float> %b
@@ -1092,8 +1092,8 @@ define <vscale x 8 x half> @faddqr_v8f16(<vscale x 8 x half> %z, <vscale x 8 x h
; CHECK-NEXT: ret
entry:
%c = fcmp oeq <vscale x 8 x half> %z, zeroinitializer
- %i = insertelement <vscale x 8 x half> undef, half %y, i32 0
- %ys = shufflevector <vscale x 8 x half> %i, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x half> poison, half %y, i32 0
+ %ys = shufflevector <vscale x 8 x half> %i, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%a = fadd <vscale x 8 x half> %x, %ys
%b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %z
ret <vscale x 8 x half> %b
@@ -1111,8 +1111,8 @@ define <vscale x 4 x float> @fsubqr_v4f32(<vscale x 4 x float> %z, <vscale x 4 x
; CHECK-NEXT: ret
entry:
%c = fcmp oeq <vscale x 4 x float> %z, zeroinitializer
- %i = insertelement <vscale x 4 x float> undef, float %y, i32 0
- %ys = shufflevector <vscale x 4 x float> %i, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x float> poison, float %y, i32 0
+ %ys = shufflevector <vscale x 4 x float> %i, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%a = fsub <vscale x 4 x float> %x, %ys
%b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %z
ret <vscale x 4 x float> %b
@@ -1130,8 +1130,8 @@ define <vscale x 8 x half> @fsubqr_v8f16(<vscale x 8 x half> %z, <vscale x 8 x h
; CHECK-NEXT: ret
entry:
%c = fcmp oeq <vscale x 8 x half> %z, zeroinitializer
- %i = insertelement <vscale x 8 x half> undef, half %y, i32 0
- %ys = shufflevector <vscale x 8 x half> %i, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x half> poison, half %y, i32 0
+ %ys = shufflevector <vscale x 8 x half> %i, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%a = fsub <vscale x 8 x half> %x, %ys
%b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %z
ret <vscale x 8 x half> %b
@@ -1149,8 +1149,8 @@ define <vscale x 4 x float> @fmulqr_v4f32(<vscale x 4 x float> %z, <vscale x 4 x
; CHECK-NEXT: ret
entry:
%c = fcmp oeq <vscale x 4 x float> %z, zeroinitializer
- %i = insertelement <vscale x 4 x float> undef, float %y, i32 0
- %ys = shufflevector <vscale x 4 x float> %i, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x float> poison, float %y, i32 0
+ %ys = shufflevector <vscale x 4 x float> %i, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
%a = fmul <vscale x 4 x float> %x, %ys
%b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %z
ret <vscale x 4 x float> %b
@@ -1168,8 +1168,8 @@ define <vscale x 8 x half> @fmulqr_v8f16(<vscale x 8 x half> %z, <vscale x 8 x h
; CHECK-NEXT: ret
entry:
%c = fcmp oeq <vscale x 8 x half> %z, zeroinitializer
- %i = insertelement <vscale x 8 x half> undef, half %y, i32 0
- %ys = shufflevector <vscale x 8 x half> %i, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x half> poison, half %y, i32 0
+ %ys = shufflevector <vscale x 8 x half> %i, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%a = fmul <vscale x 8 x half> %x, %ys
%b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %z
ret <vscale x 8 x half> %b
@@ -1186,8 +1186,8 @@ define <vscale x 4 x i32> @sadd_satqr_v4i32(<vscale x 4 x i32> %z, <vscale x 4 x
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 4 x i32> %z, zeroinitializer
- %i = insertelement <vscale x 4 x i32> undef, i32 %y, i32 0
- %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x i32> poison, i32 %y, i32 0
+ %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%a = call <vscale x 4 x i32> @llvm.sadd.sat.v4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %ys)
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %z
ret <vscale x 4 x i32> %b
@@ -1204,8 +1204,8 @@ define <vscale x 8 x i16> @sadd_satqr_v8i16(<vscale x 8 x i16> %z, <vscale x 8 x
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 8 x i16> %z, zeroinitializer
- %i = insertelement <vscale x 8 x i16> undef, i16 %y, i32 0
- %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x i16> poison, i16 %y, i32 0
+ %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%a = call <vscale x 8 x i16> @llvm.sadd.sat.v8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %ys)
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %z
ret <vscale x 8 x i16> %b
@@ -1222,8 +1222,8 @@ define <vscale x 16 x i8> @sadd_satqr_v16i8(<vscale x 16 x i8> %z, <vscale x 16
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 16 x i8> %z, zeroinitializer
- %i = insertelement <vscale x 16 x i8> undef, i8 %y, i32 0
- %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %i = insertelement <vscale x 16 x i8> poison, i8 %y, i32 0
+ %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%a = call <vscale x 16 x i8> @llvm.sadd.sat.v16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %ys)
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %z
ret <vscale x 16 x i8> %b
@@ -1240,8 +1240,8 @@ define <vscale x 4 x i32> @uadd_satqr_v4i32(<vscale x 4 x i32> %z, <vscale x 4 x
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 4 x i32> %z, zeroinitializer
- %i = insertelement <vscale x 4 x i32> undef, i32 %y, i32 0
- %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x i32> poison, i32 %y, i32 0
+ %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%a = call <vscale x 4 x i32> @llvm.uadd.sat.v4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %ys)
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %z
ret <vscale x 4 x i32> %b
@@ -1258,8 +1258,8 @@ define <vscale x 8 x i16> @uadd_satqr_v8i16(<vscale x 8 x i16> %z, <vscale x 8 x
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 8 x i16> %z, zeroinitializer
- %i = insertelement <vscale x 8 x i16> undef, i16 %y, i32 0
- %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x i16> poison, i16 %y, i32 0
+ %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%a = call <vscale x 8 x i16> @llvm.uadd.sat.v8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %ys)
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %z
ret <vscale x 8 x i16> %b
@@ -1276,8 +1276,8 @@ define <vscale x 16 x i8> @uadd_satqr_v16i8(<vscale x 16 x i8> %z, <vscale x 16
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 16 x i8> %z, zeroinitializer
- %i = insertelement <vscale x 16 x i8> undef, i8 %y, i32 0
- %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %i = insertelement <vscale x 16 x i8> poison, i8 %y, i32 0
+ %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%a = call <vscale x 16 x i8> @llvm.uadd.sat.v16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %ys)
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %z
ret <vscale x 16 x i8> %b
@@ -1294,8 +1294,8 @@ define <vscale x 4 x i32> @ssub_satqr_v4i32(<vscale x 4 x i32> %z, <vscale x 4 x
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 4 x i32> %z, zeroinitializer
- %i = insertelement <vscale x 4 x i32> undef, i32 %y, i32 0
- %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x i32> poison, i32 %y, i32 0
+ %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%a = call <vscale x 4 x i32> @llvm.ssub.sat.v4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %ys)
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %z
ret <vscale x 4 x i32> %b
@@ -1312,8 +1312,8 @@ define <vscale x 8 x i16> @ssub_satqr_v8i16(<vscale x 8 x i16> %z, <vscale x 8 x
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 8 x i16> %z, zeroinitializer
- %i = insertelement <vscale x 8 x i16> undef, i16 %y, i32 0
- %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x i16> poison, i16 %y, i32 0
+ %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%a = call <vscale x 8 x i16> @llvm.ssub.sat.v8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %ys)
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %z
ret <vscale x 8 x i16> %b
@@ -1330,8 +1330,8 @@ define <vscale x 16 x i8> @ssub_satqr_v16i8(<vscale x 16 x i8> %z, <vscale x 16
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 16 x i8> %z, zeroinitializer
- %i = insertelement <vscale x 16 x i8> undef, i8 %y, i32 0
- %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %i = insertelement <vscale x 16 x i8> poison, i8 %y, i32 0
+ %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%a = call <vscale x 16 x i8> @llvm.ssub.sat.v16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %ys)
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %z
ret <vscale x 16 x i8> %b
@@ -1348,8 +1348,8 @@ define <vscale x 4 x i32> @usub_satqr_v4i32(<vscale x 4 x i32> %z, <vscale x 4 x
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 4 x i32> %z, zeroinitializer
- %i = insertelement <vscale x 4 x i32> undef, i32 %y, i32 0
- %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %i = insertelement <vscale x 4 x i32> poison, i32 %y, i32 0
+ %ys = shufflevector <vscale x 4 x i32> %i, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%a = call <vscale x 4 x i32> @llvm.usub.sat.v4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %ys)
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %z
ret <vscale x 4 x i32> %b
@@ -1366,8 +1366,8 @@ define <vscale x 8 x i16> @usub_satqr_v8i16(<vscale x 8 x i16> %z, <vscale x 8 x
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 8 x i16> %z, zeroinitializer
- %i = insertelement <vscale x 8 x i16> undef, i16 %y, i32 0
- %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %i = insertelement <vscale x 8 x i16> poison, i16 %y, i32 0
+ %ys = shufflevector <vscale x 8 x i16> %i, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%a = call <vscale x 8 x i16> @llvm.usub.sat.v8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %ys)
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %z
ret <vscale x 8 x i16> %b
@@ -1384,8 +1384,8 @@ define <vscale x 16 x i8> @usub_satqr_v16i8(<vscale x 16 x i8> %z, <vscale x 16
; CHECK-NEXT: ret
entry:
%c = icmp eq <vscale x 16 x i8> %z, zeroinitializer
- %i = insertelement <vscale x 16 x i8> undef, i8 %y, i32 0
- %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %i = insertelement <vscale x 16 x i8> poison, i8 %y, i32 0
+ %ys = shufflevector <vscale x 16 x i8> %i, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%a = call <vscale x 16 x i8> @llvm.usub.sat.v16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %ys)
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %z
ret <vscale x 16 x i8> %b
diff --git a/llvm/test/CodeGen/AArch64/sve-split-load.ll b/llvm/test/CodeGen/AArch64/sve-split-load.ll
index 754f0339702dc..065f49433c9aa 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-load.ll
@@ -73,7 +73,7 @@ define <vscale x 2 x i32> @masked_load_promote_2i32(ptr %a, <vscale x 2 x i1> %p
; CHECK: // %bb.0:
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
- %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 1, <vscale x 2 x i1> %pg, <vscale x 2 x i32> undef)
+ %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 1, <vscale x 2 x i1> %pg, <vscale x 2 x i32> poison)
ret <vscale x 2 x i32> %load
}
@@ -83,7 +83,7 @@ define <vscale x 32 x i8> @masked_load_split_32i8(ptr %a, <vscale x 32 x i1> %pg
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT: ret
- %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i8> undef)
+ %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i8> poison)
ret <vscale x 32 x i8> %load
}
@@ -99,7 +99,7 @@ define <vscale x 32 x i16> @masked_load_split_32i16(ptr %a, <vscale x 32 x i1> %
; CHECK-NEXT: ld1h { z2.h }, p3/z, [x0, #2, mul vl]
; CHECK-NEXT: ld1h { z3.h }, p1/z, [x0, #3, mul vl]
; CHECK-NEXT: ret
- %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i16> undef)
+ %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i16> poison)
ret <vscale x 32 x i16> %load
}
@@ -111,7 +111,7 @@ define <vscale x 8 x i32> @masked_load_split_8i32(ptr %a, <vscale x 8 x i1> %pg)
; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ret
- %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i32> undef)
+ %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i32> poison)
ret <vscale x 8 x i32> %load
}
@@ -129,7 +129,7 @@ define <vscale x 8 x i64> @masked_load_split_8i64(ptr %a, <vscale x 8 x i1> %pg)
; CHECK-NEXT: ld1d { z2.d }, p3/z, [x0, #2, mul vl]
; CHECK-NEXT: ld1d { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT: ret
- %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i64> undef)
+ %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i64> poison)
ret <vscale x 8 x i64> %load
}
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
index 9729a1d95cd91..3a6445dd1d99b 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
@@ -239,10 +239,10 @@ define void @build_vector_non_const_v4i1(i1 %a, i1 %b, i1 %c, i1 %d, ptr %out) {
; NONEON-NOSVE-NEXT: orr w8, w8, w3, lsl #3
; NONEON-NOSVE-NEXT: strb w8, [x4]
; NONEON-NOSVE-NEXT: ret
- %1 = insertelement <4 x i1> undef, i1 %a, i64 0
- %2 = insertelement <4 x i1> %1, i1 %b, i64 1
- %3 = insertelement <4 x i1> %2, i1 %c, i64 2
- %4 = insertelement <4 x i1> %3, i1 %d, i64 3
+ %1 = insertelement <4 x i1> poison, i1 %a, i64 0
+ %2 = insertelement <4 x i1> %1, i1 %b, i64 1
+ %3 = insertelement <4 x i1> %2, i1 %c, i64 2
+ %4 = insertelement <4 x i1> %3, i1 %d, i64 3
store <4 x i1> %4, ptr %out
ret void
}
@@ -264,8 +264,8 @@ define void @build_vector_non_const_v2f64(double %a, double %b, ptr %out) {
; NONEON-NOSVE-NEXT: str q0, [x0]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %1 = insertelement <2 x double> undef, double %a, i64 0
- %2 = insertelement <2 x double> %1, double %b, i64 1
+ %1 = insertelement <2 x double> poison, double %a, i64 0
+ %2 = insertelement <2 x double> %1, double %b, i64 1
store <2 x double> %2, ptr %out
ret void
}
@@ -288,8 +288,8 @@ define void @build_vector_non_const_v2f32(float %a, float %b, ptr %out) {
; NONEON-NOSVE-NEXT: str d0, [x0]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %1 = insertelement <2 x float> undef, float %a, i64 0
- %2 = insertelement <2 x float> %1, float %b, i64 1
+ %1 = insertelement <2 x float> poison, float %a, i64 0
+ %2 = insertelement <2 x float> %1, float %b, i64 1
store <2 x float> %2, ptr %out
ret void
}
@@ -317,10 +317,10 @@ define void @build_vector_non_const_v4f32(float %a, float %b, float %c, float %d
; NONEON-NOSVE-NEXT: str q0, [x0]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %1 = insertelement <4 x float> undef, float %a, i64 0
- %2 = insertelement <4 x float> %1, float %b, i64 1
- %3 = insertelement <4 x float> %2, float %c, i64 2
- %4 = insertelement <4 x float> %3, float %d, i64 3
+ %1 = insertelement <4 x float> poison, float %a, i64 0
+ %2 = insertelement <4 x float> %1, float %b, i64 1
+ %3 = insertelement <4 x float> %2, float %c, i64 2
+ %4 = insertelement <4 x float> %3, float %d, i64 3
store <4 x float> %4, ptr %out
ret void
}
@@ -346,10 +346,10 @@ define void @build_vector_non_const_v4f64(double %a, double %b, double %c, doubl
; NONEON-NOSVE-NEXT: stp q1, q0, [x0]
; NONEON-NOSVE-NEXT: add sp, sp, #32
; NONEON-NOSVE-NEXT: ret
- %1 = insertelement <4 x double> undef, double %a, i64 0
- %2 = insertelement <4 x double> %1, double %b, i64 1
- %3 = insertelement <4 x double> %2, double %c, i64 2
- %4 = insertelement <4 x double> %3, double %d, i64 3
+ %1 = insertelement <4 x double> poison, double %a, i64 0
+ %2 = insertelement <4 x double> %1, double %b, i64 1
+ %3 = insertelement <4 x double> %2, double %c, i64 2
+ %4 = insertelement <4 x double> %3, double %d, i64 3
store <4 x double> %4, ptr %out
ret void
}
@@ -391,14 +391,14 @@ define void @build_vector_non_const_v8f16(half %a, half %b, half %c, half %d, ha
; NONEON-NOSVE-NEXT: str q0, [x0]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %1 = insertelement <8 x half> undef, half %a, i64 0
- %2 = insertelement <8 x half> %1, half %b, i64 1
- %3 = insertelement <8 x half> %2, half %c, i64 2
- %4 = insertelement <8 x half> %3, half %d, i64 3
- %5 = insertelement <8 x half> %4, half %e, i64 4
- %6 = insertelement <8 x half> %5, half %f, i64 5
- %7 = insertelement <8 x half> %6, half %g, i64 6
- %8 = insertelement <8 x half> %7, half %h, i64 7
+ %1 = insertelement <8 x half> poison, half %a, i64 0
+ %2 = insertelement <8 x half> %1, half %b, i64 1
+ %3 = insertelement <8 x half> %2, half %c, i64 2
+ %4 = insertelement <8 x half> %3, half %d, i64 3
+ %5 = insertelement <8 x half> %4, half %e, i64 4
+ %6 = insertelement <8 x half> %5, half %f, i64 5
+ %7 = insertelement <8 x half> %6, half %g, i64 6
+ %8 = insertelement <8 x half> %7, half %h, i64 7
store <8 x half> %8, ptr %out
ret void
}
@@ -421,8 +421,8 @@ define void @build_vector_non_const_v2i32(i32 %a, i32 %b, ptr %out) {
; NONEON-NOSVE-NEXT: str d0, [x2]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %1 = insertelement <2 x i32> undef, i32 %a, i64 0
- %2 = insertelement <2 x i32> %1, i32 %b, i64 1
+ %1 = insertelement <2 x i32> poison, i32 %a, i64 0
+ %2 = insertelement <2 x i32> %1, i32 %b, i64 1
store <2 x i32> %2, ptr %out
ret void
}
@@ -463,14 +463,14 @@ define void @build_vector_non_const_v8i8(i8 %a, i8 %b, i8 %c, i8 %d, i8 %e, i8 %
; NONEON-NOSVE-NEXT: str d0, [x8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %1 = insertelement <8 x i8> undef, i8 %a, i64 0
- %2 = insertelement <8 x i8> %1, i8 %b, i64 1
- %3 = insertelement <8 x i8> %2, i8 %c, i64 2
- %4 = insertelement <8 x i8> %3, i8 %d, i64 3
- %5 = insertelement <8 x i8> %4, i8 %e, i64 4
- %6 = insertelement <8 x i8> %5, i8 %f, i64 5
- %7 = insertelement <8 x i8> %6, i8 %g, i64 6
- %8 = insertelement <8 x i8> %7, i8 %h, i64 7
+ %1 = insertelement <8 x i8> poison, i8 %a, i64 0
+ %2 = insertelement <8 x i8> %1, i8 %b, i64 1
+ %3 = insertelement <8 x i8> %2, i8 %c, i64 2
+ %4 = insertelement <8 x i8> %3, i8 %d, i64 3
+ %5 = insertelement <8 x i8> %4, i8 %e, i64 4
+ %6 = insertelement <8 x i8> %5, i8 %f, i64 5
+ %7 = insertelement <8 x i8> %6, i8 %g, i64 6
+ %8 = insertelement <8 x i8> %7, i8 %h, i64 7
store <8 x i8> %8, ptr %out
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
index 619840fc6afb2..6ec2b837eed2a 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
@@ -678,10 +678,10 @@ define void @concat_v32i8_undef(ptr %a, ptr %b) {
; NONEON-NOSVE-NEXT: str q0, [x1]
; NONEON-NOSVE-NEXT: ret
%op1 = load <16 x i8>, ptr %a
- %res = shufflevector <16 x i8> %op1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
- i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
- i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %res = shufflevector <16 x i8> %op1, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+ i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+ i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x i8> %res, ptr %b
ret void
}
@@ -699,8 +699,8 @@ define void @concat_v16i16_undef(ptr %a, ptr %b) {
; NONEON-NOSVE-NEXT: str q0, [x1]
; NONEON-NOSVE-NEXT: ret
%op1 = load <8 x i16>, ptr %a
- %res = shufflevector <8 x i16> %op1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %res = shufflevector <8 x i16> %op1, <8 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x i16> %res, ptr %b
ret void
}
@@ -718,7 +718,7 @@ define void @concat_v8i32_undef(ptr %a, ptr %b) {
; NONEON-NOSVE-NEXT: str q0, [x1]
; NONEON-NOSVE-NEXT: ret
%op1 = load <4 x i32>, ptr %a
- %res = shufflevector <4 x i32> %op1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <4 x i32> %op1, <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i32> %res, ptr %b
ret void
}
@@ -736,7 +736,7 @@ define void @concat_v4i64_undef(ptr %a, ptr %b) {
; NONEON-NOSVE-NEXT: str q0, [x1]
; NONEON-NOSVE-NEXT: ret
%op1 = load <2 x i64>, ptr %a
- %res = shufflevector <2 x i64> %op1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <2 x i64> %op1, <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x i64> %res, ptr %b
ret void
}
@@ -762,12 +762,12 @@ define void @concat_v32i8_4op(ptr %a, ptr %b) {
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
%op1 = load <8 x i8>, ptr %a
- %shuffle = shufflevector <8 x i8> %op1, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %res = shufflevector <16 x i8> %shuffle, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
- i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
- i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %shuffle = shufflevector <8 x i8> %op1, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %res = shufflevector <16 x i8> %shuffle, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+ i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+ i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
store <32 x i8> %res, ptr %b
ret void
}
@@ -789,9 +789,9 @@ define void @concat_v16i16_4op(ptr %a, ptr %b) {
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
%op1 = load <4 x i16>, ptr %a
- %shuffle = shufflevector <4 x i16> %op1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %res = shufflevector <8 x i16> %shuffle, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
- i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle = shufflevector <4 x i16> %op1, <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %res = shufflevector <8 x i16> %shuffle, <8 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+ i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
store <16 x i16> %res, ptr %b
ret void
}
@@ -813,8 +813,8 @@ define void @concat_v8i32_4op(ptr %a, ptr %b) {
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
%op1 = load <2 x i32>, ptr %a
- %shuffle = shufflevector <2 x i32> %op1, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %res = shufflevector <4 x i32> %shuffle, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <2 x i32> %op1, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x i32> %shuffle, <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i32> %res, ptr %b
ret void
}
@@ -836,8 +836,8 @@ define void @concat_v4i64_4op(ptr %a, ptr %b) {
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
%op1 = load <1 x i64>, ptr %a
- %shuffle = shufflevector <1 x i64> %op1, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
- %res = shufflevector <2 x i64> %shuffle, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle = shufflevector <1 x i64> %op1, <1 x i64> poison, <2 x i32> <i32 0, i32 1>
+ %res = shufflevector <2 x i64> %shuffle, <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
store <4 x i64> %res, ptr %b
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll
index a33e8537edf4e..c364abf2916e8 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll
@@ -85,7 +85,7 @@ define void @test_revbv16i16(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <32 x i8>, ptr %a
- %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14, i32 17, i32 16, i32 19, i32 18, i32 21, i32 20, i32 23, i32 22, i32 undef, i32 24, i32 27, i32 undef, i32 29, i32 28, i32 undef, i32 undef>
+ %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> poison, <32 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14, i32 17, i32 16, i32 19, i32 18, i32 21, i32 20, i32 23, i32 22, i32 poison, i32 24, i32 27, i32 poison, i32 29, i32 28, i32 poison, i32 poison>
store <32 x i8> %tmp2, ptr %a
ret void
}
@@ -169,7 +169,7 @@ define void @test_revbv8i32(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <32 x i8>, ptr %a
- %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20, i32 27, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 29, i32 undef>
+ %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> poison, <32 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20, i32 27, i32 poison, i32 poison, i32 poison, i32 31, i32 30, i32 29, i32 poison>
store <32 x i8> %tmp2, ptr %a
ret void
}
@@ -253,7 +253,7 @@ define void @test_revbv4i64(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <32 x i8>, ptr %a
- %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 31, i32 30, i32 29, i32 undef, i32 27, i32 undef, i32 undef, i32 undef>
+ %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> poison, <32 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 31, i32 30, i32 29, i32 poison, i32 27, i32 poison, i32 poison, i32 poison>
store <32 x i8> %tmp2, ptr %a
ret void
}
@@ -301,7 +301,7 @@ define void @test_revhv8i32(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <16 x i16>, ptr %a
- %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+ %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
store <16 x i16> %tmp2, ptr %a
ret void
}
@@ -349,7 +349,7 @@ define void @test_revhv8f32(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <16 x half>, ptr %a
- %tmp2 = shufflevector <16 x half> %tmp1, <16 x half> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+ %tmp2 = shufflevector <16 x half> %tmp1, <16 x half> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
store <16 x half> %tmp2, ptr %a
ret void
}
@@ -397,7 +397,7 @@ define void @test_revhv4i64(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <16 x i16>, ptr %a
- %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
+ %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
store <16 x i16> %tmp2, ptr %a
ret void
}
@@ -433,7 +433,7 @@ define void @test_revwv4i64(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
- %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
store <8 x i32> %tmp2, ptr %a
ret void
}
@@ -469,7 +469,7 @@ define void @test_revwv4f64(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <8 x float>, ptr %a
- %tmp2 = shufflevector <8 x float> %tmp1, <8 x float> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ %tmp2 = shufflevector <8 x float> %tmp1, <8 x float> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
store <8 x float> %tmp2, ptr %a
ret void
}
@@ -524,7 +524,7 @@ define <16 x i8> @test_revv16i8(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #32
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <16 x i8>, ptr %a
- %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
+ %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> poison, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
ret <16 x i8> %tmp2
}
@@ -635,7 +635,7 @@ define void @test_revhv32i16(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #128
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <32 x i16>, ptr %a
- %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <32 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20, i32 27, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 29, i32 undef>
+ %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> poison, <32 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20, i32 27, i32 poison, i32 poison, i32 poison, i32 31, i32 30, i32 29, i32 poison>
store <32 x i16> %tmp2, ptr %a
ret void
}
@@ -666,7 +666,7 @@ define void @test_rev_elts_fail(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <4 x i64>, ptr %a
- %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
store <4 x i64> %tmp2, ptr %a
ret void
}
@@ -692,7 +692,7 @@ define void @test_revdv4i64_sve2p1(ptr %a) #1 {
; NONEON-NOSVE-NEXT: stp q0, q1, [x0]
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <4 x i64>, ptr %a
- %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ %tmp2 = shufflevector <4 x i64> %tmp1, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
store <4 x i64> %tmp2, ptr %a
ret void
}
@@ -716,7 +716,7 @@ define void @test_revdv4f64_sve2p1(ptr %a) #1 {
; NONEON-NOSVE-NEXT: stp q0, q1, [x0]
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <4 x double>, ptr %a
- %tmp2 = shufflevector <4 x double> %tmp1, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ %tmp2 = shufflevector <4 x double> %tmp1, <4 x double> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
store <4 x double> %tmp2, ptr %a
ret void
}
@@ -752,7 +752,7 @@ define void @test_revv8i32(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
- %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
store <8 x i32> %tmp2, ptr %a
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
index 168ad6a77badb..a20a330b39bb4 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
@@ -651,7 +651,7 @@ define void @zip1_v8i32_undef(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #48
; NONEON-NOSVE-NEXT: ret
%tmp1 = load volatile <8 x i32>, ptr %a
- %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
+ %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
store volatile <8 x i32> %tmp2, ptr %a
ret void
}
@@ -801,7 +801,7 @@ define void @trn_v32i8(ptr %a, ptr %b) {
%tmp1 = load <32 x i8>, ptr %a
%tmp2 = load <32 x i8>, ptr %b
%tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 0, i32 32, i32 2, i32 34, i32 4, i32 36, i32 6, i32 38, i32 8, i32 40, i32 10, i32 42, i32 12, i32 44, i32 14, i32 46, i32 16, i32 48, i32 18, i32 50, i32 20, i32 52, i32 22, i32 54, i32 24, i32 56, i32 26, i32 58, i32 28, i32 60, i32 30, i32 62>
- %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 33, i32 3, i32 35, i32 undef, i32 37, i32 7, i32 undef, i32 undef, i32 41, i32 11, i32 43, i32 13, i32 45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63>
+ %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 33, i32 3, i32 35, i32 poison, i32 37, i32 7, i32 poison, i32 poison, i32 41, i32 11, i32 43, i32 13, i32 45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63>
%tmp5 = add <32 x i8> %tmp3, %tmp4
store <32 x i8> %tmp5, ptr %a
ret void
@@ -853,7 +853,7 @@ define void @trn_v8i16(ptr %a, ptr %b) {
%tmp1 = load <8 x i16>, ptr %a
%tmp2 = load <8 x i16>, ptr %b
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 7, i32 2, i32 6, i32 4, i32 5, i32 1, i32 3>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 6, i32 3, i32 5, i32 undef, i32 4, i32 7, i32 undef>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 6, i32 3, i32 5, i32 poison, i32 4, i32 7, i32 poison>
%tmp5 = add <8 x i16> %tmp3, %tmp4
store <8 x i16> %tmp5, ptr %a
ret void
@@ -996,8 +996,8 @@ define void @trn_v8i32(ptr %a, ptr %b) {
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
%tmp2 = load <8 x i32>, ptr %b
- %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
+ %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 0, i32 8, i32 poison, i32 poison, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> %tmp2, <8 x i32> <i32 1, i32 poison, i32 3, i32 11, i32 5, i32 13, i32 poison, i32 poison>
%tmp5 = add <8 x i32> %tmp3, %tmp4
store <8 x i32> %tmp5, ptr %a
ret void
@@ -1125,8 +1125,8 @@ define void @trn_v8i32_undef(ptr %a) {
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
- %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
- %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
+ %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
%tmp5 = add <8 x i32> %tmp3, %tmp4
store <8 x i32> %tmp5, ptr %a
ret void
@@ -1424,7 +1424,7 @@ define void @zip2_v8i32_undef(ptr %a) #0{
; NONEON-NOSVE-NEXT: add sp, sp, #48
; NONEON-NOSVE-NEXT: ret
%tmp1 = load volatile <8 x i32>, ptr %a
- %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+ %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
store volatile <8 x i32> %tmp2, ptr %a
ret void
}
@@ -1698,7 +1698,7 @@ define void @uzp_v32i8(ptr %a, ptr %b) #0{
%tmp1 = load <32 x i8>, ptr %a
%tmp2 = load <32 x i8>, ptr %b
%tmp3 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
- %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 9, i32 11, i32 13, i32 undef, i32 undef, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
+ %tmp4 = shufflevector <32 x i8> %tmp1, <32 x i8> %tmp2, <32 x i32> <i32 1, i32 3, i32 5, i32 poison, i32 9, i32 11, i32 13, i32 poison, i32 poison, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
%tmp5 = add <32 x i8> %tmp3, %tmp4
store <32 x i8> %tmp5, ptr %a
ret void
@@ -1740,7 +1740,7 @@ define void @uzp_v4i16(ptr %a, ptr %b) #0{
%tmp1 = load <4 x i16>, ptr %a
%tmp2 = load <4 x i16>, ptr %b
%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 3, i32 2, i32 1>
- %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 0, i32 2, i32 undef>
+ %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 0, i32 2, i32 poison>
%tmp5 = add <4 x i16> %tmp3, %tmp4
store <4 x i16> %tmp5, ptr %a
ret void
@@ -1958,8 +1958,8 @@ define void @uzp_v8f32(ptr %a, ptr %b) #0{
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <8 x float>, ptr %a
%tmp2 = load <8 x float>, ptr %b
- %tmp3 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 0, i32 undef, i32 4, i32 6, i32 undef, i32 10, i32 12, i32 14>
- %tmp4 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 1, i32 undef, i32 5, i32 7, i32 9, i32 11, i32 undef, i32 undef>
+ %tmp3 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 0, i32 poison, i32 4, i32 6, i32 poison, i32 10, i32 12, i32 14>
+ %tmp4 = shufflevector <8 x float> %tmp1, <8 x float> %tmp2, <8 x i32> <i32 1, i32 poison, i32 5, i32 7, i32 9, i32 11, i32 poison, i32 poison>
%tmp5 = fadd <8 x float> %tmp3, %tmp4
store <8 x float> %tmp5, ptr %a
ret void
@@ -2142,8 +2142,8 @@ define void @uzp_v8i32_undef(ptr %a) #0{
; NONEON-NOSVE-NEXT: add sp, sp, #48
; NONEON-NOSVE-NEXT: ret
%tmp1 = load <8 x i32>, ptr %a
- %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 0, i32 2, i32 4, i32 6>
- %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 1, i32 3, i32 5, i32 7>
+ %tmp3 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 0, i32 2, i32 4, i32 6>
+ %tmp4 = shufflevector <8 x i32> %tmp1, <8 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 1, i32 3, i32 5, i32 7>
%tmp5 = add <8 x i32> %tmp3, %tmp4
store <8 x i32> %tmp5, ptr %a
ret void
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
index c942f1eca8eba..8e12b861abf3f 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
@@ -23,7 +23,7 @@ define <4 x i1> @reshuffle_v4i1_nxv4i1(<vscale x 4 x i1> %a) {
%el1 = extractelement <vscale x 4 x i1> %a, i32 1
%el2 = extractelement <vscale x 4 x i1> %a, i32 2
%el3 = extractelement <vscale x 4 x i1> %a, i32 3
- %v0 = insertelement <4 x i1> undef, i1 %el0, i32 0
+ %v0 = insertelement <4 x i1> poison, i1 %el0, i32 0
%v1 = insertelement <4 x i1> %v0, i1 %el1, i32 1
%v2 = insertelement <4 x i1> %v1, i1 %el2, i32 2
%v3 = insertelement <4 x i1> %v2, i1 %el3, i32 3
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
index c7b2575266d65..cfe50ad78e7ba 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
@@ -28,8 +28,8 @@ define void @hang_when_merging_stores_after_legalisation(ptr %a, <2 x i32> %b) {
; NONEON-NOSVE-NEXT: stp q0, q0, [x0]
; NONEON-NOSVE-NEXT: add sp, sp, #32
; NONEON-NOSVE-NEXT: ret
- %splat = shufflevector <2 x i32> %b, <2 x i32> undef, <8 x i32> zeroinitializer
- %interleaved.vec = shufflevector <8 x i32> %splat, <8 x i32> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ %splat = shufflevector <2 x i32> %b, <2 x i32> poison, <8 x i32> zeroinitializer
+ %interleaved.vec = shufflevector <8 x i32> %splat, <8 x i32> poison, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
store <8 x i32> %interleaved.vec, ptr %a, align 4
ret void
}
@@ -66,7 +66,7 @@ define void @interleave_store_without_splat(ptr %a, <4 x i32> %v1, <4 x i32> %v2
; NONEON-NOSVE-NEXT: add sp, sp, #64
; NONEON-NOSVE-NEXT: ret
%shuffle = shufflevector <4 x i32> %v1, <4 x i32> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %interleaved = shufflevector <8 x i32> %shuffle, <8 x i32> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ %interleaved = shufflevector <8 x i32> %shuffle, <8 x i32> poison, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
store <8 x i32> %interleaved, ptr %a, align 1
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
index 939565216f586..335a1f88ad3cf 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
@@ -27,8 +27,8 @@ define <4 x i8> @splat_v4i8(i8 %a) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <4 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <4 x i8> %insert, <4 x i8> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <4 x i8> %insert, <4 x i8> poison, <4 x i32> zeroinitializer
ret <4 x i8> %splat
}
@@ -54,8 +54,8 @@ define <8 x i8> @splat_v8i8(i8 %a) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <8 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <8 x i8> %insert, <8 x i8> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <8 x i8> %insert, <8 x i8> poison, <8 x i32> zeroinitializer
ret <8 x i8> %splat
}
@@ -88,8 +88,8 @@ define <16 x i8> @splat_v16i8(i8 %a) {
; NONEON-NOSVE-NEXT: strb w0, [sp]
; NONEON-NOSVE-NEXT: ldr q0, [sp], #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <16 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <16 x i8> %insert, <16 x i8> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <16 x i8> %insert, <16 x i8> poison, <16 x i32> zeroinitializer
ret <16 x i8> %splat
}
@@ -124,8 +124,8 @@ define void @splat_v32i8(i8 %a, ptr %b) {
; NONEON-NOSVE-NEXT: stp q0, q0, [x1]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <32 x i8> undef, i8 %a, i64 0
- %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
+ %insert = insertelement <32 x i8> poison, i8 %a, i64 0
+ %splat = shufflevector <32 x i8> %insert, <32 x i8> poison, <32 x i32> zeroinitializer
store <32 x i8> %splat, ptr %b
ret void
}
@@ -145,8 +145,8 @@ define <2 x i16> @splat_v2i16(i16 %a) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <2 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <2 x i16> %insert, <2 x i16> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <2 x i16> %insert, <2 x i16> poison, <2 x i32> zeroinitializer
ret <2 x i16> %splat
}
@@ -168,8 +168,8 @@ define <4 x i16> @splat_v4i16(i16 %a) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <4 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <4 x i16> %insert, <4 x i16> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <4 x i16> %insert, <4 x i16> poison, <4 x i32> zeroinitializer
ret <4 x i16> %splat
}
@@ -194,8 +194,8 @@ define <8 x i16> @splat_v8i16(i16 %a) {
; NONEON-NOSVE-NEXT: strh w0, [sp]
; NONEON-NOSVE-NEXT: ldr q0, [sp], #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <8 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <8 x i16> %insert, <8 x i16> poison, <8 x i32> zeroinitializer
ret <8 x i16> %splat
}
@@ -222,8 +222,8 @@ define void @splat_v16i16(i16 %a, ptr %b) {
; NONEON-NOSVE-NEXT: stp q0, q0, [x1]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <16 x i16> undef, i16 %a, i64 0
- %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x i16> poison, i16 %a, i64 0
+ %splat = shufflevector <16 x i16> %insert, <16 x i16> poison, <16 x i32> zeroinitializer
store <16 x i16> %splat, ptr %b
ret void
}
@@ -243,8 +243,8 @@ define <2 x i32> @splat_v2i32(i32 %a) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <2 x i32> undef, i32 %a, i64 0
- %splat = shufflevector <2 x i32> %insert, <2 x i32> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x i32> poison, i32 %a, i64 0
+ %splat = shufflevector <2 x i32> %insert, <2 x i32> poison, <2 x i32> zeroinitializer
ret <2 x i32> %splat
}
@@ -263,8 +263,8 @@ define <4 x i32> @splat_v4i32(i32 %a) {
; NONEON-NOSVE-NEXT: stp w0, w0, [sp]
; NONEON-NOSVE-NEXT: ldr q0, [sp], #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <4 x i32> undef, i32 %a, i64 0
- %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x i32> poison, i32 %a, i64 0
+ %splat = shufflevector <4 x i32> %insert, <4 x i32> poison, <4 x i32> zeroinitializer
ret <4 x i32> %splat
}
@@ -285,8 +285,8 @@ define void @splat_v8i32(i32 %a, ptr %b) {
; NONEON-NOSVE-NEXT: stp q0, q0, [x1]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <8 x i32> undef, i32 %a, i64 0
- %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x i32> poison, i32 %a, i64 0
+ %splat = shufflevector <8 x i32> %insert, <8 x i32> poison, <8 x i32> zeroinitializer
store <8 x i32> %splat, ptr %b
ret void
}
@@ -306,8 +306,8 @@ define <1 x i64> @splat_v1i64(i64 %a) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <1 x i64> undef, i64 %a, i64 0
- %splat = shufflevector <1 x i64> %insert, <1 x i64> undef, <1 x i32> zeroinitializer
+ %insert = insertelement <1 x i64> poison, i64 %a, i64 0
+ %splat = shufflevector <1 x i64> %insert, <1 x i64> poison, <1 x i32> zeroinitializer
ret <1 x i64> %splat
}
@@ -324,8 +324,8 @@ define <2 x i64> @splat_v2i64(i64 %a) {
; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
; NONEON-NOSVE-NEXT: ldr q0, [sp], #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <2 x i64> undef, i64 %a, i64 0
- %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x i64> poison, i64 %a, i64 0
+ %splat = shufflevector <2 x i64> %insert, <2 x i64> poison, <2 x i32> zeroinitializer
ret <2 x i64> %splat
}
@@ -344,8 +344,8 @@ define void @splat_v4i64(i64 %a, ptr %b) {
; NONEON-NOSVE-NEXT: stp q0, q0, [x1]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <4 x i64> undef, i64 %a, i64 0
- %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x i64> poison, i64 %a, i64 0
+ %splat = shufflevector <4 x i64> %insert, <4 x i64> poison, <4 x i32> zeroinitializer
store <4 x i64> %splat, ptr %b
ret void
}
@@ -371,8 +371,8 @@ define <2 x half> @splat_v2f16(half %a) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <2 x half> undef, half %a, i64 0
- %splat = shufflevector <2 x half> %insert, <2 x half> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x half> poison, half %a, i64 0
+ %splat = shufflevector <2 x half> %insert, <2 x half> poison, <2 x i32> zeroinitializer
ret <2 x half> %splat
}
@@ -395,8 +395,8 @@ define <4 x half> @splat_v4f16(half %a) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <4 x half> undef, half %a, i64 0
- %splat = shufflevector <4 x half> %insert, <4 x half> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x half> poison, half %a, i64 0
+ %splat = shufflevector <4 x half> %insert, <4 x half> poison, <4 x i32> zeroinitializer
ret <4 x half> %splat
}
@@ -422,8 +422,8 @@ define <8 x half> @splat_v8f16(half %a) {
; NONEON-NOSVE-NEXT: str h0, [sp]
; NONEON-NOSVE-NEXT: ldr q0, [sp], #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <8 x half> undef, half %a, i64 0
- %splat = shufflevector <8 x half> %insert, <8 x half> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x half> poison, half %a, i64 0
+ %splat = shufflevector <8 x half> %insert, <8 x half> poison, <8 x i32> zeroinitializer
ret <8 x half> %splat
}
@@ -451,8 +451,8 @@ define void @splat_v16f16(half %a, ptr %b) {
; NONEON-NOSVE-NEXT: stp q0, q0, [x0]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <16 x half> undef, half %a, i64 0
- %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer
+ %insert = insertelement <16 x half> poison, half %a, i64 0
+ %splat = shufflevector <16 x half> %insert, <16 x half> poison, <16 x i32> zeroinitializer
store <16 x half> %splat, ptr %b
ret void
}
@@ -473,8 +473,8 @@ define <2 x float> @splat_v2f32(float %a, <2 x float> %op2) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <2 x float> undef, float %a, i64 0
- %splat = shufflevector <2 x float> %insert, <2 x float> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x float> poison, float %a, i64 0
+ %splat = shufflevector <2 x float> %insert, <2 x float> poison, <2 x i32> zeroinitializer
ret <2 x float> %splat
}
@@ -494,8 +494,8 @@ define <4 x float> @splat_v4f32(float %a, <4 x float> %op2) {
; NONEON-NOSVE-NEXT: stp s0, s0, [sp]
; NONEON-NOSVE-NEXT: ldr q0, [sp], #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <4 x float> undef, float %a, i64 0
- %splat = shufflevector <4 x float> %insert, <4 x float> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x float> poison, float %a, i64 0
+ %splat = shufflevector <4 x float> %insert, <4 x float> poison, <4 x i32> zeroinitializer
ret <4 x float> %splat
}
@@ -517,8 +517,8 @@ define void @splat_v8f32(float %a, ptr %b) {
; NONEON-NOSVE-NEXT: stp q0, q0, [x0]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <8 x float> undef, float %a, i64 0
- %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer
+ %insert = insertelement <8 x float> poison, float %a, i64 0
+ %splat = shufflevector <8 x float> %insert, <8 x float> poison, <8 x i32> zeroinitializer
store <8 x float> %splat, ptr %b
ret void
}
@@ -536,8 +536,8 @@ define <1 x double> @splat_v1f64(double %a, <1 x double> %op2) {
; NONEON-NOSVE-NEXT: ldr d0, [sp, #8]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <1 x double> undef, double %a, i64 0
- %splat = shufflevector <1 x double> %insert, <1 x double> undef, <1 x i32> zeroinitializer
+ %insert = insertelement <1 x double> poison, double %a, i64 0
+ %splat = shufflevector <1 x double> %insert, <1 x double> poison, <1 x i32> zeroinitializer
ret <1 x double> %splat
}
@@ -555,8 +555,8 @@ define <2 x double> @splat_v2f64(double %a, <2 x double> %op2) {
; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
; NONEON-NOSVE-NEXT: ldr q0, [sp], #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <2 x double> undef, double %a, i64 0
- %splat = shufflevector <2 x double> %insert, <2 x double> undef, <2 x i32> zeroinitializer
+ %insert = insertelement <2 x double> poison, double %a, i64 0
+ %splat = shufflevector <2 x double> %insert, <2 x double> poison, <2 x i32> zeroinitializer
ret <2 x double> %splat
}
@@ -576,8 +576,8 @@ define void @splat_v4f64(double %a, ptr %b) {
; NONEON-NOSVE-NEXT: stp q0, q0, [x0]
; NONEON-NOSVE-NEXT: add sp, sp, #16
; NONEON-NOSVE-NEXT: ret
- %insert = insertelement <4 x double> undef, double %a, i64 0
- %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer
+ %insert = insertelement <4 x double> poison, double %a, i64 0
+ %splat = shufflevector <4 x double> %insert, <4 x double> poison, <4 x i32> zeroinitializer
store <4 x double> %splat, ptr %b
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/sve-unary-movprfx.ll b/llvm/test/CodeGen/AArch64/sve-unary-movprfx.ll
index 62fc20d15844a..a6c5abef19ab8 100644
--- a/llvm/test/CodeGen/AArch64/sve-unary-movprfx.ll
+++ b/llvm/test/CodeGen/AArch64/sve-unary-movprfx.ll
@@ -39,7 +39,7 @@ define <vscale x 16 x i8> @abs_i8_undef(<vscale x 16 x i8> %a, <vscale x 16 x i8
; CHECK-NEXT: abs z0.b, p0/m, z1.b
; CHECK-NEXT: ret
%pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
- %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
+ %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> poison, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %ret
}
@@ -98,7 +98,7 @@ define <vscale x 8 x i16> @abs_i16_undef(<vscale x 8 x i16> %a, <vscale x 8 x i1
; CHECK-NEXT: abs z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
+ %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %ret
}
@@ -156,7 +156,7 @@ define <vscale x 4 x i32> @abs_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x i3
; CHECK-NEXT: abs z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
+ %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %ret
}
@@ -214,7 +214,7 @@ define <vscale x 2 x i64> @abs_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i6
; CHECK-NEXT: abs z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
+ %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %ret
}
@@ -250,7 +250,7 @@ define <vscale x 16 x i8> @cls_i8_dupreg(<vscale x 16 x i8> %a) #0 {
; CHECK-NEXT: cls z0.b, p0/m, z0.b
; CHECK-NEXT: ret
%pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
- %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
+ %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> poison, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
ret <vscale x 16 x i8> %ret
}
@@ -262,7 +262,7 @@ define <vscale x 16 x i8> @cls_i8_undef(<vscale x 16 x i8> %a, <vscale x 16 x i8
; CHECK-NEXT: cls z0.b, p0/m, z1.b
; CHECK-NEXT: ret
%pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
- %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
+ %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> poison, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %ret
}
@@ -297,7 +297,7 @@ define <vscale x 8 x i16> @cls_i16_dupreg(<vscale x 8 x i16> %a) #0 {
; CHECK-NEXT: cls z0.h, p0/m, z0.h
; CHECK-NEXT: ret
%pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
+ %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
ret <vscale x 8 x i16> %ret
}
@@ -309,7 +309,7 @@ define <vscale x 8 x i16> @cls_i16_undef(<vscale x 8 x i16> %a, <vscale x 8 x i1
; CHECK-NEXT: cls z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
+ %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %ret
}
@@ -345,7 +345,7 @@ define <vscale x 4 x i32> @cls_i32_dupreg(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: cls z0.s, p0/m, z0.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
+ %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
ret <vscale x 4 x i32> %ret
}
@@ -357,7 +357,7 @@ define <vscale x 4 x i32> @cls_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x i3
; CHECK-NEXT: cls z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
+ %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %ret
}
@@ -393,7 +393,7 @@ define <vscale x 2 x i64> @cls_i64_dupreg(<vscale x 2 x i64> %a) #0 {
; CHECK-NEXT: cls z0.d, p0/m, z0.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
+ %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
ret <vscale x 2 x i64> %ret
}
@@ -405,7 +405,7 @@ define <vscale x 2 x i64> @cls_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i6
; CHECK-NEXT: cls z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
+ %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %ret
}
@@ -463,7 +463,7 @@ define <vscale x 8 x half> @fabs_f16_undef(<vscale x 8 x half> %a, <vscale x 8 x
; CHECK-NEXT: fabs z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fabs.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
+ %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fabs.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
ret <vscale x 8 x half> %ret
}
@@ -521,7 +521,7 @@ define <vscale x 4 x float> @fabs_f32_undef(<vscale x 4 x float> %a, <vscale x 4
; CHECK-NEXT: fabs z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fabs.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
+ %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fabs.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
ret <vscale x 4 x float> %ret
}
@@ -579,7 +579,7 @@ define <vscale x 2 x double> @fabs_f64_undef(<vscale x 2 x double> %a, <vscale x
; CHECK-NEXT: fabs z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fabs.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
+ %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fabs.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
ret <vscale x 2 x double> %ret
}
@@ -637,7 +637,7 @@ define <vscale x 8 x half> @fsqrt_f16_undef(<vscale x 8 x half> %a, <vscale x 8
; CHECK-NEXT: fsqrt z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fsqrt.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
+ %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fsqrt.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
ret <vscale x 8 x half> %ret
}
@@ -695,7 +695,7 @@ define <vscale x 4 x float> @fsqrt_f32_undef(<vscale x 4 x float> %a, <vscale x
; CHECK-NEXT: fsqrt z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fsqrt.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
+ %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fsqrt.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
ret <vscale x 4 x float> %ret
}
@@ -753,7 +753,7 @@ define <vscale x 2 x double> @fsqrt_f64_undef(<vscale x 2 x double> %a, <vscale
; CHECK-NEXT: fsqrt z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsqrt.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
+ %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsqrt.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
ret <vscale x 2 x double> %ret
}
@@ -811,7 +811,7 @@ define <vscale x 8 x i16> @sxtb_i16_undef(<vscale x 8 x i16> %a, <vscale x 8 x i
; CHECK-NEXT: sxtb z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
+ %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %ret
}
@@ -869,7 +869,7 @@ define <vscale x 4 x i32> @sxtb_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x i
; CHECK-NEXT: sxtb z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
+ %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %ret
}
@@ -927,7 +927,7 @@ define <vscale x 2 x i64> @sxtb_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i
; CHECK-NEXT: sxtb z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
+ %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %ret
}
@@ -985,7 +985,7 @@ define <vscale x 4 x i32> @sxth_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x i
; CHECK-NEXT: sxth z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxth.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
+ %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxth.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %ret
}
@@ -1043,7 +1043,7 @@ define <vscale x 2 x i64> @sxth_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i
; CHECK-NEXT: sxth z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxth.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
+ %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxth.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %ret
}
@@ -1101,7 +1101,7 @@ define <vscale x 2 x i64> @sxtw_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i
; CHECK-NEXT: sxtw z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
+ %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %ret
}
diff --git a/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll b/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
index fc315b3a4730a..8fa23ed2038ed 100644
--- a/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
@@ -15,7 +15,7 @@ define <vscale x 8 x i16> @uunpklo_i8_valid(ptr %b) #0 {
; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
- %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %b, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+ %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %b, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> poison)
%uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8> %load)
ret <vscale x 8 x i16> %uzp
}
@@ -28,7 +28,7 @@ define <vscale x 8 x i16> @uunpklo_i8_invalid(ptr %b) #0 {
; CHECK-NEXT: uunpklo z0.h, z0.b
; CHECK-NEXT: ret
%mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 12)
- %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %b, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+ %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %b, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> poison)
%uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8> %load)
ret <vscale x 8 x i16> %uzp
}
@@ -40,7 +40,7 @@ define <vscale x 4 x i32> @uunpklo_i16_valid(ptr %b) #0 {
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
- %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %b, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+ %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %b, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> poison)
%uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16> %load)
ret <vscale x 4 x i32> %uzp
}
@@ -53,7 +53,7 @@ define <vscale x 4 x i32> @uunpklo_i16_invalid(ptr %b) #0 {
; CHECK-NEXT: uunpklo z0.s, z0.h
; CHECK-NEXT: ret
%mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 11)
- %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %b, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+ %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %b, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> poison)
%uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16> %load)
ret <vscale x 4 x i32> %uzp
}
@@ -65,7 +65,7 @@ define <vscale x 2 x i64> @uunpklo_i32_valid(ptr %b) #0 {
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 9)
- %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
%uzp = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %load)
ret <vscale x 2 x i64> %uzp
}
@@ -78,7 +78,7 @@ define <vscale x 2 x i64> @uunpklo_i32_invalid(ptr %b) #0 {
; CHECK-NEXT: uunpklo z0.d, z0.s
; CHECK-NEXT: ret
%mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
- %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
%uzp = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %load)
ret <vscale x 2 x i64> %uzp
}
@@ -91,7 +91,7 @@ define <vscale x 2 x i64> @uunpklo_invalid_all(ptr %b) #0 {
; CHECK-NEXT: uunpklo z0.d, z0.s
; CHECK-NEXT: ret
%mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
%uzp = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %load)
ret <vscale x 2 x i64> %uzp
}
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
index 84c15e4fbc33c..8c198ee518873 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
@@ -6,7 +6,7 @@ define <vscale x 2 x i8> @test_compress_nxv2i8(<vscale x 2 x i8> %vec, <vscale x
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.d, p0, z0.d
; CHECK-NEXT: ret
- %out = call <vscale x 2 x i8> @llvm.experimental.vector.compress(<vscale x 2 x i8> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+ %out = call <vscale x 2 x i8> @llvm.experimental.vector.compress(<vscale x 2 x i8> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x i8> poison)
ret <vscale x 2 x i8> %out
}
@@ -15,7 +15,7 @@ define <vscale x 2 x i16> @test_compress_nxv2i16(<vscale x 2 x i16> %vec, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.d, p0, z0.d
; CHECK-NEXT: ret
- %out = call <vscale x 2 x i16> @llvm.experimental.vector.compress(<vscale x 2 x i16> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+ %out = call <vscale x 2 x i16> @llvm.experimental.vector.compress(<vscale x 2 x i16> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x i16> poison)
ret <vscale x 2 x i16> %out
}
@@ -24,7 +24,7 @@ define <vscale x 2 x i32> @test_compress_nxv2i32(<vscale x 2 x i32> %vec, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.d, p0, z0.d
; CHECK-NEXT: ret
- %out = call <vscale x 2 x i32> @llvm.experimental.vector.compress(<vscale x 2 x i32> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+ %out = call <vscale x 2 x i32> @llvm.experimental.vector.compress(<vscale x 2 x i32> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x i32> poison)
ret <vscale x 2 x i32> %out
}
@@ -33,7 +33,7 @@ define <vscale x 2 x i64> @test_compress_nxv2i64(<vscale x 2 x i64> %vec, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.d, p0, z0.d
; CHECK-NEXT: ret
- %out = call <vscale x 2 x i64> @llvm.experimental.vector.compress(<vscale x 2 x i64> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+ %out = call <vscale x 2 x i64> @llvm.experimental.vector.compress(<vscale x 2 x i64> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x i64> poison)
ret <vscale x 2 x i64> %out
}
@@ -42,7 +42,7 @@ define <vscale x 2 x float> @test_compress_nxv2f32(<vscale x 2 x float> %vec, <v
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.d, p0, z0.d
; CHECK-NEXT: ret
- %out = call <vscale x 2 x float> @llvm.experimental.vector.compress(<vscale x 2 x float> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+ %out = call <vscale x 2 x float> @llvm.experimental.vector.compress(<vscale x 2 x float> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x float> poison)
ret <vscale x 2 x float> %out
}
@@ -51,7 +51,7 @@ define <vscale x 2 x double> @test_compress_nxv2f64(<vscale x 2 x double> %vec,
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.d, p0, z0.d
; CHECK-NEXT: ret
- %out = call <vscale x 2 x double> @llvm.experimental.vector.compress(<vscale x 2 x double> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+ %out = call <vscale x 2 x double> @llvm.experimental.vector.compress(<vscale x 2 x double> %vec, <vscale x 2 x i1> %mask, <vscale x 2 x double> poison)
ret <vscale x 2 x double> %out
}
@@ -60,7 +60,7 @@ define <vscale x 4 x i8> @test_compress_nxv4i8(<vscale x 4 x i8> %vec, <vscale x
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.s, p0, z0.s
; CHECK-NEXT: ret
- %out = call <vscale x 4 x i8> @llvm.experimental.vector.compress(<vscale x 4 x i8> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+ %out = call <vscale x 4 x i8> @llvm.experimental.vector.compress(<vscale x 4 x i8> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
ret <vscale x 4 x i8> %out
}
@@ -69,7 +69,7 @@ define <vscale x 4 x i16> @test_compress_nxv4i16(<vscale x 4 x i16> %vec, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.s, p0, z0.s
; CHECK-NEXT: ret
- %out = call <vscale x 4 x i16> @llvm.experimental.vector.compress(<vscale x 4 x i16> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+ %out = call <vscale x 4 x i16> @llvm.experimental.vector.compress(<vscale x 4 x i16> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
ret <vscale x 4 x i16> %out
}
@@ -78,7 +78,7 @@ define <vscale x 4 x i32> @test_compress_nxv4i32(<vscale x 4 x i32> %vec, <vscal
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.s, p0, z0.s
; CHECK-NEXT: ret
- %out = call <vscale x 4 x i32> @llvm.experimental.vector.compress(<vscale x 4 x i32> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+ %out = call <vscale x 4 x i32> @llvm.experimental.vector.compress(<vscale x 4 x i32> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %out
}
@@ -87,7 +87,7 @@ define <vscale x 4 x float> @test_compress_nxv4f32(<vscale x 4 x float> %vec, <v
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.s, p0, z0.s
; CHECK-NEXT: ret
- %out = call <vscale x 4 x float> @llvm.experimental.vector.compress(<vscale x 4 x float> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+ %out = call <vscale x 4 x float> @llvm.experimental.vector.compress(<vscale x 4 x float> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x float> poison)
ret <vscale x 4 x float> %out
}
@@ -96,7 +96,7 @@ define <vscale x 4 x i4> @test_compress_illegal_element_type(<vscale x 4 x i4> %
; CHECK: // %bb.0:
; CHECK-NEXT: compact z0.s, p0, z0.s
; CHECK-NEXT: ret
- %out = call <vscale x 4 x i4> @llvm.experimental.vector.compress(<vscale x 4 x i4> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i4> undef)
+ %out = call <vscale x 4 x i4> @llvm.experimental.vector.compress(<vscale x 4 x i4> %vec, <vscale x 4 x i1> %mask, <vscale x 4 x i4> poison)
ret <vscale x 4 x i4> %out
}
@@ -126,7 +126,7 @@ define <vscale x 8 x i32> @test_compress_large(<vscale x 8 x i32> %vec, <vscale
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
- %out = call <vscale x 8 x i32> @llvm.experimental.vector.compress(<vscale x 8 x i32> %vec, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
+ %out = call <vscale x 8 x i32> @llvm.experimental.vector.compress(<vscale x 8 x i32> %vec, <vscale x 8 x i1> %mask, <vscale x 8 x i32> poison)
ret <vscale x 8 x i32> %out
}
@@ -137,21 +137,21 @@ define <vscale x 4 x i32> @test_compress_const_splat1_mask(<vscale x 4 x i32> %i
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
- %out = call <vscale x 4 x i32> @llvm.experimental.vector.compress(<vscale x 4 x i32> %vec, <vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> undef)
+ %out = call <vscale x 4 x i32> @llvm.experimental.vector.compress(<vscale x 4 x i32> %vec, <vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %out
}
define <vscale x 4 x i32> @test_compress_const_splat0_mask(<vscale x 4 x i32> %ignore, <vscale x 4 x i32> %vec) {
; CHECK-LABEL: test_compress_const_splat0_mask:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
- %out = call <vscale x 4 x i32> @llvm.experimental.vector.compress(<vscale x 4 x i32> %vec, <vscale x 4 x i1> splat (i1 0), <vscale x 4 x i32> undef)
+ %out = call <vscale x 4 x i32> @llvm.experimental.vector.compress(<vscale x 4 x i32> %vec, <vscale x 4 x i1> splat (i1 0), <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %out
}
-define <vscale x 4 x i32> @test_compress_undef_mask(<vscale x 4 x i32> %ignore, <vscale x 4 x i32> %vec) {
-; CHECK-LABEL: test_compress_undef_mask:
+define <vscale x 4 x i32> @test_compress_poison_mask(<vscale x 4 x i32> %ignore, <vscale x 4 x i32> %vec) {
+; CHECK-LABEL: test_compress_poison_mask:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
- %out = call <vscale x 4 x i32> @llvm.experimental.vector.compress(<vscale x 4 x i32> %vec, <vscale x 4 x i1> undef, <vscale x 4 x i32> undef)
+ %out = call <vscale x 4 x i32> @llvm.experimental.vector.compress(<vscale x 4 x i32> %vec, <vscale x 4 x i1> poison, <vscale x 4 x i32> poison)
ret <vscale x 4 x i32> %out
}
@@ -168,7 +168,7 @@ define <4 x i32> @test_compress_v4i32_with_sve(<4 x i32> %vec, <4 x i1> %mask) {
; CHECK-NEXT: compact z0.s, p0, z0.s
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
- %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> undef)
+ %out = call <4 x i32> @llvm.experimental.vector.compress(<4 x i32> %vec, <4 x i1> %mask, <4 x i32> poison)
ret <4 x i32> %out
}
@@ -186,7 +186,7 @@ define <1 x i32> @test_compress_v1i32_with_sve(<1 x i32> %vec, <1 x i1> %mask) {
; CHECK-NEXT: compact z0.d, p0, z0.d
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: ret
- %out = call <1 x i32> @llvm.experimental.vector.compress(<1 x i32> %vec, <1 x i1> %mask, <1 x i32> undef)
+ %out = call <1 x i32> @llvm.experimental.vector.compress(<1 x i32> %vec, <1 x i1> %mask, <1 x i32> poison)
ret <1 x i32> %out
}
@@ -221,7 +221,7 @@ define <4 x double> @test_compress_v4f64_with_sve(<4 x double> %vec, <4 x i1> %m
; CHECK-NEXT: str q1, [x9, x8]
; CHECK-NEXT: ldp q0, q1, [sp], #32
; CHECK-NEXT: ret
- %out = call <4 x double> @llvm.experimental.vector.compress(<4 x double> %vec, <4 x i1> %mask, <4 x double> undef)
+ %out = call <4 x double> @llvm.experimental.vector.compress(<4 x double> %vec, <4 x i1> %mask, <4 x double> poison)
ret <4 x double> %out
}
@@ -236,7 +236,7 @@ define <2 x i16> @test_compress_v2i16_with_sve(<2 x i16> %vec, <2 x i1> %mask) {
; CHECK-NEXT: compact z0.d, p0, z0.d
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: ret
- %out = call <2 x i16> @llvm.experimental.vector.compress(<2 x i16> %vec, <2 x i1> %mask, <2 x i16> undef)
+ %out = call <2 x i16> @llvm.experimental.vector.compress(<2 x i16> %vec, <2 x i1> %mask, <2 x i16> poison)
ret <2 x i16> %out
}
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
index 8dd433b6f23c6..4534e8f6de05e 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
@@ -8,8 +8,8 @@ define <vscale x 16 x i8> @sve_splat_16xi8(i8 %val) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.b, w0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
- %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %ins = insertelement <vscale x 16 x i8> poison, i8 %val, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i8> %splat
}
@@ -18,8 +18,8 @@ define <vscale x 8 x i16> @sve_splat_8xi16(i16 %val) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.h, w0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
- %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x i16> poison, i16 %val, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i16> %splat
}
@@ -28,8 +28,8 @@ define <vscale x 4 x i32> @sve_splat_4xi32(i32 %val) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.s, w0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
- %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i32> poison, i32 %val, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %splat
}
@@ -38,8 +38,8 @@ define <vscale x 2 x i64> @sve_splat_2xi64(i64 %val) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.d, x0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
- %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i64> poison, i64 %val, i32 0
+ %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %splat
}
@@ -107,8 +107,8 @@ define <vscale x 2 x i8> @sve_splat_2xi8(i8 %val) {
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: mov z0.d, x0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 2 x i8> undef, i8 %val, i32 0
- %splat = shufflevector <vscale x 2 x i8> %ins, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i8> poison, i8 %val, i32 0
+ %splat = shufflevector <vscale x 2 x i8> %ins, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i8> %splat
}
@@ -117,8 +117,8 @@ define <vscale x 4 x i8> @sve_splat_4xi8(i8 %val) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.s, w0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 4 x i8> undef, i8 %val, i32 0
- %splat = shufflevector <vscale x 4 x i8> %ins, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i8> poison, i8 %val, i32 0
+ %splat = shufflevector <vscale x 4 x i8> %ins, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i8> %splat
}
@@ -127,8 +127,8 @@ define <vscale x 8 x i8> @sve_splat_8xi8(i8 %val) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.h, w0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 8 x i8> undef, i8 %val, i32 0
- %splat = shufflevector <vscale x 8 x i8> %ins, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x i8> poison, i8 %val, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %ins, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i8> %splat
}
@@ -146,8 +146,8 @@ define <vscale x 2 x i16> @sve_splat_2xi16(i16 %val) {
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: mov z0.d, x0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 2 x i16> undef, i16 %val, i32 0
- %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i16> poison, i16 %val, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i16> %splat
}
@@ -156,8 +156,8 @@ define <vscale x 4 x i16> @sve_splat_4xi16(i16 %val) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.s, w0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 4 x i16> undef, i16 %val, i32 0
- %splat = shufflevector <vscale x 4 x i16> %ins, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i16> poison, i16 %val, i32 0
+ %splat = shufflevector <vscale x 4 x i16> %ins, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i16> %splat
}
@@ -175,8 +175,8 @@ define <vscale x 2 x i32> @sve_splat_2xi32(i32 %val) {
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: mov z0.d, x0
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 2 x i32> undef, i32 %val, i32 0
- %splat = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i32> poison, i32 %val, i32 0
+ %splat = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i32> %splat
}
@@ -196,8 +196,8 @@ define <vscale x 1 x i32> @sve_splat_1xi32(i32 %val) {
; CHECK-NEXT: mov z0.s, w0
; CHECK-NEXT: ret
entry:
- %ins = insertelement <vscale x 1 x i32> undef, i32 %val, i32 0
- %splat = shufflevector <vscale x 1 x i32> %ins, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+ %ins = insertelement <vscale x 1 x i32> poison, i32 %val, i32 0
+ %splat = shufflevector <vscale x 1 x i32> %ins, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
ret <vscale x 1 x i32> %splat
}
@@ -208,8 +208,8 @@ define <vscale x 12 x i32> @sve_splat_12xi32(i32 %val) {
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mov z2.d, z0.d
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 12 x i32> undef, i32 %val, i32 0
- %splat = shufflevector <vscale x 12 x i32> %ins, <vscale x 12 x i32> undef, <vscale x 12 x i32> zeroinitializer
+ %ins = insertelement <vscale x 12 x i32> poison, i32 %val, i32 0
+ %splat = shufflevector <vscale x 12 x i32> %ins, <vscale x 12 x i32> poison, <vscale x 12 x i32> zeroinitializer
ret <vscale x 12 x i32> %splat
}
@@ -220,8 +220,8 @@ define <vscale x 2 x i1> @sve_splat_2xi1(i1 %val) {
; CHECK-NEXT: sbfx x8, x0, #0, #1
; CHECK-NEXT: whilelo p0.d, xzr, x8
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 2 x i1> undef, i1 %val, i32 0
- %splat = shufflevector <vscale x 2 x i1> %ins, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %ins = insertelement <vscale x 2 x i1> poison, i1 %val, i32 0
+ %splat = shufflevector <vscale x 2 x i1> %ins, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i1> %splat
}
@@ -232,8 +232,8 @@ define <vscale x 4 x i1> @sve_splat_4xi1(i1 %val) {
; CHECK-NEXT: sbfx x8, x0, #0, #1
; CHECK-NEXT: whilelo p0.s, xzr, x8
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 4 x i1> undef, i1 %val, i32 0
- %splat = shufflevector <vscale x 4 x i1> %ins, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %ins = insertelement <vscale x 4 x i1> poison, i1 %val, i32 0
+ %splat = shufflevector <vscale x 4 x i1> %ins, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i1> %splat
}
@@ -244,8 +244,8 @@ define <vscale x 8 x i1> @sve_splat_8xi1(i1 %val) {
; CHECK-NEXT: sbfx x8, x0, #0, #1
; CHECK-NEXT: whilelo p0.h, xzr, x8
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 8 x i1> undef, i1 %val, i32 0
- %splat = shufflevector <vscale x 8 x i1> %ins, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %ins = insertelement <vscale x 8 x i1> poison, i1 %val, i32 0
+ %splat = shufflevector <vscale x 8 x i1> %ins, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i1> %splat
}
@@ -256,8 +256,8 @@ define <vscale x 16 x i1> @sve_splat_16xi1(i1 %val) {
; CHECK-NEXT: sbfx x8, x0, #0, #1
; CHECK-NEXT: whilelo p0.b, xzr, x8
; CHECK-NEXT: ret
- %ins = insertelement <vscale x 16 x i1> undef, i1 %val, i32 0
- %splat = shufflevector <vscale x 16 x i1> %ins, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %ins = insertelement <vscale x 16 x i1> poison, i1 %val, i32 0
+ %splat = shufflevector <vscale x 16 x i1> %ins, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i1> %splat
}
@@ -269,8 +269,8 @@ define <vscale x 8 x bfloat> @splat_nxv8bf16(bfloat %val) #0 {
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: ret
- %1 = insertelement <vscale x 8 x bfloat> undef, bfloat %val, i32 0
- %2 = shufflevector <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> undef, <vscale x 8 x i32> zeroinitializer
+ %1 = insertelement <vscale x 8 x bfloat> poison, bfloat %val, i32 0
+ %2 = shufflevector <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x bfloat> %2
}
@@ -280,8 +280,8 @@ define <vscale x 4 x bfloat> @splat_nxv4bf16(bfloat %val) #0 {
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: ret
- %1 = insertelement <vscale x 4 x bfloat> undef, bfloat %val, i32 0
- %2 = shufflevector <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> undef, <vscale x 4 x i32> zeroinitializer
+ %1 = insertelement <vscale x 4 x bfloat> poison, bfloat %val, i32 0
+ %2 = shufflevector <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x bfloat> %2
}
@@ -291,8 +291,8 @@ define <vscale x 2 x bfloat> @splat_nxv2bf16(bfloat %val) #0 {
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: ret
- %1 = insertelement <vscale x 2 x bfloat> undef, bfloat %val, i32 0
- %2 = shufflevector <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> undef, <vscale x 2 x i32> zeroinitializer
+ %1 = insertelement <vscale x 2 x bfloat> poison, bfloat %val, i32 0
+ %2 = shufflevector <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x bfloat> %2
}
@@ -302,8 +302,8 @@ define <vscale x 8 x half> @splat_nxv8f16(half %val) {
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: ret
- %1 = insertelement <vscale x 8 x half> undef, half %val, i32 0
- %2 = shufflevector <vscale x 8 x half> %1, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ %1 = insertelement <vscale x 8 x half> poison, half %val, i32 0
+ %2 = shufflevector <vscale x 8 x half> %1, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x half> %2
}
@@ -313,8 +313,8 @@ define <vscale x 4 x half> @splat_nxv4f16(half %val) {
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: ret
- %1 = insertelement <vscale x 4 x half> undef, half %val, i32 0
- %2 = shufflevector <vscale x 4 x half> %1, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+ %1 = insertelement <vscale x 4 x half> poison, half %val, i32 0
+ %2 = shufflevector <vscale x 4 x half> %1, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x half> %2
}
@@ -324,8 +324,8 @@ define <vscale x 2 x half> @splat_nxv2f16(half %val) {
; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: mov z0.h, h0
; CHECK-NEXT: ret
- %1 = insertelement <vscale x 2 x half> undef, half %val, i32 0
- %2 = shufflevector <vscale x 2 x half> %1, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+ %1 = insertelement <vscale x 2 x half> poison, half %val, i32 0
+ %2 = shufflevector <vscale x 2 x half> %1, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x half> %2
}
@@ -335,8 +335,8 @@ define <vscale x 4 x float> @splat_nxv4f32(float %val) {
; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
; CHECK-NEXT: mov z0.s, s0
; CHECK-NEXT: ret
- %1 = insertelement <vscale x 4 x float> undef, float %val, i32 0
- %2 = shufflevector <vscale x 4 x float> %1, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %1 = insertelement <vscale x 4 x float> poison, float %val, i32 0
+ %2 = shufflevector <vscale x 4 x float> %1, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x float> %2
}
@@ -346,8 +346,8 @@ define <vscale x 2 x float> @splat_nxv2f32(float %val) {
; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
; CHECK-NEXT: mov z0.s, s0
; CHECK-NEXT: ret
- %1 = insertelement <vscale x 2 x float> undef, float %val, i32 0
- %2 = shufflevector <vscale x 2 x float> %1, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %1 = insertelement <vscale x 2 x float> poison, float %val, i32 0
+ %2 = shufflevector <vscale x 2 x float> %1, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x float> %2
}
@@ -357,8 +357,8 @@ define <vscale x 2 x double> @splat_nxv2f64(double %val) {
; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: mov z0.d, d0
; CHECK-NEXT: ret
- %1 = insertelement <vscale x 2 x double> undef, double %val, i32 0
- %2 = shufflevector <vscale x 2 x double> %1, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+ %1 = insertelement <vscale x 2 x double> poison, double %val, i32 0
+ %2 = shufflevector <vscale x 2 x double> %1, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x double> %2
}
diff --git a/llvm/test/CodeGen/AArch64/sve-vl-arith.ll b/llvm/test/CodeGen/AArch64/sve-vl-arith.ll
index dad357c8a0c13..a6c0e5aa70583 100644
--- a/llvm/test/CodeGen/AArch64/sve-vl-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vl-arith.ll
@@ -16,8 +16,8 @@ define <vscale x 8 x i16> @inch_vec(<vscale x 8 x i16> %a) {
; CHECK-NEXT: ret
%vscale = call i16 @llvm.vscale.i16()
%mul = mul i16 %vscale, 8
- %vl = insertelement <vscale x 8 x i16> undef, i16 %mul, i32 0
- %vl.splat = shufflevector <vscale x 8 x i16> %vl, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vl = insertelement <vscale x 8 x i16> poison, i16 %mul, i32 0
+ %vl.splat = shufflevector <vscale x 8 x i16> %vl, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%res = add <vscale x 8 x i16> %a, %vl.splat
ret <vscale x 8 x i16> %res
}
@@ -34,8 +34,8 @@ define <vscale x 4 x i32> @incw_vec(<vscale x 4 x i32> %a) {
; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%mul = mul i32 %vscale, 4
- %vl = insertelement <vscale x 4 x i32> undef, i32 %mul, i32 0
- %vl.splat = shufflevector <vscale x 4 x i32> %vl, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %vl = insertelement <vscale x 4 x i32> poison, i32 %mul, i32 0
+ %vl.splat = shufflevector <vscale x 4 x i32> %vl, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%res = add <vscale x 4 x i32> %a, %vl.splat
ret <vscale x 4 x i32> %res
}
@@ -52,8 +52,8 @@ define <vscale x 2 x i64> @incd_vec(<vscale x 2 x i64> %a) {
; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%mul = mul i64 %vscale, 2
- %vl = insertelement <vscale x 2 x i64> undef, i64 %mul, i32 0
- %vl.splat = shufflevector <vscale x 2 x i64> %vl, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %vl = insertelement <vscale x 2 x i64> poison, i64 %mul, i32 0
+ %vl.splat = shufflevector <vscale x 2 x i64> %vl, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%res = add <vscale x 2 x i64> %a, %vl.splat
ret <vscale x 2 x i64> %res
}
@@ -70,8 +70,8 @@ define <vscale x 8 x i16> @dech_vec(<vscale x 8 x i16> %a) {
; CHECK-NEXT: ret
%vscale = call i16 @llvm.vscale.i16()
%mul = mul i16 %vscale, 16
- %vl = insertelement <vscale x 8 x i16> undef, i16 %mul, i32 0
- %vl.splat = shufflevector <vscale x 8 x i16> %vl, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vl = insertelement <vscale x 8 x i16> poison, i16 %mul, i32 0
+ %vl.splat = shufflevector <vscale x 8 x i16> %vl, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%res = sub <vscale x 8 x i16> %a, %vl.splat
ret <vscale x 8 x i16> %res
}
@@ -88,8 +88,8 @@ define <vscale x 4 x i32> @decw_vec(<vscale x 4 x i32> %a) {
; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%mul = mul i32 %vscale, 16
- %vl = insertelement <vscale x 4 x i32> undef, i32 %mul, i32 0
- %vl.splat = shufflevector <vscale x 4 x i32> %vl, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %vl = insertelement <vscale x 4 x i32> poison, i32 %mul, i32 0
+ %vl.splat = shufflevector <vscale x 4 x i32> %vl, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%res = sub <vscale x 4 x i32> %a, %vl.splat
ret <vscale x 4 x i32> %res
}
@@ -106,8 +106,8 @@ define <vscale x 2 x i64> @decd_vec(<vscale x 2 x i64> %a) {
; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%mul = mul i64 %vscale, 16
- %vl = insertelement <vscale x 2 x i64> undef, i64 %mul, i32 0
- %vl.splat = shufflevector <vscale x 2 x i64> %vl, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %vl = insertelement <vscale x 2 x i64> poison, i64 %mul, i32 0
+ %vl.splat = shufflevector <vscale x 2 x i64> %vl, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%res = sub <vscale x 2 x i64> %a, %vl.splat
ret <vscale x 2 x i64> %res
}
diff --git a/llvm/test/CodeGen/AArch64/sve2-unary-movprfx.ll b/llvm/test/CodeGen/AArch64/sve2-unary-movprfx.ll
index b82b656f710ff..ae4562af88f09 100644
--- a/llvm/test/CodeGen/AArch64/sve2-unary-movprfx.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-unary-movprfx.ll
@@ -15,7 +15,7 @@ define <vscale x 16 x i8> @sqabs_i8_dupreg(<vscale x 16 x i8> %a) #0 {
; CHECK-NEXT: sqabs z0.b, p0/m, z0.b
; CHECK-NEXT: ret
%pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
- %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqabs.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
+ %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqabs.nxv16i8(<vscale x 16 x i8> poison, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
ret <vscale x 16 x i8> %ret
}
@@ -28,7 +28,7 @@ define <vscale x 16 x i8> @sqabs_i8_undef(<vscale x 16 x i8> %a, <vscale x 16 x
; CHECK-NEXT: sqabs z0.b, p0/m, z1.b
; CHECK-NEXT: ret
%pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
- %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqabs.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
+ %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqabs.nxv16i8(<vscale x 16 x i8> poison, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
ret <vscale x 16 x i8> %ret
}
@@ -65,7 +65,7 @@ define <vscale x 8 x i16> @sqabs_i16_dupreg(<vscale x 8 x i16> %a) #0 {
; CHECK-NEXT: sqabs z0.h, p0/m, z0.h
; CHECK-NEXT: ret
%pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqabs.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
+ %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqabs.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
ret <vscale x 8 x i16> %ret
}
@@ -77,7 +77,7 @@ define <vscale x 8 x i16> @sqabs_i16_undef(<vscale x 8 x i16> %a, <vscale x 8 x
; CHECK-NEXT: sqabs z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqabs.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
+ %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqabs.nxv8i16(<vscale x 8 x i16> poison, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
ret <vscale x 8 x i16> %ret
}
@@ -113,7 +113,7 @@ define <vscale x 4 x i32> @sqabs_i32_dupreg(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: sqabs z0.s, p0/m, z0.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqabs.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
+ %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqabs.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
ret <vscale x 4 x i32> %ret
}
@@ -125,7 +125,7 @@ define <vscale x 4 x i32> @sqabs_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x
; CHECK-NEXT: sqabs z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqabs.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
+ %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqabs.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %ret
}
@@ -161,7 +161,7 @@ define <vscale x 2 x i64> @sqabs_i64_dupreg(<vscale x 2 x i64> %a) #0 {
; CHECK-NEXT: sqabs z0.d, p0/m, z0.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqabs.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
+ %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqabs.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
ret <vscale x 2 x i64> %ret
}
@@ -173,7 +173,7 @@ define <vscale x 2 x i64> @sqabs_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x
; CHECK-NEXT: sqabs z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqabs.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
+ %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqabs.nxv2i64(<vscale x 2 x i64> poison, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
ret <vscale x 2 x i64> %ret
}
@@ -209,7 +209,7 @@ define <vscale x 4 x i32> @urecpe_i32_dupreg(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: urecpe z0.s, p0/m, z0.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.urecpe.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
+ %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.urecpe.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
ret <vscale x 4 x i32> %ret
}
@@ -221,7 +221,7 @@ define <vscale x 4 x i32> @urecpe_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x
; CHECK-NEXT: urecpe z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.urecpe.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
+ %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.urecpe.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
ret <vscale x 4 x i32> %ret
}
diff --git a/llvm/test/CodeGen/AArch64/vector-insert-dag-combines.ll b/llvm/test/CodeGen/AArch64/vector-insert-dag-combines.ll
index c63808ed48385..0e05a63ef86de 100644
--- a/llvm/test/CodeGen/AArch64/vector-insert-dag-combines.ll
+++ b/llvm/test/CodeGen/AArch64/vector-insert-dag-combines.ll
@@ -26,7 +26,7 @@ target triple = "aarch64-unknown-linux-gnu"
define <16 x i8> @insert_small_fixed_into_big_fixed(<8 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<8 x i8> %a, i64 0)
- %insert = call <16 x i8> @llvm.vector.insert(<16 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <16 x i8> @llvm.vector.insert(<16 x i8> poison, <4 x i8> %extract, i64 0)
ret <16 x i8> %insert
}
@@ -49,7 +49,7 @@ define <16 x i8> @insert_small_fixed_into_big_fixed(<8 x i8> %a) #0 {
define <vscale x 16 x i8> @insert_small_fixed_into_big_scalable(<8 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<8 x i8> %a, i64 0)
- %insert = call <vscale x 16 x i8> @llvm.vector.insert(<vscale x 16 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <vscale x 16 x i8> @llvm.vector.insert(<vscale x 16 x i8> poison, <4 x i8> %extract, i64 0)
ret <vscale x 16 x i8> %insert
}
@@ -76,7 +76,7 @@ define <vscale x 16 x i8> @insert_small_fixed_into_big_scalable(<8 x i8> %a) #0
; Resulting insert would not be legal, so there's no transformation.
define <16 x i8> @insert_small_scalable_into_big_fixed(<vscale x 8 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<vscale x 8 x i8> %a, i64 0)
- %insert = call <16 x i8> @llvm.vector.insert(<16 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <16 x i8> @llvm.vector.insert(<16 x i8> poison, <4 x i8> %extract, i64 0)
ret <16 x i8> %insert
}
@@ -101,7 +101,7 @@ define <16 x i8> @insert_small_scalable_into_big_fixed(<vscale x 8 x i8> %a) #0
define <vscale x 16 x i8> @insert_small_scalable_into_big_scalable_1(<vscale x 8 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<vscale x 8 x i8> %a, i64 0)
- %insert = call <vscale x 16 x i8> @llvm.vector.insert(<vscale x 16 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <vscale x 16 x i8> @llvm.vector.insert(<vscale x 16 x i8> poison, <4 x i8> %extract, i64 0)
ret <vscale x 16 x i8> %insert
}
@@ -126,7 +126,7 @@ define <vscale x 16 x i8> @insert_small_scalable_into_big_scalable_1(<vscale x 8
define <vscale x 16 x i8> @insert_small_scalable_into_big_scalable_2(<vscale x 8 x i8> %a) #0 {
%extract = call <vscale x 4 x i8> @llvm.vector.extract(<vscale x 8 x i8> %a, i64 0)
- %insert = call <vscale x 16 x i8> @llvm.vector.insert(<vscale x 16 x i8> undef, <vscale x 4 x i8> %extract, i64 0)
+ %insert = call <vscale x 16 x i8> @llvm.vector.insert(<vscale x 16 x i8> poison, <vscale x 4 x i8> %extract, i64 0)
ret <vscale x 16 x i8> %insert
}
@@ -149,7 +149,7 @@ define <vscale x 16 x i8> @insert_small_scalable_into_big_scalable_2(<vscale x 8
define <8 x i8> @extract_small_fixed_from_big_fixed(<16 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<16 x i8> %a, i64 0)
- %insert = call <8 x i8> @llvm.vector.insert(<8 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <8 x i8> @llvm.vector.insert(<8 x i8> poison, <4 x i8> %extract, i64 0)
ret <8 x i8> %insert
}
@@ -176,7 +176,7 @@ define <8 x i8> @extract_small_fixed_from_big_fixed(<16 x i8> %a) #0 {
; Resulting insert would not be legal, so there's no transformation.
define <vscale x 8 x i8> @extract_small_scalable_from_big_fixed(<16 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<16 x i8> %a, i64 0)
- %insert = call <vscale x 8 x i8> @llvm.vector.insert(<vscale x 8 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <vscale x 8 x i8> @llvm.vector.insert(<vscale x 8 x i8> poison, <4 x i8> %extract, i64 0)
ret <vscale x 8 x i8> %insert
}
@@ -199,7 +199,7 @@ define <vscale x 8 x i8> @extract_small_scalable_from_big_fixed(<16 x i8> %a) #0
define <8 x i8> @extract_small_fixed_from_big_scalable(<vscale x 16 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<vscale x 16 x i8> %a, i64 0)
- %insert = call <8 x i8> @llvm.vector.insert(<8 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <8 x i8> @llvm.vector.insert(<8 x i8> poison, <4 x i8> %extract, i64 0)
ret <8 x i8> %insert
}
@@ -224,7 +224,7 @@ define <8 x i8> @extract_small_fixed_from_big_scalable(<vscale x 16 x i8> %a) #0
define <vscale x 8 x i8> @extract_small_scalable_from_big_scalable_1(<vscale x 16 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<vscale x 16 x i8> %a, i64 0)
- %insert = call <vscale x 8 x i8> @llvm.vector.insert(<vscale x 8 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <vscale x 8 x i8> @llvm.vector.insert(<vscale x 8 x i8> poison, <4 x i8> %extract, i64 0)
ret <vscale x 8 x i8> %insert
}
@@ -249,7 +249,7 @@ define <vscale x 8 x i8> @extract_small_scalable_from_big_scalable_1(<vscale x 1
define <vscale x 8 x i8> @extract_small_scalable_from_big_scalable_2(<vscale x 16 x i8> %a) #0 {
%extract = call <vscale x 4 x i8> @llvm.vector.extract(<vscale x 16 x i8> %a, i64 0)
- %insert = call <vscale x 8 x i8> @llvm.vector.insert(<vscale x 8 x i8> undef, <vscale x 4 x i8> %extract, i64 0)
+ %insert = call <vscale x 8 x i8> @llvm.vector.insert(<vscale x 8 x i8> poison, <vscale x 4 x i8> %extract, i64 0)
ret <vscale x 8 x i8> %insert
}
@@ -276,7 +276,7 @@ define <vscale x 8 x i8> @extract_small_scalable_from_big_scalable_2(<vscale x 1
; opposite transformation and emit an extract instead.
define <16 x i8> @extract_fixed_from_scalable(<vscale x 16 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<vscale x 16 x i8> %a, i64 0)
- %insert = call <16 x i8> @llvm.vector.insert(<16 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <16 x i8> @llvm.vector.insert(<16 x i8> poison, <4 x i8> %extract, i64 0)
ret <16 x i8> %insert
}
@@ -303,7 +303,7 @@ define <16 x i8> @extract_fixed_from_scalable(<vscale x 16 x i8> %a) #0 {
; opposite transformation and emit an insert instead.
define <vscale x 16 x i8> @insert_fixed_into_scalable(<16 x i8> %a) #0 {
%extract = call <4 x i8> @llvm.vector.extract(<16 x i8> %a, i64 0)
- %insert = call <vscale x 16 x i8> @llvm.vector.insert(<vscale x 16 x i8> undef, <4 x i8> %extract, i64 0)
+ %insert = call <vscale x 16 x i8> @llvm.vector.insert(<vscale x 16 x i8> poison, <4 x i8> %extract, i64 0)
ret <vscale x 16 x i8> %insert
}