[clang] [llvm] Reapply "[AArch64][SVE] Improve fixed-length addressing modes. (#130263)" (PR #130625)
Ricardo Jesus via cfe-commits
cfe-commits at lists.llvm.org
Mon Mar 10 08:54:14 PDT 2025
https://github.com/rj-jesus created https://github.com/llvm/llvm-project/pull/130625
This restores commit f01e760c08365426de95f02dc2c2dc670eb47352.
The original patch from #129732 exposed what seems to be a bug in `SelectAddrModeIndexedSVE`.
Currently, the offset returned by `SelectAddrModeIndexedSVE` is computed by dividing a VL-based offset (`MulImm`) by the known minimum width of `MemVT`. This works when `MemVT` is a scalable vector type, because scalable types are intrinsically VL-based. However, for fixed vector types, `MemVT` is not scaled to the SVE vector length, which may lead to inaccurate results. For example, for a byte offset of `vscale * 32`, I expect the returned offset to be `2*VL`, irrespective of the width of `MemVT` (unless the latter is an unpacked SVE type). VLA codegen seems to agree with this. However, for `<8 x i32>` vectors, VLS codegen (which uses `SelectAddrModeIndexedSVE`) returns `1*VL`: https://godbolt.org/z/7149fejGo.
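For concreteness, here is a minimal standalone sketch of the arithmetic (the function and its parameters are simplified stand-ins for the DAG types; only the modulo/division mirrors the actual code):

```cpp
#include <cstdint>
#include <cstdio>

// Sketch of the offset computation in SelectAddrModeIndexedSVE.
// MulImm is the VL-based byte offset, i.e. the address is
// Base + vscale * MulImm bytes.
int64_t selectMulVlImm(int64_t MulImm, int64_t MemWidthBytes) {
  if (MulImm % MemWidthBytes != 0)
    return -1; // the real code bails out of the addressing mode here
  return MulImm / MemWidthBytes; // the "#imm, mul vl" immediate
}

int main() {
  // <vscale x 4 x i32>: known minimum size 128 bits -> 16 bytes.
  // vscale*32 bytes / 16 = 2 -> [x0, #2, mul vl], as expected.
  printf("%lld\n", (long long)selectMulVlImm(32, 16));
  // <8 x i32>: 256 bits -> 32 bytes, but this width is *not* VL-scaled.
  // vscale*32 bytes / 32 = 1 -> [x0, #1, mul vl], only correct if VL = 256.
  printf("%lld\n", (long long)selectMulVlImm(32, 32));
  return 0;
}
```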
Is this intentional?
Although this seems to affect both VSCALE-based and Constant-based offsets, I believe we didn't come across it earlier because we rarely generate combinations of VSCALE offsets and fixed vectors. Enabling the Constant-based path made the problem (assuming _it is_ a problem) obvious, because combinations of Constant offsets and fixed vectors are common.
To work around the issue temporarily, I added an early exit to the Constant-based path for fixed vector types.
I left the VSCALE path unchanged because I first wanted to confirm whether the current behaviour is intentional.
I think the long-term solution is to set `MemWidthBytes = 16` for fixed vectors (roughly as sketched below), which should fix the address calculation for both paths. I'm happy to do this here or open a separate PR, but I first wanted to confirm whether this is a viable solution (hence the more conservative workaround for the time being).
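For reference, a rough, untested sketch of that change, replacing the `MemWidthBytes` computation shown in the patch below (this assumes the `mul vl` immediate should always scale by the 16-byte granule, i.e. one VL at vscale = 1, for fixed-length accesses):

```cpp
// Sketch only: for fixed-length vectors, scale by the 16-byte SVE granule
// rather than by the fixed vector's own width, so that
// MulImm / MemWidthBytes yields a correct "mul vl" immediate.
TypeSize TS = MemVT.getSizeInBits();
int64_t MemWidthBytes =
    MemVT.isScalableVector()
        ? static_cast<int64_t>(TS.getKnownMinValue()) / 8
        : 16;
```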
What do you think?
From 03471cbf9270d1707191057de46dd38409c8a046 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj at nvidia.com>
Date: Mon, 10 Mar 2025 01:57:20 -0700
Subject: [PATCH 1/3] Reapply "[AArch64][SVE] Improve fixed-length addressing
modes." (#130263)
This reverts commit 21610e3ecc8bc727f99047e544186b35b1291bcd.
---
.../CodeGen/AArch64/sve-vector-bits-codegen.c | 9 +-
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 15 +-
llvm/lib/Target/AArch64/AArch64Subtarget.h | 12 +-
.../AArch64/sve-fixed-length-offsets.ll | 362 ++++++++++++++++++
.../AArch64/sve-fixed-length-shuffles.ll | 90 ++---
5 files changed, 434 insertions(+), 54 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
diff --git a/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c b/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
index 0ed14b4b3b793..1391a1b09fbd1 100644
--- a/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
@@ -13,12 +13,9 @@
void func(int *restrict a, int *restrict b) {
// CHECK-LABEL: func
-// CHECK256-COUNT-1: str
-// CHECK256-COUNT-7: st1w
-// CHECK512-COUNT-1: str
-// CHECK512-COUNT-3: st1w
-// CHECK1024-COUNT-1: str
-// CHECK1024-COUNT-1: st1w
+// CHECK256-COUNT-8: str
+// CHECK512-COUNT-4: str
+// CHECK1024-COUNT-2: str
// CHECK2048-COUNT-1: st1w
#pragma clang loop vectorize(enable)
for (int i = 0; i < 64; ++i)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 3ca9107cb2ce5..07bcd802962fa 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -7380,12 +7380,23 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
return false;
SDValue VScale = N.getOperand(1);
- if (VScale.getOpcode() != ISD::VSCALE)
+ int64_t MulImm = std::numeric_limits<int64_t>::max();
+ if (VScale.getOpcode() == ISD::VSCALE) {
+ MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
+ } else if (auto C = dyn_cast<ConstantSDNode>(VScale)) {
+ int64_t ByteOffset = C->getSExtValue();
+ const auto KnownVScale =
+ Subtarget->getSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
+
+ if (!KnownVScale || ByteOffset % KnownVScale != 0)
+ return false;
+
+ MulImm = ByteOffset / KnownVScale;
+ } else
return false;
TypeSize TS = MemVT.getSizeInBits();
int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinValue()) / 8;
- int64_t MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
if ((MulImm % MemWidthBytes) != 0)
return false;
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index c6eb77e3bc3ba..f5ffc72cae537 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -391,7 +391,7 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
void mirFileLoaded(MachineFunction &MF) const override;
// Return the known range for the bit length of SVE data registers. A value
- // of 0 means nothing is known about that particular limit beyong what's
+ // of 0 means nothing is known about that particular limit beyond what's
// implied by the architecture.
unsigned getMaxSVEVectorSizeInBits() const {
assert(isSVEorStreamingSVEAvailable() &&
@@ -405,6 +405,16 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
return MinSVEVectorSizeInBits;
}
+ // Return the known bit length of SVE data registers. A value of 0 means the
+ // length is unknown beyond what's implied by the architecture.
+ unsigned getSVEVectorSizeInBits() const {
+ assert(isSVEorStreamingSVEAvailable() &&
+ "Tried to get SVE vector length without SVE support!");
+ if (MinSVEVectorSizeInBits == MaxSVEVectorSizeInBits)
+ return MaxSVEVectorSizeInBits;
+ return 0;
+ }
+
bool useSVEForFixedLengthVectors() const {
if (!isSVEorStreamingSVEAvailable())
return false;
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
new file mode 100644
index 0000000000000..700bbe4f060ca
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -0,0 +1,362 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefix=CHECK-128
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefix=CHECK-256
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=512 -aarch64-sve-vector-bits-max=512 < %s | FileCheck %s --check-prefix=CHECK-512
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=1024 -aarch64-sve-vector-bits-max=1024 < %s | FileCheck %s --check-prefix=CHECK-1024
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=2048 -aarch64-sve-vector-bits-max=2048 < %s | FileCheck %s --check-prefix=CHECK-2048
+
+define void @nxv16i8(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: mov w8, #256 // =0x100
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, x8]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, x8]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv16i8:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr z0, [x0, #16, mul vl]
+; CHECK-128-NEXT: str z0, [x1, #16, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv16i8:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ldr z0, [x0, #8, mul vl]
+; CHECK-256-NEXT: str z0, [x1, #8, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv16i8:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ldr z0, [x0, #4, mul vl]
+; CHECK-512-NEXT: str z0, [x1, #4, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv16i8:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ldr z0, [x0, #2, mul vl]
+; CHECK-1024-NEXT: str z0, [x1, #2, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv16i8:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-2048-NEXT: ret
+ %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 256
+ %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 256
+ %x = load <vscale x 16 x i8>, ptr %ldoff, align 1
+ store <vscale x 16 x i8> %x, ptr %stoff, align 1
+ ret void
+}
+
+define void @nxv8i16(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: mov x8, #128 // =0x80
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1, x8, lsl #1]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv8i16:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr z0, [x0, #16, mul vl]
+; CHECK-128-NEXT: str z0, [x1, #16, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv8i16:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ldr z0, [x0, #8, mul vl]
+; CHECK-256-NEXT: str z0, [x1, #8, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv8i16:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ldr z0, [x0, #4, mul vl]
+; CHECK-512-NEXT: str z0, [x1, #4, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv8i16:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ldr z0, [x0, #2, mul vl]
+; CHECK-1024-NEXT: str z0, [x1, #2, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv8i16:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-2048-NEXT: ret
+ %ldoff = getelementptr inbounds nuw i16, ptr %ldptr, i64 128
+ %stoff = getelementptr inbounds nuw i16, ptr %stptr, i64 128
+ %x = load <vscale x 8 x i16>, ptr %ldoff, align 2
+ store <vscale x 8 x i16> %x, ptr %stoff, align 2
+ ret void
+}
+
+define void @nxv4i32(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov x8, #64 // =0x40
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv4i32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr z0, [x0, #16, mul vl]
+; CHECK-128-NEXT: str z0, [x1, #16, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv4i32:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ldr z0, [x0, #8, mul vl]
+; CHECK-256-NEXT: str z0, [x1, #8, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv4i32:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ldr z0, [x0, #4, mul vl]
+; CHECK-512-NEXT: str z0, [x1, #4, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv4i32:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ldr z0, [x0, #2, mul vl]
+; CHECK-1024-NEXT: str z0, [x1, #2, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv4i32:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-2048-NEXT: ret
+ %ldoff = getelementptr inbounds nuw i32, ptr %ldptr, i64 64
+ %stoff = getelementptr inbounds nuw i32, ptr %stptr, i64 64
+ %x = load <vscale x 4 x i32>, ptr %ldoff, align 4
+ store <vscale x 4 x i32> %x, ptr %stoff, align 4
+ ret void
+}
+
+define void @nxv2i64(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, #32 // =0x20
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; CHECK-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv2i64:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr z0, [x0, #16, mul vl]
+; CHECK-128-NEXT: str z0, [x1, #16, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv2i64:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ldr z0, [x0, #8, mul vl]
+; CHECK-256-NEXT: str z0, [x1, #8, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv2i64:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ldr z0, [x0, #4, mul vl]
+; CHECK-512-NEXT: str z0, [x1, #4, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv2i64:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ldr z0, [x0, #2, mul vl]
+; CHECK-1024-NEXT: str z0, [x1, #2, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv2i64:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-2048-NEXT: ret
+ %ldoff = getelementptr inbounds nuw i64, ptr %ldptr, i64 32
+ %stoff = getelementptr inbounds nuw i64, ptr %stptr, i64 32
+ %x = load <vscale x 2 x i64>, ptr %ldoff, align 8
+ store <vscale x 2 x i64> %x, ptr %stoff, align 8
+ ret void
+}
+
+define void @nxv4i8(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov w8, #32 // =0x20
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, x8]
+; CHECK-NEXT: st1b { z0.s }, p0, [x1, x8]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv4i8:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ptrue p0.s
+; CHECK-128-NEXT: mov w8, #32 // =0x20
+; CHECK-128-NEXT: ld1b { z0.s }, p0/z, [x0, x8]
+; CHECK-128-NEXT: st1b { z0.s }, p0, [x1, x8]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv4i8:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ptrue p0.s
+; CHECK-256-NEXT: ld1b { z0.s }, p0/z, [x0, #4, mul vl]
+; CHECK-256-NEXT: st1b { z0.s }, p0, [x1, #4, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv4i8:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ptrue p0.s
+; CHECK-512-NEXT: ld1b { z0.s }, p0/z, [x0, #2, mul vl]
+; CHECK-512-NEXT: st1b { z0.s }, p0, [x1, #2, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv4i8:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ptrue p0.s
+; CHECK-1024-NEXT: ld1b { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-1024-NEXT: st1b { z0.s }, p0, [x1, #1, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv4i8:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ptrue p0.s
+; CHECK-2048-NEXT: mov w8, #32 // =0x20
+; CHECK-2048-NEXT: ld1b { z0.s }, p0/z, [x0, x8]
+; CHECK-2048-NEXT: st1b { z0.s }, p0, [x1, x8]
+; CHECK-2048-NEXT: ret
+ %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 32
+ %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 32
+ %x = load <vscale x 4 x i8>, ptr %ldoff, align 1
+ store <vscale x 4 x i8> %x, ptr %stoff, align 1
+ ret void
+}
+
+define void @nxv2f32(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, #16 // =0x10
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT: st1w { z0.d }, p0, [x1, x8, lsl #2]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv2f32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ptrue p0.d
+; CHECK-128-NEXT: mov x8, #16 // =0x10
+; CHECK-128-NEXT: ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; CHECK-128-NEXT: st1w { z0.d }, p0, [x1, x8, lsl #2]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv2f32:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ptrue p0.d
+; CHECK-256-NEXT: ld1w { z0.d }, p0/z, [x0, #4, mul vl]
+; CHECK-256-NEXT: st1w { z0.d }, p0, [x1, #4, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv2f32:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ptrue p0.d
+; CHECK-512-NEXT: ld1w { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-512-NEXT: st1w { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv2f32:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ptrue p0.d
+; CHECK-1024-NEXT: ld1w { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-1024-NEXT: st1w { z0.d }, p0, [x1, #1, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv2f32:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ptrue p0.d
+; CHECK-2048-NEXT: mov x8, #16 // =0x10
+; CHECK-2048-NEXT: ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; CHECK-2048-NEXT: st1w { z0.d }, p0, [x1, x8, lsl #2]
+; CHECK-2048-NEXT: ret
+ %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 64
+ %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 64
+ %x = load <vscale x 2 x float>, ptr %ldoff, align 1
+ store <vscale x 2 x float> %x, ptr %stoff, align 1
+ ret void
+}
+
+define void @nxv4f64(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv4f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, #16 // =0x10
+; CHECK-NEXT: add x9, x0, #128
+; CHECK-NEXT: ldr z1, [x9, #1, mul vl]
+; CHECK-NEXT: add x9, x1, #128
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; CHECK-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
+; CHECK-NEXT: str z1, [x9, #1, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv4f64:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: add x8, x0, #128
+; CHECK-128-NEXT: ldr z1, [x0, #8, mul vl]
+; CHECK-128-NEXT: ldr z0, [x8, #1, mul vl]
+; CHECK-128-NEXT: add x8, x1, #128
+; CHECK-128-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-128-NEXT: str z1, [x1, #8, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv4f64:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: add x8, x0, #128
+; CHECK-256-NEXT: ldr z1, [x0, #4, mul vl]
+; CHECK-256-NEXT: ldr z0, [x8, #1, mul vl]
+; CHECK-256-NEXT: add x8, x1, #128
+; CHECK-256-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-256-NEXT: str z1, [x1, #4, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv4f64:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: add x8, x0, #128
+; CHECK-512-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-512-NEXT: ldr z0, [x8, #1, mul vl]
+; CHECK-512-NEXT: add x8, x1, #128
+; CHECK-512-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-512-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv4f64:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: add x8, x0, #128
+; CHECK-1024-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-1024-NEXT: ldr z0, [x8, #1, mul vl]
+; CHECK-1024-NEXT: add x8, x1, #128
+; CHECK-1024-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-1024-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv4f64:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ptrue p0.d
+; CHECK-2048-NEXT: mov x8, #16 // =0x10
+; CHECK-2048-NEXT: add x9, x0, #128
+; CHECK-2048-NEXT: ldr z1, [x9, #1, mul vl]
+; CHECK-2048-NEXT: add x9, x1, #128
+; CHECK-2048-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; CHECK-2048-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
+; CHECK-2048-NEXT: str z1, [x9, #1, mul vl]
+; CHECK-2048-NEXT: ret
+ %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 128
+ %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 128
+ %x = load <vscale x 4 x double>, ptr %ldoff, align 1
+ store <vscale x 4 x double> %x, ptr %stoff, align 1
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index e33bc8da97c05..2d4cdfa7278b9 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -30,64 +30,64 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
; CHECK-NEXT: // %bb.1: // %vector.body
; CHECK-NEXT: mov z0.b, #0 // =0x0
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov x9, #8 // =0x8
-; CHECK-NEXT: mov x10, #24 // =0x18
+; CHECK-NEXT: mov x9, #24 // =0x18
; CHECK-NEXT: umov w8, v0.b[8]
-; CHECK-NEXT: mov v1.16b, v0.16b
-; CHECK-NEXT: mov v1.b[1], v0.b[1]
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: mov x8, #16 // =0x10
-; CHECK-NEXT: mov v2.b[1], v0.b[9]
-; CHECK-NEXT: mov v1.b[2], v0.b[2]
-; CHECK-NEXT: mov v2.b[2], v0.b[10]
-; CHECK-NEXT: mov v1.b[3], v0.b[3]
-; CHECK-NEXT: mov v2.b[3], v0.b[11]
-; CHECK-NEXT: mov v1.b[4], v0.b[4]
-; CHECK-NEXT: mov v2.b[4], v0.b[12]
-; CHECK-NEXT: mov v1.b[5], v0.b[5]
-; CHECK-NEXT: mov v2.b[5], v0.b[13]
-; CHECK-NEXT: mov v1.b[6], v0.b[6]
-; CHECK-NEXT: mov v2.b[6], v0.b[14]
-; CHECK-NEXT: mov v1.b[7], v0.b[7]
-; CHECK-NEXT: mov v2.b[7], v0.b[15]
-; CHECK-NEXT: ext z0.b, z0.b, z0.b, #16
-; CHECK-NEXT: uunpklo z1.h, z1.b
-; CHECK-NEXT: ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: mov z3.d, z0.d
+; CHECK-NEXT: mov v2.b[1], v0.b[1]
+; CHECK-NEXT: ext z3.b, z3.b, z0.b, #16
+; CHECK-NEXT: fmov s1, w8
+; CHECK-NEXT: mov x8, #8 // =0x8
+; CHECK-NEXT: ext v4.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT: mov v1.b[1], v0.b[9]
+; CHECK-NEXT: mov v2.b[2], v0.b[2]
+; CHECK-NEXT: mov v1.b[2], v0.b[10]
+; CHECK-NEXT: mov v2.b[3], v0.b[3]
+; CHECK-NEXT: mov v1.b[3], v0.b[11]
+; CHECK-NEXT: mov v2.b[4], v0.b[4]
+; CHECK-NEXT: mov v1.b[4], v0.b[12]
+; CHECK-NEXT: mov v2.b[5], v0.b[5]
+; CHECK-NEXT: mov v1.b[5], v0.b[13]
+; CHECK-NEXT: mov v2.b[6], v0.b[6]
+; CHECK-NEXT: mov v1.b[6], v0.b[14]
+; CHECK-NEXT: mov v2.b[7], v0.b[7]
+; CHECK-NEXT: mov v1.b[7], v0.b[15]
; CHECK-NEXT: uunpklo z2.h, z2.b
-; CHECK-NEXT: uunpklo z1.s, z1.h
-; CHECK-NEXT: uunpklo z3.h, z3.b
-; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.h, z1.b
+; CHECK-NEXT: uunpklo z1.h, z3.b
+; CHECK-NEXT: uunpklo z3.h, z4.b
; CHECK-NEXT: uunpklo z2.s, z2.h
-; CHECK-NEXT: lsl z1.s, z1.s, #31
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
; CHECK-NEXT: uunpklo z3.s, z3.h
-; CHECK-NEXT: lsl z0.s, z0.s, #31
-; CHECK-NEXT: asr z1.s, z1.s, #31
; CHECK-NEXT: lsl z2.s, z2.s, #31
-; CHECK-NEXT: asr z0.s, z0.s, #31
-; CHECK-NEXT: and z1.s, z1.s, #0x1
+; CHECK-NEXT: lsl z0.s, z0.s, #31
+; CHECK-NEXT: lsl z1.s, z1.s, #31
; CHECK-NEXT: lsl z3.s, z3.s, #31
; CHECK-NEXT: asr z2.s, z2.s, #31
-; CHECK-NEXT: and z0.s, z0.s, #0x1
-; CHECK-NEXT: cmpne p4.s, p0/z, z1.s, #0
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: asr z0.s, z0.s, #31
+; CHECK-NEXT: asr z1.s, z1.s, #31
; CHECK-NEXT: asr z3.s, z3.s, #31
; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT: and z0.s, z0.s, #0x1
+; CHECK-NEXT: and z1.s, z1.s, #0x1
; CHECK-NEXT: and z3.s, z3.s, #0x1
-; CHECK-NEXT: cmpne p2.s, p0/z, z2.s, #0
-; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0, x9, lsl #2]
-; CHECK-NEXT: mov z1.s, p4/m, #0 // =0x0
+; CHECK-NEXT: cmpne p4.s, p0/z, z2.s, #0
+; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0]
+; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
+; CHECK-NEXT: cmpne p2.s, p0/z, z1.s, #0
; CHECK-NEXT: cmpne p3.s, p0/z, z3.s, #0
-; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, x10, lsl #2]
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, x9, lsl #2]
+; CHECK-NEXT: mov z2.s, p4/m, #0 // =0x0
; CHECK-NEXT: mov z0.s, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z2.s, p2/m, #0 // =0x0
-; CHECK-NEXT: st1w { z1.s }, p0, [x0]
-; CHECK-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
+; CHECK-NEXT: mov z1.s, p2/m, #0 // =0x0
; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0
-; CHECK-NEXT: st1w { z2.s }, p0, [x0, x9, lsl #2]
-; CHECK-NEXT: st1w { z3.s }, p0, [x0, x10, lsl #2]
+; CHECK-NEXT: st1w { z2.s }, p0, [x0]
+; CHECK-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
+; CHECK-NEXT: st1w { z1.s }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: st1w { z3.s }, p0, [x0, x9, lsl #2]
; CHECK-NEXT: .LBB1_2: // %exit
; CHECK-NEXT: ret
%broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer
From 114d8cda4971542067c81cc8d7ee1d39e7c636d5 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj at nvidia.com>
Date: Mon, 10 Mar 2025 04:05:23 -0700
Subject: [PATCH 2/3] Add tests
---
.../AArch64/sve-fixed-length-offsets.ll | 117 +++++++++++++++++-
1 file changed, 113 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
index 700bbe4f060ca..8b2026e85155f 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -285,8 +285,8 @@ define void @nxv2f32(ptr %ldptr, ptr %stptr) {
; CHECK-2048-NEXT: ret
%ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 64
%stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 64
- %x = load <vscale x 2 x float>, ptr %ldoff, align 1
- store <vscale x 2 x float> %x, ptr %stoff, align 1
+ %x = load <vscale x 2 x float>, ptr %ldoff, align 4
+ store <vscale x 2 x float> %x, ptr %stoff, align 4
ret void
}
@@ -356,7 +356,116 @@ define void @nxv4f64(ptr %ldptr, ptr %stptr) {
; CHECK-2048-NEXT: ret
%ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 128
%stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 128
- %x = load <vscale x 4 x double>, ptr %ldoff, align 1
- store <vscale x 4 x double> %x, ptr %stoff, align 1
+ %x = load <vscale x 4 x double>, ptr %ldoff, align 8
+ store <vscale x 4 x double> %x, ptr %stoff, align 8
+ ret void
+}
+
+define void @v8i32(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: v8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0, #64]
+; CHECK-NEXT: ldp q3, q2, [x0, #32]
+; CHECK-NEXT: stp q0, q1, [x1, #64]
+; CHECK-NEXT: stp q3, q2, [x1, #32]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: v8i32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldp q0, q1, [x0, #64]
+; CHECK-128-NEXT: ldp q3, q2, [x0, #32]
+; CHECK-128-NEXT: stp q0, q1, [x1, #64]
+; CHECK-128-NEXT: stp q3, q2, [x1, #32]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: v8i32:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ptrue p0.s
+; CHECK-256-NEXT: mov x8, #8 // =0x8
+; CHECK-256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-256-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-256-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-256-NEXT: st1w { z1.s }, p0, [x1, #1, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: v8i32:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ptrue p0.s
+; CHECK-512-NEXT: mov x8, #8 // =0x8
+; CHECK-512-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-512-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: v8i32:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ptrue p0.s, vl16
+; CHECK-1024-NEXT: mov x8, #8 // =0x8
+; CHECK-1024-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-1024-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: v8i32:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ptrue p0.s, vl16
+; CHECK-2048-NEXT: mov x8, #8 // =0x8
+; CHECK-2048-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-2048-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-2048-NEXT: ret
+ %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 32
+ %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 32
+ %x = load <16 x i32>, ptr %ldoff, align 4
+ store <16 x i32> %x, ptr %stoff, align 4
+ ret void
+}
+
+; FIXME: This is wrong for VLS.
+define void @v8i32_vscale(ptr %0) {
+; CHECK-LABEL: v8i32_vscale:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.4s, #1
+; CHECK-NEXT: rdvl x8, #2
+; CHECK-NEXT: add x8, x0, x8
+; CHECK-NEXT: stp q0, q0, [x8]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: v8i32_vscale:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: movi v0.4s, #1
+; CHECK-128-NEXT: rdvl x8, #2
+; CHECK-128-NEXT: add x8, x0, x8
+; CHECK-128-NEXT: stp q0, q0, [x8]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: v8i32_vscale:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: mov z0.s, #1 // =0x1
+; CHECK-256-NEXT: ptrue p0.s
+; CHECK-256-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: v8i32_vscale:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: mov z0.s, #1 // =0x1
+; CHECK-512-NEXT: ptrue p0.s, vl8
+; CHECK-512-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: v8i32_vscale:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: mov z0.s, #1 // =0x1
+; CHECK-1024-NEXT: ptrue p0.s, vl8
+; CHECK-1024-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: v8i32_vscale:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: mov z0.s, #1 // =0x1
+; CHECK-2048-NEXT: ptrue p0.s, vl8
+; CHECK-2048-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: ret
+ %vl = call i64 @llvm.vscale()
+ %vlx = shl i64 %vl, 5
+ %2 = getelementptr inbounds nuw i8, ptr %0, i64 %vlx
+ store <8 x i32> splat (i32 1), ptr %2, align 4
ret void
}
From 2a1ed4e2fbbf42bec09dbad51077091960798334 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj at nvidia.com>
Date: Mon, 10 Mar 2025 04:41:40 -0700
Subject: [PATCH 3/3] Bail out if MemVT is a fixed-length vector
---
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 3 +-
.../AArch64/sve-fixed-length-offsets.ll | 7 +-
.../AArch64/sve-fixed-length-shuffles.ll | 90 +++++++++----------
3 files changed, 51 insertions(+), 49 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 07bcd802962fa..d338c22267885 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -7388,7 +7388,8 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
const auto KnownVScale =
Subtarget->getSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
- if (!KnownVScale || ByteOffset % KnownVScale != 0)
+ if (!KnownVScale || ByteOffset % KnownVScale != 0 ||
+ !MemVT.isScalableVector())
return false;
MulImm = ByteOffset / KnownVScale;
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
index 8b2026e85155f..84ab5493b03ee 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -381,11 +381,12 @@ define void @v8i32(ptr %ldptr, ptr %stptr) {
; CHECK-256-LABEL: v8i32:
; CHECK-256: // %bb.0:
; CHECK-256-NEXT: ptrue p0.s
-; CHECK-256-NEXT: mov x8, #8 // =0x8
+; CHECK-256-NEXT: mov x8, #16 // =0x10
+; CHECK-256-NEXT: mov x9, #8 // =0x8
; CHECK-256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; CHECK-256-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-256-NEXT: ld1w { z1.s }, p0/z, [x0, x9, lsl #2]
; CHECK-256-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
-; CHECK-256-NEXT: st1w { z1.s }, p0, [x1, #1, mul vl]
+; CHECK-256-NEXT: st1w { z1.s }, p0, [x1, x9, lsl #2]
; CHECK-256-NEXT: ret
;
; CHECK-512-LABEL: v8i32:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index 2d4cdfa7278b9..e33bc8da97c05 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -30,64 +30,64 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
; CHECK-NEXT: // %bb.1: // %vector.body
; CHECK-NEXT: mov z0.b, #0 // =0x0
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov x9, #24 // =0x18
+; CHECK-NEXT: mov x9, #8 // =0x8
+; CHECK-NEXT: mov x10, #24 // =0x18
; CHECK-NEXT: umov w8, v0.b[8]
-; CHECK-NEXT: mov v2.16b, v0.16b
-; CHECK-NEXT: mov z3.d, z0.d
-; CHECK-NEXT: mov v2.b[1], v0.b[1]
-; CHECK-NEXT: ext z3.b, z3.b, z0.b, #16
-; CHECK-NEXT: fmov s1, w8
-; CHECK-NEXT: mov x8, #8 // =0x8
-; CHECK-NEXT: ext v4.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT: mov v1.b[1], v0.b[9]
-; CHECK-NEXT: mov v2.b[2], v0.b[2]
-; CHECK-NEXT: mov v1.b[2], v0.b[10]
-; CHECK-NEXT: mov v2.b[3], v0.b[3]
-; CHECK-NEXT: mov v1.b[3], v0.b[11]
-; CHECK-NEXT: mov v2.b[4], v0.b[4]
-; CHECK-NEXT: mov v1.b[4], v0.b[12]
-; CHECK-NEXT: mov v2.b[5], v0.b[5]
-; CHECK-NEXT: mov v1.b[5], v0.b[13]
-; CHECK-NEXT: mov v2.b[6], v0.b[6]
-; CHECK-NEXT: mov v1.b[6], v0.b[14]
-; CHECK-NEXT: mov v2.b[7], v0.b[7]
-; CHECK-NEXT: mov v1.b[7], v0.b[15]
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: mov v1.b[1], v0.b[1]
+; CHECK-NEXT: fmov s2, w8
+; CHECK-NEXT: mov x8, #16 // =0x10
+; CHECK-NEXT: mov v2.b[1], v0.b[9]
+; CHECK-NEXT: mov v1.b[2], v0.b[2]
+; CHECK-NEXT: mov v2.b[2], v0.b[10]
+; CHECK-NEXT: mov v1.b[3], v0.b[3]
+; CHECK-NEXT: mov v2.b[3], v0.b[11]
+; CHECK-NEXT: mov v1.b[4], v0.b[4]
+; CHECK-NEXT: mov v2.b[4], v0.b[12]
+; CHECK-NEXT: mov v1.b[5], v0.b[5]
+; CHECK-NEXT: mov v2.b[5], v0.b[13]
+; CHECK-NEXT: mov v1.b[6], v0.b[6]
+; CHECK-NEXT: mov v2.b[6], v0.b[14]
+; CHECK-NEXT: mov v1.b[7], v0.b[7]
+; CHECK-NEXT: mov v2.b[7], v0.b[15]
+; CHECK-NEXT: ext z0.b, z0.b, z0.b, #16
+; CHECK-NEXT: uunpklo z1.h, z1.b
+; CHECK-NEXT: ext v3.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: uunpklo z0.h, z0.b
; CHECK-NEXT: uunpklo z2.h, z2.b
-; CHECK-NEXT: uunpklo z0.h, z1.b
-; CHECK-NEXT: uunpklo z1.h, z3.b
-; CHECK-NEXT: uunpklo z3.h, z4.b
-; CHECK-NEXT: uunpklo z2.s, z2.h
-; CHECK-NEXT: uunpklo z0.s, z0.h
; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z3.h, z3.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z2.s, z2.h
+; CHECK-NEXT: lsl z1.s, z1.s, #31
; CHECK-NEXT: uunpklo z3.s, z3.h
-; CHECK-NEXT: lsl z2.s, z2.s, #31
; CHECK-NEXT: lsl z0.s, z0.s, #31
-; CHECK-NEXT: lsl z1.s, z1.s, #31
+; CHECK-NEXT: asr z1.s, z1.s, #31
+; CHECK-NEXT: lsl z2.s, z2.s, #31
+; CHECK-NEXT: asr z0.s, z0.s, #31
+; CHECK-NEXT: and z1.s, z1.s, #0x1
; CHECK-NEXT: lsl z3.s, z3.s, #31
; CHECK-NEXT: asr z2.s, z2.s, #31
-; CHECK-NEXT: asr z0.s, z0.s, #31
-; CHECK-NEXT: asr z1.s, z1.s, #31
+; CHECK-NEXT: and z0.s, z0.s, #0x1
+; CHECK-NEXT: cmpne p4.s, p0/z, z1.s, #0
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
; CHECK-NEXT: asr z3.s, z3.s, #31
; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: and z0.s, z0.s, #0x1
-; CHECK-NEXT: and z1.s, z1.s, #0x1
-; CHECK-NEXT: and z3.s, z3.s, #0x1
-; CHECK-NEXT: cmpne p4.s, p0/z, z2.s, #0
-; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0]
; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
-; CHECK-NEXT: cmpne p2.s, p0/z, z1.s, #0
-; CHECK-NEXT: cmpne p3.s, p0/z, z3.s, #0
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, x9, lsl #2]
-; CHECK-NEXT: mov z2.s, p4/m, #0 // =0x0
+; CHECK-NEXT: and z3.s, z3.s, #0x1
+; CHECK-NEXT: cmpne p2.s, p0/z, z2.s, #0
+; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0, x9, lsl #2]
+; CHECK-NEXT: mov z1.s, p4/m, #0 // =0x0
+; CHECK-NEXT: cmpne p3.s, p0/z, z3.s, #0
+; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, x10, lsl #2]
; CHECK-NEXT: mov z0.s, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z1.s, p2/m, #0 // =0x0
-; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0
-; CHECK-NEXT: st1w { z2.s }, p0, [x0]
+; CHECK-NEXT: mov z2.s, p2/m, #0 // =0x0
+; CHECK-NEXT: st1w { z1.s }, p0, [x0]
; CHECK-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; CHECK-NEXT: st1w { z1.s }, p0, [x0, #1, mul vl]
-; CHECK-NEXT: st1w { z3.s }, p0, [x0, x9, lsl #2]
+; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0
+; CHECK-NEXT: st1w { z2.s }, p0, [x0, x9, lsl #2]
+; CHECK-NEXT: st1w { z3.s }, p0, [x0, x10, lsl #2]
; CHECK-NEXT: .LBB1_2: // %exit
; CHECK-NEXT: ret
%broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer