[llvm] 4f66ca3 - [AArch64][SME] Disable ZA LDR/STR addressing optimisations
David Sherwood via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 5 07:46:49 PDT 2023
Author: David Sherwood
Date: 2023-04-05T14:46:41Z
New Revision: 4f66ca3fb7d5a1cdc9d597604b58e230ad6ba0dc
URL: https://github.com/llvm/llvm-project/commit/4f66ca3fb7d5a1cdc9d597604b58e230ad6ba0dc
DIFF: https://github.com/llvm/llvm-project/commit/4f66ca3fb7d5a1cdc9d597604b58e230ad6ba0dc.diff
LOG: [AArch64][SME] Disable ZA LDR/STR addressing optimisations
Since the same encoded offset is used for both the vector
select offset and the address offset, we have to spot two
patterns simultaneously in the ldr/str intrinsic inputs, i.e.
vector select = base + off
address = base + (off * VL)
whereas currently we only look for the address pattern. I
don't think this is possible in tablegen, so I suspect we'll
have to do this manually as part of lowering or as a target
DAG combine. For now, I've removed these tablegen patterns
so that we at least do the correct thing even if the code
quality isn't great.
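To illustrate, a target DAG combine along these lines might look
roughly like the sketch below. This is only a sketch and not part of
this patch: the AArch64ISD::SME_ZA_LDR node and its operand layout
are assumptions, and a real version would live in
AArch64ISelLowering.cpp with the usual legality checks.

  // Sketch only: fold int_aarch64_sme_ldr when the tile slice and the
  // address share the same offset, so one encoded imm4 serves both.
  static SDValue tryFoldSMELdrOffset(SDNode *N, SelectionDAG &DAG) {
    SDLoc DL(N);
    // INTRINSIC_VOID operands: chain, intrinsic ID, tile slice, base.
    SDValue Chain = N->getOperand(0);
    SDValue TileSlice = N->getOperand(2); // vector select = base + off
    SDValue Addr = N->getOperand(3);      // address = base + off * VL

    // Match "tile slice = base_idx + off" with a constant off in [0,15].
    if (TileSlice.getOpcode() != ISD::ADD)
      return SDValue();
    auto *Off = dyn_cast<ConstantSDNode>(TileSlice.getOperand(1));
    if (!Off || Off->getZExtValue() > 15)
      return SDValue();

    // Match "address = base + vscale * (16 * off)", i.e. off * VL bytes.
    if (Addr.getOpcode() != ISD::ADD ||
        Addr.getOperand(1).getOpcode() != ISD::VSCALE)
      return SDValue();
    auto *Mul = cast<ConstantSDNode>(Addr.getOperand(1).getOperand(0));
    if (Mul->getZExtValue() != 16 * Off->getZExtValue())
      return SDValue();

    // Both patterns use the same offset, so a single encoded immediate
    // is correct for the vector select and the address.
    return DAG.getNode(AArch64ISD::SME_ZA_LDR, DL, MVT::Other,
                       {Chain, TileSlice.getOperand(0),
                        DAG.getTargetConstant(Off->getZExtValue(), DL,
                                              MVT::i32),
                        Addr.getOperand(0)});
  }

Keeping the tile slice base, the immediate and the pointer base as
separate operands would let instruction selection emit the shared
imm4 into both fields of the instruction.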
I've also changed some of the ldr/str tests to pass in the
same vector select pattern (base + off) as the address
pattern.
Differential Revision: https://reviews.llvm.org/D147433
Added:
Modified:
llvm/lib/Target/AArch64/SMEInstrFormats.td
llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll
llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 41cf7e36fb183..ee311ccdf322c 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -760,12 +760,6 @@ multiclass sme_spill<string opcodestr> {
// base
def : Pat<(int_aarch64_sme_str MatrixIndexGPR32Op12_15:$idx, GPR64sp:$base),
(!cast<Instruction>(NAME) ZA, $idx, 0, $base, 0)>;
- // scalar + immediate (mul vl)
- let AddedComplexity = 2 in {
- def : Pat<(int_aarch64_sme_str MatrixIndexGPR32Op12_15:$idx,
- (am_sme_indexed_b4 GPR64sp:$base, imm0_15:$imm4)),
- (!cast<Instruction>(NAME) ZA, $idx, 0, $base, $imm4)>;
- }
}
multiclass sme_fill<string opcodestr> {
@@ -785,12 +779,6 @@ multiclass sme_fill<string opcodestr> {
// base
def : Pat<(int_aarch64_sme_ldr MatrixIndexGPR32Op12_15:$idx, GPR64sp:$base),
(!cast<Instruction>(NAME # _PSEUDO) $idx, 0, $base)>;
- // scalar + immediate (mul vl)
- let AddedComplexity = 2 in {
- def : Pat<(int_aarch64_sme_ldr MatrixIndexGPR32Op12_15:$idx,
- (am_sme_indexed_b4 GPR64sp:$base, imm0_15:$imm4)),
- (!cast<Instruction>(NAME # _PSEUDO) $idx, $imm4, $base)>;
- }
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll
index 96c0d61483760..ccb3975f0c5b4 100644
--- a/llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-loads.ll
@@ -259,39 +259,40 @@ define void @ldr(ptr %ptr) {
define void @ldr_with_off_15(ptr %ptr) {
; CHECK-LABEL: ldr_with_off_15:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w12, wzr
+; CHECK-NEXT: mov w12, #15 // =0xf
; CHECK-NEXT: add x8, x0, #15
; CHECK-NEXT: ldr za[w12, 0], [x8]
; CHECK-NEXT: ret
%base = getelementptr i8, ptr %ptr, i64 15
- call void @llvm.aarch64.sme.ldr(i32 0, ptr %base)
+ call void @llvm.aarch64.sme.ldr(i32 15, ptr %base)
ret void;
}
define void @ldr_with_off_15mulvl(ptr %ptr) {
; CHECK-LABEL: ldr_with_off_15mulvl:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w12, wzr
-; CHECK-NEXT: ldr za[w12, 15], [x0, #15, mul vl]
+; CHECK-NEXT: mov w12, #15 // =0xf
+; CHECK-NEXT: addvl x8, x0, #15
+; CHECK-NEXT: ldr za[w12, 0], [x8]
; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%mulvl = mul i64 %vscale, 240
%base = getelementptr i8, ptr %ptr, i64 %mulvl
- call void @llvm.aarch64.sme.ldr(i32 0, ptr %base)
+ call void @llvm.aarch64.sme.ldr(i32 15, ptr %base)
ret void;
}
define void @ldr_with_off_16mulvl(ptr %ptr) {
; CHECK-LABEL: ldr_with_off_16mulvl:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w12, wzr
+; CHECK-NEXT: mov w12, #16 // =0x10
; CHECK-NEXT: addvl x8, x0, #16
; CHECK-NEXT: ldr za[w12, 0], [x8]
; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%mulvl = mul i64 %vscale, 256
%base = getelementptr i8, ptr %ptr, i64 %mulvl
- call void @llvm.aarch64.sme.ldr(i32 0, ptr %base)
+ call void @llvm.aarch64.sme.ldr(i32 16, ptr %base)
ret void;
}
diff --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll
index 5963abaaac8df..ddff4c7d3cd3e 100644
--- a/llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-stores.ll
@@ -259,39 +259,40 @@ define void @str(ptr %ptr) {
define void @str_with_off_15(ptr %ptr) {
; CHECK-LABEL: str_with_off_15:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w12, wzr
+; CHECK-NEXT: mov w12, #15 // =0xf
; CHECK-NEXT: add x8, x0, #15
; CHECK-NEXT: str za[w12, 0], [x8]
; CHECK-NEXT: ret
%base = getelementptr i8, ptr %ptr, i64 15
- call void @llvm.aarch64.sme.str(i32 0, ptr %base)
+ call void @llvm.aarch64.sme.str(i32 15, ptr %base)
ret void;
}
define void @str_with_off_15mulvl(ptr %ptr) {
; CHECK-LABEL: str_with_off_15mulvl:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w12, wzr
-; CHECK-NEXT: str za[w12, 0], [x0, #15, mul vl]
+; CHECK-NEXT: mov w12, #15 // =0xf
+; CHECK-NEXT: addvl x8, x0, #15
+; CHECK-NEXT: str za[w12, 0], [x8]
; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%mulvl = mul i64 %vscale, 240
%base = getelementptr i8, ptr %ptr, i64 %mulvl
- call void @llvm.aarch64.sme.str(i32 0, ptr %base)
+ call void @llvm.aarch64.sme.str(i32 15, ptr %base)
ret void;
}
define void @str_with_off_16mulvl(ptr %ptr) {
; CHECK-LABEL: str_with_off_16mulvl:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w12, wzr
+; CHECK-NEXT: mov w12, #16 // =0x10
; CHECK-NEXT: addvl x8, x0, #16
; CHECK-NEXT: str za[w12, 0], [x8]
; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%mulvl = mul i64 %vscale, 256
%base = getelementptr i8, ptr %ptr, i64 %mulvl
- call void @llvm.aarch64.sme.str(i32 0, ptr %base)
+ call void @llvm.aarch64.sme.str(i32 16, ptr %base)
ret void;
}