[llvm] [RISCV] Tuple intrinsics are creating overly aligned memory operands (PR #115804)

Brandon Wu via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 11 18:56:56 PST 2024


https://github.com/4vtomat created https://github.com/llvm/llvm-project/pull/115804

The alignment should be the same as that of its element type.
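
For reference, the segment load/store intrinsics pass log2(SEW) as their final i64 operand, so the element alignment can be recomputed from it. A minimal sketch of that derivation (the free-standing helper and its name are illustrative only; the actual change lives in getTgtMemIntrinsic below):

  #include <cstdint>

  // Sketch: SEW is encoded as log2 of the element width in bits, so the
  // byte alignment of the element type is (1 << Log2SEW) / 8.
  // e.g. Log2SEW == 3 (e8) -> align 1, Log2SEW == 6 (e64) -> align 8.
  static uint64_t elementAlignFromLog2SEW(uint64_t Log2SEW) {
    return (uint64_t(1) << Log2SEW) / 8;
  }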


From 7df1bb6990427dedba072309d227c2b4acf42e56 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Mon, 11 Nov 2024 21:25:37 +0800
Subject: [PATCH] [RISCV] Tuple intrinsics are creating overly aligned memory
 operands

The alignment should be the same as that of its element type.
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 11 ++-
 .../CodeGen/RISCV/rvv/vector-tuple-align.ll   | 68 +++++++++++++++++++
 .../RISCV/rvv/vleff-vlseg2ff-output.ll        |  6 +-
 3 files changed, 80 insertions(+), 5 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 831b0b30d47fcc..7b803edab935ba 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1623,10 +1623,17 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
       MemTy = MemTy->getScalarType();
 
     Info.memVT = getValueType(DL, MemTy);
-    if (MemTy->isTargetExtTy())
+    if (MemTy->isTargetExtTy()) {
+      // The alignment of a RISC-V vector tuple type should be that of its
+      // element type.
+      if (cast<TargetExtType>(MemTy)->getName() == "riscv.vector.tuple")
+        MemTy = Type::getIntNTy(
+            MemTy->getContext(),
+            1 << cast<ConstantInt>(I.getArgOperand(I.arg_size() - 1))
+                     ->getZExtValue());
       Info.align = DL.getABITypeAlign(MemTy);
-    else
+    } else {
       Info.align = Align(DL.getTypeSizeInBits(MemTy->getScalarType()) / 8);
+    }
     Info.size = MemoryLocation::UnknownSize;
     Info.flags |=
         IsStore ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll b/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll
new file mode 100644
index 00000000000000..4a24e2342fabc5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s
+
+declare target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr , i64, i64)
+
+define target("riscv.vector.tuple", <vscale x 8 x i8>, 2)  @test_vlseg_nxv8i8(ptr %p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vlseg_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 1)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E8_V_M1_]]
+  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
+  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
+entry:
+  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 3)
+  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
+}
+
+define target("riscv.vector.tuple", <vscale x 8 x i8>, 2)  @test_vlseg_nxv4i16(ptr %p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vlseg_nxv4i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16_V_M1 $noreg, [[COPY1]], [[COPY]], 4 /* e16 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 2)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E16_V_M1_]]
+  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
+  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
+entry:
+  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 4)
+  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
+}
+
+define target("riscv.vector.tuple", <vscale x 8 x i8>, 2)  @test_vlseg_nxv2i32(ptr %p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vlseg_nxv2i32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32_V_M1 $noreg, [[COPY1]], [[COPY]], 5 /* e32 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 4)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E32_V_M1_]]
+  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
+  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
+entry:
+  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 5)
+  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
+}
+
+define target("riscv.vector.tuple", <vscale x 8 x i8>, 2)  @test_vlseg_nxv1i64(ptr %p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vlseg_nxv1i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64_V_M1 $noreg, [[COPY1]], [[COPY]], 6 /* e64 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 8)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY [[PseudoVLSEG2E64_V_M1_]]
+  ; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
+  ; CHECK-NEXT:   PseudoRET implicit $v8_v9
+entry:
+  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %p, i64 %vl, i64 6)
+  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
index c91f34f010aa28..737ef6bae4e429 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
@@ -66,7 +66,7 @@ define i64 @test_vlseg2ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 16)
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
@@ -83,7 +83,7 @@ define i64 @test_vlseg2ff_nxv8i8_tu(target("riscv.vector.tuple", <vscale x 8 x i
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY $v8_v9
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 16)
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:
@@ -102,7 +102,7 @@ define i64 @test_vlseg2ff_nxv8i8_mask(target("riscv.vector.tuple", <vscale x 8 x
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrn2m1nov0 = COPY $v8_v9
   ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 16)
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
   ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
   ; CHECK-NEXT:   PseudoRET implicit $x10
 entry:


