[llvm] a1dc6d4 - [AArch64] Do not use ABI alignment for mops.memset.tag

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 1 05:38:00 PST 2022


Author: Nikita Popov
Date: 2022-02-01T14:37:53+01:00
New Revision: a1dc6d4b83a38a52daab08ad52b68a7b80103709

URL: https://github.com/llvm/llvm-project/commit/a1dc6d4b83a38a52daab08ad52b68a7b80103709
DIFF: https://github.com/llvm/llvm-project/commit/a1dc6d4b83a38a52daab08ad52b68a7b80103709.diff

LOG: [AArch64] Do not use ABI alignment for mops.memset.tag

Pointer element types do not imply that the pointer is ABI aligned.
We should be using either an explicit align attribute here, or fall
back to an alignment of 1. This fixes a new element type access
introduced in D117764.

I don't think this makes any practical difference though, as the
lowering does not depend on alignment.

Differential Revision: https://reviews.llvm.org/D118681

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index eba73fd7e5f9..c539c8617d99 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11928,12 +11928,11 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::aarch64_mops_memset_tag: {
     Value *Dst = I.getArgOperand(0);
     Value *Val = I.getArgOperand(1);
-    PointerType *PtrTy = cast<PointerType>(Dst->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = MVT::getVT(Val->getType());
     Info.ptrVal = Dst;
     Info.offset = 0;
-    Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
+    Info.align = I.getParamAlign(0).valueOrOne();
     Info.flags = MachineMemOperand::MOStore;
     // The size of the memory being operated on is unknown at this point
     Info.size = MemoryLocation::UnknownSize;

diff --git a/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll b/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll
index 6ad195492758..5b71648ac1a9 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll
@@ -324,3 +324,34 @@ entry:
   %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 %size)
   ret i8* %r
 }
+
+define i8* @memset_tagged_size_aligned(i8* %dst, i64 %size, i32 %value) {
+; GISel-O0-LABEL: memset_tagged_size_aligned:
+; GISel-O0:       // %bb.0: // %entry
+; GISel-O0-NEXT:    // implicit-def: $x8
+; GISel-O0-NEXT:    mov w8, w2
+; GISel-O0-NEXT:    setgp [x0]!, x1!, x8
+; GISel-O0-NEXT:    setgm [x0]!, x1!, x8
+; GISel-O0-NEXT:    setge [x0]!, x1!, x8
+; GISel-O0-NEXT:    ret
+;
+; GISel-LABEL: memset_tagged_size_aligned:
+; GISel:       // %bb.0: // %entry
+; GISel-NEXT:    // kill: def $w2 killed $w2 def $x2
+; GISel-NEXT:    setgp [x0]!, x1!, x2
+; GISel-NEXT:    setgm [x0]!, x1!, x2
+; GISel-NEXT:    setge [x0]!, x1!, x2
+; GISel-NEXT:    ret
+;
+; SDAG-LABEL: memset_tagged_size_aligned:
+; SDAG:       // %bb.0: // %entry
+; SDAG-NEXT:    // kill: def $w2 killed $w2 def $x2
+; SDAG-NEXT:    setgp [x0]!, x1!, x2
+; SDAG-NEXT:    setgm [x0]!, x1!, x2
+; SDAG-NEXT:    setge [x0]!, x1!, x2
+; SDAG-NEXT:    ret
+entry:
+  %value_trunc = trunc i32 %value to i8
+  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* align 16 %dst, i8 %value_trunc, i64 %size)
+  ret i8* %r
+}


        


More information about the llvm-commits mailing list