[llvm] r354376 - [X86] Don't consider functions ABI compatible for ArgumentPromotion pass if they view 512-bit vectors differently.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 19 12:12:20 PST 2019


Author: ctopper
Date: Tue Feb 19 12:12:20 2019
New Revision: 354376

URL: http://llvm.org/viewvc/llvm-project?rev=354376&view=rev
Log:
[X86] Don't consider functions ABI compatible for ArgumentPromotion pass if they view 512-bit vectors differently.

The use of the -mprefer-vector-width=256 command line option mixed with functions
using vector intrinsics can create situations where one function thinks 512-bit vectors
are legal, but another function does not.

If a 512-bit vector is passed between them via a pointer, it's possible ArgumentPromotion
might try to pass by value instead. This will result in type legalization for the two
functions handling the 512-bit vector differently, leading to runtime failures.

Had the 512 bit vector been passed by value from clang codegen, both functions would
have been tagged with a min-legal-vector-width=512 function attribute. That would
make them be legalized the same way.

I observed this issue in 32-bit mode where a union containing a 512 bit vector was
being passed by a function that used intrinsics to one that did not. The caller
ended up passing in zmm0 and the callee tried to read it from ymm0 and ymm1.

The fix implemented here is just to consider it a mismatch if two functions
would handle 512-bit vectors differently, without looking at the types that are being
considered. This is the easiest and safest fix, but it can be improved in the future.

Differential Revision: https://reviews.llvm.org/D58390

Added:
    llvm/trunk/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll
Modified:
    llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
    llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h

Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp?rev=354376&r1=354375&r2=354376&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp Tue Feb 19 12:12:20 2019
@@ -3070,6 +3070,22 @@ bool X86TTIImpl::areInlineCompatible(con
   return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
 }
 
+bool X86TTIImpl::areFunctionArgsABICompatible(
+    const Function *Caller, const Function *Callee,
+    SmallPtrSetImpl<Argument *> &Args) const {
+  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
+    return false;
+
+  // If we get here, we know the target features match. If one function
+  // considers 512-bit vectors legal and the other does not, consider them
+  // incompatible.
+  // FIXME Look at the arguments and only consider 512 bit or larger vectors?
+  const TargetMachine &TM = getTLI()->getTargetMachine();
+
+  return TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
+         TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs();
+}
+
 const X86TTIImpl::TTI::MemCmpExpansionOptions *
 X86TTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
   // Only enable vector loads for equality comparison.

Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h?rev=354376&r1=354375&r2=354376&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h Tue Feb 19 12:12:20 2019
@@ -189,6 +189,9 @@ public:
   bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
   bool areInlineCompatible(const Function *Caller,
                            const Function *Callee) const;
+  bool areFunctionArgsABICompatible(const Function *Caller,
+                                    const Function *Callee,
+                                    SmallPtrSetImpl<Argument *> &Args) const;
   const TTI::MemCmpExpansionOptions *enableMemCmpExpansion(
       bool IsZeroCmp) const;
   bool enableInterleavedAccessVectorization();

Added: llvm/trunk/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll?rev=354376&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll (added)
+++ llvm/trunk/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll Tue Feb 19 12:12:20 2019
@@ -0,0 +1,184 @@
+; RUN: opt -S -argpromotion < %s | FileCheck %s
+; RUN: opt -S -passes=argpromotion < %s | FileCheck %s
+; Test that we only promote arguments when the caller/callee have compatible
+; function attrubtes.
+
+target triple = "x86_64-unknown-linux-gnu"
+
+; This should promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #0 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %arg) #0 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg) #1 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %arg) #0 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #0 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %arg) #1 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should not promote
+; CHECK-LABEL: @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1)
+define internal fastcc void @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg) #2 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should not promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1)
+define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #2 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %arg) #1 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #3 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %arg) #4 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #4 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %arg) #3 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #5
+
+attributes #0 = { inlinehint norecurse nounwind uwtable "target-features"="+avx512vl" "min-legal-vector-width"="512" "prefer-vector-width"="512" }
+attributes #1 = { inlinehint norecurse nounwind uwtable "target-features"="+avx512vl" "min-legal-vector-width"="512" "prefer-vector-width"="256" }
+attributes #2 = { inlinehint norecurse nounwind uwtable "target-features"="+avx512vl" "min-legal-vector-width"="256" "prefer-vector-width"="256" }
+attributes #3 = { inlinehint norecurse nounwind uwtable "target-features"="+avx2" "min-legal-vector-width"="512" "prefer-vector-width"="256" }
+attributes #4 = { inlinehint norecurse nounwind uwtable "target-features"="+avx2" "min-legal-vector-width"="256" "prefer-vector-width"="256" }
+attributes #5 = { argmemonly nounwind }




More information about the llvm-commits mailing list