[llvm] 66fbf5f - [X86][AMX] Prevent transforming load pointer from <256 x i32>* to x86_amx*.

via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 13 17:25:14 PST 2021


Author: Luo, Yuanke
Date: 2021-03-14T09:24:56+08:00
New Revision: 66fbf5fafb1672e866c1f93e892024e7e6f52ccd

URL: https://github.com/llvm/llvm-project/commit/66fbf5fafb1672e866c1f93e892024e7e6f52ccd
DIFF: https://github.com/llvm/llvm-project/commit/66fbf5fafb1672e866c1f93e892024e7e6f52ccd.diff

LOG: [X86][AMX] Prevent transforming load pointer from <256 x i32>* to x86_amx*.

The load/store instruction will be transformed to amx intrinsics
in the pass of AMX type lowering. Prohibiting the pointer cast
makes that pass happy.

Differential Revision: https://reviews.llvm.org/D98247

Added: 
    

Modified: 
    llvm/lib/IR/Type.cpp
    llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
    llvm/test/Transforms/InstCombine/X86/x86-amx.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp
index bade7dc325f4..f65016e9ee3a 100644
--- a/llvm/lib/IR/Type.cpp
+++ b/llvm/lib/IR/Type.cpp
@@ -95,8 +95,15 @@ bool Type::canLosslesslyBitCastTo(Type *Ty) const {
   // else is not lossless. Conservatively assume we can't losslessly convert
   // between pointers with different address spaces.
   if (auto *PTy = dyn_cast<PointerType>(this)) {
-    if (auto *OtherPTy = dyn_cast<PointerType>(Ty))
+    if (auto *OtherPTy = dyn_cast<PointerType>(Ty)) {
+      // Don't bitcast "load <256 x i32>, <256 x i32>*" to
+      // "load x86_amx, x86_amx*", because we don't have a corresponding
+      // instruction to load x86_amx. Doing the transform causes trouble
+      // to lower "load x86_amx" instruction in backend.
+      if (OtherPTy->getElementType()->isX86_AMXTy())
+        return false;
       return PTy->getAddressSpace() == OtherPTy->getAddressSpace();
+    }
     return false;
   }
   return false;  // Other types have no identity values

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 07e68c44416d..e6e90b915bb8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -2403,6 +2403,11 @@ Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
         Value *Addr = LI->getOperand(0);
         if (Addr == &CI || isa<LoadInst>(Addr))
           return nullptr;
+        // If there is any loss for the pointer bitcast, abandon.
+        auto *DestPtrTy = DestTy->getPointerTo(LI->getPointerAddressSpace());
+        auto *SrcPtrTy = Addr->getType();
+        if (!SrcPtrTy->canLosslesslyBitCastTo(DestPtrTy))
+          return nullptr;
         if (LI->hasOneUse() && LI->isSimple())
           continue;
         // If a LoadInst has more than one use, changing the type of loaded

diff --git a/llvm/test/Transforms/InstCombine/X86/x86-amx.ll b/llvm/test/Transforms/InstCombine/X86/x86-amx.ll
index 254f3c15bc44..9f922a057029 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-amx.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-amx.ll
@@ -9,22 +9,22 @@ define linkonce_odr dso_local void @foo(<256 x i32>* %arrayidx16, <256 x i32>* %
 ; CHECK:       for.cond9:
 ; CHECK-NEXT:    br i1 undef, label [[FOR_BODY14:%.*]], label [[EXIT:%.*]]
 ; CHECK:       for.body14:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <256 x i32>* [[ARRAYIDX16:%.*]] to x86_amx*
-; CHECK-NEXT:    [[T51:%.*]] = load x86_amx, x86_amx* [[TMP0]], align 64
+; CHECK-NEXT:    [[T5:%.*]] = load <256 x i32>, <256 x i32>* [[ARRAYIDX16:%.*]], align 64
 ; CHECK-NEXT:    br label [[FOR_COND18:%.*]]
 ; CHECK:       for.cond18:
-; CHECK-NEXT:    [[TMP1:%.*]] = phi x86_amx [ [[T51]], [[FOR_BODY14]] ], [ [[T11:%.*]], [[FOR_BODY24:%.*]] ]
+; CHECK-NEXT:    [[SUB_C_SROA_0_0:%.*]] = phi <256 x i32> [ [[T5]], [[FOR_BODY14]] ], [ [[T12:%.*]], [[FOR_BODY24:%.*]] ]
 ; CHECK-NEXT:    br i1 undef, label [[FOR_BODY24]], label [[FOR_COND_CLEANUP23:%.*]]
 ; CHECK:       for.cond.cleanup23:
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast x86_amx [[TMP1]] to <256 x i32>
-; CHECK-NEXT:    store <256 x i32> [[TMP2]], <256 x i32>* [[ARRAYIDX16]], align 64
+; CHECK-NEXT:    store <256 x i32> [[SUB_C_SROA_0_0]], <256 x i32>* [[ARRAYIDX16]], align 64
 ; CHECK-NEXT:    br label [[FOR_COND9]]
 ; CHECK:       for.body24:
 ; CHECK-NEXT:    [[T6:%.*]] = load <256 x i32>, <256 x i32>* [[ARRAYIDX29:%.*]], align 64
 ; CHECK-NEXT:    [[T7:%.*]] = load <256 x i32>, <256 x i32>* [[ARRAYIDX35:%.*]], align 64
+; CHECK-NEXT:    [[T8:%.*]] = bitcast <256 x i32> [[SUB_C_SROA_0_0]] to x86_amx
 ; CHECK-NEXT:    [[T9:%.*]] = bitcast <256 x i32> [[T6]] to x86_amx
 ; CHECK-NEXT:    [[T10:%.*]] = bitcast <256 x i32> [[T7]] to x86_amx
-; CHECK-NEXT:    [[T11]] = call x86_amx @llvm.x86.tdpbssd.internal(i16 1, i16 4, i16 4, x86_amx [[TMP1]], x86_amx [[T9]], x86_amx [[T10]])
+; CHECK-NEXT:    [[T11:%.*]] = call x86_amx @llvm.x86.tdpbssd.internal(i16 1, i16 4, i16 4, x86_amx [[T8]], x86_amx [[T9]], x86_amx [[T10]])
+; CHECK-NEXT:    [[T12]] = bitcast x86_amx [[T11]] to <256 x i32>
 ; CHECK-NEXT:    br label [[FOR_COND18]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void


        


More information about the llvm-commits mailing list