[llvm] 6536d60 - Revert "[SVE][CodeGen][DAGCombiner] Fix TypeSize warning in redundant store elimination"

Peter Waller via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 26 09:38:32 PDT 2020


Author: Peter Waller
Date: 2020-10-26T16:37:00Z
New Revision: 6536d6040f5cd20d554901e265519b80dd8119f2

URL: https://github.com/llvm/llvm-project/commit/6536d6040f5cd20d554901e265519b80dd8119f2
DIFF: https://github.com/llvm/llvm-project/commit/6536d6040f5cd20d554901e265519b80dd8119f2.diff

LOG: Revert "[SVE][CodeGen][DAGCombiner] Fix TypeSize warning in redundant store elimination"

This reverts commit 4604441386dc5fcd3165f4b39f5fa2e2c600f1bc.

Reverting because it was not the intended version of the patch, which
follows this patch.

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AArch64/Redundantstore.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4d1074560886..f4cf77ba8bc0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -17326,12 +17326,11 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
           !ST1->getBasePtr().isUndef() &&
           // BaseIndexOffset and the code below requires knowing the size
           // of a vector, so bail out if MemoryVT is scalable.
-          !ST->getMemoryVT().isScalableVector() &&
           !ST1->getMemoryVT().isScalableVector()) {
         const BaseIndexOffset STBase = BaseIndexOffset::match(ST, DAG);
         const BaseIndexOffset ChainBase = BaseIndexOffset::match(ST1, DAG);
-        unsigned STBitSize = ST->getMemoryVT().getFixedSizeInBits();
-        unsigned ChainBitSize = ST1->getMemoryVT().getFixedSizeInBits();
+        unsigned STBitSize = ST->getMemoryVT().getSizeInBits();
+        unsigned ChainBitSize = ST1->getMemoryVT().getSizeInBits();
         // If this is a store who's preceding store to a subset of the current
         // location and no one other node is chained to that store we can
         // effectively drop the store. Do not remove stores to undef as they may

diff --git a/llvm/test/CodeGen/AArch64/Redundantstore.ll b/llvm/test/CodeGen/AArch64/Redundantstore.ll
index 6807a861d6cb..b7822a882b4a 100644
--- a/llvm/test/CodeGen/AArch64/Redundantstore.ll
+++ b/llvm/test/CodeGen/AArch64/Redundantstore.ll
@@ -1,11 +1,8 @@
-; RUN: llc < %s -O3 -mtriple=aarch64-eabi 2>&1 | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s 
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 @end_of_array = common global i8* null, align 8
 
-; The tests in this file should not produce a TypeSize warning.
-; CHECK-NOT: warning: {{.*}}TypeSize is not scalable
-
 ; CHECK-LABEL: @test
 ; CHECK: stur
 ; CHECK-NOT: stur
@@ -26,23 +23,3 @@ entry:
   ret i8* %0
 }
 
-; #include <arm_sve.h>
-; #include <stdint.h>
-;
-; void redundant_store(uint32_t *x) {
-;     *x = 1;
-;     *(svint32_t *)x = svdup_s32(0);
-; }
-
-; CHECK-LABEL: @redundant_store
-define void @redundant_store(i32* nocapture %x) local_unnamed_addr #0 {
-  %1 = bitcast i32* %x to <vscale x 4 x i32>*
-  store i32 1, i32* %x, align 4
-  %2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 0)
-  store <vscale x 4 x i32> %2, <vscale x 4 x i32>* %1, align 16
-  ret void
-}
-
-declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)
-
-attributes #0 = { "target-cpu"="generic" "target-features"="+neon,+sve,+v8.2a" }
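
For context: the change being reverted added a bail-out for the case where the current store's memory type is a scalable vector (the chained store was already checked) and switched the size queries to getFixedSizeInBits(), since getSizeInBits() on a scalable type such as <vscale x 4 x i32> returns a TypeSize whose conversion to a plain unsigned produces the "TypeSize is not scalable" warning that the removed test checked for. The sketch below is a minimal standalone model of that guard, not LLVM's actual API; SimpleTypeSize and canElideEarlierStore are hypothetical names used only for illustration.

#include <cassert>
#include <cstdio>

// Stand-in for llvm::TypeSize: a known minimum bit width plus a flag saying
// whether the real width is that minimum scaled by the unknown vscale.
struct SimpleTypeSize {
  unsigned MinBits;
  bool Scalable;
};

// True if an earlier store of PrevSize bits to the same address is fully
// covered by a later store of CurSize bits, making the earlier store
// redundant. Scalable sizes are rejected up front because their runtime
// width is not known at compile time.
static bool canElideEarlierStore(SimpleTypeSize CurSize, SimpleTypeSize PrevSize) {
  if (CurSize.Scalable || PrevSize.Scalable)
    return false; // mirrors the isScalableVector() bail-out in the diff above
  return PrevSize.MinBits <= CurSize.MinBits;
}

int main() {
  SimpleTypeSize I32 = {32, false};
  SimpleTypeSize NxV4I32 = {128, true}; // <vscale x 4 x i32>: 128 bits per vscale
  assert(canElideEarlierStore(I32, I32));      // i32 over i32: earlier store is dead
  assert(!canElideEarlierStore(NxV4I32, I32)); // scalable store involved: keep both
  std::puts("ok");
  return 0;
}

Per the log message, the intended version of this guard lands in the commit that follows this revert.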


        

