[llvm] 922ac64 - [VE] Avoid vectorizing store/load in scalar mode

Kazushi Marukawa via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 16 10:15:59 PDT 2023


Author: Kazushi (Jam) Marukawa
Date: 2023-08-17T02:15:54+09:00
New Revision: 922ac64b04b12f70135a53460610326ba2b72283

URL: https://github.com/llvm/llvm-project/commit/922ac64b04b12f70135a53460610326ba2b72283
DIFF: https://github.com/llvm/llvm-project/commit/922ac64b04b12f70135a53460610326ba2b72283.diff

LOG: [VE] Avoid vectorizing store/load in scalar mode

Avoid vectorizing store and load instructions in scalar mode.

Reviewed By: efocht

Differential Revision: https://reviews.llvm.org/D158049

Added: 
    llvm/test/CodeGen/VE/Vector/ticket-64420.ll

Modified: 
    llvm/lib/Target/VE/VEISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index d208fa06dfc8a3..70003daa4866a2 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -1428,11 +1428,10 @@ static SDValue lowerLoadI1(SDValue Op, SelectionDAG &DAG) {
 
 SDValue VETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
-
   EVT MemVT = LdNode->getMemoryVT();
 
-  // Dispatch to vector isel.
-  if (MemVT.isVector() && !isMaskType(MemVT))
+  // If VPU is enabled, always expand non-mask vector loads to VVP
+  if (Subtarget->enableVPU() && MemVT.isVector() && !isMaskType(MemVT))
     return lowerToVVP(Op, DAG);
 
   SDValue BasePtr = LdNode->getBasePtr();
@@ -1542,10 +1541,10 @@ static SDValue lowerStoreI1(SDValue Op, SelectionDAG &DAG) {
 SDValue VETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
   assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type");
-
-  // always expand non-mask vector loads to VVP
   EVT MemVT = StNode->getMemoryVT();
-  if (MemVT.isVector() && !isMaskType(MemVT))
+
+  // If VPU is enabled, always expand non-mask vector stores to VVP
+  if (Subtarget->enableVPU() && MemVT.isVector() && !isMaskType(MemVT))
     return lowerToVVP(Op, DAG);
 
   SDValue BasePtr = StNode->getBasePtr();

diff --git a/llvm/test/CodeGen/VE/Vector/ticket-64420.ll b/llvm/test/CodeGen/VE/Vector/ticket-64420.ll
new file mode 100644
index 00000000000000..000df9482468db
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Vector/ticket-64420.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -mtriple=ve -mattr=-vpu | FileCheck --check-prefix=SCALAR %s
+
+; Check scalar code generation for a vector store instruction.
+; In the scalar case, the compiler generates two 8-byte stores.
+
+; This is taken from a ticket below.
+;   https://github.com/llvm/llvm-project/issues/64420
+
+; SCALAR-LABEL: func:
+; SCALAR:       # %bb.1:
+; SCALAR-NEXT:    st %s1, 8(, %s0)
+; SCALAR-NEXT:    st %s1, (, %s0)
+; SCALAR-NEXT:    b.l.t (, %s10)
+
+; ModuleID = 'bugpoint-reduced-simplified.bc'
+source_filename = "test.c"
+target datalayout = "e-m:e-i64:64-n32:64-S128-v64:64:64-v128:64:64-v256:64:64-v512:64:64-v1024:64:64-v2048:64:64-v4096:64:64-v8192:64:64-v16384:64:64"
+target triple = "ve-unknown-linux-gnu"
+
+define dso_local void @func(ptr %_0) unnamed_addr #0 {
+start:
+  br i1 poison, label %bb7, label %panic3
+
+bb7:                                              ; preds = %start
+  store <4 x i32> zeroinitializer, ptr %_0, align 4
+  ret void
+
+panic3:                                           ; preds = %start
+  unreachable
+}
+
+attributes #0 = { "target-features"="+vpu" }


        


More information about the llvm-commits mailing list