[llvm] 1e9d002 - [RISCV][GISel] Split LoadStoreActions into LoadActions and StoreActions.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Tue Aug 20 15:39:47 PDT 2024


Author: Craig Topper
Date: 2024-08-20T15:39:26-07:00
New Revision: 1e9d0028d35ae69263aa848b4cb02245f442eb5c

URL: https://github.com/llvm/llvm-project/commit/1e9d0028d35ae69263aa848b4cb02245f442eb5c
DIFF: https://github.com/llvm/llvm-project/commit/1e9d0028d35ae69263aa848b4cb02245f442eb5c.diff

LOG: [RISCV][GISel] Split LoadStoreActions into LoadActions and StoreActions.

Remove widenScalarToNextPow2 from StoreActions.
Reorder clampScalar and lowerIfMemSizeNotByteSizePow2 for StoreActions.

These changes match AArch64 and got me further on a test case I was
playing with that contained an i129 store.
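(For context: the legalizer tries a rule set's actions in the order they
were added, so the first matching action decides how an illegal operation
is rewritten. The following is a toy model, not LLVM's LegalizerInfo; the
predicates are simplified and it assumes RV64, i.e. sXLen is s64. It only
sketches why the store rule order matters for the i129 store mentioned
above.)

#include <cstdio>
#include <string>

static bool isPow2(unsigned N) { return N && (N & (N - 1)) == 0; }

// Which action would fire first for a G_STORE whose value is ValBits wide
// and whose memory access is MemBits wide, under the old (shared) order:
// widenScalarToNextPow2 -> lowerIfMemSizeNotByteSizePow2 -> clampScalar.
static std::string oldStoreOrder(unsigned ValBits, unsigned MemBits) {
  if (!isPow2(ValBits))
    return "widenScalarToNextPow2";       // s129 widens to s256 first
  if (MemBits % 8 != 0 || !isPow2(MemBits / 8))
    return "lowerIfMemSizeNotByteSizePow2";
  if (ValBits < 32 || ValBits > 64)
    return "clampScalar(s32, s64)";
  return "legal or lower";
}

// ...and under the new store-only order from this patch:
// clampScalar -> lowerIfMemSizeNotByteSizePow2 (no widening).
static std::string newStoreOrder(unsigned ValBits, unsigned MemBits) {
  if (ValBits < 32 || ValBits > 64)
    return "clampScalar(s32, s64)";       // s129 narrows toward s64 first
  if (MemBits % 8 != 0 || !isPow2(MemBits / 8))
    return "lowerIfMemSizeNotByteSizePow2";
  return "legal or lower";
}

int main() {
  // A store of an i129 value: 129 value bits, 129-bit memory access.
  std::printf("old: %s\n", oldStoreOrder(129, 129).c_str());
  std::printf("new: %s\n", newStoreOrder(129, 129).c_str());
}

Under the old order the non-power-of-2 value width was widened (to s256
for i129) before the memory size was even considered; clamping toward
sXLen first, as AArch64 does, appears to be what lets the i129 case get
further.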

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 348b0e73af6b4f..64e8ee76e83915 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -285,8 +285,15 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
       .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
       .clampScalar(1, sXLen, sXLen);
 
-  auto &LoadStoreActions =
-      getActionDefinitionsBuilder({G_LOAD, G_STORE})
+  auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
+  auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
+
+  LoadActions
+          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
+                                     {s32, p0, s16, 16},
+                                     {s32, p0, s32, 32},
+                                     {p0, p0, sXLen, XLen}});
+  StoreActions
           .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                      {s32, p0, s16, 16},
                                      {s32, p0, s32, 32},
@@ -295,58 +302,94 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
       getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
           .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
   if (XLen == 64) {
-    LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
-                                               {s64, p0, s16, 16},
-                                               {s64, p0, s32, 32},
-                                               {s64, p0, s64, 64}});
+    LoadActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
+                                          {s64, p0, s16, 16},
+                                          {s64, p0, s32, 32},
+                                          {s64, p0, s64, 64}});
+    StoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
+                                           {s64, p0, s16, 16},
+                                           {s64, p0, s32, 32},
+                                           {s64, p0, s64, 64}});
     ExtLoadActions.legalForTypesWithMemDesc(
         {{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
   } else if (ST.hasStdExtD()) {
-    LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
+    LoadActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
+    StoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
   }
 
   // Vector loads/stores.
   if (ST.hasVInstructions()) {
-    LoadStoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
-                                               {nxv4s8, p0, nxv4s8, 8},
-                                               {nxv8s8, p0, nxv8s8, 8},
-                                               {nxv16s8, p0, nxv16s8, 8},
-                                               {nxv32s8, p0, nxv32s8, 8},
-                                               {nxv64s8, p0, nxv64s8, 8},
-                                               {nxv2s16, p0, nxv2s16, 16},
-                                               {nxv4s16, p0, nxv4s16, 16},
-                                               {nxv8s16, p0, nxv8s16, 16},
-                                               {nxv16s16, p0, nxv16s16, 16},
-                                               {nxv32s16, p0, nxv32s16, 16},
-                                               {nxv2s32, p0, nxv2s32, 32},
-                                               {nxv4s32, p0, nxv4s32, 32},
-                                               {nxv8s32, p0, nxv8s32, 32},
-                                               {nxv16s32, p0, nxv16s32, 32}});
-
-    if (ST.getELen() == 64)
-      LoadStoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
-                                                 {nxv1s16, p0, nxv1s16, 16},
-                                                 {nxv1s32, p0, nxv1s32, 32}});
-
-    if (ST.hasVInstructionsI64())
-      LoadStoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
-                                                 {nxv2s64, p0, nxv2s64, 64},
-                                                 {nxv4s64, p0, nxv4s64, 64},
-                                                 {nxv8s64, p0, nxv8s64, 64}});
+    LoadActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
+                                          {nxv4s8, p0, nxv4s8, 8},
+                                          {nxv8s8, p0, nxv8s8, 8},
+                                          {nxv16s8, p0, nxv16s8, 8},
+                                          {nxv32s8, p0, nxv32s8, 8},
+                                          {nxv64s8, p0, nxv64s8, 8},
+                                          {nxv2s16, p0, nxv2s16, 16},
+                                          {nxv4s16, p0, nxv4s16, 16},
+                                          {nxv8s16, p0, nxv8s16, 16},
+                                          {nxv16s16, p0, nxv16s16, 16},
+                                          {nxv32s16, p0, nxv32s16, 16},
+                                          {nxv2s32, p0, nxv2s32, 32},
+                                          {nxv4s32, p0, nxv4s32, 32},
+                                          {nxv8s32, p0, nxv8s32, 32},
+                                          {nxv16s32, p0, nxv16s32, 32}});
+    StoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
+                                           {nxv4s8, p0, nxv4s8, 8},
+                                           {nxv8s8, p0, nxv8s8, 8},
+                                           {nxv16s8, p0, nxv16s8, 8},
+                                           {nxv32s8, p0, nxv32s8, 8},
+                                           {nxv64s8, p0, nxv64s8, 8},
+                                           {nxv2s16, p0, nxv2s16, 16},
+                                           {nxv4s16, p0, nxv4s16, 16},
+                                           {nxv8s16, p0, nxv8s16, 16},
+                                           {nxv16s16, p0, nxv16s16, 16},
+                                           {nxv32s16, p0, nxv32s16, 16},
+                                           {nxv2s32, p0, nxv2s32, 32},
+                                           {nxv4s32, p0, nxv4s32, 32},
+                                           {nxv8s32, p0, nxv8s32, 32},
+                                           {nxv16s32, p0, nxv16s32, 32}});
+
+    if (ST.getELen() == 64) {
+      LoadActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
+                                            {nxv1s16, p0, nxv1s16, 16},
+                                            {nxv1s32, p0, nxv1s32, 32}});
+      StoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
+                                             {nxv1s16, p0, nxv1s16, 16},
+                                             {nxv1s32, p0, nxv1s32, 32}});
+    }
+
+    if (ST.hasVInstructionsI64()) {
+      LoadActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
+                                            {nxv2s64, p0, nxv2s64, 64},
+                                            {nxv4s64, p0, nxv4s64, 64},
+                                            {nxv8s64, p0, nxv8s64, 64}});
+      StoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
+                                             {nxv2s64, p0, nxv2s64, 64},
+                                             {nxv4s64, p0, nxv4s64, 64},
+                                             {nxv8s64, p0, nxv8s64, 64}});
+    }
 
     // we will take the custom lowering logic if we have scalable vector types
     // with non-standard alignments
-    LoadStoreActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
+    LoadActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
+    StoreActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
 
     // Pointers require that XLen sized elements are legal.
-    if (XLen <= ST.getELen())
-      LoadStoreActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
+    if (XLen <= ST.getELen()) {
+      LoadActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
+      StoreActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
+    }
   }
 
-  LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
+  LoadActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
       .lowerIfMemSizeNotByteSizePow2()
       .clampScalar(0, s32, sXLen)
       .lower();
+  StoreActions
+      .clampScalar(0, s32, sXLen)
+      .lowerIfMemSizeNotByteSizePow2()
+      .lower();
 
   ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
 