[llvm] f6b825f - Revert "Revert "[AArch64][GlobalISel] Fix incorrect selection of monotonic s32->s64 anyext load.""

Amara Emerson via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 7 23:28:40 PST 2024


Author: Amara Emerson
Date: 2024-03-07T23:28:33-08:00
New Revision: f6b825f51ec8a67c4ace43aaacc27bfd4a78f706

URL: https://github.com/llvm/llvm-project/commit/f6b825f51ec8a67c4ace43aaacc27bfd4a78f706
DIFF: https://github.com/llvm/llvm-project/commit/f6b825f51ec8a67c4ace43aaacc27bfd4a78f706.diff

LOG: Revert "Revert "[AArch64][GlobalISel] Fix incorrect selection of monotonic s32->s64 anyext load.""

Attempt 2. The first one was trying to call isa<> on an MI reference that was freed.

This reverts commit ee24409c40ff35c3221892d9723331c233ca9f0e.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 6652883792391b..0f3c3cb96e6ce3 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2997,13 +2997,14 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
       }
     }
 
-    if (IsZExtLoad) {
-      // The zextload from a smaller type to i32 should be handled by the
+    if (IsZExtLoad || (Opcode == TargetOpcode::G_LOAD &&
+                       ValTy == LLT::scalar(64) && MemSizeInBits == 32)) {
+      // The any/zextload from a smaller type to i32 should be handled by the
       // importer.
       if (MRI.getType(LoadStore->getOperand(0).getReg()).getSizeInBits() != 64)
         return false;
-      // If we have a ZEXTLOAD then change the load's type to be a narrower reg
-      // and zero_extend with SUBREG_TO_REG.
+      // If we have an extending load then change the load's type to be a
+      // narrower reg and zero_extend with SUBREG_TO_REG.
       Register LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
       Register DstReg = LoadStore->getOperand(0).getReg();
       LoadStore->getOperand(0).setReg(LdReg);

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
index 5787f914b965d3..6b4bbb85b2ec44 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
@@ -9,6 +9,11 @@
     ret i8 %v
   }
 
+  define i32 @anyext_load_monotonic_i32() {
+    %v = load atomic i32, ptr null monotonic, align 4
+    ret i32 %v
+  }
+
 ...
 ---
 name:            load_acq_i8
@@ -25,13 +30,33 @@ body:             |
 
     ; CHECK-LABEL: name: load_acq_i8
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDARB:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load acquire (s8) from %ir.ptr, align 8)
-    ; CHECK: $w0 = COPY [[LDARB]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDARB:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load acquire (s8) from %ir.ptr, align 8)
+    ; CHECK-NEXT: $w0 = COPY [[LDARB]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(p0) = COPY $x0
     %2:gpr(s32) = G_LOAD %0(p0) :: (load acquire (s8) from %ir.ptr, align 8)
     $w0 = COPY %2(s32)
     RET_ReallyLR implicit $w0
 
 ...
+---
+name:            anyext_load_monotonic_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: anyext_load_monotonic_i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64common = COPY $xzr
+    ; CHECK-NEXT: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load monotonic (s32) from `ptr null`)
+    ; CHECK-NEXT: %ld:gpr64all = SUBREG_TO_REG 0, [[LDRWui]], %subreg.sub_32
+    ; CHECK-NEXT: $x0 = COPY %ld
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %1:gpr(p0) = G_CONSTANT i64 0
+    %ld:gpr(s64) = G_LOAD %1(p0) :: (load monotonic (s32) from `ptr null`)
+    $x0 = COPY %ld(s64)
+    RET_ReallyLR implicit $x0
+
+...


        


More information about the llvm-commits mailing list