[llvm] 775de20 - [RISCV][GISel] Support unaligned-scalar-mem. (#108905)

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Sep 18 15:35:08 PDT 2024


Author: Craig Topper
Date: 2024-09-18T15:35:05-07:00
New Revision: 775de20c3a0a149158cdafce66ef29510a436f1f

URL: https://github.com/llvm/llvm-project/commit/775de20c3a0a149158cdafce66ef29510a436f1f
DIFF: https://github.com/llvm/llvm-project/commit/775de20c3a0a149158cdafce66ef29510a436f1f.diff

LOG: [RISCV][GISel] Support unaligned-scalar-mem. (#108905)

We need to set the required alignment to 8 when unaligned-scalar-mem is enabled. If we don't, the legalizer will try to lower the unaligned load/store, and the lowering code will call allowsMemoryAccess to verify what it's supposed to do. allowsMemoryAccess will report that the unaligned access is allowed, so the legalizer gives up.
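
For context, a minimal standalone sketch of the alignment rule this patch encodes; the names below are illustrative only, while the actual change uses a getScalarMemAlign lambda that queries ST.enableUnalignedScalarMem() inside RISCVLegalizerInfo, as shown in the diff:

    #include <cstdio>

    // Required alignment, in bits, for a scalar memory op of SizeInBits.
    // With unaligned-scalar-mem, byte (8-bit) alignment is enough; otherwise
    // the op must be naturally aligned so the legalizer still breaks up
    // under-aligned loads and stores.
    static unsigned getScalarMemAlign(bool UnalignedScalarMem,
                                      unsigned SizeInBits) {
      return UnalignedScalarMem ? 8 : SizeInBits;
    }

    int main() {
      const unsigned Sizes[] = {8, 16, 32, 64};
      for (int Unaligned = 0; Unaligned <= 1; ++Unaligned)
        for (unsigned Size : Sizes)
          std::printf("unaligned-scalar-mem=%d size=%u -> required align %u bits\n",
                      Unaligned, Size,
                      getScalarMemAlign(Unaligned != 0, Size));
      return 0;
    }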

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 192ba375d5a5d9..055193bcc2c8db 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -287,34 +287,48 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 
   auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
   auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
+  auto &ExtLoadActions = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD});
 
-  LoadActions
-          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
-                                     {s32, p0, s16, 16},
-                                     {s32, p0, s32, 32},
-                                     {p0, p0, sXLen, XLen}});
-  StoreActions
-          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
-                                     {s32, p0, s16, 16},
-                                     {s32, p0, s32, 32},
-                                     {p0, p0, sXLen, XLen}});
-  auto &ExtLoadActions =
-      getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
-          .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
+  // Return the alignment needed for scalar memory ops. If unaligned scalar mem
+  // is supported, we only require byte alignment. Otherwise, we need the memory
+  // op to be natively aligned.
+  auto getScalarMemAlign = [&ST](unsigned Size) {
+    return ST.enableUnalignedScalarMem() ? 8 : Size;
+  };
+
+  LoadActions.legalForTypesWithMemDesc(
+      {{s32, p0, s8, getScalarMemAlign(8)},
+       {s32, p0, s16, getScalarMemAlign(16)},
+       {s32, p0, s32, getScalarMemAlign(32)},
+       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
+  StoreActions.legalForTypesWithMemDesc(
+      {{s32, p0, s8, getScalarMemAlign(8)},
+       {s32, p0, s16, getScalarMemAlign(16)},
+       {s32, p0, s32, getScalarMemAlign(32)},
+       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
+  ExtLoadActions.legalForTypesWithMemDesc(
+      {{s32, p0, s8, getScalarMemAlign(8)},
+       {s32, p0, s16, getScalarMemAlign(16)}});
   if (XLen == 64) {
-    LoadActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
-                                          {s64, p0, s16, 16},
-                                          {s64, p0, s32, 32},
-                                          {s64, p0, s64, 64}});
-    StoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
-                                           {s64, p0, s16, 16},
-                                           {s64, p0, s32, 32},
-                                           {s64, p0, s64, 64}});
+    LoadActions.legalForTypesWithMemDesc(
+        {{s64, p0, s8, getScalarMemAlign(8)},
+         {s64, p0, s16, getScalarMemAlign(16)},
+         {s64, p0, s32, getScalarMemAlign(32)},
+         {s64, p0, s64, getScalarMemAlign(64)}});
+    StoreActions.legalForTypesWithMemDesc(
+        {{s64, p0, s8, getScalarMemAlign(8)},
+         {s64, p0, s16, getScalarMemAlign(16)},
+         {s64, p0, s32, getScalarMemAlign(32)},
+         {s64, p0, s64, getScalarMemAlign(64)}});
     ExtLoadActions.legalForTypesWithMemDesc(
-        {{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
+        {{s64, p0, s8, getScalarMemAlign(8)},
+         {s64, p0, s16, getScalarMemAlign(16)},
+         {s64, p0, s32, getScalarMemAlign(32)}});
   } else if (ST.hasStdExtD()) {
-    LoadActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
-    StoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
+    LoadActions.legalForTypesWithMemDesc(
+        {{s64, p0, s64, getScalarMemAlign(64)}});
+    StoreActions.legalForTypesWithMemDesc(
+        {{s64, p0, s64, getScalarMemAlign(64)}});
   }
 
   // Vector loads/stores.

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
index f925d245150864..bed44eb657da91 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
@@ -1,6 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
+# RUN:   | FileCheck %s
+# RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck %s --check-prefix=UNALIGNED
 
 ---
 name:            load_i8
@@ -26,6 +28,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i8
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s8) = G_LOAD %0(p0) :: (load (s8))
     %2:_(s32) = G_ANYEXT %1(s8)
@@ -57,6 +67,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i16
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16))
     %2:_(s32) = G_ANYEXT %1(s16)
@@ -87,6 +105,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32))
     $x10 = COPY %1(s32)
@@ -122,6 +148,18 @@ body:             |
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s32)
     ; CHECK-NEXT: $x11 = COPY [[LOAD1]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    ;
+    ; UNALIGNED-LABEL: name: load_i64
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+    ; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10, implicit $x11
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64))
     %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %1(s64)
@@ -153,6 +191,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load (p0), align 8)
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](p0)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_ptr
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load (p0), align 8)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](p0)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(p0) = G_LOAD %0(p0) :: (load (p0), align 8)
     $x10 = COPY %1(p0)
@@ -189,6 +235,14 @@ body:             |
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i16_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16), align 1)
     %2:_(s32) = G_ANYEXT %1(s16)
@@ -237,6 +291,14 @@ body:             |
     ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
     ; CHECK-NEXT: $x10 = COPY [[OR2]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32), align 1)
     $x10 = COPY %1(s32)
@@ -272,6 +334,14 @@ body:             |
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32_align2
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 2)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32), align 2)
     $x10 = COPY %1(s32)
@@ -343,6 +413,18 @@ body:             |
     ; CHECK-NEXT: $x10 = COPY [[OR2]](s32)
     ; CHECK-NEXT: $x11 = COPY [[OR5]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    ;
+    ; UNALIGNED-LABEL: name: load_i64_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+    ; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4, align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10, implicit $x11
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64), align 1)
     %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %1(s64)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
index 933bc589f6018e..491e4a358b1ad6 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
@@ -1,6 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
+# RUN:   | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck %s --check-prefix=UNALIGNED
 
 ---
 name:            load_i8
@@ -27,6 +29,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i8
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s8) = G_LOAD %0(p0) :: (load (s8))
     %2:_(s64) = G_ANYEXT %1(s8)
@@ -59,6 +70,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i16
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16))
     %2:_(s64) = G_ANYEXT %1(s16)
@@ -91,6 +111,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32))
     %2:_(s64) = G_ANYEXT %1(s32)
@@ -121,6 +150,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i64
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64))
     $x10 = COPY %1(s64)
@@ -156,6 +193,18 @@ body:             |
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s64)
     ; CHECK-NEXT: $x11 = COPY [[LOAD1]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    ;
+    ; UNALIGNED-LABEL: name: load_i128
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10, implicit $x11
     %0:_(p0) = COPY $x10
     %1:_(s128) = G_LOAD %0(p0) :: (load (s128), align 8)
     %2:_(s64), %3:_(s64) = G_UNMERGE_VALUES %1(s128)
@@ -187,6 +236,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load (p0))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](p0)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_ptr
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load (p0))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](p0)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(p0) = G_LOAD %0(p0) :: (load (p0))
     $x10 = COPY %1(p0)
@@ -224,6 +281,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i16_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 1)
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16), align 1)
     %2:_(s64) = G_ANYEXT %1(s16)
@@ -274,6 +340,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR2]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32), align 1)
     %2:_(s64) = G_ANYEXT %1(s32)
@@ -312,6 +387,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32_align2
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 2)
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32), align 2)
     %2:_(s64) = G_ANYEXT %1(s32)
@@ -384,6 +468,14 @@ body:             |
     ; CHECK-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[OR2]]
     ; CHECK-NEXT: $x10 = COPY [[OR6]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i64_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64), align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64), align 1)
     $x10 = COPY %1(s64)
@@ -431,6 +523,14 @@ body:             |
     ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[OR]]
     ; CHECK-NEXT: $x10 = COPY [[OR2]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i64_align2
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64), align 2)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64), align 2)
     $x10 = COPY %1(s64)
@@ -550,6 +650,18 @@ body:             |
     ; CHECK-NEXT: $x10 = COPY [[OR6]](s64)
     ; CHECK-NEXT: $x11 = COPY [[OR13]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    ;
+    ; UNALIGNED-LABEL: name: load_i128_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64), align 1)
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8, align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10, implicit $x11
     %0:_(p0) = COPY $x10
     %1:_(s128) = G_LOAD %0(p0) :: (load (s128), align 1)
     %2:_(s64), %3:_(s64) = G_UNMERGE_VALUES %1(s128)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
index 2ece5a8c9d4142..791bdb30c490f9 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
@@ -1,6 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
+# RUN:   | FileCheck %s
+# RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck %s --check-prefix=UNALIGNED
 
 ---
 name:            store_i8
@@ -26,6 +28,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s8))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i8
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s8))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s32) = COPY $x10
     %0:_(s8) = G_TRUNC %2(s32)
     %1:_(p0) = COPY $x11
@@ -57,6 +67,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s16))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i16
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s16))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s32) = COPY $x10
     %0:_(s16) = G_TRUNC %2(s32)
     %1:_(p0) = COPY $x11
@@ -87,6 +105,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32))
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s32) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s32), %1(p0) :: (store (s32))
@@ -122,6 +148,18 @@ body:             |
     ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
     ; CHECK-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i64
+    ; UNALIGNED: liveins: $x10, $x11, $x12
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; UNALIGNED-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY2]](p0) :: (store (s32), align 8)
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
+    ; UNALIGNED-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s32) = COPY $x10
     %3:_(s32) = COPY $x11
     %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
@@ -153,6 +191,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](p0), [[COPY1]](p0) :: (store (p0), align 8)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_ptr
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](p0), [[COPY1]](p0) :: (store (p0), align 8)
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(p0), %1(p0) :: (store (p0), align 8)
@@ -190,6 +236,14 @@ body:             |
     ; CHECK-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s8))
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 1)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i16_unaligned
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s16), align 1)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s32) = COPY $x10
     %0:_(s16) = G_TRUNC %2(s32)
     %1:_(p0) = COPY $x11
@@ -238,6 +292,14 @@ body:             |
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 2)
     ; CHECK-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p0) :: (store (s8) into unknown-address + 3)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32_unaligned
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32), align 1)
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s32) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s32), %1(p0) :: (store (s32), align 1)
@@ -273,6 +335,14 @@ body:             |
     ; CHECK-NEXT: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store (s16))
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 2)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32_align2
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32), align 2)
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s32) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s32), %1(p0) :: (store (s32), align 2)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
index 85055561c4f927..860bc932d8560b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
@@ -1,6 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
+# RUN:   | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck %s --check-prefix=UNALIGNED
 
 ---
 name:            store_i8
@@ -27,6 +29,15 @@ body:             |
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s8))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i8
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s8))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s8) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -59,6 +70,15 @@ body:             |
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s16))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i16
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s16))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s16) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -91,6 +111,15 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s32))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s32))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s32) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -121,6 +150,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i64
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s64) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s64), %1(p0) :: (store (s64))
@@ -150,6 +187,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i128
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s64) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s64), %1(p0) :: (store (s64))
@@ -179,6 +224,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](p0), [[COPY1]](p0) :: (store (p0))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_ptr
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](p0), [[COPY1]](p0) :: (store (p0))
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(p0), %1(p0) :: (store (p0))
@@ -217,6 +270,15 @@ body:             |
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s8))
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 1)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i16_unaligned
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s16), align 1)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s16) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -267,6 +329,15 @@ body:             |
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 2)
     ; CHECK-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p0) :: (store (s8) into unknown-address + 3)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32_unaligned
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s32), align 1)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s32) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -305,6 +376,15 @@ body:             |
     ; CHECK-NEXT: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store (s16))
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 2)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32_align2
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s32), align 2)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s32) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -353,6 +433,14 @@ body:             |
     ; CHECK-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 4)
     ; CHECK-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p0) :: (store (s16) into unknown-address + 6)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i64_align2
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64), align 2)
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s64) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s64), %1(p0) :: (store (s64), align 2)


        

