[llvm] [RISCV][GISel] Support unaligned-scalar-mem. (PR #108905)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 16 19:24:11 PDT 2024


https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/108905

From 52e9bbd595dda9292f298a1cbebe0f155a5da989 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 16 Sep 2024 17:41:00 -0700
Subject: [PATCH 1/2] [RISCV][GISel] Support unaligned-scalar-mem.

We need to set the required alignment to 8 bits when
unaligned-scalar-mem is enabled; otherwise the legalizer gets
confused. Without this, we try to lower an unaligned load/store, and
the lowering code calls allowsMemoryAccess to verify what it is
supposed to do. allowsMemoryAccess reports that the unaligned access
is allowed, so no lowering happens and the legalizer gives up.
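
For context, a minimal sketch of the fix (mirroring the patch below,
not a standalone program; the tuple layout for
legalForTypesWithMemDesc is {result type, pointer type, memory type,
minimum alignment in bits}). Parameterizing the alignment field on
the subtarget keeps unaligned accesses Legal up front, so the
legalizer never routes them to a lowering that refuses to make
progress:

    // Byte alignment (8 bits) suffices when the subtarget supports
    // unaligned scalar accesses; otherwise require natural alignment.
    auto getScalarMemAlign = [&ST](unsigned Size) {
      return ST.enableUnalignedScalarMem() ? 8 : Size;
    };
    // e.g. a 16-bit load: legal at byte alignment with
    // +unaligned-scalar-mem, otherwise only at 16-bit alignment.
    LoadActions.legalForTypesWithMemDesc(
        {{s32, p0, s16, getScalarMemAlign(16)}});

The visible effect is in the UNALIGNED check lines of the tests
below: e.g. an s16 load with align 1 stays a single G_LOAD instead of
being split into byte loads.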
---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  63 ++++++----
 .../legalizer/legalize-load-rv32.mir          |  84 ++++++++++++-
 .../legalizer/legalize-load-rv64.mir          | 114 +++++++++++++++++-
 .../legalizer/legalize-store-rv32.mir         |  72 ++++++++++-
 .../legalizer/legalize-store-rv64.mir         |  90 +++++++++++++-
 5 files changed, 394 insertions(+), 29 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index c204683f4e79f8..f25496b8617ad1 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -287,34 +287,47 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 
   auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
   auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
-
-  LoadActions
-          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
-                                     {s32, p0, s16, 16},
-                                     {s32, p0, s32, 32},
-                                     {p0, p0, sXLen, XLen}});
-  StoreActions
-          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
-                                     {s32, p0, s16, 16},
-                                     {s32, p0, s32, 32},
-                                     {p0, p0, sXLen, XLen}});
-  auto &ExtLoadActions =
-      getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
-          .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
+  auto &ExtLoadActions = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD});
+
+  // Return the alignment needed for scalar memory ops. If unaligned scalar mem
+  // is supported, we only require byte alignment. Otherwise, we need the memory
+  // op to be natively aligned.
+  auto getScalarMemAlign =
+      [&ST](unsigned Size) { return ST.enableUnalignedScalarMem() ? 8 : Size; };
+
+  LoadActions.legalForTypesWithMemDesc(
+      {{s32, p0, s8, getScalarMemAlign(8)},
+       {s32, p0, s16, getScalarMemAlign(16)},
+       {s32, p0, s32, getScalarMemAlign(32)},
+       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
+  StoreActions.legalForTypesWithMemDesc(
+      {{s32, p0, s8, getScalarMemAlign(8)},
+       {s32, p0, s16, getScalarMemAlign(16)},
+       {s32, p0, s32, getScalarMemAlign(32)},
+       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
+  ExtLoadActions.legalForTypesWithMemDesc(
+      {{s32, p0, s8, getScalarMemAlign(8)},
+       {s32, p0, s16, getScalarMemAlign(16)}});
   if (XLen == 64) {
-    LoadActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
-                                          {s64, p0, s16, 16},
-                                          {s64, p0, s32, 32},
-                                          {s64, p0, s64, 64}});
-    StoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
-                                           {s64, p0, s16, 16},
-                                           {s64, p0, s32, 32},
-                                           {s64, p0, s64, 64}});
+    LoadActions.legalForTypesWithMemDesc(
+        {{s64, p0, s8, getScalarMemAlign(8)},
+         {s64, p0, s16, getScalarMemAlign(16)},
+         {s64, p0, s32, getScalarMemAlign(32)},
+         {s64, p0, s64, getScalarMemAlign(64)}});
+    StoreActions.legalForTypesWithMemDesc(
+        {{s64, p0, s8, getScalarMemAlign(8)},
+         {s64, p0, s16, getScalarMemAlign(16)},
+         {s64, p0, s32, getScalarMemAlign(32)},
+         {s64, p0, s64, getScalarMemAlign(64)}});
     ExtLoadActions.legalForTypesWithMemDesc(
-        {{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
+        {{s64, p0, s8, getScalarMemAlign(8)},
+         {s64, p0, s16, getScalarMemAlign(16)},
+         {s64, p0, s32, getScalarMemAlign(32)}});
   } else if (ST.hasStdExtD()) {
-    LoadActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
-    StoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
+    LoadActions.legalForTypesWithMemDesc(
+        {{s64, p0, s64, getScalarMemAlign(64)}});
+    StoreActions.legalForTypesWithMemDesc(
+        {{s64, p0, s64, getScalarMemAlign(64)}});
   }
 
   // Vector loads/stores.
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
index f925d245150864..bed44eb657da91 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv32.mir
@@ -1,6 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
+# RUN:   | FileCheck %s
+# RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck %s --check-prefix=UNALIGNED
 
 ---
 name:            load_i8
@@ -26,6 +28,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i8
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s8) = G_LOAD %0(p0) :: (load (s8))
     %2:_(s32) = G_ANYEXT %1(s8)
@@ -57,6 +67,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i16
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16))
     %2:_(s32) = G_ANYEXT %1(s16)
@@ -87,6 +105,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32))
     $x10 = COPY %1(s32)
@@ -122,6 +148,18 @@ body:             |
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s32)
     ; CHECK-NEXT: $x11 = COPY [[LOAD1]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    ;
+    ; UNALIGNED-LABEL: name: load_i64
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 8)
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+    ; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10, implicit $x11
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64))
     %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %1(s64)
@@ -153,6 +191,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load (p0), align 8)
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](p0)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_ptr
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load (p0), align 8)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](p0)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(p0) = G_LOAD %0(p0) :: (load (p0), align 8)
     $x10 = COPY %1(p0)
@@ -189,6 +235,14 @@ body:             |
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i16_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16), align 1)
     %2:_(s32) = G_ANYEXT %1(s16)
@@ -237,6 +291,14 @@ body:             |
     ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[SHL2]], [[OR]]
     ; CHECK-NEXT: $x10 = COPY [[OR2]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32), align 1)
     $x10 = COPY %1(s32)
@@ -272,6 +334,14 @@ body:             |
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32_align2
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 2)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32), align 2)
     $x10 = COPY %1(s32)
@@ -343,6 +413,18 @@ body:             |
     ; CHECK-NEXT: $x10 = COPY [[OR2]](s32)
     ; CHECK-NEXT: $x11 = COPY [[OR5]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    ;
+    ; UNALIGNED-LABEL: name: load_i64_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+    ; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 4, align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s32)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10, implicit $x11
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64), align 1)
     %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %1(s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
index 933bc589f6018e..491e4a358b1ad6 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
@@ -1,6 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
+# RUN:   | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck %s --check-prefix=UNALIGNED
 
 ---
 name:            load_i8
@@ -27,6 +29,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i8
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s8) = G_LOAD %0(p0) :: (load (s8))
     %2:_(s64) = G_ANYEXT %1(s8)
@@ -59,6 +70,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i16
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16))
     %2:_(s64) = G_ANYEXT %1(s16)
@@ -91,6 +111,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32))
     %2:_(s64) = G_ANYEXT %1(s32)
@@ -121,6 +150,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i64
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64))
     $x10 = COPY %1(s64)
@@ -156,6 +193,18 @@ body:             |
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](s64)
     ; CHECK-NEXT: $x11 = COPY [[LOAD1]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    ;
+    ; UNALIGNED-LABEL: name: load_i128
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10, implicit $x11
     %0:_(p0) = COPY $x10
     %1:_(s128) = G_LOAD %0(p0) :: (load (s128), align 8)
     %2:_(s64), %3:_(s64) = G_UNMERGE_VALUES %1(s128)
@@ -187,6 +236,14 @@ body:             |
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load (p0))
     ; CHECK-NEXT: $x10 = COPY [[LOAD]](p0)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_ptr
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load (p0))
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](p0)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(p0) = G_LOAD %0(p0) :: (load (p0))
     $x10 = COPY %1(p0)
@@ -224,6 +281,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i16_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16), align 1)
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16), align 1)
     %2:_(s64) = G_ANYEXT %1(s16)
@@ -274,6 +340,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR2]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32), align 1)
     %2:_(s64) = G_ANYEXT %1(s32)
@@ -312,6 +387,15 @@ body:             |
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i32_align2
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 2)
+    ; UNALIGNED-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
+    ; UNALIGNED-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32), align 2)
     %2:_(s64) = G_ANYEXT %1(s32)
@@ -384,6 +468,14 @@ body:             |
     ; CHECK-NEXT: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL6]], [[OR2]]
     ; CHECK-NEXT: $x10 = COPY [[OR6]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i64_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64), align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64), align 1)
     $x10 = COPY %1(s64)
@@ -431,6 +523,14 @@ body:             |
     ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[OR]]
     ; CHECK-NEXT: $x10 = COPY [[OR2]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
+    ;
+    ; UNALIGNED-LABEL: name: load_i64_align2
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64), align 2)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10
     %0:_(p0) = COPY $x10
     %1:_(s64) = G_LOAD %0(p0) :: (load (s64), align 2)
     $x10 = COPY %1(s64)
@@ -550,6 +650,18 @@ body:             |
     ; CHECK-NEXT: $x10 = COPY [[OR6]](s64)
     ; CHECK-NEXT: $x11 = COPY [[OR13]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    ;
+    ; UNALIGNED-LABEL: name: load_i128_unaligned
+    ; UNALIGNED: liveins: $x10
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64), align 1)
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; UNALIGNED-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8, align 1)
+    ; UNALIGNED-NEXT: $x10 = COPY [[LOAD]](s64)
+    ; UNALIGNED-NEXT: $x11 = COPY [[LOAD1]](s64)
+    ; UNALIGNED-NEXT: PseudoRET implicit $x10, implicit $x11
     %0:_(p0) = COPY $x10
     %1:_(s128) = G_LOAD %0(p0) :: (load (s128), align 1)
     %2:_(s64), %3:_(s64) = G_UNMERGE_VALUES %1(s128)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
index 2ece5a8c9d4142..791bdb30c490f9 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv32.mir
@@ -1,6 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
+# RUN:   | FileCheck %s
+# RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck %s --check-prefix=UNALIGNED
 
 ---
 name:            store_i8
@@ -26,6 +28,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s8))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i8
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s8))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s32) = COPY $x10
     %0:_(s8) = G_TRUNC %2(s32)
     %1:_(p0) = COPY $x11
@@ -57,6 +67,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s16))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i16
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s16))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s32) = COPY $x10
     %0:_(s16) = G_TRUNC %2(s32)
     %1:_(p0) = COPY $x11
@@ -87,6 +105,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32))
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s32) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s32), %1(p0) :: (store (s32))
@@ -122,6 +148,18 @@ body:             |
     ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
     ; CHECK-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i64
+    ; UNALIGNED: liveins: $x10, $x11, $x12
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; UNALIGNED-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY2]](p0) :: (store (s32), align 8)
+    ; UNALIGNED-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; UNALIGNED-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
+    ; UNALIGNED-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s32) = COPY $x10
     %3:_(s32) = COPY $x11
     %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
@@ -153,6 +191,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](p0), [[COPY1]](p0) :: (store (p0), align 8)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_ptr
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](p0), [[COPY1]](p0) :: (store (p0), align 8)
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(p0), %1(p0) :: (store (p0), align 8)
@@ -190,6 +236,14 @@ body:             |
     ; CHECK-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s8))
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 1)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i16_unaligned
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s16), align 1)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s32) = COPY $x10
     %0:_(s16) = G_TRUNC %2(s32)
     %1:_(p0) = COPY $x11
@@ -238,6 +292,14 @@ body:             |
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 2)
     ; CHECK-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p0) :: (store (s8) into unknown-address + 3)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32_unaligned
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32), align 1)
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s32) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s32), %1(p0) :: (store (s32), align 1)
@@ -273,6 +335,14 @@ body:             |
     ; CHECK-NEXT: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store (s16))
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 2)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32_align2
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32), align 2)
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s32) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s32), %1(p0) :: (store (s32), align 2)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
index 85055561c4f927..860bc932d8560b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
@@ -1,6 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s
+# RUN:   | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -run-pass=legalizer %s -o - \
+# RUN:   | FileCheck %s --check-prefix=UNALIGNED
 
 ---
 name:            store_i8
@@ -27,6 +29,15 @@ body:             |
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s8))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i8
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s8))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s8) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -59,6 +70,15 @@ body:             |
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s16))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i16
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s16))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s16) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -91,6 +111,15 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s32))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s32))
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s32) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -121,6 +150,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i64
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s64) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s64), %1(p0) :: (store (s64))
@@ -150,6 +187,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i128
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s64) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s64), %1(p0) :: (store (s64))
@@ -179,6 +224,14 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: G_STORE [[COPY]](p0), [[COPY1]](p0) :: (store (p0))
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_ptr
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](p0), [[COPY1]](p0) :: (store (p0))
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(p0), %1(p0) :: (store (p0))
@@ -217,6 +270,15 @@ body:             |
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s8))
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 1)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i16_unaligned
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s16), align 1)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s16) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -267,6 +329,15 @@ body:             |
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 2)
     ; CHECK-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p0) :: (store (s8) into unknown-address + 3)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32_unaligned
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s32), align 1)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s32) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -305,6 +376,15 @@ body:             |
     ; CHECK-NEXT: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store (s16))
     ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 2)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i32_align2
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s32), align 2)
+    ; UNALIGNED-NEXT: PseudoRET
     %2:_(s64) = COPY $x10
     %0:_(s32) = G_TRUNC %2(s64)
     %1:_(p0) = COPY $x11
@@ -353,6 +433,14 @@ body:             |
     ; CHECK-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 4)
     ; CHECK-NEXT: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p0) :: (store (s16) into unknown-address + 6)
     ; CHECK-NEXT: PseudoRET
+    ;
+    ; UNALIGNED-LABEL: name: store_i64_align2
+    ; UNALIGNED: liveins: $x10, $x11
+    ; UNALIGNED-NEXT: {{  $}}
+    ; UNALIGNED-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; UNALIGNED-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+    ; UNALIGNED-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64), align 2)
+    ; UNALIGNED-NEXT: PseudoRET
     %0:_(s64) = COPY $x10
     %1:_(p0) = COPY $x11
     G_STORE %0(s64), %1(p0) :: (store (s64), align 2)

From c0f9c71e244cb57133a26e43fd758eba3e2ba07c Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 16 Sep 2024 19:23:52 -0700
Subject: [PATCH 2/2] fixup! clang-format

---
 llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index f25496b8617ad1..1c66894e2b473e 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -292,8 +292,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
   // Return the alignment needed for scalar memory ops. If unaligned scalar mem
   // is supported, we only require byte alignment. Otherwise, we need the memory
   // op to be natively aligned.
-  auto getScalarMemAlign =
-      [&ST](unsigned Size) { return ST.enableUnalignedScalarMem() ? 8 : Size; };
+  auto getScalarMemAlign = [&ST](unsigned Size) {
+    return ST.enableUnalignedScalarMem() ? 8 : Size;
+  };
 
   LoadActions.legalForTypesWithMemDesc(
       {{s32, p0, s8, getScalarMemAlign(8)},


