[llvm] [Xtensa] Implement volatile load/store. (PR #110292)

Andrei Safronov via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 2 15:04:12 PDT 2024


https://github.com/andreisfr updated https://github.com/llvm/llvm-project/pull/110292

>From 1db96f6651218e421ecb306715e28e86f048588f Mon Sep 17 00:00:00 2001
From: Andrei Safronov <safronov at espressif.com>
Date: Fri, 27 Sep 2024 19:02:30 +0300
Subject: [PATCH] [Xtensa] Implement volatile load/store.

Add a memory wait "MEMW" instruction before volatile
load/store operations, as implemented in GCC.
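
For illustration, a volatile i32 copy such as

    %a = load volatile i32, ptr @x_i32, align 4
    store volatile i32 %a, ptr @y_i32, align 4

is now lowered with a MEMW barrier in front of each access, as shown by
the test_i32 case in the added volatile.ll test:

    l32r    a8, .LCPI3_0
    memw
    l32i    a8, a8, 0
    l32r    a9, .LCPI3_1
    memw
    s32i    a8, a9, 0
    ret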
---
 llvm/lib/Target/Xtensa/XtensaISelLowering.cpp |  17 ++
 llvm/lib/Target/Xtensa/XtensaInstrInfo.td     |   4 +-
 llvm/test/CodeGen/Xtensa/blockaddress.ll      |  25 ++-
 llvm/test/CodeGen/Xtensa/volatile.ll          | 176 ++++++++++++++++++
 4 files changed, 207 insertions(+), 15 deletions(-)
 create mode 100644 llvm/test/CodeGen/Xtensa/volatile.ll

diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
index 670930e99334f2..5450222a7b2e1d 100644
--- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
@@ -13,6 +13,7 @@
 
 #include "XtensaISelLowering.h"
 #include "XtensaConstantPoolValue.h"
+#include "XtensaInstrInfo.h"
 #include "XtensaSubtarget.h"
 #include "XtensaTargetMachine.h"
 #include "llvm/CodeGen/CallingConvLower.h"
@@ -1104,10 +1105,26 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI,
 MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter(
     MachineInstr &MI, MachineBasicBlock *MBB) const {
   DebugLoc DL = MI.getDebugLoc();
+  const XtensaInstrInfo &TII = *Subtarget.getInstrInfo();
 
   switch (MI.getOpcode()) {
   case Xtensa::SELECT:
     return emitSelectCC(MI, MBB);
+  case Xtensa::S8I:
+  case Xtensa::S16I:
+  case Xtensa::S32I:
+  case Xtensa::L8UI:
+  case Xtensa::L16SI:
+  case Xtensa::L16UI:
+  case Xtensa::L32I: {
+    // Insert a memory wait instruction "memw" before a volatile load/store,
+    // as GCC does. If memoperands is empty, conservatively assume that the
+    // instruction may also be a volatile load/store and insert "memw".
+    if (MI.memoperands_empty() || (*MI.memoperands_begin())->isVolatile()) {
+      BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW));
+    }
+    return MBB;
+  }
   default:
     llvm_unreachable("Unexpected instr type to insert");
   }
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
index 0d01864b54bc38..9773480624e92e 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
@@ -195,7 +195,7 @@ def SSAI : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins uimm5:$imm),
 //===----------------------------------------------------------------------===//
 
 // Load instructions
-let mayLoad = 1 in {
+let mayLoad = 1, usesCustomInserter = 1 in {
 
   class Load_RRI8<bits<4> oper, string instrAsm, SDPatternOperator opNode,
         ComplexPattern addrOp, Operand memOp>
@@ -216,7 +216,7 @@ def L16UI : Load_RRI8<0x01, "l16ui", zextloadi16, addr_ish2, mem16>;
 def L32I  : Load_RRI8<0x02, "l32i", load, addr_ish4, mem32>;
 
 // Store instructions
-let mayStore = 1 in {
+let mayStore = 1, usesCustomInserter = 1 in {
   class Store_II8<bits<4> oper, string instrAsm, SDPatternOperator opNode,
         ComplexPattern addrOp, Operand memOp>
 	  : RRI8_Inst<0x02, (outs), (ins AR:$t, memOp:$addr),
diff --git a/llvm/test/CodeGen/Xtensa/blockaddress.ll b/llvm/test/CodeGen/Xtensa/blockaddress.ll
index bbeb1790a1b785..e3c23cf8cedf02 100644
--- a/llvm/test/CodeGen/Xtensa/blockaddress.ll
+++ b/llvm/test/CodeGen/Xtensa/blockaddress.ll
@@ -1,22 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc --mtriple=xtensa < %s | FileCheck %s
 
 @addr = global ptr null
 
 define void @test_blockaddress() {
-
-  store volatile ptr blockaddress(@test_blockaddress, %block), ptr @addr
-; CHECK:      .literal_position
-; CHECK-NEXT: .literal .LCPI0_0, addr
-; CHECK-NEXT: .literal .LCPI0_1, .Ltmp0
 ; CHECK-LABEL: test_blockaddress:
-; CHECK:      # %bb.0:
-; CHECK-NEXT: l32r a8, .LCPI0_0
-; CHECK-NEXT: l32r a9, .LCPI0_1
-; CHECK-NEXT: s32i a9, a8, 0
-; CHECK-NEXT: l32i a8, a8, 0
-; CHECK-NEXT: jx a8
-; CHECK-NEXT: .Ltmp0:
-; CHECK-NEXT: .LBB0_1:
+; CHECK:         l32r a8, .LCPI0_0
+; CHECK-NEXT:    l32r a9, .LCPI0_1
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a9, a8, 0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a8, a8, 0
+; CHECK-NEXT:    jx a8
+; CHECK-NEXT:  .Ltmp0: # Block address taken
+; CHECK-NEXT:  .LBB0_1: # %block
+; CHECK-NEXT:    ret
+  store volatile ptr blockaddress(@test_blockaddress, %block), ptr @addr
 
   %val = load volatile ptr, ptr @addr
   indirectbr ptr %val, [label %block]
diff --git a/llvm/test/CodeGen/Xtensa/volatile.ll b/llvm/test/CodeGen/Xtensa/volatile.ll
new file mode 100644
index 00000000000000..5be6a4a7995c9e
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/volatile.ll
@@ -0,0 +1,176 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+@x_i8 = common dso_local global i8 0, align 8
+@y_i8 = common dso_local global i8 0, align 8
+@x_i16 = common dso_local global i16 0, align 8
+@y_i16 = common dso_local global i16 0, align 8
+@x_i32 = common dso_local global i32 0, align 8
+@y_i32 = common dso_local global i32 0, align 8
+@x_i64 = common dso_local global i64 0, align 8
+@y_i64 = common dso_local global i64 0, align 8
+@x_float = common dso_local global float 0.0, align 8
+@y_float = common dso_local global float 0.0, align 8
+@x_double = common dso_local global double 0.0, align 8
+@y_double = common dso_local global double 0.0, align 8
+@x_vec = common dso_local global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 8
+@y_vec = common dso_local global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 8
+
+define void @test() {
+; CHECK-LABEL: test:
+; CHECK:         l32r a8, .LCPI0_0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l8ui a8, a8, 0
+; CHECK-NEXT:    l32r a9, .LCPI0_1
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s8i a8, a9, 0
+; CHECK-NEXT:    l32r a8, .LCPI0_2
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l16ui a8, a8, 0
+; CHECK-NEXT:    l32r a9, .LCPI0_3
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s16i a8, a9, 0
+; CHECK-NEXT:    l32r a8, .LCPI0_4
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a8, a8, 0
+; CHECK-NEXT:    l32r a9, .LCPI0_5
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a8, a9, 0
+; CHECK-NEXT:    ret
+
+entry:
+  %0 = load volatile i8, ptr @x_i8, align 4
+  store volatile i8 %0, ptr @y_i8, align 4
+  %1 = load volatile i16, ptr @x_i16, align 4
+  store volatile i16 %1, ptr @y_i16, align 4
+  %2 = load volatile i32, ptr @x_i32, align 4
+  store volatile i32 %2, ptr @y_i32, align 4
+  ret void
+}
+
+
+define void @test_i8() {
+; CHECK-LABEL: test_i8:
+; CHECK:         l32r a8, .LCPI1_0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l8ui a8, a8, 0
+; CHECK-NEXT:    l32r a9, .LCPI1_1
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s8i a8, a9, 0
+; CHECK-NEXT:    ret
+entry:
+  %a = load volatile i8, ptr @x_i8, align 4
+  store volatile i8 %a, ptr @y_i8, align 4
+  ret void
+}
+
+define void @test_i16() {
+; CHECK-LABEL: test_i16:
+; CHECK:         l32r a8, .LCPI2_0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l16ui a8, a8, 0
+; CHECK-NEXT:    l32r a9, .LCPI2_1
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s16i a8, a9, 0
+; CHECK-NEXT:    ret
+entry:
+  %a = load volatile i16, ptr @x_i16, align 4
+  store volatile i16 %a, ptr @y_i16, align 4
+  ret void
+}
+
+define void @test_i32() {
+; CHECK-LABEL: test_i32:
+; CHECK:         l32r a8, .LCPI3_0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a8, a8, 0
+; CHECK-NEXT:    l32r a9, .LCPI3_1
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a8, a9, 0
+; CHECK-NEXT:    ret
+entry:
+  %a = load volatile i32, ptr @x_i32, align 4
+  store volatile i32 %a, ptr @y_i32, align 4
+  ret void
+}
+
+define void @test_i64() {
+; CHECK-LABEL: test_i64:
+; CHECK:         l32r a8, .LCPI4_0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a9, a8, 0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a8, a8, 4
+; CHECK-NEXT:    l32r a10, .LCPI4_1
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a8, a10, 4
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a9, a10, 0
+; CHECK-NEXT:    ret
+entry:
+  %a = load volatile i64, ptr @x_i64, align 4
+  store volatile i64 %a, ptr @y_i64, align 4
+  ret void
+}
+
+define void @test_float() {
+; CHECK-LABEL: test_float:
+; CHECK:         l32r a8, .LCPI5_0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a8, a8, 0
+; CHECK-NEXT:    l32r a9, .LCPI5_1
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a8, a9, 0
+; CHECK-NEXT:    ret
+entry:
+  %a = load volatile float, ptr @x_float, align 4
+  store volatile float %a, ptr @y_float, align 4
+  ret void
+}
+
+define void @test_double() {
+; CHECK-LABEL: test_double:
+; CHECK:         l32r a8, .LCPI6_0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a9, a8, 0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a8, a8, 4
+; CHECK-NEXT:    l32r a10, .LCPI6_1
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a8, a10, 4
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a9, a10, 0
+; CHECK-NEXT:    ret
+entry:
+  %a = load volatile double, ptr @x_double, align 4
+  store volatile double %a, ptr @y_double, align 4
+  ret void
+}
+
+define void @test_vec() {
+; CHECK-LABEL: test_vec:
+; CHECK:         l32r a8, .LCPI7_0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a9, a8, 0
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a10, a8, 4
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a11, a8, 8
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    l32i a8, a8, 12
+; CHECK-NEXT:    l32r a7, .LCPI7_1
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a8, a7, 12
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a11, a7, 8
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a10, a7, 4
+; CHECK-NEXT:    memw
+; CHECK-NEXT:    s32i a9, a7, 0
+; CHECK-NEXT:    ret
+entry:
+  %a = load volatile <4 x i32>, ptr @x_vec, align 4
+  store volatile <4 x i32> %a, ptr @y_vec, align 4
+  ret void
+}


