[llvm] 3166647 - [AVR] Fix atomicrmw result value

Ayke van Laethem via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 2 00:10:51 PST 2022


Author: Ayke van Laethem
Date: 2022-02-02T09:10:39+01:00
New Revision: 316664783df8061b39febeef4c800c85f735eef1

URL: https://github.com/llvm/llvm-project/commit/316664783df8061b39febeef4c800c85f735eef1
DIFF: https://github.com/llvm/llvm-project/commit/316664783df8061b39febeef4c800c85f735eef1.diff

LOG: [AVR] Fix atomicrmw result value

This patch fixes the atomicrmw result value to be the value before the
operation instead of the value after the operation. This was a bug, left
as a FIXME in the code (see https://reviews.llvm.org/D97127).

From the LangRef:

> The contents of memory at the location specified by the <pointer>
> operand are atomically read, modified, and written back. The original
> value at the location is returned.
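
As a small illustration (the function and value names below are made up
for this example and are not part of the patch), the IR-level contract is
that %old receives the contents of memory before the addition, not the sum
that is written back:

    define i8 @rmw_add_example(i8* %ptr) {
      ; %old = original value at %ptr; memory at %ptr now holds %old + 13.
      %old = atomicrmw add i8* %ptr, i8 13 seq_cst
      ret i8 %old
    }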

Doing this expansion early (before register allocation, via a custom
inserter) allows the register allocator to arrange registers so that
commutable operations simply have their operands swapped as needed, which
results in shorter code while still being correct.
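
A rough sketch of the resulting lowering for an 8-bit add (the register
numbers and pointer register are illustrative, following the comment added
in AVRISelLowering.cpp and the updated load8.ll test): the loaded value
stays live in its own register, and the commutable ADD writes into the
operand register, so the pre-operation value is what the function returns.

    ; Approximate instruction sequence for the body below:
    ;   ldi r25, 13   ; materialize the constant operand
    ;   in  r0, 63    ; save SREG
    ;   cli           ; disable interrupts
    ;   ld  r24, X    ; r24 = original value (the atomicrmw result)
    ;   add r25, r24  ; operands swapped: r25 = 13 + original value
    ;   st  X, r25    ; store the new value
    ;   out 63, r0    ; restore SREG
    define i8 @atomic_add8_sketch(i8* %ptr) {
      %old = atomicrmw add i8* %ptr, i8 13 seq_cst
      ret i8 %old
    }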

Differential Revision: https://reviews.llvm.org/D117725

Added: 
    

Modified: 
    llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
    llvm/lib/Target/AVR/AVRFrameLowering.cpp
    llvm/lib/Target/AVR/AVRISelLowering.cpp
    llvm/lib/Target/AVR/AVRISelLowering.h
    llvm/lib/Target/AVR/AVRInstrInfo.td
    llvm/lib/Target/AVR/AVRRegisterInfo.cpp
    llvm/lib/Target/AVR/AVRSubtarget.h
    llvm/test/CodeGen/AVR/atomics/load16.ll
    llvm/test/CodeGen/AVR/atomics/load8.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp b/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
index 144ae2b320f9c..51eabbabbf610 100644
--- a/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
@@ -54,8 +54,6 @@ class AVRExpandPseudo : public MachineFunctionPass {
   const Register SCRATCH_REGISTER = AVR::R0;
   /// The register that will always contain zero.
   const Register ZERO_REGISTER = AVR::R1;
-  /// The IO address of the status register.
-  const unsigned SREG_ADDR = 0x3f;
 
   bool expandMBB(Block &MBB);
   bool expandMI(Block &MBB, BlockIt MBBI);
@@ -86,9 +84,6 @@ class AVRExpandPseudo : public MachineFunctionPass {
 
   bool expandAtomicBinaryOp(unsigned Opcode, Block &MBB, BlockIt MBBI);
 
-  bool expandAtomicArithmeticOp(unsigned MemOpcode, unsigned ArithOpcode,
-                                Block &MBB, BlockIt MBBI);
-
   /// Specific shift implementation.
   bool expandLSLB7Rd(Block &MBB, BlockIt MBBI);
   bool expandLSRB7Rd(Block &MBB, BlockIt MBBI);
@@ -917,13 +912,13 @@ bool AVRExpandPseudo::expand<AVR::ELPMWRdZPi>(Block &MBB, BlockIt MBBI) {
 
 template <typename Func>
 bool AVRExpandPseudo::expandAtomic(Block &MBB, BlockIt MBBI, Func f) {
-  // Remove the pseudo instruction.
   MachineInstr &MI = *MBBI;
+  const AVRSubtarget &STI = MBB.getParent()->getSubtarget<AVRSubtarget>();
 
   // Store the SREG.
   buildMI(MBB, MBBI, AVR::INRdA)
       .addReg(SCRATCH_REGISTER, RegState::Define)
-      .addImm(SREG_ADDR);
+      .addImm(STI.getIORegSREG());
 
   // Disable exceptions.
   buildMI(MBB, MBBI, AVR::BCLRs).addImm(7); // CLI
@@ -931,7 +926,9 @@ bool AVRExpandPseudo::expandAtomic(Block &MBB, BlockIt MBBI, Func f) {
   f(MI);
 
   // Restore the status reg.
-  buildMI(MBB, MBBI, AVR::OUTARr).addImm(SREG_ADDR).addReg(SCRATCH_REGISTER);
+  buildMI(MBB, MBBI, AVR::OUTARr)
+      .addImm(STI.getIORegSREG())
+      .addReg(SCRATCH_REGISTER);
 
   MI.eraseFromParent();
   return true;
@@ -955,31 +952,6 @@ bool AVRExpandPseudo::expandAtomicBinaryOp(unsigned Opcode, Block &MBB,
   return expandAtomicBinaryOp(Opcode, MBB, MBBI, [](MachineInstr &MI) {});
 }
 
-bool AVRExpandPseudo::expandAtomicArithmeticOp(unsigned Width,
-                                               unsigned ArithOpcode, Block &MBB,
-                                               BlockIt MBBI) {
-  return expandAtomic(MBB, MBBI, [&](MachineInstr &MI) {
-    auto DstReg = MI.getOperand(0).getReg();
-    auto PtrOp = MI.getOperand(1);
-    auto SrcReg = MI.getOperand(2).getReg();
-
-    unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
-    unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
-
-    // FIXME: this returns the new value (after the operation), not the old
-    // value as the atomicrmw instruction is supposed to do!
-
-    // Create the load
-    buildMI(MBB, MBBI, LoadOpcode, DstReg).addReg(PtrOp.getReg());
-
-    // Create the arithmetic op
-    buildMI(MBB, MBBI, ArithOpcode, DstReg).addReg(DstReg).addReg(SrcReg);
-
-    // Create the store
-    buildMI(MBB, MBBI, StoreOpcode).add(PtrOp).addReg(DstReg);
-  });
-}
-
 Register AVRExpandPseudo::scavengeGPR8(MachineInstr &MI) {
   MachineBasicBlock &MBB = *MI.getParent();
   RegScavenger RS;
@@ -1025,56 +997,6 @@ bool AVRExpandPseudo::expand<AVR::AtomicStore16>(Block &MBB, BlockIt MBBI) {
   return expandAtomicBinaryOp(AVR::STWPtrRr, MBB, MBBI);
 }
 
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadAdd8>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(8, AVR::ADDRdRr, MBB, MBBI);
-}
-
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadAdd16>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(16, AVR::ADDWRdRr, MBB, MBBI);
-}
-
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadSub8>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(8, AVR::SUBRdRr, MBB, MBBI);
-}
-
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadSub16>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(16, AVR::SUBWRdRr, MBB, MBBI);
-}
-
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadAnd8>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(8, AVR::ANDRdRr, MBB, MBBI);
-}
-
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadAnd16>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(16, AVR::ANDWRdRr, MBB, MBBI);
-}
-
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadOr8>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(8, AVR::ORRdRr, MBB, MBBI);
-}
-
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadOr16>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(16, AVR::ORWRdRr, MBB, MBBI);
-}
-
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadXor8>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(8, AVR::EORRdRr, MBB, MBBI);
-}
-
-template <>
-bool AVRExpandPseudo::expand<AVR::AtomicLoadXor16>(Block &MBB, BlockIt MBBI) {
-  return expandAtomicArithmeticOp(16, AVR::EORWRdRr, MBB, MBBI);
-}
-
 template <>
 bool AVRExpandPseudo::expand<AVR::AtomicFence>(Block &MBB, BlockIt MBBI) {
   // On AVR, there is only one core and so atomic fences do nothing.
@@ -2256,6 +2178,7 @@ bool AVRExpandPseudo::expand<AVR::SPREAD>(Block &MBB, BlockIt MBBI) {
 
 template <>
 bool AVRExpandPseudo::expand<AVR::SPWRITE>(Block &MBB, BlockIt MBBI) {
+  const AVRSubtarget &STI = MBB.getParent()->getSubtarget<AVRSubtarget>();
   MachineInstr &MI = *MBBI;
   Register SrcLoReg, SrcHiReg;
   Register SrcReg = MI.getOperand(1).getReg();
@@ -2265,7 +2188,7 @@ bool AVRExpandPseudo::expand<AVR::SPWRITE>(Block &MBB, BlockIt MBBI) {
 
   buildMI(MBB, MBBI, AVR::INRdA)
       .addReg(AVR::R0, RegState::Define)
-      .addImm(SREG_ADDR)
+      .addImm(STI.getIORegSREG())
       .setMIFlags(Flags);
 
   buildMI(MBB, MBBI, AVR::BCLRs).addImm(0x07).setMIFlags(Flags);
@@ -2276,7 +2199,7 @@ bool AVRExpandPseudo::expand<AVR::SPWRITE>(Block &MBB, BlockIt MBBI) {
       .setMIFlags(Flags);
 
   buildMI(MBB, MBBI, AVR::OUTARr)
-      .addImm(SREG_ADDR)
+      .addImm(STI.getIORegSREG())
       .addReg(AVR::R0, RegState::Kill)
       .setMIFlags(Flags);
 
@@ -2330,16 +2253,6 @@ bool AVRExpandPseudo::expandMI(Block &MBB, BlockIt MBBI) {
     EXPAND(AVR::AtomicLoad16);
     EXPAND(AVR::AtomicStore8);
     EXPAND(AVR::AtomicStore16);
-    EXPAND(AVR::AtomicLoadAdd8);
-    EXPAND(AVR::AtomicLoadAdd16);
-    EXPAND(AVR::AtomicLoadSub8);
-    EXPAND(AVR::AtomicLoadSub16);
-    EXPAND(AVR::AtomicLoadAnd8);
-    EXPAND(AVR::AtomicLoadAnd16);
-    EXPAND(AVR::AtomicLoadOr8);
-    EXPAND(AVR::AtomicLoadOr16);
-    EXPAND(AVR::AtomicLoadXor8);
-    EXPAND(AVR::AtomicLoadXor16);
     EXPAND(AVR::AtomicFence);
     EXPAND(AVR::STSWKRr);
     EXPAND(AVR::STWPtrRr);

diff --git a/llvm/lib/Target/AVR/AVRFrameLowering.cpp b/llvm/lib/Target/AVR/AVRFrameLowering.cpp
index b3bc9ede205eb..a6817834b2ef9 100644
--- a/llvm/lib/Target/AVR/AVRFrameLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRFrameLowering.cpp
@@ -73,7 +73,7 @@ void AVRFrameLowering::emitPrologue(MachineFunction &MF,
         .setMIFlag(MachineInstr::FrameSetup);
 
     BuildMI(MBB, MBBI, DL, TII.get(AVR::INRdA), AVR::R0)
-        .addImm(0x3f)
+        .addImm(STI.getIORegSREG())
         .setMIFlag(MachineInstr::FrameSetup);
     BuildMI(MBB, MBBI, DL, TII.get(AVR::PUSHRr))
         .addReg(AVR::R0, RegState::Kill)
@@ -144,7 +144,7 @@ static void restoreStatusRegister(MachineFunction &MF, MachineBasicBlock &MBB) {
   if (AFI->isInterruptOrSignalHandler()) {
     BuildMI(MBB, MBBI, DL, TII.get(AVR::POPRd), AVR::R0);
     BuildMI(MBB, MBBI, DL, TII.get(AVR::OUTARr))
-        .addImm(0x3f)
+        .addImm(STI.getIORegSREG())
         .addReg(AVR::R0, RegState::Kill);
     BuildMI(MBB, MBBI, DL, TII.get(AVR::POPWRd), AVR::R1R0);
   }

diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index a58fedf6cd364..a635a1e6ea283 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -1707,6 +1707,60 @@ AVRTargetLowering::insertCopyR1(MachineInstr &MI, MachineBasicBlock *BB) const {
   return BB;
 }
 
+// Lower atomicrmw operation to disable interrupts, do operation, and restore
+// interrupts. This works because all AVR microcontrollers are single core.
+MachineBasicBlock *AVRTargetLowering::insertAtomicArithmeticOp(
+    MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, int Width) const {
+  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+  MachineBasicBlock::iterator I(MI);
+  const Register SCRATCH_REGISTER = AVR::R0;
+  DebugLoc dl = MI.getDebugLoc();
+
+  // Example instruction sequence, for an atomic 8-bit add:
+  //   ldi r25, 5
+  //   in r0, SREG
+  //   cli
+  //   ld r24, X
+  //   add r25, r24
+  //   st X, r25
+  //   out SREG, r0
+
+  const TargetRegisterClass *RC =
+      (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
+  unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
+  unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
+
+  // Disable interrupts.
+  BuildMI(*BB, I, dl, TII.get(AVR::INRdA), SCRATCH_REGISTER)
+      .addImm(Subtarget.getIORegSREG());
+  BuildMI(*BB, I, dl, TII.get(AVR::BCLRs)).addImm(7);
+
+  // Load the original value.
+  BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())
+      .add(MI.getOperand(1));
+
+  // Do the arithmetic operation.
+  Register Result = MRI.createVirtualRegister(RC);
+  BuildMI(*BB, I, dl, TII.get(Opcode), Result)
+      .addReg(MI.getOperand(0).getReg())
+      .add(MI.getOperand(2));
+
+  // Store the result.
+  BuildMI(*BB, I, dl, TII.get(StoreOpcode))
+      .add(MI.getOperand(1))
+      .addReg(Result);
+
+  // Restore interrupts.
+  BuildMI(*BB, I, dl, TII.get(AVR::OUTARr))
+      .addImm(Subtarget.getIORegSREG())
+      .addReg(SCRATCH_REGISTER);
+
+  // Remove the pseudo instruction.
+  MI.eraseFromParent();
+  return BB;
+}
+
 MachineBasicBlock *
 AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *MBB) const {
@@ -1731,6 +1785,26 @@ AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return insertMul(MI, MBB);
   case AVR::CopyR1:
     return insertCopyR1(MI, MBB);
+  case AVR::AtomicLoadAdd8:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);
+  case AVR::AtomicLoadAdd16:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);
+  case AVR::AtomicLoadSub8:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);
+  case AVR::AtomicLoadSub16:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);
+  case AVR::AtomicLoadAnd8:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);
+  case AVR::AtomicLoadAnd16:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);
+  case AVR::AtomicLoadOr8:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);
+  case AVR::AtomicLoadOr16:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);
+  case AVR::AtomicLoadXor8:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);
+  case AVR::AtomicLoadXor16:
+    return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);
   }
 
   assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&

diff --git a/llvm/lib/Target/AVR/AVRISelLowering.h b/llvm/lib/Target/AVR/AVRISelLowering.h
index 116417b615669..c5c937c983ed4 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.h
+++ b/llvm/lib/Target/AVR/AVRISelLowering.h
@@ -189,6 +189,9 @@ class AVRTargetLowering : public TargetLowering {
   MachineBasicBlock *insertMul(MachineInstr &MI, MachineBasicBlock *BB) const;
   MachineBasicBlock *insertCopyR1(MachineInstr &MI,
                                   MachineBasicBlock *BB) const;
+  MachineBasicBlock *insertAtomicArithmeticOp(MachineInstr &MI,
+                                              MachineBasicBlock *BB,
+                                              unsigned Opcode, int Width) const;
 };
 
 } // end namespace llvm

diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.td b/llvm/lib/Target/AVR/AVRInstrInfo.td
index 2b96dc0b833ad..09e4398bb1c12 100644
--- a/llvm/lib/Target/AVR/AVRInstrInfo.td
+++ b/llvm/lib/Target/AVR/AVRInstrInfo.td
@@ -1446,27 +1446,14 @@ class AtomicStore<PatFrag Op, RegisterClass DRC, RegisterClass PTRRC>
                             : $rd, DRC
                             : $rr)]>;
 
-let Constraints =
-    "@earlyclobber $rd" in class AtomicLoadOp<PatFrag Op, RegisterClass DRC,
-                                              RegisterClass PTRRC>
-    : Pseudo<(outs DRC
-              : $rd),
-             (ins PTRRC
-              : $rr, DRC
-              : $operand),
-             "atomic_op", [(set DRC
-                            : $rd, (Op i16
-                                    : $rr, DRC
-                                    : $operand))]>;
-
-// FIXME: I think 16-bit atomic binary ops need to mark
-// r0 as clobbered.
+class AtomicLoadOp<PatFrag Op, RegisterClass DRC, RegisterClass PTRRC>
+    : Pseudo<(outs DRC:$rd),
+             (ins PTRRC:$rr, DRC:$operand),
+             "atomic_op", [(set DRC:$rd, (Op i16:$rr, DRC:$operand))]>;
 
 // Atomic instructions
 // ===================
 //
-// These are all expanded by AVRExpandPseudoInsts
-//
 // 8-bit operations can use any pointer register because
 // they are expanded directly into an LD/ST instruction.
 //
@@ -1482,16 +1469,18 @@ def AtomicStore16 : AtomicStore<atomic_store_16, DREGS, PTRDISPREGS>;
 class AtomicLoadOp8<PatFrag Op> : AtomicLoadOp<Op, GPR8, PTRREGS>;
 class AtomicLoadOp16<PatFrag Op> : AtomicLoadOp<Op, DREGS, PTRDISPREGS>;
 
-def AtomicLoadAdd8 : AtomicLoadOp8<atomic_load_add_8>;
-def AtomicLoadAdd16 : AtomicLoadOp16<atomic_load_add_16>;
-def AtomicLoadSub8 : AtomicLoadOp8<atomic_load_sub_8>;
-def AtomicLoadSub16 : AtomicLoadOp16<atomic_load_sub_16>;
-def AtomicLoadAnd8 : AtomicLoadOp8<atomic_load_and_8>;
-def AtomicLoadAnd16 : AtomicLoadOp16<atomic_load_and_16>;
-def AtomicLoadOr8 : AtomicLoadOp8<atomic_load_or_8>;
-def AtomicLoadOr16 : AtomicLoadOp16<atomic_load_or_16>;
-def AtomicLoadXor8 : AtomicLoadOp8<atomic_load_xor_8>;
-def AtomicLoadXor16 : AtomicLoadOp16<atomic_load_xor_16>;
+let usesCustomInserter=1 in {
+  def AtomicLoadAdd8 : AtomicLoadOp8<atomic_load_add_8>;
+  def AtomicLoadAdd16 : AtomicLoadOp16<atomic_load_add_16>;
+  def AtomicLoadSub8 : AtomicLoadOp8<atomic_load_sub_8>;
+  def AtomicLoadSub16 : AtomicLoadOp16<atomic_load_sub_16>;
+  def AtomicLoadAnd8 : AtomicLoadOp8<atomic_load_and_8>;
+  def AtomicLoadAnd16 : AtomicLoadOp16<atomic_load_and_16>;
+  def AtomicLoadOr8 : AtomicLoadOp8<atomic_load_or_8>;
+  def AtomicLoadOr16 : AtomicLoadOp16<atomic_load_or_16>;
+  def AtomicLoadXor8 : AtomicLoadOp8<atomic_load_xor_8>;
+  def AtomicLoadXor16 : AtomicLoadOp16<atomic_load_xor_16>;
+}
 def AtomicFence
     : Pseudo<(outs), (ins), "atomic_fence", [(atomic_fence timm, timm)]>;
 
@@ -2122,15 +2111,17 @@ def ROL : InstAlias<"rol\t$rd", (ADCRdRr GPR8 : $rd, GPR8 : $rd)>;
 // Sets all bits in a register.
 def : InstAlias<"ser\t$rd", (LDIRdK LD8 : $rd, 0xff), 0>;
 
-let Defs = [SREG] in def BSETs : FS<0, (outs),
-                                    (ins i8imm
-                                     : $s),
-                                    "bset\t$s", []>;
+let hasSideEffects=1 in {
+  let Defs = [SREG] in def BSETs : FS<0,
+                                      (outs),
+                                      (ins i8imm:$s),
+                                      "bset\t$s", []>;
 
-let Defs = [SREG] in def BCLRs : FS<1, (outs),
-                                    (ins i8imm
-                                     : $s),
-                                    "bclr\t$s", []>;
+  let Defs = [SREG] in def BCLRs : FS<1,
+                                      (outs),
+                                      (ins i8imm:$s),
+                                      "bclr\t$s", []>;
+}
 
 // Set/clear aliases for the carry (C) status flag (bit 0).
 def : InstAlias<"sec", (BSETs 0)>;

diff --git a/llvm/lib/Target/AVR/AVRRegisterInfo.cpp b/llvm/lib/Target/AVR/AVRRegisterInfo.cpp
index 5dd7f5c556959..048509546e91c 100644
--- a/llvm/lib/Target/AVR/AVRRegisterInfo.cpp
+++ b/llvm/lib/Target/AVR/AVRRegisterInfo.cpp
@@ -137,6 +137,7 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
   const MachineFrameInfo &MFI = MF.getFrameInfo();
   const TargetFrameLowering *TFI = TM.getSubtargetImpl()->getFrameLowering();
+  const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>();
   int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
   int Offset = MFI.getObjectOffset(FrameIndex);
 
@@ -219,7 +220,8 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
     // a compare and branch, invalidating the contents of SREG set by the
     // compare instruction because of the add/sub pairs. Conservatively save and
     // restore SREG before and after each add/sub pair.
-    BuildMI(MBB, II, dl, TII.get(AVR::INRdA), AVR::R0).addImm(0x3f);
+    BuildMI(MBB, II, dl, TII.get(AVR::INRdA), AVR::R0)
+        .addImm(STI.getIORegSREG());
 
     MachineInstr *New = BuildMI(MBB, II, dl, TII.get(AddOpc), AVR::R29R28)
                             .addReg(AVR::R29R28, RegState::Kill)
@@ -228,7 +230,7 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
 
     // Restore SREG.
     BuildMI(MBB, std::next(II), dl, TII.get(AVR::OUTARr))
-        .addImm(0x3f)
+        .addImm(STI.getIORegSREG())
         .addReg(AVR::R0, RegState::Kill);
 
     // No need to set SREG as dead here otherwise if the next instruction is a

diff --git a/llvm/lib/Target/AVR/AVRSubtarget.h b/llvm/lib/Target/AVR/AVRSubtarget.h
index f8ca191b18680..08269820c3605 100644
--- a/llvm/lib/Target/AVR/AVRSubtarget.h
+++ b/llvm/lib/Target/AVR/AVRSubtarget.h
@@ -93,6 +93,7 @@ class AVRSubtarget : public AVRGenSubtargetInfo {
 
   /// Get I/O register address.
   int getIORegRAMPZ(void) const { return 0x3b; }
+  int getIORegSREG(void) const { return 0x3f; }
 
 private:
   /// The ELF e_flags architecture.

diff --git a/llvm/test/CodeGen/AVR/atomics/load16.ll b/llvm/test/CodeGen/AVR/atomics/load16.ll
index 6b96edbdab8e7..c730ab2aa8bf4 100644
--- a/llvm/test/CodeGen/AVR/atomics/load16.ll
+++ b/llvm/test/CodeGen/AVR/atomics/load16.ll
@@ -31,10 +31,10 @@ define i16 @atomic_load_cmp_swap16(i16* %foo) {
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RR1:r[0-9]+]], [[RD:(X|Y|Z)]]
 ; CHECK-NEXT: ldd [[RR2:r[0-9]+]], [[RD]]+1
-; CHECK-NEXT: add [[RR1]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: adc [[RR2]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: st [[RD]], [[RR1]]
-; CHECK-NEXT: std [[RD]]+1, [[A:r[0-9]+]]
+; CHECK-NEXT: add [[TMP1:r[0-9]+]], [[RR1]]
+; CHECK-NEXT: adc [[TMP2:r[0-9]+]], [[RR2]]
+; CHECK-NEXT: st [[RD]], [[TMP1]]
+; CHECK-NEXT: std [[RD]]+1, [[TMP2]]
 ; CHECK-NEXT: out 63, r0
 define i16 @atomic_load_add16(i16* %foo) {
   %val = atomicrmw add i16* %foo, i16 13 seq_cst
@@ -46,10 +46,11 @@ define i16 @atomic_load_add16(i16* %foo) {
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RR1:r[0-9]+]], [[RD:(X|Y|Z)]]
 ; CHECK-NEXT: ldd [[RR2:r[0-9]+]], [[RD]]+1
-; CHECK-NEXT: sub [[RR1]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: sbc [[RR2]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: st [[RD]], [[RR1]]
-; CHECK-NEXT: std [[RD]]+1, [[A:r[0-9]+]]
+; CHECK-NEXT: movw
+; CHECK-NEXT: sub [[TMP1:r[0-9]+]], [[IN1:r[0-9]+]]
+; CHECK-NEXT: sbc [[TMP2:r[0-9]+]], [[IN2:r[0-9]+]]
+; CHECK-NEXT: st [[RD]], [[TMP1]]
+; CHECK-NEXT: std [[RD]]+1, [[TMP2]]
 ; CHECK-NEXT: out 63, r0
 define i16 @atomic_load_sub16(i16* %foo) {
   %val = atomicrmw sub i16* %foo, i16 13 seq_cst
@@ -61,10 +62,10 @@ define i16 @atomic_load_sub16(i16* %foo) {
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RR1:r[0-9]+]], [[RD:(X|Y|Z)]]
 ; CHECK-NEXT: ldd [[RR2:r[0-9]+]], [[RD]]+1
-; CHECK-NEXT: and [[RR1]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: and [[RR2]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: st [[RD]], [[RR1]]
-; CHECK-NEXT: std [[RD]]+1, [[A:r[0-9]+]]
+; CHECK-NEXT: and [[TMP1:r[0-9]+]], [[RR1]]
+; CHECK-NEXT: and [[TMP2:r[0-9]+]], [[RR2]]
+; CHECK-NEXT: st [[RD]], [[TMP1]]
+; CHECK-NEXT: std [[RD]]+1, [[TMP2]]
 ; CHECK-NEXT: out 63, r0
 define i16 @atomic_load_and16(i16* %foo) {
   %val = atomicrmw and i16* %foo, i16 13 seq_cst
@@ -76,10 +77,10 @@ define i16 @atomic_load_and16(i16* %foo) {
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RR1:r[0-9]+]], [[RD:(X|Y|Z)]]
 ; CHECK-NEXT: ldd [[RR2:r[0-9]+]], [[RD]]+1
-; CHECK-NEXT: or [[RR1]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: or [[RR2]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: st [[RD]], [[RR1]]
-; CHECK-NEXT: std [[RD]]+1, [[A:r[0-9]+]]
+; CHECK-NEXT: or [[TMP1:r[0-9]+]], [[RR1]]
+; CHECK-NEXT: or [[TMP2:r[0-9]+]], [[RR2]]
+; CHECK-NEXT: st [[RD]], [[TMP1]]
+; CHECK-NEXT: std [[RD]]+1, [[TMP2]]
 ; CHECK-NEXT: out 63, r0
 define i16 @atomic_load_or16(i16* %foo) {
   %val = atomicrmw or i16* %foo, i16 13 seq_cst
@@ -91,10 +92,10 @@ define i16 @atomic_load_or16(i16* %foo) {
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RR1:r[0-9]+]], [[RD:(X|Y|Z)]]
 ; CHECK-NEXT: ldd [[RR2:r[0-9]+]], [[RD]]+1
-; CHECK-NEXT: eor [[RR1]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: eor [[RR2]], [[TMP:r[0-9]+]]
-; CHECK-NEXT: st [[RD]], [[RR1]]
-; CHECK-NEXT: std [[RD]]+1, [[A:r[0-9]+]]
+; CHECK-NEXT: eor [[TMP1:r[0-9]+]], [[RR1]]
+; CHECK-NEXT: eor [[TMP2:r[0-9]+]], [[RR2]]
+; CHECK-NEXT: st [[RD]], [[TMP1]]
+; CHECK-NEXT: std [[RD]]+1, [[TMP2]]
 ; CHECK-NEXT: out 63, r0
 define i16 @atomic_load_xor16(i16* %foo) {
   %val = atomicrmw xor i16* %foo, i16 13 seq_cst

diff --git a/llvm/test/CodeGen/AVR/atomics/load8.ll b/llvm/test/CodeGen/AVR/atomics/load8.ll
index a20e662614666..bf9ac369c19dd 100644
--- a/llvm/test/CodeGen/AVR/atomics/load8.ll
+++ b/llvm/test/CodeGen/AVR/atomics/load8.ll
@@ -31,8 +31,8 @@ define i8 @atomic_load_cmp_swap8(i8* %foo) {
 ; CHECK:      in r0, 63
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
-; CHECK-NEXT: add [[RD]], [[RR1:r[0-9]+]]
-; CHECK-NEXT: st [[RR]], [[RD]]
+; CHECK-NEXT: add [[RR1:r[0-9]+]], [[RD]]
+; CHECK-NEXT: st [[RR]], [[RR1]]
 ; CHECK-NEXT: out 63, r0
 define i8 @atomic_load_add8(i8* %foo) {
   %val = atomicrmw add i8* %foo, i8 13 seq_cst
@@ -43,8 +43,9 @@ define i8 @atomic_load_add8(i8* %foo) {
 ; CHECK:      in r0, 63
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
-; CHECK-NEXT: sub [[RD]], [[RR1:r[0-9]+]]
-; CHECK-NEXT: st [[RR]], [[RD]]
+; CHECK-NEXT: mov [[TMP:r[0-9]+]], [[RD]]
+; CHECK-NEXT: sub [[TMP]], [[RR1:r[0-9]+]]
+; CHECK-NEXT: st [[RR]], [[TMP]]
 ; CHECK-NEXT: out 63, r0
 define i8 @atomic_load_sub8(i8* %foo) {
   %val = atomicrmw sub i8* %foo, i8 13 seq_cst
@@ -55,8 +56,8 @@ define i8 @atomic_load_sub8(i8* %foo) {
 ; CHECK:      in r0, 63
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
-; CHECK-NEXT: and [[RD]], [[RR1:r[0-9]+]]
-; CHECK-NEXT: st [[RR]], [[RD]]
+; CHECK-NEXT: and [[RR1:r[0-9]+]], [[RD]]
+; CHECK-NEXT: st [[RR]], [[RR1]]
 ; CHECK-NEXT: out 63, r0
 define i8 @atomic_load_and8(i8* %foo) {
   %val = atomicrmw and i8* %foo, i8 13 seq_cst
@@ -67,8 +68,8 @@ define i8 @atomic_load_and8(i8* %foo) {
 ; CHECK:      in r0, 63
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
-; CHECK-NEXT: or [[RD]], [[RR1:r[0-9]+]]
-; CHECK-NEXT: st [[RR]], [[RD]]
+; CHECK-NEXT: or [[RR1:r[0-9]+]], [[RD]]
+; CHECK-NEXT: st [[RR]], [[RR1]]
 ; CHECK-NEXT: out 63, r0
 define i8 @atomic_load_or8(i8* %foo) {
   %val = atomicrmw or i8* %foo, i8 13 seq_cst
@@ -79,8 +80,8 @@ define i8 @atomic_load_or8(i8* %foo) {
 ; CHECK:      in r0, 63
 ; CHECK-NEXT: cli
 ; CHECK-NEXT: ld [[RD:r[0-9]+]], [[RR:(X|Y|Z)]]
-; CHECK-NEXT: eor [[RD]], [[RR1:r[0-9]+]]
-; CHECK-NEXT: st [[RR]], [[RD]]
+; CHECK-NEXT: eor [[RR1:r[0-9]+]], [[RD]]
+; CHECK-NEXT: st [[RR]], [[RR1]]
 ; CHECK-NEXT: out 63, r0
 define i8 @atomic_load_xor8(i8* %foo) {
   %val = atomicrmw xor i8* %foo, i8 13 seq_cst


        

