[llvm] r348075 - [AMDGPU] Split 64-Bit XNOR to 64-Bit NOT/XOR

Graham Sellers via llvm-commits llvm-commits at lists.llvm.org
Sat Dec 1 04:27:53 PST 2018


Author: gsellers
Date: Sat Dec  1 04:27:53 2018
New Revision: 348075

URL: http://llvm.org/viewvc/llvm-project?rev=348075&view=rev
Log:
[AMDGPU] Split 64-Bit XNOR to 64-Bit NOT/XOR

The identity ~(x ^ y) == (~x ^ y) == (x ^ ~y) allows an XNOR (XOR/NOT) to be turned into a NOT/XOR. Handling this case with its own split lets the NOT remain on the scalar unit. Previously, we split a 64-bit XNOR into two 32-bit XNORs and then lowered them. Now we get three instructions (s_not, v_xor, v_xor) rather than four in the case where either source is a scalar 64-bit operand.
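
As a sanity check on the algebra (illustrative only, not part of this
patch): the identity can be verified exhaustively at a narrow width, and
it is width-independent, so it carries over to the 64-bit case directly.

    // Minimal C++ sketch: exhaustively verify
    //   ~(x ^ y) == (~x ^ y) == (x ^ ~y)
    // over all 8-bit pairs. The same algebra holds at any bit width.
    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned X = 0; X < 256; ++X) {
        for (unsigned Y = 0; Y < 256; ++Y) {
          const uint8_t A = uint8_t(X), B = uint8_t(Y);
          const uint8_t Xnor = uint8_t(~(A ^ B));
          assert(Xnor == uint8_t(~A ^ B)); // NOT folded into the LHS
          assert(Xnor == uint8_t(A ^ ~B)); // NOT folded into the RHS
        }
      }
      return 0;
    }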

Add test cases to xnor.ll that attempt XNOR Vx, Sy and XNOR Sx, Vy. Also add tests that use the opposite identity, so that (~x ^ y) on the scalar unit (or on the vector unit for gfx906) can generate XNOR. This already worked, but there was no test for it.
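
For reference, the IR pattern these tests exercise is the canonical XNOR
form, an xor whose result is xor'ed with -1, plus the opposite identity
in which one operand is negated first (sketch mirroring the additions to
xnor.ll below; value names are illustrative):

    %xor = xor i64 %a, %b     ; x ^ y
    %r   = xor i64 %xor, -1   ; ~(x ^ y), i.e. XNOR
    ; ... and the opposite identity:
    %na  = xor i64 %a, -1     ; ~x
    %r2  = xor i64 %na, %b    ; ~x ^ y, also XNOR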

Differential Revision: https://reviews.llvm.org/D55071

Modified:
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h
    llvm/trunk/test/CodeGen/AMDGPU/xnor.ll

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp?rev=348075&r1=348074&r2=348075&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp Sat Dec  1 04:27:53 2018
@@ -876,7 +876,7 @@ void SIInstrInfo::storeRegToStackSlot(Ma
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
-  DebugLoc DL = MBB.findDebugLoc(MI);
+  const DebugLoc &DL = MBB.findDebugLoc(MI);
 
   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
@@ -977,7 +977,7 @@ void SIInstrInfo::loadRegFromStackSlot(M
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
-  DebugLoc DL = MBB.findDebugLoc(MI);
+  const DebugLoc &DL = MBB.findDebugLoc(MI);
   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
   unsigned SpillSize = TRI->getSpillSize(*RC);
@@ -1032,7 +1032,7 @@ unsigned SIInstrInfo::calculateLDSSpillA
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
-  DebugLoc DL = MBB.findDebugLoc(MI);
+  const DebugLoc &DL = MBB.findDebugLoc(MI);
   unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
   unsigned WavefrontSize = ST.getWavefrontSize();
 
@@ -1040,7 +1040,7 @@ unsigned SIInstrInfo::calculateLDSSpillA
   if (!MFI->hasCalculatedTID()) {
     MachineBasicBlock &Entry = MBB.getParent()->front();
     MachineBasicBlock::iterator Insert = Entry.front();
-    DebugLoc DL = Insert->getDebugLoc();
+    const DebugLoc &DL = Insert->getDebugLoc();
 
     TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                    *MF);
@@ -4162,7 +4162,10 @@ void SIInstrInfo::moveToVALU(MachineInst
       continue;
 
     case AMDGPU::S_XNOR_B64:
-      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
+      if (ST.hasDLInsts())
+        splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
+      else
+        splitScalar64BitXnor(Worklist, Inst, MDT);
       Inst.eraseFromParent();
       continue;
 
@@ -4753,13 +4756,55 @@ void SIInstrInfo::splitScalar64BitBinary
   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
 }
 
+void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
+                                       MachineInstr &Inst,
+                                       MachineDominatorTree *MDT) const {
+  MachineBasicBlock &MBB = *Inst.getParent();
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+  MachineOperand &Dest = Inst.getOperand(0);
+  MachineOperand &Src0 = Inst.getOperand(1);
+  MachineOperand &Src1 = Inst.getOperand(2);
+  const DebugLoc &DL = Inst.getDebugLoc();
+
+  MachineBasicBlock::iterator MII = Inst;
+
+  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
+
+  unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+
+  MachineOperand *Op0;
+  MachineOperand *Op1;
+
+  if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
+    Op0 = &Src0;
+    Op1 = &Src1;
+  } else {
+    Op0 = &Src1;
+    Op1 = &Src0;
+  }
+
+  BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
+    .add(*Op0);
+
+  unsigned NewDest = MRI.createVirtualRegister(DestRC);
+
+  MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
+    .addReg(Interm)
+    .add(*Op1);
+
+  MRI.replaceRegWith(Dest.getReg(), NewDest);
+
+  Worklist.insert(&Xor);
+}
+
 void SIInstrInfo::splitScalar64BitBCNT(
     SetVectorType &Worklist, MachineInstr &Inst) const {
   MachineBasicBlock &MBB = *Inst.getParent();
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
 
   MachineBasicBlock::iterator MII = Inst;
-  DebugLoc DL = Inst.getDebugLoc();
+  const DebugLoc &DL = Inst.getDebugLoc();
 
   MachineOperand &Dest = Inst.getOperand(0);
   MachineOperand &Src = Inst.getOperand(1);
@@ -4795,7 +4840,7 @@ void SIInstrInfo::splitScalar64BitBFE(Se
   MachineBasicBlock &MBB = *Inst.getParent();
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
   MachineBasicBlock::iterator MII = Inst;
-  DebugLoc DL = Inst.getDebugLoc();
+  const DebugLoc &DL = Inst.getDebugLoc();
 
   MachineOperand &Dest = Inst.getOperand(0);
   uint32_t Imm = Inst.getOperand(2).getImm();

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h?rev=348075&r1=348074&r2=348075&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h Sat Dec  1 04:27:53 2018
@@ -107,6 +107,9 @@ private:
                                 unsigned Opcode,
                                 MachineDominatorTree *MDT = nullptr) const;
 
+  void splitScalar64BitXnor(SetVectorType &Worklist, MachineInstr &Inst,
+                            MachineDominatorTree *MDT = nullptr) const;
+
   void splitScalar64BitBCNT(SetVectorType &Worklist,
                             MachineInstr &Inst) const;
   void splitScalar64BitBFE(SetVectorType &Worklist,

Modified: llvm/trunk/test/CodeGen/AMDGPU/xnor.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/xnor.ll?rev=348075&r1=348074&r2=348075&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/xnor.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/xnor.ll Sat Dec  1 04:27:53 2018
@@ -74,9 +74,9 @@ entry:
 ; GCN-LABEL: {{^}}vector_xnor_i64_one_use
 ; GCN-NOT: s_xnor_b64
 ; GCN: v_not_b32
-; GCN: v_xor_b32
 ; GCN: v_not_b32
 ; GCN: v_xor_b32
+; GCN: v_xor_b32
 ; GCN-DL: v_xnor_b32
 ; GCN-DL: v_xnor_b32
 define i64 @vector_xnor_i64_one_use(i64 %a, i64 %b) {
@@ -110,5 +110,89 @@ define amdgpu_kernel void @xnor_v_s_i32_
   ret void
 }
 
+; GCN-LABEL: {{^}}xnor_i64_s_v_one_use
+; GCN-NOT: s_xnor_b64
+; GCN: s_not_b64
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+; GCN-DL: v_xnor_b32
+define amdgpu_kernel void @xnor_i64_s_v_one_use(
+  i64 addrspace(1)* %r0, i64 %a) {
+entry:
+  %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %b64 = zext i32 %b32 to i64
+  %b = shl i64 %b64, 29
+  %xor = xor i64 %a, %b
+  %r0.val = xor i64 %xor, -1
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
+; GCN-LABEL: {{^}}xnor_i64_v_s_one_use
+; GCN-NOT: s_xnor_b64
+; GCN: s_not_b64
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+; GCN-DL: v_xnor_b32
+define amdgpu_kernel void @xnor_i64_v_s_one_use(
+  i64 addrspace(1)* %r0, i64 %a) {
+entry:
+  %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %b64 = zext i32 %b32 to i64
+  %b = shl i64 %b64, 29
+  %xor = xor i64 %b, %a
+  %r0.val = xor i64 %xor, -1
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
+; GCN-LABEL: {{^}}vector_xor_na_b_i32_one_use
+; GCN-NOT: s_xnor_b32
+; GCN: v_not_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+define i32 @vector_xor_na_b_i32_one_use(i32 %a, i32 %b) {
+entry:
+  %na = xor i32 %a, -1
+  %r = xor i32 %na, %b
+  ret i32 %r
+}
+
+; GCN-LABEL: {{^}}vector_xor_a_nb_i32_one_use
+; GCN-NOT: s_xnor_b32
+; GCN: v_not_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+define i32 @vector_xor_a_nb_i32_one_use(i32 %a, i32 %b) {
+entry:
+  %nb = xor i32 %b, -1
+  %r = xor i32 %a, %nb
+  ret i32 %r
+}
+
+; GCN-LABEL: {{^}}scalar_xor_a_nb_i64_one_use
+; GCN: s_xnor_b64
+define amdgpu_kernel void @scalar_xor_a_nb_i64_one_use(
+    i64 addrspace(1)* %r0, i64 %a, i64 %b) {
+entry:
+  %nb = xor i64 %b, -1
+  %r0.val = xor i64 %a, %nb
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
+; GCN-LABEL: {{^}}scalar_xor_na_b_i64_one_use
+; GCN: s_xnor_b64
+define amdgpu_kernel void @scalar_xor_na_b_i64_one_use(
+    i64 addrspace(1)* %r0, i64 %a, i64 %b) {
+entry:
+  %na = xor i64 %a, -1
+  %r0.val = xor i64 %na, %b
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.amdgcn.workitem.id.x() #0
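
The RUN lines at the top of xnor.ll are outside this hunk and not shown;
a representative invocation for the dot-product-capable target (an
assumption on my part, inferred from the GCN/GCN-DL prefixes above)
would be:

    llc -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < xnor.ll \
      | FileCheck -check-prefixes=GCN,GCN-DL xnor.ll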



