[PATCH] AMDGPU: really don't commute REV opcodes if the target variant doesn't exist
Marek Olšák
maraeo at gmail.com
Thu Jun 25 04:08:01 PDT 2015
From: Marek Olšák <marek.olsak at amd.com>
If pseudoToMCOpcode failed, commuteOpcode would return the original opcode, so
the operands would be swapped while the opcode itself remained unchanged.
It resulted in LSHLREV a, b ---> LSHLREV b, a.
This fixes Glamor text rendering and
piglit/arb_sample_shading-builtin-gl-sample-mask.
This is a candidate for stable branches.
---
lib/Target/AMDGPU/SIInstrInfo.cpp | 20 +++++++-----
lib/Target/AMDGPU/SIInstrInfo.h | 2 +-
test/CodeGen/AMDGPU/commute-shifts.ll | 57 +++++++++++++++++++++++++++++++++++
3 files changed, 70 insertions(+), 9 deletions(-)
create mode 100644 test/CodeGen/AMDGPU/commute-shifts.ll
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index 47bc178..b517fb2 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -440,22 +440,22 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
}
}
-unsigned SIInstrInfo::commuteOpcode(const MachineInstr &MI) const {
+int SIInstrInfo::commuteOpcode(const MachineInstr &MI) const {
const unsigned Opcode = MI.getOpcode();
int NewOpc;
// Try to map original to commuted opcode
NewOpc = AMDGPU::getCommuteRev(Opcode);
- // Check if the commuted (REV) opcode exists on the target.
- if (NewOpc != -1 && pseudoToMCOpcode(NewOpc) != -1)
- return NewOpc;
+ if (NewOpc != -1)
+ // Check if the commuted (REV) opcode exists on the target.
+ return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
// Try to map commuted to original opcode
NewOpc = AMDGPU::getCommuteOrig(Opcode);
- // Check if the original (non-REV) opcode exists on the target.
- if (NewOpc != -1 && pseudoToMCOpcode(NewOpc) != -1)
- return NewOpc;
+ if (NewOpc != -1)
+ // Check if the original (non-REV) opcode exists on the target.
+ return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
return Opcode;
}
@@ -771,6 +771,10 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
if (MI->getNumOperands() < 3)
return nullptr;
+ int CommutedOpcode = commuteOpcode(*MI);
+ if (CommutedOpcode == -1)
+ return nullptr;
+
int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
AMDGPU::OpName::src0);
assert(Src0Idx != -1 && "Should always have src0 operand");
@@ -833,7 +837,7 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
}
if (MI)
- MI->setDesc(get(commuteOpcode(*MI)));
+ MI->setDesc(get(CommutedOpcode));
return MI;
}
diff --git a/lib/Target/AMDGPU/SIInstrInfo.h b/lib/Target/AMDGPU/SIInstrInfo.h
index 6fafb94..0382272 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/lib/Target/AMDGPU/SIInstrInfo.h
@@ -117,7 +117,7 @@ public:
// register. If there is no hardware instruction that can store to \p
// DstRC, then AMDGPU::COPY is returned.
unsigned getMovOpcode(const TargetRegisterClass *DstRC) const;
- unsigned commuteOpcode(const MachineInstr &MI) const;
+ int commuteOpcode(const MachineInstr &MI) const;
MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI = false) const override;
diff --git a/test/CodeGen/AMDGPU/commute-shifts.ll b/test/CodeGen/AMDGPU/commute-shifts.ll
new file mode 100644
index 0000000..c6bbb90
--- /dev/null
+++ b/test/CodeGen/AMDGPU/commute-shifts.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}main:
+; SI: v_lshl_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; VI: v_lshlrev_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 1
+
+define void @main([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [34 x <8 x i32>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, i32, float, float) #0 {
+main_body:
+ %22 = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %1, i64 0, i64 0
+ %23 = load <16 x i8>, <16 x i8> addrspace(2)* %22, align 16, !tbaa !0
+ %24 = call float @llvm.SI.load.const(<16 x i8> %23, i32 0)
+ %25 = call float @llvm.SI.load.const(<16 x i8> %23, i32 4)
+ %26 = call float @llvm.SI.load.const(<16 x i8> %23, i32 8)
+ %27 = call float @llvm.SI.load.const(<16 x i8> %23, i32 12)
+ %28 = call float @llvm.SI.load.const(<16 x i8> %23, i32 16)
+ %29 = call float @llvm.SI.load.const(<16 x i8> %23, i32 20)
+ %30 = call float @llvm.SI.load.const(<16 x i8> %23, i32 24)
+ %31 = call float @llvm.SI.load.const(<16 x i8> %23, i32 28)
+ %32 = bitcast [34 x <8 x i32>] addrspace(2)* %3 to <32 x i8> addrspace(2)*
+ %33 = load <32 x i8>, <32 x i8> addrspace(2)* %32, align 32, !tbaa !0
+ %34 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %5, <2 x i32> %7)
+ %35 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %5, <2 x i32> %7)
+ %36 = fptosi float %34 to i32
+ %37 = fptosi float %35 to i32
+ %38 = ashr i32 %36, 3
+ %39 = insertelement <4 x i32> undef, i32 %38, i32 0
+ %40 = insertelement <4 x i32> %39, i32 %37, i32 1
+ %41 = insertelement <4 x i32> %40, i32 0, i32 2
+ %42 = call <4 x i32> @llvm.SI.imageload.v4i32(<4 x i32> %41, <32 x i8> %33, i32 2)
+ %43 = extractelement <4 x i32> %42, i32 0
+ %44 = and i32 %36, 7
+ %45 = shl i32 1, %44
+ %46 = and i32 %43, %45
+ %47 = icmp eq i32 %46, 0
+ %. = select i1 %47, float %28, float %24
+ %.8 = select i1 %47, float %29, float %25
+ %.9 = select i1 %47, float %30, float %26
+ %.10 = select i1 %47, float %31, float %27
+ %48 = call i32 @llvm.SI.packf16(float %., float %.8)
+ %49 = bitcast i32 %48 to float
+ %50 = call i32 @llvm.SI.packf16(float %.9, float %.10)
+ %51 = bitcast i32 %50 to float
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %49, float %51, float %49, float %51)
+ ret void
+}
+
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
+declare <4 x i32> @llvm.SI.imageload.v4i32(<4 x i32>, <32 x i8>, i32) #1
+declare i32 @llvm.SI.packf16(float, float) #1
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" "enable-no-nans-fp-math"="true" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!"const", null, i32 1}
--
2.1.0
More information about the llvm-commits
mailing list