[llvm] r336319 - [ARM] ParallelDSP: only support i16 loads for now
Sjoerd Meijer via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 5 01:21:40 PDT 2018
Author: sjoerdmeijer
Date: Thu Jul 5 01:21:40 2018
New Revision: 336319
URL: http://llvm.org/viewvc/llvm-project?rev=336319&view=rev
Log:
[ARM] ParallelDSP: only support i16 loads for now
We were miscompiling i8 loads, so reject them as unsupported narrow operations
for now.
Differential Revision: https://reviews.llvm.org/D48944
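For reference, the shape that IsNarrowSequence still accepts after this change is a sign- or zero-extend of an i16 load feeding the multiply-accumulate; here is a minimal sketch in LLVM IR (simplified from the existing smlad1.ll test, value names are illustrative):

  %a      = load i16, i16* %pa, align 2
  %b      = load i16, i16* %pb, align 2
  %conv.a = sext i16 %a to i32
  %conv.b = sext i16 %b to i32
  %mul    = mul nsw i32 %conv.a, %conv.b
  %acc.1  = add i32 %acc.0, %mul        ; reduction that can become smlad

The same sequence built from i8 loads is the case this patch starts rejecting.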
Modified:
llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp
llvm/trunk/test/CodeGen/ARM/smlad1.ll
Modified: llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp?rev=336319&r1=336318&r2=336319&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp Thu Jul 5 01:21:40 2018
@@ -165,9 +165,14 @@ namespace {
};
}
-template<unsigned BitWidth>
+// MaxBitWidth: the maximum supported bitwidth of the elements in the DSP
+// instructions, which is set to 16. So here we should collect all i8 and i16
+// narrow operations.
+// TODO: we currently only collect i16; i8 will be supported later, which is
+// why we check that types are equal to MaxBitWidth and not <= MaxBitWidth.
+template<unsigned MaxBitWidth>
static bool IsNarrowSequence(Value *V, ValueList &VL) {
- LLVM_DEBUG(dbgs() << "Is narrow sequence: "; V->dump());
+ LLVM_DEBUG(dbgs() << "Is narrow sequence? "; V->dump());
ConstantInt *CInt;
if (match(V, m_ConstantInt(CInt))) {
@@ -180,38 +185,30 @@ static bool IsNarrowSequence(Value *V, V
return false;
Value *Val, *LHS, *RHS;
- bool isNarrow = false;
-
if (match(V, m_Trunc(m_Value(Val)))) {
- if (cast<TruncInst>(I)->getDestTy()->getIntegerBitWidth() == BitWidth)
- isNarrow = IsNarrowSequence<BitWidth>(Val, VL);
+ if (cast<TruncInst>(I)->getDestTy()->getIntegerBitWidth() == MaxBitWidth)
+ return IsNarrowSequence<MaxBitWidth>(Val, VL);
} else if (match(V, m_Add(m_Value(LHS), m_Value(RHS)))) {
// TODO: we need to implement sadd16/sadd8 for this, which enables us to
// also do the rewrite for smlad8.ll, but it is unsupported for now.
- isNarrow = false;
+ LLVM_DEBUG(dbgs() << "No, unsupported Op:\t"; I->dump());
+ return false;
} else if (match(V, m_ZExtOrSExt(m_Value(Val)))) {
- if (cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth() == BitWidth)
- isNarrow = true;
- else
- LLVM_DEBUG(dbgs() << "Wrong SrcTy size of CastInst: " <<
- cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth());
-
- if (match(Val, m_Load(m_Value(Val)))) {
- auto *Ld = dyn_cast<LoadInst>(I->getOperand(0));
- LLVM_DEBUG(dbgs() << "Found narrow Load:\t"; Ld->dump());
- VL.push_back(Ld);
- isNarrow = true;
- } else if (!isa<Instruction>(I->getOperand(0)))
- VL.push_back(I->getOperand(0));
+ if (cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth() != MaxBitWidth) {
+ LLVM_DEBUG(dbgs() << "No, wrong SrcTy size: " <<
+ cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth() << "\n");
+ return false;
+ }
+
+ if (match(Val, m_Load(m_Value()))) {
+ LLVM_DEBUG(dbgs() << "Yes, found narrow Load:\t"; Val->dump());
+ VL.push_back(Val);
+ VL.push_back(I);
+ return true;
+ }
}
-
- if (isNarrow) {
- LLVM_DEBUG(dbgs() << "Found narrow Op:\t"; I->dump());
- VL.push_back(I);
- } else
- LLVM_DEBUG(dbgs() << "Found unsupported Op:\t"; I->dump());
-
- return isNarrow;
+ LLVM_DEBUG(dbgs() << "No, unsupported Op:\t"; I->dump());
+ return false;
}
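To make the updated sext/zext case concrete (reusing illustrative value names, not names from the pass itself): for an operand such as

  %ld   = load i16, i16* %p, align 2
  %conv = sext i16 %ld to i32

the code now records both the load and the extend, so VL receives [%ld, %conv]. If %ld were an i8 load, the SrcTy check against MaxBitWidth (16) fails and the whole candidate is rejected, which is what the new @test2 below exercises.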
// Element-by-element comparison of Value lists returning true if they are
Modified: llvm/trunk/test/CodeGen/ARM/smlad1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad1.ll?rev=336319&r1=336318&r2=336319&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad1.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/smlad1.ll Thu Jul 5 01:21:40 2018
@@ -1,5 +1,6 @@
; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+; CHECK-LABEL: @test1
; CHECK: %mac1{{\.}}026 = phi i32 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
; CHECK: [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
; CHECK: [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
@@ -7,7 +8,7 @@
; CHECK: [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
; CHECK: [[V8]] = call i32 @llvm.arm.smlad(i32 [[V5]], i32 [[V7]], i32 %mac1{{\.}}026)
-define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+define dso_local i32 @test1(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
entry:
%cmp24 = icmp sgt i32 %arg, 0
br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
@@ -48,3 +49,47 @@ for.body:
br i1 %exitcond, label %for.body, label %for.cond.cleanup
}
+; Here we have i8 loads, which we do want to support, but don't handle yet.
+;
+; CHECK-LABEL: @test2
+; CHECK-NOT: call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test2(i32 %arg, i32* nocapture readnone %arg1, i8* nocapture readonly %arg2, i8* nocapture readonly %arg3) {
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i8, i8* %arg3, align 2
+ %.pre27 = load i8, i8* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+ ret i32 %mac1.0.lcssa
+
+for.body:
+ %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i8, i8* %arg3, i32 %i.025
+ %0 = load i8, i8* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i8, i8* %arg3, i32 %add
+ %1 = load i8, i8* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i8, i8* %arg2, i32 %i.025
+ %2 = load i8, i8* %arrayidx3, align 2
+ %conv = sext i8 %2 to i32
+ %conv4 = sext i8 %0 to i32
+ %mul = mul nsw i32 %conv, %conv4
+ %arrayidx6 = getelementptr inbounds i8, i8* %arg2, i32 %add
+ %3 = load i8, i8* %arrayidx6, align 2
+ %conv7 = sext i8 %3 to i32
+ %conv8 = sext i8 %1 to i32
+ %mul9 = mul nsw i32 %conv7, %conv8
+ %add10 = add i32 %mul, %mac1.026
+ %add11 = add i32 %add10, %mul9
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+