[llvm] [AArch64] Lower extending sitofp using tbl (PR #92528)
Momchil Velikov via llvm-commits
llvm-commits at lists.llvm.org
Fri May 17 07:26:42 PDT 2024
https://github.com/momchil-velikov updated https://github.com/llvm/llvm-project/pull/92528
From 20aab37f8678eadd3d0348d86d73f87de34cbb01 Mon Sep 17 00:00:00 2001
From: Momchil Velikov <momchil.velikov at arm.com>
Date: Fri, 17 May 2024 11:50:31 +0100
Subject: [PATCH 1/3] [AArch64] Refactor creation of a shuffle mask for TBL
(NFC)
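
For illustration, a minimal sketch of the shuffle the extracted mask
builder drives, assuming a <4 x i8> -> <4 x i32> zero-extend on little
endian (the function name is made up; lane index 4, i.e. NumElts,
selects the zeroed first element of the second shuffle operand):

  define <4 x i32> @zext_via_tbl(<4 x i8> %op) {
    %zeroed = insertelement <4 x i8> poison, i8 0, i64 0
    %shuf = shufflevector <4 x i8> %op, <4 x i8> %zeroed,
            <16 x i32> <i32 0, i32 4, i32 4, i32 4, i32 1, i32 4, i32 4, i32 4,
                        i32 2, i32 4, i32 4, i32 4, i32 3, i32 4, i32 4, i32 4>
    %res = bitcast <16 x i8> %shuf to <4 x i32>
    ret <4 x i32> %res
  }

On big endian the mask is rotated so that each source byte lands last
in its four-byte group.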
---
.../Target/AArch64/AArch64ISelLowering.cpp | 88 +++++++++++--------
1 file changed, 50 insertions(+), 38 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1e0071fffe666..6db26a0973d9e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15691,48 +15691,51 @@ bool AArch64TargetLowering::shouldSinkOperands(
return false;
}
-static bool createTblShuffleForZExt(ZExtInst *ZExt, FixedVectorType *DstTy,
- bool IsLittleEndian) {
- Value *Op = ZExt->getOperand(0);
- auto *SrcTy = cast<FixedVectorType>(Op->getType());
- auto SrcWidth = cast<IntegerType>(SrcTy->getElementType())->getBitWidth();
- auto DstWidth = cast<IntegerType>(DstTy->getElementType())->getBitWidth();
+static bool createTblShuffleMask(unsigned SrcWidth, unsigned DstWidth,
+ unsigned NumElts, bool IsLittleEndian,
+ SmallVectorImpl<int> &Mask) {
if (DstWidth % 8 != 0 || DstWidth <= 16 || DstWidth >= 64)
return false;
- assert(DstWidth % SrcWidth == 0 &&
- "TBL lowering is not supported for a ZExt instruction with this "
- "source & destination element type.");
- unsigned ZExtFactor = DstWidth / SrcWidth;
+ if (DstWidth % SrcWidth != 0)
+ return false;
+
+ unsigned Factor = DstWidth / SrcWidth;
+ unsigned MaskLen = NumElts * Factor;
+
+ Mask.clear();
+ Mask.resize(MaskLen, NumElts);
+
+ unsigned SrcIndex = 0;
+ for (unsigned I = 0; I < MaskLen; I += Factor)
+ Mask[I] = SrcIndex++;
+
+ if (!IsLittleEndian)
+ std::rotate(Mask.rbegin(), Mask.rbegin() + Factor - 1, Mask.rend());
+
+ return true;
+}
+
+static Value *createTblShuffleForZExt(IRBuilderBase &Builder, Value *Op,
+ FixedVectorType *ZExtTy,
+ FixedVectorType *DstTy,
+ bool IsLittleEndian) {
+ auto *SrcTy = cast<FixedVectorType>(Op->getType());
unsigned NumElts = SrcTy->getNumElements();
- IRBuilder<> Builder(ZExt);
+ auto SrcWidth = cast<IntegerType>(SrcTy->getElementType())->getBitWidth();
+ auto DstWidth = cast<IntegerType>(DstTy->getElementType())->getBitWidth();
+
SmallVector<int> Mask;
- // Create a mask that selects <0,...,Op[i]> for each lane of the destination
- // vector to replace the original ZExt. This can later be lowered to a set of
- // tbl instructions.
- for (unsigned i = 0; i < NumElts * ZExtFactor; i++) {
- if (IsLittleEndian) {
- if (i % ZExtFactor == 0)
- Mask.push_back(i / ZExtFactor);
- else
- Mask.push_back(NumElts);
- } else {
- if ((i + 1) % ZExtFactor == 0)
- Mask.push_back((i - ZExtFactor + 1) / ZExtFactor);
- else
- Mask.push_back(NumElts);
- }
- }
+ if (!createTblShuffleMask(SrcWidth, DstWidth, NumElts, IsLittleEndian, Mask))
+ return nullptr;
auto *FirstEltZero = Builder.CreateInsertElement(
PoisonValue::get(SrcTy), Builder.getInt8(0), uint64_t(0));
Value *Result = Builder.CreateShuffleVector(Op, FirstEltZero, Mask);
Result = Builder.CreateBitCast(Result, DstTy);
- if (DstTy != ZExt->getType())
- Result = Builder.CreateZExt(Result, ZExt->getType());
- ZExt->replaceAllUsesWith(Result);
- ZExt->eraseFromParent();
- return true;
+ if (DstTy != ZExtTy)
+ Result = Builder.CreateZExt(Result, ZExtTy);
+ return Result;
}
static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {
@@ -15897,21 +15900,30 @@ bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
DstTy = TruncDstType;
}
-
- return createTblShuffleForZExt(ZExt, DstTy, Subtarget->isLittleEndian());
+ IRBuilder<> Builder(ZExt);
+ Value *Result = createTblShuffleForZExt(
+ Builder, ZExt->getOperand(0), cast<FixedVectorType>(ZExt->getType()),
+ DstTy, Subtarget->isLittleEndian());
+ if (!Result)
+ return false;
+ ZExt->replaceAllUsesWith(Result);
+ ZExt->eraseFromParent();
+ return true;
}
auto *UIToFP = dyn_cast<UIToFPInst>(I);
if (UIToFP && SrcTy->getElementType()->isIntegerTy(8) &&
DstTy->getElementType()->isFloatTy()) {
IRBuilder<> Builder(I);
- auto *ZExt = cast<ZExtInst>(
- Builder.CreateZExt(I->getOperand(0), VectorType::getInteger(DstTy)));
+ Value *ZExt = createTblShuffleForZExt(
+ Builder, I->getOperand(0), FixedVectorType::getInteger(DstTy),
+ FixedVectorType::getInteger(DstTy), Subtarget->isLittleEndian());
+ if (!ZExt)
+ return false;
auto *UI = Builder.CreateUIToFP(ZExt, DstTy);
I->replaceAllUsesWith(UI);
I->eraseFromParent();
- return createTblShuffleForZExt(ZExt, cast<FixedVectorType>(ZExt->getType()),
- Subtarget->isLittleEndian());
+ return true;
}
// Convert 'fptoui <(8|16) x float> to <(8|16) x i8>' to a wide fptoui
From 190852f5a8c75c3056b3d6348d0ede2d04a941ac Mon Sep 17 00:00:00 2001
From: Momchil Velikov <momchil.velikov at arm.com>
Date: Fri, 17 May 2024 12:46:32 +0100
Subject: [PATCH 2/3] [AArch64] Lower extending sitofp using tbl
In a similar manner to https://reviews.llvm.org/D133494,
use `TBL` to place the source bytes in the *upper* part of the `i32`
elements and then convert to float using the fixed-point `scvtf`, i.e.

  scvtf Vd.4s, Vn.4s, #24
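
For illustration, a minimal sketch of the rewrite at the IR level,
assuming a <4 x i8> source for brevity (the tests below use <8 x i8>
and <16 x i8>; the function name is made up):

  define <4 x float> @sitofp_via_tbl(<4 x i8> %l) {
    %zeroed = insertelement <4 x i8> poison, i8 0, i64 0
    %shuf = shufflevector <4 x i8> %l, <4 x i8> %zeroed,
            <16 x i32> <i32 4, i32 4, i32 4, i32 0, i32 4, i32 4, i32 4, i32 1,
                        i32 4, i32 4, i32 4, i32 2, i32 4, i32 4, i32 4, i32 3>
    %cast = bitcast <16 x i8> %shuf to <4 x i32>
    %ashr = ashr <4 x i32> %cast, <i32 24, i32 24, i32 24, i32 24>
    %conv = sitofp <4 x i32> %ashr to <4 x float>
    ret <4 x float> %conv
  }

A DAG combine then folds the ashr + sitofp pair into the fixed-point
scvtf shown above.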
---
llvm/lib/CodeGen/CodeGenPrepare.cpp | 3 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 58 ++++++
llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll | 196 ++++++++++++++++++
3 files changed, 256 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 339a1f1f2f002..4c52dbfa23903 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -8333,7 +8333,8 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
return true;
- if ((isa<UIToFPInst>(I) || isa<FPToUIInst>(I) || isa<TruncInst>(I)) &&
+ if ((isa<UIToFPInst>(I) || isa<SIToFPInst>(I) || isa<FPToUIInst>(I) ||
+ isa<TruncInst>(I)) &&
TLI->optimizeExtendOrTruncateConversion(
I, LI->getLoopFor(I->getParent()), *TTI))
return true;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6db26a0973d9e..619616869b8b3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15738,6 +15738,24 @@ static Value *createTblShuffleForZExt(IRBuilderBase &Builder, Value *Op,
return Result;
}
+static Value *createTblShuffleForSExt(IRBuilderBase &Builder, Value *Op,
+ FixedVectorType *DstTy,
+ bool IsLittleEndian) {
+ auto *SrcTy = cast<FixedVectorType>(Op->getType());
+ auto SrcWidth = cast<IntegerType>(SrcTy->getElementType())->getBitWidth();
+ auto DstWidth = cast<IntegerType>(DstTy->getElementType())->getBitWidth();
+
+ SmallVector<int> Mask;
+ if (!createTblShuffleMask(SrcWidth, DstWidth, SrcTy->getNumElements(),
+ !IsLittleEndian, Mask))
+ return nullptr;
+
+ auto *FirstEltZero = Builder.CreateInsertElement(
+ PoisonValue::get(SrcTy), Builder.getInt8(0), uint64_t(0));
+
+ return Builder.CreateShuffleVector(Op, FirstEltZero, Mask);
+}
+
static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {
IRBuilder<> Builder(TI);
SmallVector<Value *> Parts;
@@ -15926,6 +15944,23 @@ bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
return true;
}
+ auto *SIToFP = dyn_cast<SIToFPInst>(I);
+ if (SIToFP && SrcTy->getElementType()->isIntegerTy(8) &&
+ DstTy->getElementType()->isFloatTy()) {
+ IRBuilder<> Builder(I);
+ auto *Shuffle = createTblShuffleForSExt(Builder, I->getOperand(0),
+ FixedVectorType::getInteger(DstTy),
+ Subtarget->isLittleEndian());
+ if (!Shuffle)
+ return false;
+ auto *Cast = Builder.CreateBitCast(Shuffle, VectorType::getInteger(DstTy));
+ auto *AShr = Builder.CreateAShr(Cast, 24);
+ auto *SI = Builder.CreateSIToFP(AShr, DstTy);
+ I->replaceAllUsesWith(SI);
+ I->eraseFromParent();
+ return true;
+ }
+
// Convert 'fptoui <(8|16) x float> to <(8|16) x i8>' to a wide fptoui
// followed by a truncate lowered to using tbl.4.
auto *FPToUI = dyn_cast<FPToUIInst>(I);
@@ -17852,6 +17887,26 @@ static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
return SDValue();
}
+static SDValue performVectorIntToFpCombine(SDNode *N, SelectionDAG &DAG) {
+ if (N->getOpcode() != ISD::SINT_TO_FP || N->getValueType(0) != MVT::v4f32)
+ return SDValue();
+
+ SDNode *VASHR = N->getOperand(0).getNode();
+ if (VASHR->getOpcode() != AArch64ISD::VASHR ||
+ VASHR->getValueType(0) != MVT::v4i32)
+ return SDValue();
+
+ if (!isa<ConstantSDNode>(VASHR->getOperand(1).getNode()) ||
+ VASHR->getConstantOperandVal(1) != 24)
+ return SDValue();
+
+ return SDValue(DAG.getMachineNode(
+ AArch64::SCVTFv4i32_shift, SDLoc(N), N->getValueType(0),
+ {VASHR->getOperand(0),
+ DAG.getTargetConstant(24, SDLoc(N), MVT::i32)}),
+ 0);
+}
+
static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) {
// First try to optimize away the conversion when it's conditionally from
@@ -17859,6 +17914,9 @@ static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
return Res;
+ if (SDValue Res = performVectorIntToFpCombine(N, DAG))
+ return Res;
+
EVT VT = N->getValueType(0);
if (VT != MVT::f32 && VT != MVT::f64)
return SDValue();
diff --git a/llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll b/llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll
new file mode 100644
index 0000000000000..dbf15ab9936f6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll
@@ -0,0 +1,196 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+
+target triple = "aarch64-linux"
+
+; CHECK-LABEL: .LCPI0_0:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 6
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 7
+; CHECK-NEXT: .LCPI0_1:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+
+define void @sitofp_v8i8_to_v8f32(ptr %src, ptr %dst) {
+; CHECK-LABEL: sitofp_v8i8_to_v8f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: adrp x8, .LCPI0_0
+; CHECK-NEXT: adrp x9, .LCPI0_1
+; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI0_1]
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: .LBB0_1: // %loop
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldr d2, [x0, x8, lsl #3]
+; CHECK-NEXT: add x9, x1, x8, lsl #5
+; CHECK-NEXT: add x8, x8, #1
+; CHECK-NEXT: cmp x8, #1000
+; CHECK-NEXT: tbl v3.16b, { v2.16b }, v0.16b
+; CHECK-NEXT: tbl v2.16b, { v2.16b }, v1.16b
+; CHECK-NEXT: scvtf v3.4s, v3.4s, #24
+; CHECK-NEXT: scvtf v2.4s, v2.4s, #24
+; CHECK-NEXT: stp q2, q3, [x9]
+; CHECK-NEXT: b.eq .LBB0_1
+; CHECK-NEXT: // %bb.2: // %exit
+; CHECK-NEXT: ret
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src = getelementptr inbounds <8 x i8>, ptr %src, i64 %iv
+ %l = load <8 x i8>, ptr %gep.src
+ %conv = sitofp <8 x i8> %l to <8 x float>
+ %gep.dst = getelementptr inbounds <8 x float>, ptr %dst, i64 %iv
+ store <8 x float> %conv, ptr %gep.dst
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 1000
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+; CHECK-LABEL: .LCPI1_0:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 12
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 13
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 14
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 15
+; CHECK-NEXT: .LCPI1_1:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 9
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 10
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 11
+; CHECK-NEXT: .LCPI1_2:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 6
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 7
+; CHECK-NEXT: .LCPI1_3:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 3
+
+define void @sitofp_v16i8_to_v16f32(ptr %src, ptr %dst) {
+; CHECK-LABEL: sitofp_v16i8_to_v16f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: adrp x8, .LCPI1_0
+; CHECK-NEXT: adrp x9, .LCPI1_1
+; CHECK-NEXT: adrp x10, .LCPI1_2
+; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT: adrp x8, .LCPI1_3
+; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI1_1]
+; CHECK-NEXT: ldr q2, [x10, :lo12:.LCPI1_2]
+; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI1_3]
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: .LBB1_1: // %loop
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldr q4, [x0, x8, lsl #4]
+; CHECK-NEXT: add x9, x1, x8, lsl #6
+; CHECK-NEXT: add x8, x8, #1
+; CHECK-NEXT: cmp x8, #1000
+; CHECK-NEXT: tbl v5.16b, { v4.16b }, v0.16b
+; CHECK-NEXT: tbl v6.16b, { v4.16b }, v1.16b
+; CHECK-NEXT: tbl v7.16b, { v4.16b }, v2.16b
+; CHECK-NEXT: tbl v4.16b, { v4.16b }, v3.16b
+; CHECK-NEXT: scvtf v5.4s, v5.4s, #24
+; CHECK-NEXT: scvtf v6.4s, v6.4s, #24
+; CHECK-NEXT: scvtf v7.4s, v7.4s, #24
+; CHECK-NEXT: scvtf v4.4s, v4.4s, #24
+; CHECK-NEXT: stp q6, q5, [x9, #32]
+; CHECK-NEXT: stp q4, q7, [x9]
+; CHECK-NEXT: b.eq .LBB1_1
+; CHECK-NEXT: // %bb.2: // %exit
+; CHECK-NEXT: ret
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src = getelementptr inbounds <16 x i8>, ptr %src, i64 %iv
+ %l = load <16 x i8>, ptr %gep.src
+ %conv = sitofp <16 x i8> %l to <16 x float>
+ %gep.dst = getelementptr inbounds <16 x float>, ptr %dst, i64 %iv
+ store <16 x float> %conv, ptr %gep.dst
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 1000
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
From 14741137e3117f00a0db2967058098af141b90d5 Mon Sep 17 00:00:00 2001
From: Momchil Velikov <momchil.velikov at arm.com>
Date: Fri, 17 May 2024 15:22:53 +0100
Subject: [PATCH 3/3] [fixup] No need to restrict the shift to exactly 24
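
Any constant shift amount in [1, 32] is a valid immediate for the
fixed-point scvtf. An illustrative case, not taken from the patch (the
function name is made up):

  define <4 x float> @scvtf_shift16(<4 x i32> %x) {
    %ashr = ashr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
    %conv = sitofp <4 x i32> %ashr to <4 x float>
    ret <4 x float> %conv
  }

should now also select scvtf v0.4s, v0.4s, #16. The two forms compute
the same value whenever the bits shifted out are zero, as they are for
the tbl-widened vectors above.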
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 619616869b8b3..5e65a4fa1e318 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17896,14 +17896,17 @@ static SDValue performVectorIntToFpCombine(SDNode *N, SelectionDAG &DAG) {
VASHR->getValueType(0) != MVT::v4i32)
return SDValue();
- if (!isa<ConstantSDNode>(VASHR->getOperand(1).getNode()) ||
- VASHR->getConstantOperandVal(1) != 24)
+ if (!isa<ConstantSDNode>(VASHR->getOperand(1).getNode()))
+ return SDValue();
+
+ uint64_t Shift = VASHR->getConstantOperandVal(1);
+ if (Shift < 1 || Shift > 32)
return SDValue();
return SDValue(DAG.getMachineNode(
AArch64::SCVTFv4i32_shift, SDLoc(N), N->getValueType(0),
{VASHR->getOperand(0),
- DAG.getTargetConstant(24, SDLoc(N), MVT::i32)}),
+ DAG.getTargetConstant(Shift, SDLoc(N), MVT::i32)}),
0);
}