[llvm] [AMDGPU][AsmParser] Introduce MC representation for lit() and lit64(). (PR #160316)
Ivan Kosarev via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 23 09:51:29 PDT 2025
https://github.com/kosarev updated https://github.com/llvm/llvm-project/pull/160316
From c40d91fb4a8597622d54bbc8dbc4e55cace3aa87 Mon Sep 17 00:00:00 2001
From: Ivan Kosarev <ivan.kosarev at amd.com>
Date: Tue, 23 Sep 2025 15:51:17 +0100
Subject: [PATCH] [AMDGPU][AsmParser] Introduce MC representation for lit() and
lit64().
Also rework the lit64() support to use it.

The rules for when to add lit64() could be simplified and improved. In
this change, however, we just follow the existing conventions on the
assembler and disassembler sides.

In codegen we do not (and normally should not need to) add explicit
lit() and lit64() modifiers, so the codegen tests lose them. Otherwise
the change is NFCI.

This also simplifies operand printing.
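
To sketch how the new representation fits together (illustrative only,
built from the helpers this patch introduces; Value, Ctx and Op stand in
for whatever the caller has in scope): a 64-bit literal that needs an
explicit modifier is now carried as a target MCExpr wrapping the
constant, instead of the printer re-deriving the decision from the
instruction descriptor:

  // Producer side (parser/disassembler): wrap the literal value.
  MCOperand Op = MCOperand::createExpr(
      AMDGPUMCExpr::createLit(LitModifier::Lit64, Value, Ctx));

  // Consumer side (encoder/validators): unwrap it again.
  if (Op.isExpr() && AMDGPU::isLitExpr(Op.getExpr())) {
    int64_t Imm = AMDGPU::getLitValue(Op.getExpr());
    // ... treat Imm as the literal value ...
  }

The expression prints itself as lit64(0x...), which is why the printer
no longer needs the MCInstrDesc to decide whether to emit the modifier.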
---
.../AMDGPU/AsmParser/AMDGPUAsmParser.cpp | 89 +++++++++++++----
.../Disassembler/AMDGPUDisassembler.cpp | 97 ++++++++++++++-----
.../AMDGPU/Disassembler/AMDGPUDisassembler.h | 15 +--
.../AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp | 42 ++++----
.../AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h | 7 +-
.../MCTargetDesc/AMDGPUMCCodeEmitter.cpp | 4 +-
.../AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp | 33 ++++++-
.../Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.h | 14 ++-
llvm/test/CodeGen/AMDGPU/add_u64.ll | 2 +-
.../test/CodeGen/AMDGPU/carryout-selection.ll | 12 +--
.../test/CodeGen/AMDGPU/code-size-estimate.ll | 4 +-
llvm/test/CodeGen/AMDGPU/ds_write2.ll | 4 +-
.../test/CodeGen/AMDGPU/flat-saddr-atomics.ll | 52 +++++-----
.../CodeGen/AMDGPU/global-atomicrmw-fadd.ll | 16 +--
llvm/test/CodeGen/AMDGPU/literal64.ll | 48 ++++-----
llvm/test/CodeGen/AMDGPU/llvm.prefetch.ll | 4 +-
llvm/test/CodeGen/AMDGPU/mad_64_32.ll | 2 +-
llvm/test/CodeGen/AMDGPU/mul.ll | 2 +-
llvm/test/CodeGen/AMDGPU/packed-fp32.ll | 20 ++--
llvm/test/CodeGen/AMDGPU/sub_u64.ll | 2 +-
20 files changed, 304 insertions(+), 165 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 56f79c2d67d3f..21dfdfd6bed04 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -55,8 +55,6 @@ class AMDGPUAsmParser;
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_AGPR, IS_TTMP, IS_SPECIAL };
-enum class LitModifier { None, Lit, Lit64 };
-
//===----------------------------------------------------------------------===//
// Operand
//===----------------------------------------------------------------------===//
@@ -1591,10 +1589,14 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
return static_cast<AMDGPUTargetStreamer &>(TS);
}
- const MCRegisterInfo *getMRI() const {
+ MCContext &getContext() const {
// We need this const_cast because for some reason getContext() is not const
// in MCAsmParser.
- return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
+ return const_cast<AMDGPUAsmParser *>(this)->MCTargetAsmParser::getContext();
+ }
+
+ const MCRegisterInfo *getMRI() const {
+ return getContext().getRegisterInfo();
}
const MCInstrInfo *getMII() const {
@@ -2313,6 +2315,11 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
APInt Literal(64, Val);
uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;
+ bool CanUse64BitLiterals =
+ AsmParser->has64BitLiterals() &&
+ !(InstDesc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));
+ MCContext &Ctx = AsmParser->getContext();
+
if (Imm.IsFPImm) { // We got fp literal token
switch (OpTy) {
case AMDGPU::OPERAND_REG_IMM_INT64:
@@ -2342,7 +2349,15 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
Val &= 0xffffffff00000000u;
}
- Inst.addOperand(MCOperand::createImm(Val));
+ if ((OpTy == AMDGPU::OPERAND_REG_IMM_FP64 ||
+ OpTy == AMDGPU::OPERAND_REG_INLINE_C_FP64 ||
+ OpTy == AMDGPU::OPERAND_REG_INLINE_AC_FP64) &&
+ CanUse64BitLiterals && Lo_32(Val) != 0) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
}
@@ -2352,7 +2367,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
llvm_unreachable("fp literal in 64-bit integer instruction.");
case AMDGPU::OPERAND_KIMM64:
- Inst.addOperand(MCOperand::createImm(Val));
+ if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
case AMDGPU::OPERAND_REG_IMM_BF16:
@@ -2442,7 +2462,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
getModifiers().Lit == LitModifier::Lit)
Val = Lo_32(Val);
- Inst.addOperand(MCOperand::createImm(Val));
+ if (CanUse64BitLiterals && (!isInt<32>(Val) || !isUInt<32>(Val))) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
case AMDGPU::OPERAND_REG_IMM_FP64:
@@ -2469,7 +2494,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
Val = static_cast<uint64_t>(Val) << 32;
}
- Inst.addOperand(MCOperand::createImm(Val));
+ if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
case AMDGPU::OPERAND_REG_IMM_INT16:
@@ -2491,7 +2521,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
getModifiers().Lit != LitModifier::Lit64)
Val <<= 32;
- Inst.addOperand(MCOperand::createImm(Val));
+ if (CanUse64BitLiterals && Lo_32(Val) != 0) {
+ Inst.addOperand(MCOperand::createExpr(
+ AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx)));
+ } else {
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
return;
default:
@@ -3640,7 +3675,7 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
const MCOperand &MO = Inst.getOperand(OpIdx);
- int64_t Val = MO.getImm();
+ int64_t Val = MO.isImm() ? MO.getImm() : getLitValue(MO.getExpr());
auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
switch (OpSize) { // expected operand size
@@ -4768,16 +4803,26 @@ bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst,
const MCOperand &MO = Inst.getOperand(OpIdx);
// Exclude special imm operands (like that used by s_set_gpr_idx_on)
if (AMDGPU::isSISrcOperand(Desc, OpIdx)) {
- if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
+ std::optional<int64_t> Imm;
+ if (MO.isImm()) {
+ Imm = MO.getImm();
+ } else if (MO.isExpr()) {
+ if (isLitExpr(MO.getExpr()))
+ Imm = getLitValue(MO.getExpr());
+ } else {
+ continue;
+ }
+
+ if (!Imm.has_value()) {
+ ++NumExprs;
+ } else if (!isInlineConstant(Inst, OpIdx)) {
auto OpType = static_cast<AMDGPU::OperandType>(
Desc.operands()[OpIdx].OperandType);
- int64_t Value = encode32BitLiteral(MO.getImm(), OpType);
+ int64_t Value = encode32BitLiteral(*Imm, OpType);
if (NumLiterals == 0 || LiteralValue != Value) {
LiteralValue = Value;
++NumLiterals;
}
- } else if (MO.isExpr()) {
- ++NumExprs;
}
}
}
@@ -5010,9 +5055,18 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
if (!isSISrcOperand(Desc, OpIdx))
continue;
+ std::optional<int64_t> Imm;
+ if (MO.isImm())
+ Imm = MO.getImm();
+ else if (MO.isExpr() && isLitExpr(MO.getExpr()))
+ Imm = getLitValue(MO.getExpr());
+
bool IsAnotherLiteral = false;
- if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
- uint64_t Value = static_cast<uint64_t>(MO.getImm());
+ if (!Imm.has_value()) {
+      // Literal value not known, so we conservatively assume it's different.
+ IsAnotherLiteral = true;
+ } else if (!isInlineConstant(Inst, OpIdx)) {
+ uint64_t Value = *Imm;
bool IsForcedFP64 =
Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_KIMM64 ||
(Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_REG_IMM_FP64 &&
@@ -5033,9 +5087,6 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
IsAnotherLiteral = !LiteralValue || *LiteralValue != Value;
LiteralValue = Value;
- } else if (MO.isExpr()) {
- // Literal value not known, so we conservately assume it's different.
- IsAnotherLiteral = true;
}
if (IsAnotherLiteral && !HasMandatoryLiteral &&
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index d3db1b7394675..2d5ae29c1037c 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -17,6 +17,7 @@
// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
#include "Disassembler/AMDGPUDisassembler.h"
+#include "MCTargetDesc/AMDGPUMCExpr.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIRegisterInfo.h"
@@ -123,14 +124,14 @@ static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm, uint64_t Addr,
static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, uint64_t Addr,
const MCDisassembler *Decoder) {
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeBoolReg(Val));
+ return addOperand(Inst, DAsm->decodeBoolReg(Inst, Val));
}
static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val,
uint64_t Addr,
const MCDisassembler *Decoder) {
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeSplitBarrier(Val));
+ return addOperand(Inst, DAsm->decodeSplitBarrier(Inst, Val));
}
static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
@@ -164,7 +165,7 @@ static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
const MCDisassembler *Decoder) { \
assert(Imm < (1 << EncSize) && #EncSize "-bit encoding"); \
auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder); \
- return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm)); \
+ return addOperand(Inst, DAsm->decodeSrcOp(Inst, OpWidth, EncImm)); \
}
static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
@@ -172,7 +173,7 @@ static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
const MCDisassembler *Decoder) {
assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!");
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm));
+ return addOperand(Inst, DAsm->decodeSrcOp(Inst, OpWidth, EncImm));
}
// Decoder for registers. Imm(7-bit) is number of register, uses decodeSrcOp to
@@ -317,7 +318,7 @@ static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
unsigned RegIdx = Imm & 0x7f;
return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
- return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
+ return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(Inst, OpWidth, Imm & 0xFF));
}
template <unsigned OpWidth>
@@ -332,7 +333,7 @@ static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
unsigned RegIdx = Imm & 0xff;
return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
- return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
+ return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(Inst, OpWidth, Imm & 0xFF));
}
static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm,
@@ -371,7 +372,7 @@ static DecodeStatus decodeOperandVOPDDstY(MCInst &Inst, unsigned Val,
static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm, unsigned Opw,
const MCDisassembler *Decoder) {
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
+ return addOperand(Inst, DAsm->decodeSrcOp(Inst, Opw, Imm | 256));
}
template <unsigned Opw>
@@ -386,7 +387,7 @@ static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm,
const MCDisassembler *Decoder) {
assert(Imm < (1 << 9) && "9-bit encoding");
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- return addOperand(Inst, DAsm->decodeSrcOp(64, Imm));
+ return addOperand(Inst, DAsm->decodeSrcOp(Inst, 64, Imm));
}
#define DECODE_SDWA(DecName) \
@@ -510,8 +511,8 @@ void AMDGPUDisassembler::decodeImmOperands(MCInst &MI,
}
if (Imm == AMDGPU::EncValues::LITERAL_CONST) {
- Op = decodeLiteralConstant(OpDesc.OperandType ==
- AMDGPU::OPERAND_REG_IMM_FP64);
+ Op = decodeLiteralConstant(
+ Desc, OpDesc, OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_FP64);
continue;
}
@@ -1543,10 +1544,16 @@ AMDGPUDisassembler::decodeMandatoryLiteral64Constant(uint64_t Val) const {
}
HasLiteral = true;
Literal = Literal64 = Val;
- return MCOperand::createImm(Literal64);
+
+ bool UseLit64 = Lo_32(Literal64) != 0;
+ return UseLit64 ? MCOperand::createExpr(AMDGPUMCExpr::createLit(
+ LitModifier::Lit64, Literal64, getContext()))
+ : MCOperand::createImm(Literal64);
}
-MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
+MCOperand AMDGPUDisassembler::decodeLiteralConstant(const MCInstrDesc &Desc,
+ const MCOperandInfo &OpDesc,
+ bool ExtendFP64) const {
// For now all literal constants are supposed to be unsigned integer
// ToDo: deal with signed/unsigned 64-bit integer constants
// ToDo: deal with float/double constants
@@ -1560,10 +1567,31 @@ MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
if (ExtendFP64)
Literal64 <<= 32;
}
- return MCOperand::createImm(ExtendFP64 ? Literal64 : Literal);
+
+ int64_t Val = ExtendFP64 ? Literal64 : Literal;
+
+ bool CanUse64BitLiterals =
+ STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
+ !(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));
+
+ bool UseLit64 = false;
+ if (CanUse64BitLiterals) {
+ if (OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_INT64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT64)
+ UseLit64 = !isInt<32>(Val) || !isUInt<32>(Val);
+ else if (OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_FP64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_AC_FP64)
+ UseLit64 = Lo_32(Val) != 0;
+ }
+
+ return UseLit64 ? MCOperand::createExpr(AMDGPUMCExpr::createLit(
+ LitModifier::Lit64, Val, getContext()))
+ : MCOperand::createImm(Val);
}
-MCOperand AMDGPUDisassembler::decodeLiteral64Constant() const {
+MCOperand
+AMDGPUDisassembler::decodeLiteral64Constant(const MCInst &Inst) const {
assert(STI.hasFeature(AMDGPU::Feature64BitLiterals));
if (!HasLiteral) {
@@ -1574,7 +1602,23 @@ MCOperand AMDGPUDisassembler::decodeLiteral64Constant() const {
HasLiteral = true;
Literal64 = eatBytes<uint64_t>(Bytes);
}
- return MCOperand::createImm(Literal64);
+
+ bool UseLit64 = false;
+ const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
+ const MCOperandInfo &OpDesc = Desc.operands()[Inst.getNumOperands()];
+ if (OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_INT64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT64) {
+ UseLit64 = !isInt<32>(Literal64) || !isUInt<32>(Literal64);
+ } else {
+ assert(OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_FP64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP64 ||
+ OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_AC_FP64);
+ UseLit64 = Lo_32(Literal64) != 0;
+ }
+
+ return UseLit64 ? MCOperand::createExpr(AMDGPUMCExpr::createLit(
+ LitModifier::Lit64, Literal64, getContext()))
+ : MCOperand::createImm(Literal64);
}
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
@@ -1822,7 +1866,8 @@ int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}
-MCOperand AMDGPUDisassembler::decodeSrcOp(unsigned Width, unsigned Val) const {
+MCOperand AMDGPUDisassembler::decodeSrcOp(const MCInst &Inst, unsigned Width,
+ unsigned Val) const {
using namespace AMDGPU::EncValues;
assert(Val < 1024); // enum10
@@ -1834,10 +1879,11 @@ MCOperand AMDGPUDisassembler::decodeSrcOp(unsigned Width, unsigned Val) const {
return createRegOperand(IsAGPR ? getAgprClassId(Width)
: getVgprClassId(Width), Val - VGPR_MIN);
}
- return decodeNonVGPRSrcOp(Width, Val & 0xFF);
+ return decodeNonVGPRSrcOp(Inst, Width, Val & 0xFF);
}
-MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(unsigned Width,
+MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(const MCInst &Inst,
+ unsigned Width,
unsigned Val) const {
// Cases when Val{8} is 1 (vgpr, agpr or true 16 vgpr) should have been
// decoded earlier.
@@ -1861,7 +1907,7 @@ MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(unsigned Width,
return MCOperand::createImm(Val);
if (Val == LITERAL64_CONST && STI.hasFeature(AMDGPU::Feature64BitLiterals)) {
- return decodeLiteral64Constant();
+ return decodeLiteral64Constant(Inst);
}
switch (Width) {
@@ -2053,13 +2099,16 @@ MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
return createRegOperand(IsWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC);
}
-MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
- return STI.hasFeature(AMDGPU::FeatureWavefrontSize32) ? decodeSrcOp(32, Val)
- : decodeSrcOp(64, Val);
+MCOperand AMDGPUDisassembler::decodeBoolReg(const MCInst &Inst,
+ unsigned Val) const {
+ return STI.hasFeature(AMDGPU::FeatureWavefrontSize32)
+ ? decodeSrcOp(Inst, 32, Val)
+ : decodeSrcOp(Inst, 64, Val);
}
-MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const {
- return decodeSrcOp(32, Val);
+MCOperand AMDGPUDisassembler::decodeSplitBarrier(const MCInst &Inst,
+ unsigned Val) const {
+ return decodeSrcOp(Inst, 32, Val);
}
MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const {
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
index c1131c2936fc7..935c3836f2ed9 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
@@ -142,12 +142,15 @@ class AMDGPUDisassembler : public MCDisassembler {
MCOperand decodeMandatoryLiteralConstant(unsigned Imm) const;
MCOperand decodeMandatoryLiteral64Constant(uint64_t Imm) const;
- MCOperand decodeLiteralConstant(bool ExtendFP64) const;
- MCOperand decodeLiteral64Constant() const;
+ MCOperand decodeLiteralConstant(const MCInstrDesc &Desc,
+ const MCOperandInfo &OpDesc,
+ bool ExtendFP64) const;
+ MCOperand decodeLiteral64Constant(const MCInst &Inst) const;
- MCOperand decodeSrcOp(unsigned Width, unsigned Val) const;
+ MCOperand decodeSrcOp(const MCInst &Inst, unsigned Width, unsigned Val) const;
- MCOperand decodeNonVGPRSrcOp(unsigned Width, unsigned Val) const;
+ MCOperand decodeNonVGPRSrcOp(const MCInst &Inst, unsigned Width,
+ unsigned Val) const;
MCOperand decodeVOPDDstYOp(MCInst &Inst, unsigned Val) const;
MCOperand decodeSpecialReg32(unsigned Val) const;
@@ -159,8 +162,8 @@ class AMDGPUDisassembler : public MCDisassembler {
MCOperand decodeSDWASrc32(unsigned Val) const;
MCOperand decodeSDWAVopcDst(unsigned Val) const;
- MCOperand decodeBoolReg(unsigned Val) const;
- MCOperand decodeSplitBarrier(unsigned Val) const;
+ MCOperand decodeBoolReg(const MCInst &Inst, unsigned Val) const;
+ MCOperand decodeSplitBarrier(const MCInst &Inst, unsigned Val) const;
MCOperand decodeDpp8FI(unsigned Val) const;
MCOperand decodeVersionImm(unsigned Imm) const;
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index f098e7a3c6c67..ddf6370265de3 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -80,9 +80,13 @@ void AMDGPUInstPrinter::printFP64ImmOperand(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
// KIMM64
- const MCInstrDesc &Desc = MII.get(MI->getOpcode());
- uint64_t Imm = MI->getOperand(OpNo).getImm();
- printLiteral64(Desc, Imm, STI, O, /*IsFP=*/true);
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isExpr()) {
+ MAI.printExpr(O, *Op.getExpr());
+ return;
+ }
+
+ printLiteral64(Op.getImm(), O, /*IsFP=*/true);
}
void AMDGPUInstPrinter::printNamedBit(const MCInst *MI, unsigned OpNo,
@@ -652,7 +656,7 @@ void AMDGPUInstPrinter::printImmediate32(uint32_t Imm,
O << formatHex(static_cast<uint64_t>(Imm));
}
-void AMDGPUInstPrinter::printImmediate64(const MCInstrDesc &Desc, uint64_t Imm,
+void AMDGPUInstPrinter::printImmediate64(uint64_t Imm,
const MCSubtargetInfo &STI,
raw_ostream &O, bool IsFP) {
int64_t SImm = static_cast<int64_t>(Imm);
@@ -683,27 +687,15 @@ void AMDGPUInstPrinter::printImmediate64(const MCInstrDesc &Desc, uint64_t Imm,
STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
O << "0.15915494309189532";
else
- printLiteral64(Desc, Imm, STI, O, IsFP);
+ printLiteral64(Imm, O, IsFP);
}
-void AMDGPUInstPrinter::printLiteral64(const MCInstrDesc &Desc, uint64_t Imm,
- const MCSubtargetInfo &STI,
- raw_ostream &O, bool IsFP) {
- // This part needs to align with AMDGPUOperand::addLiteralImmOperand.
- bool CanUse64BitLiterals =
- STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
- !(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));
- if (IsFP) {
- if (CanUse64BitLiterals && Lo_32(Imm))
- O << "lit64(" << formatHex(static_cast<uint64_t>(Imm)) << ')';
- else
- O << formatHex(static_cast<uint64_t>(Hi_32(Imm)));
- } else {
- if (CanUse64BitLiterals && (!isInt<32>(Imm) || !isUInt<32>(Imm)))
- O << "lit64(" << formatHex(static_cast<uint64_t>(Imm)) << ')';
- else
- O << formatHex(static_cast<uint64_t>(Imm));
- }
+void AMDGPUInstPrinter::printLiteral64(uint64_t Imm, raw_ostream &O,
+ bool IsFP) {
+ if (IsFP && Lo_32(Imm) == 0)
+ O << formatHex(static_cast<uint64_t>(Hi_32(Imm)));
+ else
+ O << formatHex(Imm);
}
void AMDGPUInstPrinter::printBLGP(const MCInst *MI, unsigned OpNo,
@@ -814,12 +806,12 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
break;
case AMDGPU::OPERAND_REG_IMM_INT64:
case AMDGPU::OPERAND_REG_INLINE_C_INT64:
- printImmediate64(Desc, Op.getImm(), STI, O, false);
+ printImmediate64(Op.getImm(), STI, O, false);
break;
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_FP64:
case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
- printImmediate64(Desc, Op.getImm(), STI, O, true);
+ printImmediate64(Op.getImm(), STI, O, true);
break;
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
case AMDGPU::OPERAND_REG_IMM_INT16:
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h
index 21cc2f229de91..b27295e73ec99 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h
@@ -89,10 +89,9 @@ class AMDGPUInstPrinter : public MCInstPrinter {
raw_ostream &O);
void printImmediate32(uint32_t Imm, const MCSubtargetInfo &STI,
raw_ostream &O);
- void printImmediate64(const MCInstrDesc &Desc, uint64_t Imm,
- const MCSubtargetInfo &STI, raw_ostream &O, bool IsFP);
- void printLiteral64(const MCInstrDesc &Desc, uint64_t Imm,
- const MCSubtargetInfo &STI, raw_ostream &O, bool IsFP);
+ void printImmediate64(uint64_t Imm, const MCSubtargetInfo &STI,
+ raw_ostream &O, bool IsFP);
+ void printLiteral64(uint64_t Imm, raw_ostream &O, bool IsFP);
void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
void printRegularOperand(const MCInst *MI, unsigned OpNo,
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
index bf212bbca934c..f287911654c24 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
@@ -345,7 +345,7 @@ std::optional<uint64_t> AMDGPUMCCodeEmitter::getLitEncoding(
case AMDGPU::OPERAND_KIMM32:
case AMDGPU::OPERAND_KIMM16:
case AMDGPU::OPERAND_KIMM64:
- return MO.getImm();
+ return Imm;
default:
llvm_unreachable("invalid operand size");
}
@@ -457,6 +457,8 @@ void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
else if (Op.isExpr()) {
if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
Imm = C->getValue();
+ else if (AMDGPU::isLitExpr(Op.getExpr()))
+ Imm = AMDGPU::getLitValue(Op.getExpr());
} else // Exprs will be replaced with a fixup value.
llvm_unreachable("Must be immediate or expr");
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp
index 6638fa2f687d8..205d1c8c402a5 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp
@@ -75,6 +75,12 @@ void AMDGPUMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
case AGVK_Occupancy:
OS << "occupancy(";
break;
+ case AGVK_Lit:
+ OS << "lit(";
+ break;
+ case AGVK_Lit64:
+ OS << "lit64(";
+ break;
}
for (const auto *It = Args.begin(); It != Args.end(); ++It) {
MAI->printExpr(OS, **It);
@@ -259,6 +265,9 @@ bool AMDGPUMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
return evaluateTotalNumVGPR(Res, Asm);
case AGVK_Occupancy:
return evaluateOccupancy(Res, Asm);
+ case AGVK_Lit:
+ case AGVK_Lit64:
+ return Args[0]->evaluateAsRelocatable(Res, Asm);
}
for (const MCExpr *Arg : Args) {
@@ -332,6 +341,14 @@ const AMDGPUMCExpr *AMDGPUMCExpr::createOccupancy(
Ctx);
}
+const AMDGPUMCExpr *AMDGPUMCExpr::createLit(LitModifier Lit, int64_t Value,
+ MCContext &Ctx) {
+  assert(Lit == LitModifier::Lit || Lit == LitModifier::Lit64);
+ return create(Lit == LitModifier::Lit ? VariantKind::AGVK_Lit
+ : VariantKind::AGVK_Lit64,
+ {MCConstantExpr::create(Value, Ctx, /*PrintInHex=*/true)}, Ctx);
+}
+
static KnownBits fromOptionalToKnownBits(std::optional<bool> CompareResult) {
static constexpr unsigned BitWidth = 64;
const APInt True(BitWidth, 1);
@@ -513,7 +530,9 @@ static void targetOpKnownBitsMapHelper(const MCExpr *Expr, KnownBitsMap &KBM,
case AMDGPUMCExpr::VariantKind::AGVK_ExtraSGPRs:
case AMDGPUMCExpr::VariantKind::AGVK_TotalNumVGPRs:
case AMDGPUMCExpr::VariantKind::AGVK_AlignTo:
- case AMDGPUMCExpr::VariantKind::AGVK_Occupancy: {
+ case AMDGPUMCExpr::VariantKind::AGVK_Occupancy:
+ case AMDGPUMCExpr::VariantKind::AGVK_Lit:
+ case AMDGPUMCExpr::VariantKind::AGVK_Lit64: {
int64_t Val;
if (AGVK->evaluateAsAbsolute(Val)) {
APInt APValue(BitWidth, Val);
@@ -709,3 +728,15 @@ void llvm::AMDGPU::printAMDGPUMCExpr(const MCExpr *Expr, raw_ostream &OS,
MAI->printExpr(OS, *Expr);
}
+
+bool AMDGPU::isLitExpr(const MCExpr *Expr) {
+ const auto *E = dyn_cast<AMDGPUMCExpr>(Expr);
+ return E && (E->getKind() == AMDGPUMCExpr::AGVK_Lit ||
+ E->getKind() == AMDGPUMCExpr::AGVK_Lit64);
+}
+
+int64_t AMDGPU::getLitValue(const MCExpr *Expr) {
+ assert(isLitExpr(Expr));
+ return cast<MCConstantExpr>(cast<AMDGPUMCExpr>(Expr)->getArgs()[0])
+ ->getValue();
+}
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.h
index bc6fdf7f2e4cd..54fcd2af49ecd 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.h
@@ -17,6 +17,8 @@ namespace llvm {
class Function;
class GCNSubtarget;
+enum class LitModifier { None, Lit, Lit64 };
+
/// AMDGPU target specific MCExpr operations.
///
/// Takes in a minimum of 1 argument to be used with an operation. The supported
@@ -36,7 +38,9 @@ class AMDGPUMCExpr : public MCTargetExpr {
AGVK_ExtraSGPRs,
AGVK_TotalNumVGPRs,
AGVK_AlignTo,
- AGVK_Occupancy
+ AGVK_Occupancy,
+ AGVK_Lit,
+ AGVK_Lit64,
};
// Relocation specifiers.
@@ -99,6 +103,9 @@ class AMDGPUMCExpr : public MCTargetExpr {
const MCExpr *NumVGPRs, unsigned DynamicVGPRBlockSize,
const GCNSubtarget &STM, MCContext &Ctx);
+ static const AMDGPUMCExpr *createLit(LitModifier Lit, int64_t Value,
+ MCContext &Ctx);
+
ArrayRef<const MCExpr *> getArgs() const { return Args; }
VariantKind getKind() const { return Kind; }
const MCExpr *getSubExpr(size_t Index) const;
@@ -129,6 +136,11 @@ const MCExpr *foldAMDGPUMCExpr(const MCExpr *Expr, MCContext &Ctx);
static inline AMDGPUMCExpr::Specifier getSpecifier(const MCSymbolRefExpr *SRE) {
return AMDGPUMCExpr::Specifier(SRE->getKind());
}
+
+LLVM_READONLY bool isLitExpr(const MCExpr *Expr);
+
+LLVM_READONLY int64_t getLitValue(const MCExpr *Expr);
+
} // end namespace AMDGPU
} // end namespace llvm
diff --git a/llvm/test/CodeGen/AMDGPU/add_u64.ll b/llvm/test/CodeGen/AMDGPU/add_u64.ll
index 0373027201378..22acedc4d6e25 100644
--- a/llvm/test/CodeGen/AMDGPU/add_u64.ll
+++ b/llvm/test/CodeGen/AMDGPU/add_u64.ll
@@ -109,7 +109,7 @@ define amdgpu_ps <2 x float> @test_add_u64_v_64bit_imm(i64 %a) {
;
; GFX1250-LABEL: test_add_u64_v_64bit_imm:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], lit64(0x13b9ac9ff), v[0:1]
+; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], 0x13b9ac9ff, v[0:1]
; GFX1250-NEXT: ; return to shader part epilog
%add = add i64 %a, 5294967295
%ret = bitcast i64 %add to <2 x float>
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index b71885b54b5a2..9b5161961da6c 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -231,7 +231,7 @@ define amdgpu_kernel void @sadd64ri(ptr addrspace(1) %out, i64 %a) {
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], lit64(0x123456789876)
+; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], 0x123456789876
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -434,7 +434,7 @@ define amdgpu_kernel void @vadd64ri(ptr addrspace(1) %out) {
; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], lit64(0x123456789876), v[0:1]
+; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], 0x123456789876, v[0:1]
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: global_store_b64 v1, v[2:3], s[0:1]
; GFX1250-NEXT: s_endpgm
@@ -1210,7 +1210,7 @@ define amdgpu_kernel void @ssub64ri(ptr addrspace(1) %out, i64 %a) {
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_sub_nc_u64 s[2:3], lit64(0x123456789876), s[2:3]
+; GFX1250-NEXT: s_sub_nc_u64 s[2:3], 0x123456789876, s[2:3]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1413,7 +1413,7 @@ define amdgpu_kernel void @vsub64ri(ptr addrspace(1) %out) {
; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_sub_nc_u64_e32 v[2:3], lit64(0x123456789876), v[0:1]
+; GFX1250-NEXT: v_sub_nc_u64_e32 v[2:3], 0x123456789876, v[0:1]
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: global_store_b64 v1, v[2:3], s[0:1]
; GFX1250-NEXT: s_endpgm
@@ -3124,7 +3124,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_b64 s[0:1], s[0:1], lit64(0xffffffff00000000)
+; GFX1250-NEXT: s_and_b64 s[0:1], s[0:1], 0xffffffff00000000
; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-NEXT: s_cbranch_scc0 .LBB16_4
; GFX1250-NEXT: ; %bb.1:
@@ -3195,7 +3195,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_add_co_ci_u32 s7, s12, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_add_nc_u64 s[4:5], s[0:1], s[6:7]
-; GFX1250-NEXT: s_and_b64 s[6:7], s[4:5], lit64(0xffffffff00000000)
+; GFX1250-NEXT: s_and_b64 s[6:7], s[4:5], 0xffffffff00000000
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_or_b32 s6, s6, s4
; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], s[6:7]
diff --git a/llvm/test/CodeGen/AMDGPU/code-size-estimate.ll b/llvm/test/CodeGen/AMDGPU/code-size-estimate.ll
index f9fae025e0bf8..79b44d6a92caa 100644
--- a/llvm/test/CodeGen/AMDGPU/code-size-estimate.ll
+++ b/llvm/test/CodeGen/AMDGPU/code-size-estimate.ll
@@ -686,7 +686,7 @@ define double @v_mul_f64_vop2_literal_64(double %x) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
-; GFX1250-NEXT: v_mul_f64_e32 v[0:1], lit64(0x405ec66666666666), v[0:1] ; encoding: [0xfe,0x00,0x00,0x0c,0x66,0x66,0x66,0x66,0x66,0xc6,0x5e,0x40]
+; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 0x405ec66666666666, v[0:1] ; encoding: [0xfe,0x00,0x00,0x0c,0x66,0x66,0x66,0x66,0x66,0xc6,0x5e,0x40]
; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
%mul = fmul double %x, 123.1
ret double %mul
@@ -788,7 +788,7 @@ define i64 @v_add_u64_vop2_literal_64(i64 %x) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
-; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], lit64(0x112345678), v[0:1] ; encoding: [0xfe,0x00,0x00,0x50,0x78,0x56,0x34,0x12,0x01,0x00,0x00,0x00]
+; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], 0x112345678, v[0:1] ; encoding: [0xfe,0x00,0x00,0x50,0x78,0x56,0x34,0x12,0x01,0x00,0x00,0x00]
; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
%add = add i64 %x, 4600387192
ret i64 %add
diff --git a/llvm/test/CodeGen/AMDGPU/ds_write2.ll b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
index f82bb59eb7906..be60a00145c8a 100644
--- a/llvm/test/CodeGen/AMDGPU/ds_write2.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
@@ -1012,7 +1012,7 @@ define amdgpu_kernel void @store_constant_adjacent_offsets() {
;
; GFX1250-LABEL: store_constant_adjacent_offsets:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], lit64(0x7b0000007b)
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 0x7b0000007b
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: ds_store_b64 v2, v[0:1]
; GFX1250-NEXT: s_endpgm
@@ -1350,7 +1350,7 @@ define amdgpu_kernel void @write2_v2i32_align1_odd_offset() {
;
; GFX1250-LABEL: write2_v2i32_align1_odd_offset:
; GFX1250: ; %bb.0: ; %entry
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], lit64(0x1c80000007b)
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 0x1c80000007b
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: ds_store_b64 v2, v[0:1] offset:65
; GFX1250-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
index 605026614c614..1e7855ccb3642 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -723,7 +723,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -1065,7 +1065,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -1586,7 +1586,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -1946,7 +1946,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -2483,7 +2483,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -2847,7 +2847,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -3386,7 +3386,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -3750,7 +3750,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -4289,7 +4289,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -4653,7 +4653,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -5192,7 +5192,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -5556,7 +5556,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -6057,7 +6057,7 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -6405,7 +6405,7 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -6898,7 +6898,7 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -7246,7 +7246,7 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -7739,7 +7739,7 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -8087,7 +8087,7 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -8580,7 +8580,7 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -8928,7 +8928,7 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -9480,7 +9480,7 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
@@ -9864,7 +9864,7 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
@@ -10382,7 +10382,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -10750,7 +10750,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
@@ -11264,7 +11264,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
@@ -11644,7 +11644,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80)
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
index b67a1c513c49f..05403f008276c 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
@@ -9818,7 +9818,7 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_gra
; GFX1250-TRUE16: ; %bb.0:
; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800)
+; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800
; GFX1250-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-TRUE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
; GFX1250-TRUE16-NEXT: s_mov_b32 s0, 0
@@ -9861,7 +9861,7 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_gra
; GFX1250-FAKE16: ; %bb.0:
; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800)
+; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800
; GFX1250-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-FAKE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
; GFX1250-FAKE16-NEXT: s_mov_b32 s0, 0
@@ -11339,7 +11339,7 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_g
; GFX1250-TRUE16: ; %bb.0:
; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800)
+; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800
; GFX1250-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-TRUE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
; GFX1250-TRUE16-NEXT: s_mov_b32 s0, 0
@@ -11382,7 +11382,7 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_g
; GFX1250-FAKE16: ; %bb.0:
; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800)
+; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800
; GFX1250-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-FAKE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
; GFX1250-FAKE16-NEXT: s_mov_b32 s0, 0
@@ -14855,7 +14855,7 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX1250-TRUE16: ; %bb.0:
; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800)
+; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800
; GFX1250-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-TRUE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
; GFX1250-TRUE16-NEXT: s_mov_b32 s0, 0
@@ -14905,7 +14905,7 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX1250-FAKE16: ; %bb.0:
; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800)
+; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800
; GFX1250-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX1250-FAKE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
; GFX1250-FAKE16-NEXT: s_mov_b32 s0, 0
@@ -16648,7 +16648,7 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX1250-TRUE16: ; %bb.0:
; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800)
+; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800
; GFX1250-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1250-TRUE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
; GFX1250-TRUE16-NEXT: s_mov_b32 s0, 0
@@ -16697,7 +16697,7 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_
; GFX1250-FAKE16: ; %bb.0:
; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800)
+; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800
; GFX1250-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
; GFX1250-FAKE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
; GFX1250-FAKE16-NEXT: s_mov_b32 s0, 0
diff --git a/llvm/test/CodeGen/AMDGPU/literal64.ll b/llvm/test/CodeGen/AMDGPU/literal64.ll
index 98691d394abb3..20b876836082e 100644
--- a/llvm/test/CodeGen/AMDGPU/literal64.ll
+++ b/llvm/test/CodeGen/AMDGPU/literal64.ll
@@ -5,7 +5,7 @@
define amdgpu_ps i64 @s_add_u64(i64 inreg %a) {
; GCN-LABEL: s_add_u64:
; GCN: ; %bb.0:
-; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], lit64(0xf12345678)
+; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0xf12345678
; GCN-NEXT: ; return to shader part epilog
%result = add i64 %a, 64729929336
ret i64 %result
@@ -14,7 +14,7 @@ define amdgpu_ps i64 @s_add_u64(i64 inreg %a) {
define amdgpu_ps void @v_add_u64(i64 %a, ptr addrspace(1) %out) {
; GCN-LABEL: v_add_u64:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u64_e32 v[0:1], lit64(0xf12345678), v[0:1]
+; GCN-NEXT: v_add_nc_u64_e32 v[0:1], 0xf12345678, v[0:1]
; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off
; GCN-NEXT: s_endpgm
%result = add i64 %a, 64729929336
@@ -25,7 +25,7 @@ define amdgpu_ps void @v_add_u64(i64 %a, ptr addrspace(1) %out) {
define amdgpu_ps i64 @s_add_neg_u64(i64 inreg %a) {
; GCN-LABEL: s_add_neg_u64:
; GCN: ; %bb.0:
-; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], lit64(0xfffffff0edcba988)
+; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0xfffffff0edcba988
; GCN-NEXT: ; return to shader part epilog
%result = sub i64 %a, 64729929336
ret i64 %result
@@ -34,7 +34,7 @@ define amdgpu_ps i64 @s_add_neg_u64(i64 inreg %a) {
define amdgpu_ps void @v_add_neg_u64(i64 %a, ptr addrspace(1) %out) {
; GCN-LABEL: v_add_neg_u64:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u64_e32 v[0:1], lit64(0xfffffff0edcba988), v[0:1]
+; GCN-NEXT: v_add_nc_u64_e32 v[0:1], 0xfffffff0edcba988, v[0:1]
; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off
; GCN-NEXT: s_endpgm
%result = sub i64 %a, 64729929336
@@ -45,7 +45,7 @@ define amdgpu_ps void @v_add_neg_u64(i64 %a, ptr addrspace(1) %out) {
define amdgpu_ps i64 @s_sub_u64(i64 inreg %a) {
; GCN-LABEL: s_sub_u64:
; GCN: ; %bb.0:
-; GCN-NEXT: s_sub_nc_u64 s[0:1], lit64(0xf12345678), s[0:1]
+; GCN-NEXT: s_sub_nc_u64 s[0:1], 0xf12345678, s[0:1]
; GCN-NEXT: ; return to shader part epilog
%result = sub i64 64729929336, %a
ret i64 %result
@@ -54,7 +54,7 @@ define amdgpu_ps i64 @s_sub_u64(i64 inreg %a) {
define amdgpu_ps void @v_sub_u64(i64 %a, ptr addrspace(1) %out) {
; GCN-LABEL: v_sub_u64:
; GCN: ; %bb.0:
-; GCN-NEXT: v_sub_nc_u64_e32 v[0:1], lit64(0xf12345678), v[0:1]
+; GCN-NEXT: v_sub_nc_u64_e32 v[0:1], 0xf12345678, v[0:1]
; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off
; GCN-NEXT: s_endpgm
%result = sub i64 64729929336, %a
@@ -67,7 +67,7 @@ define void @v_mov_b64_double(ptr addrspace(1) %ptr) {
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333)
+; GCN-NEXT: v_mov_b64_e32 v[2:3], 0x4063233333333333
; GCN-NEXT: global_atomic_add_f64 v[0:1], v[2:3], off scope:SCOPE_SYS
; GCN-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fadd ptr addrspace(1) %ptr, double 153.1 monotonic
@@ -79,7 +79,7 @@ define void @v_mov_b64_int(ptr addrspace(1) %ptr) {
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0xf12345678)
+; GCN-NEXT: v_mov_b64_e32 v[2:3], 0xf12345678
; GCN-NEXT: global_atomic_add_u64 v[0:1], v[2:3], off scope:SCOPE_SYS
; GCN-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw add ptr addrspace(1) %ptr, i64 64729929336 monotonic
@@ -91,7 +91,7 @@ define void @store_double(ptr addrspace(1) %ptr) {
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333)
+; GCN-NEXT: v_mov_b64_e32 v[2:3], 0x4063233333333333
; GCN-NEXT: global_store_b64 v[0:1], v[2:3], off
; GCN-NEXT: s_set_pc_i64 s[30:31]
store double 153.1, ptr addrspace(1) %ptr
@@ -104,7 +104,7 @@ define i1 @class_f64() noinline optnone {
; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-SDAG-NEXT: s_wait_kmcnt 0x0
; GCN-SDAG-NEXT: s_mov_b32 s2, 1
-; GCN-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0x4063233333333333)
+; GCN-SDAG-NEXT: s_mov_b64 s[0:1], 0x4063233333333333
; GCN-SDAG-NEXT: v_cmp_class_f64_e64 s0, s[0:1], s2
; GCN-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31]
@@ -114,7 +114,7 @@ define i1 @class_f64() noinline optnone {
; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-GISEL-NEXT: s_wait_kmcnt 0x0
; GCN-GISEL-NEXT: s_mov_b32 s2, 1
-; GCN-GISEL-NEXT: s_mov_b64 s[0:1], lit64(0x4063233333333333)
+; GCN-GISEL-NEXT: s_mov_b64 s[0:1], 0x4063233333333333
; GCN-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GCN-GISEL-NEXT: v_mov_b32_e32 v2, s2
; GCN-GISEL-NEXT: v_cmp_class_f64_e64 s0, v[0:1], v2
@@ -131,7 +131,7 @@ define double @rsq_f64() {
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_rsq_f64_e32 v[0:1], lit64(0x4063233333333333)
+; GCN-NEXT: v_rsq_f64_e32 v[0:1], 0x4063233333333333
; GCN-NEXT: s_set_pc_i64 s[30:31]
%result = call double @llvm.amdgcn.rsq.f64(double 153.1) nounwind readnone
ret double %result
@@ -140,7 +140,7 @@ define double @rsq_f64() {
define amdgpu_ps i64 @s_and_b64(i64 inreg %a) {
; GCN-LABEL: s_and_b64:
; GCN: ; %bb.0:
-; GCN-NEXT: s_and_b64 s[0:1], s[0:1], lit64(0xf12345678)
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], 0xf12345678
; GCN-NEXT: ; return to shader part epilog
%result = and i64 %a, 64729929336
ret i64 %result
@@ -170,7 +170,7 @@ define amdgpu_ps void @v_and_b64(i64 %a, ptr addrspace(1) %out) {
define amdgpu_ps <2 x float> @v_add_f64_200.1(double %a) {
; GCN-LABEL: v_add_f64_200.1:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_f64_e32 v[0:1], lit64(0x4069033333333333), v[0:1]
+; GCN-NEXT: v_add_f64_e32 v[0:1], 0x4069033333333333, v[0:1]
; GCN-NEXT: ; return to shader part epilog
%add = fadd double %a, 200.1
%ret = bitcast double %add to <2 x float>
@@ -194,14 +194,14 @@ define amdgpu_ps <2 x float> @v_add_f64_200.0(double %a) {
define amdgpu_ps <2 x float> @v_lshl_add_u64(i64 %a) {
; GCN-SDAG-LABEL: v_lshl_add_u64:
; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xf12345678)
+; GCN-SDAG-NEXT: s_mov_b64 s[0:1], 0xf12345678
; GCN-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GCN-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 1, s[0:1]
; GCN-SDAG-NEXT: ; return to shader part epilog
;
; GCN-GISEL-LABEL: v_lshl_add_u64:
; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], lit64(0xf12345678)
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], 0xf12345678
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GCN-GISEL-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 1, v[2:3]
; GCN-GISEL-NEXT: ; return to shader part epilog
@@ -216,10 +216,10 @@ define amdgpu_ps <2 x float> @v_lshl_add_u64(i64 %a) {
define amdgpu_ps <2 x float> @v_fma_f64(double %a, double %b) {
; GCN-SDAG-LABEL: v_fma_f64:
; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: v_fmaak_f64 v[4:5], v[0:1], v[2:3], lit64(0x4063233333333333)
-; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4069033333333333)
+; GCN-SDAG-NEXT: v_fmaak_f64 v[4:5], v[0:1], v[2:3], 0x4063233333333333
+; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x4069033333333333
; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GCN-SDAG-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], lit64(0x4069033333333333)
+; GCN-SDAG-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], 0x4069033333333333
; GCN-SDAG-NEXT: v_fmac_f64_e32 v[2:3], v[0:1], v[4:5]
; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GCN-SDAG-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
@@ -227,11 +227,11 @@ define amdgpu_ps <2 x float> @v_fma_f64(double %a, double %b) {
;
; GCN-GISEL-LABEL: v_fma_f64:
; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[4:5], lit64(0x4063233333333333)
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[4:5], 0x4063233333333333
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GCN-GISEL-NEXT: v_fmac_f64_e32 v[4:5], v[0:1], v[2:3]
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4069033333333333)
-; GCN-GISEL-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], lit64(0x4069033333333333)
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], 0x4069033333333333
+; GCN-GISEL-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], 0x4069033333333333
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GCN-GISEL-NEXT: v_fmac_f64_e32 v[2:3], v[0:1], v[4:5]
; GCN-GISEL-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
@@ -246,7 +246,7 @@ define amdgpu_ps <2 x float> @v_fma_f64(double %a, double %b) {
define amdgpu_ps <2 x float> @v_add_neg_f64(double %a) {
; GCN-SDAG-LABEL: v_add_neg_f64:
; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0x4069033333333333)
+; GCN-SDAG-NEXT: s_mov_b64 s[0:1], 0x4069033333333333
; GCN-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GCN-SDAG-NEXT: v_add_f64_e64 v[0:1], -v[0:1], s[0:1]
; GCN-SDAG-NEXT: ; return to shader part epilog
@@ -254,7 +254,7 @@ define amdgpu_ps <2 x float> @v_add_neg_f64(double %a) {
; GCN-GISEL-LABEL: v_add_neg_f64:
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
-; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4069033333333333)
+; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], 0x4069033333333333
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GCN-GISEL-NEXT: v_add_f64_e64 v[0:1], -v[0:1], v[2:3]
; GCN-GISEL-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.prefetch.ll b/llvm/test/CodeGen/AMDGPU/llvm.prefetch.ll
index 6e24a6a348f2c..c265b05813ee7 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.prefetch.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.prefetch.ll
@@ -87,7 +87,7 @@ define amdgpu_ps void @prefetch_data_sgpr_min_offset(ptr addrspace(4) inreg %ptr
;
; GFX1250-SPREFETCH-SDAG-LABEL: prefetch_data_sgpr_min_offset:
; GFX1250-SPREFETCH-SDAG: ; %bb.0: ; %entry
-; GFX1250-SPREFETCH-SDAG-NEXT: s_mov_b64 s[2:3], lit64(0xffffffffff800000)
+; GFX1250-SPREFETCH-SDAG-NEXT: s_mov_b64 s[2:3], 0xffffffffff800000
; GFX1250-SPREFETCH-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SPREFETCH-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3]
; GFX1250-SPREFETCH-SDAG-NEXT: s_prefetch_data s[0:1], 0x0, null, 0
@@ -424,7 +424,7 @@ define amdgpu_ps void @prefetch_inst_sgpr_min_offset(ptr addrspace(4) inreg %ptr
;
; GFX1250-SPREFETCH-SDAG-LABEL: prefetch_inst_sgpr_min_offset:
; GFX1250-SPREFETCH-SDAG: ; %bb.0: ; %entry
-; GFX1250-SPREFETCH-SDAG-NEXT: s_mov_b64 s[2:3], lit64(0xffffffffff800000)
+; GFX1250-SPREFETCH-SDAG-NEXT: s_mov_b64 s[2:3], 0xffffffffff800000
; GFX1250-SPREFETCH-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SPREFETCH-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3]
; GFX1250-SPREFETCH-SDAG-NEXT: s_prefetch_inst s[0:1], 0x0, null, 0
diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
index e6960a3f710da..dbcd3700a1605 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -2233,7 +2233,7 @@ define amdgpu_ps i64 @lshr_mad_i64_sgpr(i64 inreg %arg0) #0 {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_mov_b32 s3, 0
; GFX1250-NEXT: s_mov_b32 s2, s1
-; GFX1250-NEXT: s_mov_b64 s[4:5], lit64(0xffffffffffff1c18)
+; GFX1250-NEXT: s_mov_b64 s[4:5], 0xffffffffffff1c18
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_mul_u64 s[2:3], s[2:3], s[4:5]
; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[2:3], s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index 7e3d5c97391e1..baccb4c7d0859 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -3221,7 +3221,7 @@ define amdgpu_kernel void @s_mul_i128(ptr addrspace(1) %out, [8 x i32], i128 %a,
; GFX1250-NEXT: s_load_b128 s[12:15], s[4:5], 0x4c
; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_mov_b64 s[4:5], lit64(0xffffffff)
+; GFX1250-NEXT: s_mov_b64 s[4:5], 0xffffffff
; GFX1250-NEXT: s_mov_b32 s3, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_mov_b32 s7, s3
diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
index 9f27e1ffd9130..b0651ef53dd1b 100644
--- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
+++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
@@ -791,7 +791,7 @@ define amdgpu_kernel void @fadd_v2_v_lit_lo0(ptr addrspace(1) %a) {
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x3f80000000000000)
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x3f80000000000000
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -803,7 +803,7 @@ define amdgpu_kernel void @fadd_v2_v_lit_lo0(ptr addrspace(1) %a) {
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
-; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000)
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x3f80000000000000
; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -851,7 +851,7 @@ define amdgpu_kernel void @fadd_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x400000003f800000)
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x400000003f800000
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -863,7 +863,7 @@ define amdgpu_kernel void @fadd_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
-; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000)
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x400000003f800000
; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -1989,7 +1989,7 @@ define amdgpu_kernel void @fmul_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000)
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x4040000040800000
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -2001,7 +2001,7 @@ define amdgpu_kernel void @fmul_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
-; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000)
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x4040000040800000
; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -2907,8 +2907,8 @@ define amdgpu_kernel void @fma_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v6, 0x3ff, v0
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000)
-; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], lit64(0x400000003f800000)
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x4040000040800000
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], 0x400000003f800000
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -2920,9 +2920,9 @@ define amdgpu_kernel void @fma_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0
-; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000)
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x4040000040800000
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
-; GFX1250-GISEL-NEXT: s_mov_b64 s[4:5], lit64(0x400000003f800000)
+; GFX1250-GISEL-NEXT: s_mov_b64 s[4:5], 0x400000003f800000
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/sub_u64.ll b/llvm/test/CodeGen/AMDGPU/sub_u64.ll
index baaca4ddeaf05..f79fbd98f1e09 100644
--- a/llvm/test/CodeGen/AMDGPU/sub_u64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sub_u64.ll
@@ -126,7 +126,7 @@ define amdgpu_ps <2 x float> @test_sub_u64_64bit_imm_v(i64 %a) {
;
; GFX1250-LABEL: test_sub_u64_64bit_imm_v:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: v_sub_nc_u64_e32 v[0:1], lit64(0x13b9ac9ff), v[0:1]
+; GFX1250-NEXT: v_sub_nc_u64_e32 v[0:1], 0x13b9ac9ff, v[0:1]
; GFX1250-NEXT: ; return to shader part epilog
%sub = sub i64 5294967295, %a
%ret = bitcast i64 %sub to <2 x float>