[llvm] [X86] Add missing immediate arg tag to VRNDSCALES instruction names (PR #117203)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 22 06:09:41 PST 2024
https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/117203
>From bd44642066afcacb718130b488ffa56f269ac90e Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Thu, 21 Nov 2024 18:10:15 +0000
Subject: [PATCH] [X86] Add missing attributes to VRNDSCALES instruction names
More canonicalization of the instruction names to make them predictable - more closely matches VRNDSCALEP / VROUND equivalent instructions
---
llvm/lib/Target/X86/X86CompressEVEX.cpp | 16 ++--
llvm/lib/Target/X86/X86InstrAVX512.td | 14 ++--
llvm/lib/Target/X86/X86InstrInfo.cpp | 42 +++++-----
llvm/lib/Target/X86/X86SchedSapphireRapids.td | 18 ++---
.../test/CodeGen/X86/evex-to-vex-compress.mir | 80 +++++++++----------
llvm/test/TableGen/x86-fold-tables.inc | 24 +++---
llvm/test/TableGen/x86-instr-mapping.inc | 16 ++--
llvm/utils/TableGen/X86ManualInstrMapping.def | 8 +-
8 files changed, 109 insertions(+), 109 deletions(-)
diff --git a/llvm/lib/Target/X86/X86CompressEVEX.cpp b/llvm/lib/Target/X86/X86CompressEVEX.cpp
index 6fb480c37e1ff8..7213ed32fc46d0 100644
--- a/llvm/lib/Target/X86/X86CompressEVEX.cpp
+++ b/llvm/lib/Target/X86/X86CompressEVEX.cpp
@@ -154,14 +154,14 @@ static bool performCustomAdjustments(MachineInstr &MI, unsigned NewOpc) {
case X86::VRNDSCALEPDZ256rmi:
case X86::VRNDSCALEPSZ256rri:
case X86::VRNDSCALEPSZ256rmi:
- case X86::VRNDSCALESDZr:
- case X86::VRNDSCALESDZm:
- case X86::VRNDSCALESSZr:
- case X86::VRNDSCALESSZm:
- case X86::VRNDSCALESDZr_Int:
- case X86::VRNDSCALESDZm_Int:
- case X86::VRNDSCALESSZr_Int:
- case X86::VRNDSCALESSZm_Int:
+ case X86::VRNDSCALESDZrri:
+ case X86::VRNDSCALESDZrmi:
+ case X86::VRNDSCALESSZrri:
+ case X86::VRNDSCALESSZrmi:
+ case X86::VRNDSCALESDZrri_Int:
+ case X86::VRNDSCALESDZrmi_Int:
+ case X86::VRNDSCALESSZrri_Int:
+ case X86::VRNDSCALESSZrmi_Int:
const MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands() - 1);
int64_t ImmVal = Imm.getImm();
// Ensure that only bits 3:0 of the immediate are used.
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index a05a3063cac559..299a2a74d86fc3 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -9596,7 +9596,7 @@ defm VSQRT : avx512_sqrt_scalar_all<0x51, "vsqrt", SchedWriteFSqrtSizes>, VEX_LI
multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
X86FoldableSchedWrite sched, X86VectorVTInfo _> {
let ExeDomain = _.ExeDomain in {
- defm r_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ defm rri_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
"$src3, $src2, $src1", "$src1, $src2, $src3",
(_.VT (X86RndScales (_.VT _.RC:$src1), (_.VT _.RC:$src2),
@@ -9604,14 +9604,14 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
Sched<[sched]>, SIMD_EXC;
let Uses = [MXCSR] in
- defm rb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ defm rrib_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
"$src3, {sae}, $src2, $src1", "$src1, $src2, {sae}, $src3",
(_.VT (X86RndScalesSAE (_.VT _.RC:$src1), (_.VT _.RC:$src2),
(i32 timm:$src3)))>, EVEX_B,
Sched<[sched]>;
- defm m_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ defm rmi_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.IntScalarMemOp:$src2, i32u8imm:$src3),
OpcodeStr,
"$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -9620,13 +9620,13 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
let isCodeGenOnly = 1, hasSideEffects = 0, Predicates = [HasAVX512] in {
- def r : I<opc, MRMSrcReg, (outs _.FRC:$dst),
+ def rri : I<opc, MRMSrcReg, (outs _.FRC:$dst),
(ins _.FRC:$src1, _.FRC:$src2, i32u8imm:$src3),
OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, Sched<[sched]>, SIMD_EXC;
let mayLoad = 1 in
- def m : I<opc, MRMSrcMem, (outs _.FRC:$dst),
+ def rmi : I<opc, MRMSrcMem, (outs _.FRC:$dst),
(ins _.FRC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
@@ -9635,13 +9635,13 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX512] in {
def : Pat<(X86any_VRndScale _.FRC:$src1, timm:$src2),
- (_.EltVT (!cast<Instruction>(NAME#r) (_.EltVT (IMPLICIT_DEF)),
+ (_.EltVT (!cast<Instruction>(NAME#rri) (_.EltVT (IMPLICIT_DEF)),
_.FRC:$src1, timm:$src2))>;
}
let Predicates = [HasAVX512, OptForSize] in {
def : Pat<(X86any_VRndScale (_.ScalarLdFrag addr:$src1), timm:$src2),
- (_.EltVT (!cast<Instruction>(NAME#m) (_.EltVT (IMPLICIT_DEF)),
+ (_.EltVT (!cast<Instruction>(NAME#rmi) (_.EltVT (IMPLICIT_DEF)),
addr:$src1, timm:$src2))>;
}
}
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index e8a50227912d8b..5a6ea1182ccb83 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -6971,16 +6971,16 @@ static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
case X86::VGETMANTSSZrri:
case X86::VGETMANTSSZrrib:
case X86::VGETMANTSSZrmi:
- case X86::VRNDSCALESDZr:
- case X86::VRNDSCALESDZr_Int:
- case X86::VRNDSCALESDZrb_Int:
- case X86::VRNDSCALESDZm:
- case X86::VRNDSCALESDZm_Int:
- case X86::VRNDSCALESSZr:
- case X86::VRNDSCALESSZr_Int:
- case X86::VRNDSCALESSZrb_Int:
- case X86::VRNDSCALESSZm:
- case X86::VRNDSCALESSZm_Int:
+ case X86::VRNDSCALESDZrri:
+ case X86::VRNDSCALESDZrri_Int:
+ case X86::VRNDSCALESDZrrib_Int:
+ case X86::VRNDSCALESDZrmi:
+ case X86::VRNDSCALESDZrmi_Int:
+ case X86::VRNDSCALESSZrri:
+ case X86::VRNDSCALESSZrri_Int:
+ case X86::VRNDSCALESSZrrib_Int:
+ case X86::VRNDSCALESSZrmi:
+ case X86::VRNDSCALESSZrmi_Int:
case X86::VRCP14SDZrr:
case X86::VRCP14SDZrm:
case X86::VRCP14SSZrr:
@@ -6998,11 +6998,11 @@ static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
case X86::VGETMANTSHZrri:
case X86::VGETMANTSHZrrib:
case X86::VGETMANTSHZrmi:
- case X86::VRNDSCALESHZr:
- case X86::VRNDSCALESHZr_Int:
- case X86::VRNDSCALESHZrb_Int:
- case X86::VRNDSCALESHZm:
- case X86::VRNDSCALESHZm_Int:
+ case X86::VRNDSCALESHZrri:
+ case X86::VRNDSCALESHZrri_Int:
+ case X86::VRNDSCALESHZrrib_Int:
+ case X86::VRNDSCALESHZrmi:
+ case X86::VRNDSCALESHZrmi_Int:
case X86::VSQRTSHZr:
case X86::VSQRTSHZr_Int:
case X86::VSQRTSHZrb_Int:
@@ -7790,9 +7790,9 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
case X86::VREDUCESSZrri:
case X86::VREDUCESSZrrik:
case X86::VREDUCESSZrrikz:
- case X86::VRNDSCALESSZr_Int:
- case X86::VRNDSCALESSZr_Intk:
- case X86::VRNDSCALESSZr_Intkz:
+ case X86::VRNDSCALESSZrri_Int:
+ case X86::VRNDSCALESSZrri_Intk:
+ case X86::VRNDSCALESSZrri_Intkz:
case X86::VRSQRT14SSZrr:
case X86::VRSQRT14SSZrrk:
case X86::VRSQRT14SSZrrkz:
@@ -7959,9 +7959,9 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
case X86::VREDUCESDZrri:
case X86::VREDUCESDZrrik:
case X86::VREDUCESDZrrikz:
- case X86::VRNDSCALESDZr_Int:
- case X86::VRNDSCALESDZr_Intk:
- case X86::VRNDSCALESDZr_Intkz:
+ case X86::VRNDSCALESDZrri_Int:
+ case X86::VRNDSCALESDZrri_Intk:
+ case X86::VRNDSCALESDZrri_Intkz:
case X86::VRSQRT14SDZrr:
case X86::VRSQRT14SDZrrk:
case X86::VRSQRT14SDZrrkz:
diff --git a/llvm/lib/Target/X86/X86SchedSapphireRapids.td b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
index 4344a48a526281..8a23d1b103aa6b 100644
--- a/llvm/lib/Target/X86/X86SchedSapphireRapids.td
+++ b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
@@ -2300,8 +2300,8 @@ def : InstRW<[SPRWriteResGroup218, ReadAfterVecXLd], (instregex "^(V?)ROUNDS(D|S
"^VRNDSCALEP(D|S)Z128rm(bi|ik)$",
"^VRNDSCALEP(D|S)Z128rmbik(z?)$",
"^VRNDSCALEP(D|S)Z128rmi((kz)?)$",
- "^VRNDSCALES(D|S)Zm$",
- "^VRNDSCALES(D|S)Zm_Int((k|kz)?)$")>;
+ "^VRNDSCALES(D|S)Zrmi$",
+ "^VRNDSCALES(D|S)Zrmi_Int((k|kz)?)$")>;
def SPRWriteResGroup219 : SchedWriteRes<[SPRPort00_01]> {
let ReleaseAtCycles = [2];
@@ -2312,8 +2312,8 @@ def : InstRW<[SPRWriteResGroup219], (instregex "^(V?)ROUND(PD|SS)ri$",
"^(V?)ROUND(PS|SD)ri$",
"^(V?)ROUNDS(D|S)ri_Int$",
"^VRNDSCALEP(D|S)Z(128|256)rri((k|kz)?)$",
- "^VRNDSCALES(D|S)Zr$",
- "^VRNDSCALES(D|S)Zr(b?)_Int((k|kz)?)$",
+ "^VRNDSCALES(D|S)Zrri$",
+ "^VRNDSCALES(D|S)Zrri(b?)_Int((k|kz)?)$",
"^VROUNDP(D|S)Yri$")>;
def SPRWriteResGroup220 : SchedWriteRes<[SPRPort00_06]> {
@@ -3747,7 +3747,7 @@ def : InstRW<[SPRWriteResGroup390, ReadAfterVecXLd], (instregex "^VF(C?)MADDCSHZ
"^VF(C?)MULCPHZ128rm(b?)$",
"^VF(C?)MULCSHZrm$",
"^VRNDSCALEPHZ128rm(b?)i$",
- "^VRNDSCALESHZm((_Int)?)$",
+ "^VRNDSCALESHZrmi((_Int)?)$",
"^VSCALEFPHZ128rm(b?)$")>;
def : InstRW<[SPRWriteResGroup390, ReadAfterVecYLd], (instregex "^VF(C?)MULCPHZ256rm(b?)$",
"^VRNDSCALEP(D|H|S)Z256rm(b?)i$",
@@ -3779,9 +3779,9 @@ def : InstRW<[SPRWriteResGroup392], (instregex "^VF(C?)MADDCPHZ(128|256)r$",
"^VF(C?)MULCPHZ(128|256)rr$",
"^VF(C?)MULCSHZrr(b?)$",
"^VRNDSCALEPHZ(128|256)rri$",
- "^VRNDSCALESHZr(b?)_Int$",
+ "^VRNDSCALESHZrri(b?)_Int$",
"^VSCALEFPHZ(128|256)rr$")>;
-def : InstRW<[SPRWriteResGroup392], (instrs VRNDSCALESHZr,
+def : InstRW<[SPRWriteResGroup392], (instrs VRNDSCALESHZrri,
VSCALEFSHZrr,
VSCALEFSHZrrb_Int)>;
@@ -4884,7 +4884,7 @@ def SPRWriteResGroup534 : SchedWriteRes<[SPRPort00_01, SPRPort02_03_11]> {
let NumMicroOps = 3;
}
def : InstRW<[SPRWriteResGroup534, ReadAfterVecXLd], (instregex "^VRNDSCALEPHZ128rm(b?)ik(z?)$",
- "^VRNDSCALESHZm_Intk(z?)$",
+ "^VRNDSCALESHZrmi_Intk(z?)$",
"^VSCALEFPHZ128rm(bk|kz)$",
"^VSCALEFPHZ128rm(k|bkz)$")>;
def : InstRW<[SPRWriteResGroup534, ReadAfterVecYLd], (instregex "^VRNDSCALEPHZ256rm(b?)ik(z?)$",
@@ -4898,7 +4898,7 @@ def SPRWriteResGroup535 : SchedWriteRes<[SPRPort00_01]> {
let NumMicroOps = 2;
}
def : InstRW<[SPRWriteResGroup535], (instregex "^VRNDSCALEPHZ(128|256)rrik(z?)$",
- "^VRNDSCALESHZr(b?)_Intk(z?)$",
+ "^VRNDSCALESHZrri(b?)_Intk(z?)$",
"^VSCALEFPHZ(128|256)rrk(z?)$",
"^VSCALEFSHZrrb_Intk(z?)$",
"^VSCALEFSHZrrk(z?)$")>;
diff --git a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
index 2f587d789779cf..452adf1d920b53 100644
--- a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
+++ b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
@@ -2309,21 +2309,21 @@ body: |
; CHECK: $xmm0 = VINSERTPSrri $xmm0, $xmm0, 1
$xmm0 = VINSERTPSZrri $xmm0, $xmm0, 1
; CHECK: $xmm0 = VROUNDSDmi $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- $xmm0 = VRNDSCALESDZm $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ $xmm0 = VRNDSCALESDZrmi $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
; CHECK: $xmm0 = VROUNDSDri $xmm0, $xmm1, 15, implicit $mxcsr
- $xmm0 = VRNDSCALESDZr $xmm0, $xmm1, 15, implicit $mxcsr
+ $xmm0 = VRNDSCALESDZrri $xmm0, $xmm1, 15, implicit $mxcsr
; CHECK: $xmm0 = VROUNDSSmi $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- $xmm0 = VRNDSCALESSZm $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ $xmm0 = VRNDSCALESSZrmi $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
; CHECK: $xmm0 = VROUNDSSri $xmm0, $xmm1, 15, implicit $mxcsr
- $xmm0 = VRNDSCALESSZr $xmm0, $xmm1, 15, implicit $mxcsr
+ $xmm0 = VRNDSCALESSZrri $xmm0, $xmm1, 15, implicit $mxcsr
; CHECK: $xmm0 = VROUNDSDmi_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- $xmm0 = VRNDSCALESDZm_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ $xmm0 = VRNDSCALESDZrmi_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
; CHECK: $xmm0 = VROUNDSDri_Int $xmm0, $xmm1, 15, implicit $mxcsr
- $xmm0 = VRNDSCALESDZr_Int $xmm0, $xmm1, 15, implicit $mxcsr
+ $xmm0 = VRNDSCALESDZrri_Int $xmm0, $xmm1, 15, implicit $mxcsr
; CHECK: $xmm0 = VROUNDSSmi_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- $xmm0 = VRNDSCALESSZm_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ $xmm0 = VRNDSCALESSZrmi_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
; CHECK: $xmm0 = VROUNDSSri_Int $xmm0, $xmm1, 15, implicit $mxcsr
- $xmm0 = VRNDSCALESSZr_Int $xmm0, $xmm1, 15, implicit $mxcsr
+ $xmm0 = VRNDSCALESSZrri_Int $xmm0, $xmm1, 15, implicit $mxcsr
RET64
...
@@ -4636,38 +4636,38 @@ body: |
VUCOMISSZrm $xmm16, $rdi, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit $mxcsr
; CHECK: VUCOMISSZrr $xmm16, $xmm1, implicit-def $eflags, implicit $mxcsr
VUCOMISSZrr $xmm16, $xmm1, implicit-def $eflags, implicit $mxcsr
- ; CHECK: $xmm16 = VRNDSCALESDZm $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- $xmm16 = VRNDSCALESDZm $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- ; CHECK: $xmm16 = VRNDSCALESDZr $xmm16, $xmm1, 15, implicit $mxcsr
- $xmm16 = VRNDSCALESDZr $xmm16, $xmm1, 15, implicit $mxcsr
- ; CHECK: $xmm16 = VRNDSCALESSZm $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- $xmm16 = VRNDSCALESSZm $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- ; CHECK: $xmm16 = VRNDSCALESSZr $xmm16, $xmm1, 15, implicit $mxcsr
- $xmm16 = VRNDSCALESSZr $xmm16, $xmm1, 15, implicit $mxcsr
- ; CHECK: $xmm16 = VRNDSCALESDZm_Int $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- $xmm16 = VRNDSCALESDZm_Int $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- ; CHECK: $xmm16 = VRNDSCALESDZr_Int $xmm16, $xmm1, 15, implicit $mxcsr
- $xmm16 = VRNDSCALESDZr_Int $xmm16, $xmm1, 15, implicit $mxcsr
- ; CHECK: $xmm16 = VRNDSCALESSZm_Int $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- $xmm16 = VRNDSCALESSZm_Int $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
- ; CHECK: $xmm16 = VRNDSCALESSZr_Int $xmm16, $xmm1, 15, implicit $mxcsr
- $xmm16 = VRNDSCALESSZr_Int $xmm16, $xmm1, 15, implicit $mxcsr
- ; CHECK: $xmm0 = VRNDSCALESDZm $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
- $xmm0 = VRNDSCALESDZm $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
- ; CHECK: $xmm0 = VRNDSCALESDZr $xmm0, $xmm1, 31, implicit $mxcsr
- $xmm0 = VRNDSCALESDZr $xmm0, $xmm1, 31, implicit $mxcsr
- ; CHECK: $xmm0 = VRNDSCALESSZm $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
- $xmm0 = VRNDSCALESSZm $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
- ; CHECK: $xmm0 = VRNDSCALESSZr $xmm0, $xmm1, 31, implicit $mxcsr
- $xmm0 = VRNDSCALESSZr $xmm0, $xmm1, 31, implicit $mxcsr
- ; CHECK: $xmm0 = VRNDSCALESDZm_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
- $xmm0 = VRNDSCALESDZm_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
- ; CHECK: $xmm0 = VRNDSCALESDZr_Int $xmm0, $xmm1, 31, implicit $mxcsr
- $xmm0 = VRNDSCALESDZr_Int $xmm0, $xmm1, 31, implicit $mxcsr
- ; CHECK: $xmm0 = VRNDSCALESSZm_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
- $xmm0 = VRNDSCALESSZm_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
- ; CHECK: $xmm0 = VRNDSCALESSZr_Int $xmm0, $xmm1, 31, implicit $mxcsr
- $xmm0 = VRNDSCALESSZr_Int $xmm0, $xmm1, 31, implicit $mxcsr
+ ; CHECK: $xmm16 = VRNDSCALESDZrmi $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ $xmm16 = VRNDSCALESDZrmi $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ ; CHECK: $xmm16 = VRNDSCALESDZrri $xmm16, $xmm1, 15, implicit $mxcsr
+ $xmm16 = VRNDSCALESDZrri $xmm16, $xmm1, 15, implicit $mxcsr
+ ; CHECK: $xmm16 = VRNDSCALESSZrmi $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ $xmm16 = VRNDSCALESSZrmi $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ ; CHECK: $xmm16 = VRNDSCALESSZrri $xmm16, $xmm1, 15, implicit $mxcsr
+ $xmm16 = VRNDSCALESSZrri $xmm16, $xmm1, 15, implicit $mxcsr
+ ; CHECK: $xmm16 = VRNDSCALESDZrmi_Int $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ $xmm16 = VRNDSCALESDZrmi_Int $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ ; CHECK: $xmm16 = VRNDSCALESDZrri_Int $xmm16, $xmm1, 15, implicit $mxcsr
+ $xmm16 = VRNDSCALESDZrri_Int $xmm16, $xmm1, 15, implicit $mxcsr
+ ; CHECK: $xmm16 = VRNDSCALESSZrmi_Int $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ $xmm16 = VRNDSCALESSZrmi_Int $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+ ; CHECK: $xmm16 = VRNDSCALESSZrri_Int $xmm16, $xmm1, 15, implicit $mxcsr
+ $xmm16 = VRNDSCALESSZrri_Int $xmm16, $xmm1, 15, implicit $mxcsr
+ ; CHECK: $xmm0 = VRNDSCALESDZrmi $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+ $xmm0 = VRNDSCALESDZrmi $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+ ; CHECK: $xmm0 = VRNDSCALESDZrri $xmm0, $xmm1, 31, implicit $mxcsr
+ $xmm0 = VRNDSCALESDZrri $xmm0, $xmm1, 31, implicit $mxcsr
+ ; CHECK: $xmm0 = VRNDSCALESSZrmi $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+ $xmm0 = VRNDSCALESSZrmi $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+ ; CHECK: $xmm0 = VRNDSCALESSZrri $xmm0, $xmm1, 31, implicit $mxcsr
+ $xmm0 = VRNDSCALESSZrri $xmm0, $xmm1, 31, implicit $mxcsr
+ ; CHECK: $xmm0 = VRNDSCALESDZrmi_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+ $xmm0 = VRNDSCALESDZrmi_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+ ; CHECK: $xmm0 = VRNDSCALESDZrri_Int $xmm0, $xmm1, 31, implicit $mxcsr
+ $xmm0 = VRNDSCALESDZrri_Int $xmm0, $xmm1, 31, implicit $mxcsr
+ ; CHECK: $xmm0 = VRNDSCALESSZrmi_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+ $xmm0 = VRNDSCALESSZrmi_Int $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+ ; CHECK: $xmm0 = VRNDSCALESSZrri_Int $xmm0, $xmm1, 31, implicit $mxcsr
+ $xmm0 = VRNDSCALESSZrri_Int $xmm0, $xmm1, 31, implicit $mxcsr
RET64
...
diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index 621e9e0abeb289..36f6afacdf09d6 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -4051,12 +4051,12 @@ static const X86FoldTableEntry Table2[] = {
{X86::VRNDSCALEPSZ128rrikz, X86::VRNDSCALEPSZ128rmikz, 0},
{X86::VRNDSCALEPSZ256rrikz, X86::VRNDSCALEPSZ256rmikz, 0},
{X86::VRNDSCALEPSZrrikz, X86::VRNDSCALEPSZrmikz, 0},
- {X86::VRNDSCALESDZr, X86::VRNDSCALESDZm, 0},
- {X86::VRNDSCALESDZr_Int, X86::VRNDSCALESDZm_Int, TB_NO_REVERSE},
- {X86::VRNDSCALESHZr, X86::VRNDSCALESHZm, 0},
- {X86::VRNDSCALESHZr_Int, X86::VRNDSCALESHZm_Int, TB_NO_REVERSE},
- {X86::VRNDSCALESSZr, X86::VRNDSCALESSZm, 0},
- {X86::VRNDSCALESSZr_Int, X86::VRNDSCALESSZm_Int, TB_NO_REVERSE},
+ {X86::VRNDSCALESDZrri, X86::VRNDSCALESDZrmi, 0},
+ {X86::VRNDSCALESDZrri_Int, X86::VRNDSCALESDZrmi_Int, TB_NO_REVERSE},
+ {X86::VRNDSCALESHZrri, X86::VRNDSCALESHZrmi, 0},
+ {X86::VRNDSCALESHZrri_Int, X86::VRNDSCALESHZrmi_Int, TB_NO_REVERSE},
+ {X86::VRNDSCALESSZrri, X86::VRNDSCALESSZrmi, 0},
+ {X86::VRNDSCALESSZrri_Int, X86::VRNDSCALESSZrmi_Int, TB_NO_REVERSE},
{X86::VROUNDSDri, X86::VROUNDSDmi, 0},
{X86::VROUNDSDri_Int, X86::VROUNDSDmi_Int, TB_NO_REVERSE},
{X86::VROUNDSSri, X86::VROUNDSSmi, 0},
@@ -5969,9 +5969,9 @@ static const X86FoldTableEntry Table3[] = {
{X86::VRNDSCALEPSZ128rrik, X86::VRNDSCALEPSZ128rmik, 0},
{X86::VRNDSCALEPSZ256rrik, X86::VRNDSCALEPSZ256rmik, 0},
{X86::VRNDSCALEPSZrrik, X86::VRNDSCALEPSZrmik, 0},
- {X86::VRNDSCALESDZr_Intkz, X86::VRNDSCALESDZm_Intkz, TB_NO_REVERSE},
- {X86::VRNDSCALESHZr_Intkz, X86::VRNDSCALESHZm_Intkz, TB_NO_REVERSE},
- {X86::VRNDSCALESSZr_Intkz, X86::VRNDSCALESSZm_Intkz, TB_NO_REVERSE},
+ {X86::VRNDSCALESDZrri_Intkz, X86::VRNDSCALESDZrmi_Intkz, TB_NO_REVERSE},
+ {X86::VRNDSCALESHZrri_Intkz, X86::VRNDSCALESHZrmi_Intkz, TB_NO_REVERSE},
+ {X86::VRNDSCALESSZrri_Intkz, X86::VRNDSCALESSZrmi_Intkz, TB_NO_REVERSE},
{X86::VRSQRT14PDZ128rk, X86::VRSQRT14PDZ128mk, 0},
{X86::VRSQRT14PDZ256rk, X86::VRSQRT14PDZ256mk, 0},
{X86::VRSQRT14PDZrk, X86::VRSQRT14PDZmk, 0},
@@ -7344,9 +7344,9 @@ static const X86FoldTableEntry Table4[] = {
{X86::VREDUCESDZrrik, X86::VREDUCESDZrmik, TB_NO_REVERSE},
{X86::VREDUCESHZrrik, X86::VREDUCESHZrmik, TB_NO_REVERSE},
{X86::VREDUCESSZrrik, X86::VREDUCESSZrmik, TB_NO_REVERSE},
- {X86::VRNDSCALESDZr_Intk, X86::VRNDSCALESDZm_Intk, TB_NO_REVERSE},
- {X86::VRNDSCALESHZr_Intk, X86::VRNDSCALESHZm_Intk, TB_NO_REVERSE},
- {X86::VRNDSCALESSZr_Intk, X86::VRNDSCALESSZm_Intk, TB_NO_REVERSE},
+ {X86::VRNDSCALESDZrri_Intk, X86::VRNDSCALESDZrmi_Intk, TB_NO_REVERSE},
+ {X86::VRNDSCALESHZrri_Intk, X86::VRNDSCALESHZrmi_Intk, TB_NO_REVERSE},
+ {X86::VRNDSCALESSZrri_Intk, X86::VRNDSCALESSZrmi_Intk, TB_NO_REVERSE},
{X86::VRSQRT14SDZrrk, X86::VRSQRT14SDZrmk, TB_NO_REVERSE},
{X86::VRSQRT14SSZrrk, X86::VRSQRT14SSZrmk, TB_NO_REVERSE},
{X86::VRSQRT28SDZrk, X86::VRSQRT28SDZmk, TB_NO_REVERSE},
diff --git a/llvm/test/TableGen/x86-instr-mapping.inc b/llvm/test/TableGen/x86-instr-mapping.inc
index b972427c2ff7ac..ed43684db2dfc4 100644
--- a/llvm/test/TableGen/x86-instr-mapping.inc
+++ b/llvm/test/TableGen/x86-instr-mapping.inc
@@ -1362,14 +1362,14 @@ static const X86TableEntry X86CompressEVEXTable[] = {
{ X86::VRNDSCALEPSZ128rri, X86::VROUNDPSri },
{ X86::VRNDSCALEPSZ256rmi, X86::VROUNDPSYmi },
{ X86::VRNDSCALEPSZ256rri, X86::VROUNDPSYri },
- { X86::VRNDSCALESDZm, X86::VROUNDSDmi },
- { X86::VRNDSCALESDZm_Int, X86::VROUNDSDmi_Int },
- { X86::VRNDSCALESDZr, X86::VROUNDSDri },
- { X86::VRNDSCALESDZr_Int, X86::VROUNDSDri_Int },
- { X86::VRNDSCALESSZm, X86::VROUNDSSmi },
- { X86::VRNDSCALESSZm_Int, X86::VROUNDSSmi_Int },
- { X86::VRNDSCALESSZr, X86::VROUNDSSri },
- { X86::VRNDSCALESSZr_Int, X86::VROUNDSSri_Int },
+ { X86::VRNDSCALESDZrmi, X86::VROUNDSDmi },
+ { X86::VRNDSCALESDZrmi_Int, X86::VROUNDSDmi_Int },
+ { X86::VRNDSCALESDZrri, X86::VROUNDSDri },
+ { X86::VRNDSCALESDZrri_Int, X86::VROUNDSDri_Int },
+ { X86::VRNDSCALESSZrmi, X86::VROUNDSSmi },
+ { X86::VRNDSCALESSZrmi_Int, X86::VROUNDSSmi_Int },
+ { X86::VRNDSCALESSZrri, X86::VROUNDSSri },
+ { X86::VRNDSCALESSZrri_Int, X86::VROUNDSSri_Int },
{ X86::VSHUFF32X4Z256rmi, X86::VPERM2F128rmi },
{ X86::VSHUFF32X4Z256rri, X86::VPERM2F128rri },
{ X86::VSHUFF64X2Z256rmi, X86::VPERM2F128rmi },
diff --git a/llvm/utils/TableGen/X86ManualInstrMapping.def b/llvm/utils/TableGen/X86ManualInstrMapping.def
index 2fdc4dc90f340c..662c13eb1b5f5b 100644
--- a/llvm/utils/TableGen/X86ManualInstrMapping.def
+++ b/llvm/utils/TableGen/X86ManualInstrMapping.def
@@ -204,10 +204,10 @@ ENTRY(VPXORQZ128rm, VPXORrm)
ENTRY(VPXORQZ128rr, VPXORrr)
ENTRY(VRNDSCALEPDZ128rmi, VROUNDPDmi)
ENTRY(VRNDSCALEPDZ128rri, VROUNDPDri)
-ENTRY(VRNDSCALESDZm, VROUNDSDmi)
-ENTRY(VRNDSCALESDZm_Int, VROUNDSDmi_Int)
-ENTRY(VRNDSCALESDZr, VROUNDSDri)
-ENTRY(VRNDSCALESDZr_Int, VROUNDSDri_Int)
+ENTRY(VRNDSCALESDZrmi, VROUNDSDmi)
+ENTRY(VRNDSCALESDZrmi_Int, VROUNDSDmi_Int)
+ENTRY(VRNDSCALESDZrri, VROUNDSDri)
+ENTRY(VRNDSCALESDZrri_Int, VROUNDSDri_Int)
ENTRY(VSHUFPDZ128rmi, VSHUFPDrmi)
ENTRY(VSHUFPDZ128rri, VSHUFPDrri)
ENTRY(VSQRTPDZ128m, VSQRTPDm)
More information about the llvm-commits
mailing list