[llvm] [X86] movsd/movss/movd/movq - add support for constant comments (PR #78601)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 18 07:41:55 PST 2024
https://github.com/RKSimon created https://github.com/llvm/llvm-project/pull/78601
If we're loading a constant value, print the constant (and the zeroed upper elements) instead of just the shuffle mask.
This required moving the shuffle mask handling into addConstantComments, as we can't handle this in the MC layer.
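For example (taken from the test diffs below), a scalar constant-pool load that previously printed only the shuffle mask now shows the loaded constant and the zeroed upper lanes:

  movsd {{.*#+}} xmm0 = mem[0],zero        ; before
  movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]    ; after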
From 994486081ed6816d466d64081686c2ec051b335f Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Thu, 18 Jan 2024 15:37:13 +0000
Subject: [PATCH] [X86] movsd/movss/movd/movq - add support for constant
comments
If we're loading a constant value, print the constant (and the zeroed upper elements) instead of just the shuffle mask.
This required moving the shuffle mask handling into addConstantComments, as we can't handle this in the MC layer.
---
.../X86/MCTargetDesc/X86InstComments.cpp | 32 +-
llvm/lib/Target/X86/X86MCInstLower.cpp | 90 ++
.../CodeGen/X86/2008-09-25-sseregparm-1.ll | 4 +-
llvm/test/CodeGen/X86/GlobalISel/fconstant.ll | 4 +-
.../X86/asm-reg-type-mismatch-avx512.ll | 2 +-
llvm/test/CodeGen/X86/atomic-fp.ll | 36 +-
llvm/test/CodeGen/X86/avx512-cmp.ll | 2 +-
.../test/CodeGen/X86/avx512-fma-intrinsics.ll | 19 +-
.../test/CodeGen/X86/avx512-insert-extract.ll | 4 +-
.../CodeGen/X86/avx512-intrinsics-upgrade.ll | 48 +-
llvm/test/CodeGen/X86/avx512-mov.ll | 24 +-
.../test/CodeGen/X86/avx512-regcall-NoMask.ll | 12 +-
llvm/test/CodeGen/X86/avx512-vec-cmp.ll | 12 +-
llvm/test/CodeGen/X86/bc-extract.ll | 2 +-
llvm/test/CodeGen/X86/bfloat.ll | 4 +-
llvm/test/CodeGen/X86/buildvec-insertvec.ll | 2 +-
llvm/test/CodeGen/X86/cmov-fp.ll | 48 +-
llvm/test/CodeGen/X86/cmovcmov.ll | 4 +-
llvm/test/CodeGen/X86/combine-fabs.ll | 4 +-
.../CodeGen/X86/combineIncDecVector-crash.ll | 2 +-
llvm/test/CodeGen/X86/cvtv2f32.ll | 2 +-
llvm/test/CodeGen/X86/dagcombine-select.ll | 12 +-
llvm/test/CodeGen/X86/deopt-intrinsic.ll | 2 +-
llvm/test/CodeGen/X86/extract-fp.ll | 12 +-
llvm/test/CodeGen/X86/extractelement-fp.ll | 4 +-
llvm/test/CodeGen/X86/extractelement-load.ll | 8 +-
llvm/test/CodeGen/X86/fadd-combines.ll | 2 +-
llvm/test/CodeGen/X86/fast-isel-constpool.ll | 32 +-
llvm/test/CodeGen/X86/fdiv-combine-vec.ll | 24 +-
llvm/test/CodeGen/X86/fdiv-combine.ll | 12 +-
.../CodeGen/X86/fma-intrinsics-canonical.ll | 32 +-
.../CodeGen/X86/fma-intrinsics-x86-upgrade.ll | 64 +-
llvm/test/CodeGen/X86/fma-intrinsics-x86.ll | 68 +-
llvm/test/CodeGen/X86/fma-scalar-memfold.ll | 128 +-
llvm/test/CodeGen/X86/fma.ll | 132 +-
llvm/test/CodeGen/X86/fma_patterns.ll | 12 +-
llvm/test/CodeGen/X86/fmf-flags.ll | 2 +-
llvm/test/CodeGen/X86/fminimum-fmaximum.ll | 24 +-
.../X86/fold-int-pow2-with-fmul-or-fdiv.ll | 112 +-
llvm/test/CodeGen/X86/fp-intrinsics-fma.ll | 24 +-
llvm/test/CodeGen/X86/fp-intrinsics.ll | 116 +-
llvm/test/CodeGen/X86/fp-logic.ll | 12 +-
.../X86/fp-strict-scalar-fptoint-fp16.ll | 4 +-
.../CodeGen/X86/fp-strict-scalar-fptoint.ll | 24 +-
llvm/test/CodeGen/X86/fp-undef.ll | 50 +-
llvm/test/CodeGen/X86/fpclamptosat.ll | 16 +-
llvm/test/CodeGen/X86/fpclamptosat_vec.ll | 20 +-
llvm/test/CodeGen/X86/fptosi-sat-scalar.ll | 56 +-
.../test/CodeGen/X86/fptosi-sat-vector-128.ll | 30 +-
llvm/test/CodeGen/X86/fptoui-sat-scalar.ll | 46 +-
.../test/CodeGen/X86/fptoui-sat-vector-128.ll | 24 +-
llvm/test/CodeGen/X86/ftrunc.ll | 4 +-
llvm/test/CodeGen/X86/half.ll | 14 +-
llvm/test/CodeGen/X86/insertelement-ones.ll | 6 +-
llvm/test/CodeGen/X86/ldexp.ll | 4 +-
.../test/CodeGen/X86/load-scalar-as-vector.ll | 16 +-
llvm/test/CodeGen/X86/logical-load-fold.ll | 8 +-
llvm/test/CodeGen/X86/lsr-static-addr.ll | 4 +-
.../X86/machine-trace-metrics-crash.ll | 4 +-
llvm/test/CodeGen/X86/masked-iv-safe.ll | 48 +-
llvm/test/CodeGen/X86/masked-iv-unsafe.ll | 78 +-
.../X86/merge-consecutive-loads-128.ll | 2 +-
llvm/test/CodeGen/X86/neg_fp.ll | 2 +-
llvm/test/CodeGen/X86/nontemporal-4.ll | 6 +-
llvm/test/CodeGen/X86/oss-fuzz-25184.ll | 2 +-
llvm/test/CodeGen/X86/peep-test-0.ll | 2 +-
llvm/test/CodeGen/X86/pow.ll | 20 +-
llvm/test/CodeGen/X86/powi-int32min.ll | 2 +-
llvm/test/CodeGen/X86/pr23103.ll | 2 +-
llvm/test/CodeGen/X86/pr37879.ll | 2 +-
llvm/test/CodeGen/X86/pr40539.ll | 4 +-
llvm/test/CodeGen/X86/pr44749.ll | 6 +-
llvm/test/CodeGen/X86/pr59258.ll | 16 +-
llvm/test/CodeGen/X86/pr59305.ll | 6 +-
llvm/test/CodeGen/X86/pseudo_cmov_lower2.ll | 8 +-
llvm/test/CodeGen/X86/recip-fastmath.ll | 32 +-
llvm/test/CodeGen/X86/recip-fastmath2.ll | 64 +-
llvm/test/CodeGen/X86/recip-pic.ll | 2 +-
llvm/test/CodeGen/X86/scalar-fp-to-i64.ll | 24 +-
llvm/test/CodeGen/X86/scalarize-fp.ll | 48 +-
.../CodeGen/X86/select-of-fp-constants.ll | 6 +-
llvm/test/CodeGen/X86/select.ll | 2 +-
llvm/test/CodeGen/X86/select_const.ll | 2 +-
llvm/test/CodeGen/X86/setcc-combine.ll | 2 +-
llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll | 4 +-
.../CodeGen/X86/sqrt-fastmath-tunecpu-attr.ll | 4 +-
llvm/test/CodeGen/X86/sqrt-fastmath.ll | 12 +-
llvm/test/CodeGen/X86/sse-fcopysign.ll | 4 +-
.../CodeGen/X86/sse-intrinsics-fast-isel.ll | 184 +--
llvm/test/CodeGen/X86/sse-load-ret.ll | 4 +-
llvm/test/CodeGen/X86/sse-minmax.ll | 66 +-
llvm/test/CodeGen/X86/sse1.ll | 16 +-
.../CodeGen/X86/sse2-intrinsics-fast-isel.ll | 384 ++---
.../X86/sse2-intrinsics-x86-upgrade.ll | 24 +-
llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll | 4 +-
llvm/test/CodeGen/X86/sse41.ll | 56 +-
.../CodeGen/X86/stack-folding-int-avx2.ll | 6 +-
llvm/test/CodeGen/X86/swifterror.ll | 32 +-
.../CodeGen/X86/vec-strict-fptoint-128.ll | 56 +-
.../CodeGen/X86/vec-strict-fptoint-256.ll | 16 +-
.../CodeGen/X86/vec-strict-fptoint-512.ll | 4 +-
llvm/test/CodeGen/X86/vec_fp_to_int.ll | 34 +-
.../vector-constrained-fp-intrinsics-fma.ll | 20 +-
.../X86/vector-constrained-fp-intrinsics.ll | 1250 ++++++++---------
llvm/test/CodeGen/X86/vector-reduce-fadd.ll | 6 +-
.../CodeGen/X86/vector-shuffle-combining.ll | 8 +-
llvm/test/CodeGen/X86/vselect-zero.ll | 16 +-
.../CodeGen/X86/widen-load-of-small-alloca.ll | 4 +-
llvm/test/CodeGen/X86/x86-64-varargs.ll | 4 +-
109 files changed, 2091 insertions(+), 2036 deletions(-)
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
index 20b37d5a99902be..619328af12719d1 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
@@ -1212,15 +1212,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VMOVSDZrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
- [[fallthrough]];
-
- case X86::MOVSDrm_alt:
- case X86::MOVSDrm:
- case X86::VMOVSDrm_alt:
- case X86::VMOVSDrm:
- case X86::VMOVSDZrm:
- case X86::VMOVSDZrm_alt:
- DecodeScalarMoveMask(2, nullptr == Src2Name, ShuffleMask);
+ DecodeScalarMoveMask(2, false, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -1229,15 +1221,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VMOVSSZrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
- [[fallthrough]];
-
- case X86::MOVSSrm:
- case X86::MOVSSrm_alt:
- case X86::VMOVSSrm:
- case X86::VMOVSSrm_alt:
- case X86::VMOVSSZrm:
- case X86::VMOVSSZrm_alt:
- DecodeScalarMoveMask(4, nullptr == Src2Name, ShuffleMask);
+ DecodeScalarMoveMask(4, false, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -1248,22 +1232,10 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VMOVZPQILo2PQIrr:
case X86::VMOVZPQILo2PQIZrr:
Src1Name = getRegName(MI->getOperand(1).getReg());
- [[fallthrough]];
-
- case X86::MOVQI2PQIrm:
- case X86::VMOVQI2PQIrm:
- case X86::VMOVQI2PQIZrm:
DecodeZeroMoveLowMask(2, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
- case X86::MOVDI2PDIrm:
- case X86::VMOVDI2PDIrm:
- case X86::VMOVDI2PDIZrm:
- DecodeZeroMoveLowMask(4, ShuffleMask);
- DestName = getRegName(MI->getOperand(0).getReg());
- break;
-
case X86::EXTRQI:
if (MI->getOperand(2).isImm() &&
MI->getOperand(3).isImm())
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 2d5ccbfdfc765fa..cc615a3fac34fd3 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1806,6 +1806,96 @@ static void addConstantComments(const MachineInstr *MI,
break;
}
+ case X86::MOVSDrm:
+ case X86::MOVSSrm:
+ case X86::VMOVSDrm:
+ case X86::VMOVSSrm:
+ case X86::VMOVSDZrm:
+ case X86::VMOVSSZrm:
+ case X86::MOVSDrm_alt:
+ case X86::MOVSSrm_alt:
+ case X86::VMOVSDrm_alt:
+ case X86::VMOVSSrm_alt:
+ case X86::VMOVSDZrm_alt:
+ case X86::VMOVSSZrm_alt:
+ case X86::MOVDI2PDIrm:
+ case X86::MOVQI2PQIrm:
+ case X86::VMOVDI2PDIrm:
+ case X86::VMOVQI2PQIrm:
+ case X86::VMOVDI2PDIZrm:
+ case X86::VMOVQI2PQIZrm: {
+ assert(MI->getNumOperands() >= (1 + X86::AddrNumOperands) &&
+ "Unexpected number of operands!");
+ int SclWidth = 32;
+ int VecWidth = 128;
+
+ switch (MI->getOpcode()) {
+ default:
+ llvm_unreachable("Invalid opcode");
+ case X86::MOVSDrm:
+ case X86::VMOVSDrm:
+ case X86::VMOVSDZrm:
+ case X86::MOVSDrm_alt:
+ case X86::VMOVSDrm_alt:
+ case X86::VMOVSDZrm_alt:
+ case X86::MOVQI2PQIrm:
+ case X86::VMOVQI2PQIrm:
+ case X86::VMOVQI2PQIZrm:
+ SclWidth = 64;
+ VecWidth = 128;
+ break;
+ case X86::MOVSSrm:
+ case X86::VMOVSSrm:
+ case X86::VMOVSSZrm:
+ case X86::MOVSSrm_alt:
+ case X86::VMOVSSrm_alt:
+ case X86::VMOVSSZrm_alt:
+ case X86::MOVDI2PDIrm:
+ case X86::VMOVDI2PDIrm:
+ case X86::VMOVDI2PDIZrm:
+ SclWidth = 32;
+ VecWidth = 128;
+ break;
+ }
+ std::string Comment;
+ raw_string_ostream CS(Comment);
+ const MachineOperand &DstOp = MI->getOperand(0);
+ CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
+
+ if (auto *C =
+ X86::getConstantFromPool(*MI, MI->getOperand(1 + X86::AddrDisp))) {
+ if (SclWidth == C->getType()->getScalarSizeInBits()) {
+ if (auto *CI = dyn_cast<ConstantInt>(C)) {
+ CS << "[";
+ printConstant(CI->getValue(), CS);
+ for (int I = 1, E = VecWidth / SclWidth; I < E; ++I) {
+ CS << ",0";
+ }
+ CS << "]";
+ OutStreamer.AddComment(CS.str());
+ break; // early-out
+ }
+ if (auto *CF = dyn_cast<ConstantFP>(C)) {
+ CS << "[";
+ printConstant(CF->getValue(), CS);
+ APFloat ZeroFP = APFloat::getZero(CF->getValue().getSemantics());
+ for (int I = 1, E = VecWidth / SclWidth; I < E; ++I) {
+ CS << ",";
+ printConstant(ZeroFP, CS);
+ }
+ CS << "]";
+ OutStreamer.AddComment(CS.str());
+ break; // early-out
+ }
+ }
+ }
+
+ // We didn't find a constant load, fallback to a shuffle mask decode.
+ CS << (SclWidth == 32 ? "mem[0],zero,zero,zero" : "mem[0],zero");
+ OutStreamer.AddComment(CS.str());
+ break;
+ }
+
#define MOV_CASE(Prefix, Suffix) \
case X86::Prefix##MOVAPD##Suffix##rm: \
case X86::Prefix##MOVAPS##Suffix##rm: \
diff --git a/llvm/test/CodeGen/X86/2008-09-25-sseregparm-1.ll b/llvm/test/CodeGen/X86/2008-09-25-sseregparm-1.ll
index a2dd55767a7efae..6288f7e1d039c9c 100644
--- a/llvm/test/CodeGen/X86/2008-09-25-sseregparm-1.ll
+++ b/llvm/test/CodeGen/X86/2008-09-25-sseregparm-1.ll
@@ -5,7 +5,7 @@
define inreg double @foo1() nounwind {
; CHECK-LABEL: foo1:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; CHECK-NEXT: retl
ret double 1.0
}
@@ -13,7 +13,7 @@ define inreg double @foo1() nounwind {
define inreg float @foo2() nounwind {
; CHECK-LABEL: foo2:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: retl
ret float 1.0
}
diff --git a/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll b/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
index 1e08c804af586e3..a9b2037e9947a1b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
@@ -8,7 +8,7 @@
define void @test_float(ptr %a , float %b) {
; CHECK64_SMALL-LABEL: test_float:
; CHECK64_SMALL: # %bb.0: # %entry
-; CHECK64_SMALL-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK64_SMALL-NEXT: movss {{.*#+}} xmm1 = [5.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK64_SMALL-NEXT: addss %xmm0, %xmm1
; CHECK64_SMALL-NEXT: movd %xmm1, %eax
; CHECK64_SMALL-NEXT: movl %eax, (%rdi)
@@ -26,7 +26,7 @@ define void @test_float(ptr %a , float %b) {
; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK32-NEXT: movss {{.*#+}} xmm0 = [5.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK32-NEXT: movd %ecx, %xmm1
; CHECK32-NEXT: addss %xmm0, %xmm1
; CHECK32-NEXT: movd %xmm1, %ecx
diff --git a/llvm/test/CodeGen/X86/asm-reg-type-mismatch-avx512.ll b/llvm/test/CodeGen/X86/asm-reg-type-mismatch-avx512.ll
index 053ca11b95a50c7..56b05418afa9464 100644
--- a/llvm/test/CodeGen/X86/asm-reg-type-mismatch-avx512.ll
+++ b/llvm/test/CodeGen/X86/asm-reg-type-mismatch-avx512.ll
@@ -5,7 +5,7 @@ define i64 @test1() nounwind {
; CHECK-LABEL: test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: #APP
-; CHECK-NEXT: vmovq {{.*#+}} xmm16 = mem[0],zero
+; CHECK-NEXT: vmovq 0, %xmm16
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: vmovq %xmm16, %rax
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/atomic-fp.ll b/llvm/test/CodeGen/X86/atomic-fp.ll
index d933ffec623b945..1094edd19af4380 100644
--- a/llvm/test/CodeGen/X86/atomic-fp.ll
+++ b/llvm/test/CodeGen/X86/atomic-fp.ll
@@ -207,28 +207,28 @@ define dso_local void @fadd_32g() nounwind {
;
; X86-SSE2-LABEL: fadd_32g:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE2-NEXT: addss glob32, %xmm0
; X86-SSE2-NEXT: movss %xmm0, glob32
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: fadd_32g:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-AVX-NEXT: vaddss glob32, %xmm0, %xmm0
; X86-AVX-NEXT: vmovss %xmm0, glob32
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: fadd_32g:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-SSE-NEXT: addss glob32(%rip), %xmm0
; X64-SSE-NEXT: movss %xmm0, glob32(%rip)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fadd_32g:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX-NEXT: vaddss glob32(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vmovss %xmm0, glob32(%rip)
; X64-AVX-NEXT: retq
@@ -319,14 +319,14 @@ define dso_local void @fadd_64g() nounwind {
;
; X64-SSE-LABEL: fadd_64g:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X64-SSE-NEXT: addsd glob64(%rip), %xmm0
; X64-SSE-NEXT: movsd %xmm0, glob64(%rip)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fadd_64g:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X64-AVX-NEXT: vaddsd glob64(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vmovsd %xmm0, glob64(%rip)
; X64-AVX-NEXT: retq
@@ -368,14 +368,14 @@ define dso_local void @fadd_32imm() nounwind {
;
; X86-SSE2-LABEL: fadd_32imm:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE2-NEXT: addss -559038737, %xmm0
; X86-SSE2-NEXT: movss %xmm0, -559038737
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: fadd_32imm:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-AVX-NEXT: vaddss -559038737, %xmm0, %xmm0
; X86-AVX-NEXT: vmovss %xmm0, -559038737
; X86-AVX-NEXT: retl
@@ -383,7 +383,7 @@ define dso_local void @fadd_32imm() nounwind {
; X64-SSE-LABEL: fadd_32imm:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
-; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-SSE-NEXT: addss (%rax), %xmm0
; X64-SSE-NEXT: movss %xmm0, (%rax)
; X64-SSE-NEXT: retq
@@ -391,7 +391,7 @@ define dso_local void @fadd_32imm() nounwind {
; X64-AVX-LABEL: fadd_32imm:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
-; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX-NEXT: vaddss (%rax), %xmm0, %xmm0
; X64-AVX-NEXT: vmovss %xmm0, (%rax)
; X64-AVX-NEXT: retq
@@ -483,7 +483,7 @@ define dso_local void @fadd_64imm() nounwind {
; X64-SSE-LABEL: fadd_64imm:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
-; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X64-SSE-NEXT: addsd (%rax), %xmm0
; X64-SSE-NEXT: movsd %xmm0, (%rax)
; X64-SSE-NEXT: retq
@@ -491,7 +491,7 @@ define dso_local void @fadd_64imm() nounwind {
; X64-AVX-LABEL: fadd_64imm:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
-; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X64-AVX-NEXT: vaddsd (%rax), %xmm0, %xmm0
; X64-AVX-NEXT: vmovsd %xmm0, (%rax)
; X64-AVX-NEXT: retq
@@ -534,7 +534,7 @@ define dso_local void @fadd_32stack() nounwind {
; X86-SSE2-LABEL: fadd_32stack:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %eax
-; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE2-NEXT: addss (%esp), %xmm0
; X86-SSE2-NEXT: movss %xmm0, (%esp)
; X86-SSE2-NEXT: popl %eax
@@ -543,7 +543,7 @@ define dso_local void @fadd_32stack() nounwind {
; X86-AVX-LABEL: fadd_32stack:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %eax
-; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-AVX-NEXT: vaddss (%esp), %xmm0, %xmm0
; X86-AVX-NEXT: vmovss %xmm0, (%esp)
; X86-AVX-NEXT: popl %eax
@@ -551,14 +551,14 @@ define dso_local void @fadd_32stack() nounwind {
;
; X64-SSE-LABEL: fadd_32stack:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-SSE-NEXT: addss -{{[0-9]+}}(%rsp), %xmm0
; X64-SSE-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fadd_32stack:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX-NEXT: vaddss -{{[0-9]+}}(%rsp), %xmm0, %xmm0
; X64-AVX-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: retq
@@ -650,14 +650,14 @@ define dso_local void @fadd_64stack() nounwind {
;
; X64-SSE-LABEL: fadd_64stack:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X64-SSE-NEXT: addsd -{{[0-9]+}}(%rsp), %xmm0
; X64-SSE-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fadd_64stack:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X64-AVX-NEXT: vaddsd -{{[0-9]+}}(%rsp), %xmm0, %xmm0
; X64-AVX-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/avx512-cmp.ll b/llvm/test/CodeGen/X86/avx512-cmp.ll
index 919edb334b3671b..0c3d9d6f7277c40 100644
--- a/llvm/test/CodeGen/X86/avx512-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-cmp.ll
@@ -70,7 +70,7 @@ define float @test5(float %p) #0 {
; ALL-NEXT: retq
; ALL-NEXT: LBB3_1: ## %if.end
; ALL-NEXT: vcmpltss %xmm0, %xmm1, %k1
-; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ALL-NEXT: vmovss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; ALL-NEXT: vmovss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1}
; ALL-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/avx512-fma-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-fma-intrinsics.ll
index def5ad51d732c29..c5a994e6846a455 100644
--- a/llvm/test/CodeGen/X86/avx512-fma-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-fma-intrinsics.ll
@@ -1150,19 +1150,12 @@ define <16 x float>@test_int_x86_avx512_mask_vfnmadd_ps_512(<16 x float> %x0, <1
; This test case used to crash due to combineFMA not bitcasting results of isFNEG.
define <4 x float> @foo() {
-; X86-LABEL: foo:
-; X86: # %bb.0: # %entry
-; X86-NEXT: vmovss (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x00]
-; X86-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: vfmsub213ss {rd-sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x38,0xab,0xc0]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: foo:
-; X64: # %bb.0: # %entry
-; X64-NEXT: vmovss (%rax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x00]
-; X64-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X64-NEXT: vfmsub213ss {rd-sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x38,0xab,0xc0]
-; X64-NEXT: retq # encoding: [0xc3]
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x00]
+; CHECK-NEXT: vfmsub213ss {rd-sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x38,0xab,0xc0]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
entry:
%0 = load <4 x float>, ptr undef, align 16
%sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %0
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 4a2dd7673f4e767..abfe3e6428e6632 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -2175,7 +2175,7 @@ define void @test_concat_v2i1(ptr %arg, ptr %arg1, ptr %arg2) nounwind {
; KNL-NEXT: movzwl %ax, %eax
; KNL-NEXT: vmovd %eax, %xmm1
; KNL-NEXT: vcvtph2ps %xmm1, %xmm1
-; KNL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; KNL-NEXT: vmovss {{.*#+}} xmm2 = [6.0E+0,0.0E+0,0.0E+0,0.0E+0]
; KNL-NEXT: vucomiss %xmm2, %xmm1
; KNL-NEXT: setb %al
; KNL-NEXT: andl $1, %eax
@@ -2217,7 +2217,7 @@ define void @test_concat_v2i1(ptr %arg, ptr %arg1, ptr %arg2) nounwind {
; SKX-NEXT: movzwl %ax, %eax
; SKX-NEXT: vmovd %eax, %xmm1
; SKX-NEXT: vcvtph2ps %xmm1, %xmm1
-; SKX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SKX-NEXT: vmovss {{.*#+}} xmm2 = [6.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SKX-NEXT: vucomiss %xmm2, %xmm1
; SKX-NEXT: setb %al
; SKX-NEXT: kmovd %eax, %k0
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index a5a4bf1e53631ee..6c9c28bc9e55e11 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -10104,10 +10104,10 @@ define void @fmadd_ss_mask_memfold(ptr %a, ptr %b, i8 %c) {
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ## encoding: [0x8b,0x54,0x24,0x04]
-; X86-NEXT: vmovss (%edx), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x02]
-; X86-NEXT: ## xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: vmovss (%ecx), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x09]
-; X86-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x02]
+; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x09]
; X86-NEXT: vfmadd213ss %xmm0, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0xc8]
; X86-NEXT: ## xmm1 = (xmm0 * xmm1) + xmm0
; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
@@ -10117,10 +10117,10 @@ define void @fmadd_ss_mask_memfold(ptr %a, ptr %b, i8 %c) {
;
; X64-LABEL: fmadd_ss_mask_memfold:
; X64: ## %bb.0:
-; X64-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; X64-NEXT: ## xmm0 = mem[0],zero,zero,zero
-; X64-NEXT: vmovss (%rsi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0e]
-; X64-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
+; X64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0e]
; X64-NEXT: vfmadd213ss %xmm0, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0xc8]
; X64-NEXT: ## xmm1 = (xmm0 * xmm1) + xmm0
; X64-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
@@ -10152,8 +10152,8 @@ define void @fmadd_ss_maskz_memfold(ptr %a, ptr %b, i8 %c) {
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ## encoding: [0x8b,0x54,0x24,0x04]
-; X86-NEXT: vmovss (%edx), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x02]
-; X86-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x02]
; X86-NEXT: vfmadd231ss (%ecx), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xb9,0x01]
; X86-NEXT: ## xmm0 = (xmm0 * mem) + xmm0
; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
@@ -10163,8 +10163,8 @@ define void @fmadd_ss_maskz_memfold(ptr %a, ptr %b, i8 %c) {
;
; X64-LABEL: fmadd_ss_maskz_memfold:
; X64: ## %bb.0:
-; X64-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; X64-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; X64-NEXT: vfmadd231ss (%rsi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xb9,0x06]
; X64-NEXT: ## xmm0 = (xmm0 * mem) + xmm0
; X64-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
@@ -10196,10 +10196,10 @@ define void @fmadd_sd_mask_memfold(ptr %a, ptr %b, i8 %c) {
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ## encoding: [0x8b,0x54,0x24,0x04]
-; X86-NEXT: vmovsd (%edx), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x02]
-; X86-NEXT: ## xmm0 = mem[0],zero
-; X86-NEXT: vmovsd (%ecx), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x09]
-; X86-NEXT: ## xmm1 = mem[0],zero
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x02]
+; X86-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x09]
; X86-NEXT: vfmadd213sd %xmm0, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0xc8]
; X86-NEXT: ## xmm1 = (xmm0 * xmm1) + xmm0
; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
@@ -10209,10 +10209,10 @@ define void @fmadd_sd_mask_memfold(ptr %a, ptr %b, i8 %c) {
;
; X64-LABEL: fmadd_sd_mask_memfold:
; X64: ## %bb.0:
-; X64-NEXT: vmovsd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; X64-NEXT: ## xmm0 = mem[0],zero
-; X64-NEXT: vmovsd (%rsi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x0e]
-; X64-NEXT: ## xmm1 = mem[0],zero
+; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
+; X64-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x0e]
; X64-NEXT: vfmadd213sd %xmm0, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0xc8]
; X64-NEXT: ## xmm1 = (xmm0 * xmm1) + xmm0
; X64-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
@@ -10240,8 +10240,8 @@ define void @fmadd_sd_maskz_memfold(ptr %a, ptr %b, i8 %c) {
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ## encoding: [0x8b,0x54,0x24,0x04]
-; X86-NEXT: vmovsd (%edx), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x02]
-; X86-NEXT: ## xmm0 = mem[0],zero
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x02]
; X86-NEXT: vfmadd231sd (%ecx), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xb9,0x01]
; X86-NEXT: ## xmm0 = (xmm0 * mem) + xmm0
; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
@@ -10251,8 +10251,8 @@ define void @fmadd_sd_maskz_memfold(ptr %a, ptr %b, i8 %c) {
;
; X64-LABEL: fmadd_sd_maskz_memfold:
; X64: ## %bb.0:
-; X64-NEXT: vmovsd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; X64-NEXT: ## xmm0 = mem[0],zero
+; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; X64-NEXT: vfmadd231sd (%rsi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xb9,0x06]
; X64-NEXT: ## xmm0 = (xmm0 * mem) + xmm0
; X64-NEXT: kmovw %edx, %k1 ## encoding: [0xc5,0xf8,0x92,0xca]
diff --git a/llvm/test/CodeGen/X86/avx512-mov.ll b/llvm/test/CodeGen/X86/avx512-mov.ll
index 88682cea754663b..895317cd73d82e3 100644
--- a/llvm/test/CodeGen/X86/avx512-mov.ll
+++ b/llvm/test/CodeGen/X86/avx512-mov.ll
@@ -31,8 +31,8 @@ define <2 x i64> @test3(i64 %x) {
define <4 x i32> @test4(ptr %x) {
; CHECK-LABEL: test4:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%y = load i32, ptr %x
%res = insertelement <4 x i32>undef, i32 %y, i32 0
@@ -60,8 +60,8 @@ define void @test6(double %x, ptr %y) {
define float @test7(ptr %x) {
; CHECK-LABEL: test7:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%y = load i32, ptr %x
%res = bitcast i32 %y to float
@@ -89,8 +89,8 @@ define i64 @test9(<2 x i64> %x) {
define <4 x i32> @test10(ptr %x) {
; CHECK-LABEL: test10:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%y = load i32, ptr %x, align 4
%res = insertelement <4 x i32>zeroinitializer, i32 %y, i32 0
@@ -100,8 +100,8 @@ define <4 x i32> @test10(ptr %x) {
define <4 x float> @test11(ptr %x) {
; CHECK-LABEL: test11:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%y = load float, ptr %x, align 4
%res = insertelement <4 x float>zeroinitializer, float %y, i32 0
@@ -111,8 +111,8 @@ define <4 x float> @test11(ptr %x) {
define <2 x double> @test12(ptr %x) {
; CHECK-LABEL: test12:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vmovsd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; CHECK-NEXT: ## xmm0 = mem[0],zero
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%y = load double, ptr %x, align 8
%res = insertelement <2 x double>zeroinitializer, double %y, i32 0
@@ -140,8 +140,8 @@ define <4 x i32> @test14(i32 %x) {
define <4 x i32> @test15(ptr %x) {
; CHECK-LABEL: test15:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vmovss (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%y = load i32, ptr %x, align 4
%res = insertelement <4 x i32>zeroinitializer, i32 %y, i32 0
diff --git a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
index 7a534721bae056e..2081d201704f3af 100644
--- a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -340,7 +340,7 @@ define dso_local x86_regcallcc float @test_CallargRetFloat(float %a) {
; X32: # %bb.0:
; X32-NEXT: subl $28, %esp
; X32-NEXT: vmovups %xmm4, (%esp) # 16-byte Spill
-; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm4 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X32-NEXT: vaddss %xmm4, %xmm0, %xmm0
; X32-NEXT: calll _test_argRetFloat
; X32-NEXT: vaddss %xmm4, %xmm0, %xmm0
@@ -355,7 +355,7 @@ define dso_local x86_regcallcc float @test_CallargRetFloat(float %a) {
; WIN64-NEXT: vmovaps %xmm8, (%rsp) # 16-byte Spill
; WIN64-NEXT: .seh_savexmm %xmm8, 0
; WIN64-NEXT: .seh_endprologue
-; WIN64-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
+; WIN64-NEXT: vmovss {{.*#+}} xmm8 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; WIN64-NEXT: vaddss %xmm0, %xmm8, %xmm0
; WIN64-NEXT: callq test_argRetFloat
; WIN64-NEXT: vaddss %xmm0, %xmm8, %xmm0
@@ -370,7 +370,7 @@ define dso_local x86_regcallcc float @test_CallargRetFloat(float %a) {
; LINUXOSX64-NEXT: vmovaps %xmm8, (%rsp) # 16-byte Spill
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 32
; LINUXOSX64-NEXT: .cfi_offset %xmm8, -32
-; LINUXOSX64-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
+; LINUXOSX64-NEXT: vmovss {{.*#+}} xmm8 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; LINUXOSX64-NEXT: vaddss %xmm0, %xmm8, %xmm0
; LINUXOSX64-NEXT: callq test_argRetFloat
; LINUXOSX64-NEXT: vaddss %xmm0, %xmm8, %xmm0
@@ -410,7 +410,7 @@ define dso_local x86_regcallcc double @test_CallargRetDouble(double %a) {
; X32: # %bb.0:
; X32-NEXT: subl $28, %esp
; X32-NEXT: vmovups %xmm4, (%esp) # 16-byte Spill
-; X32-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; X32-NEXT: vmovsd {{.*#+}} xmm4 = [1.0E+0,0.0E+0]
; X32-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; X32-NEXT: calll _test_argRetDouble
; X32-NEXT: vaddsd %xmm4, %xmm0, %xmm0
@@ -425,7 +425,7 @@ define dso_local x86_regcallcc double @test_CallargRetDouble(double %a) {
; WIN64-NEXT: vmovaps %xmm8, (%rsp) # 16-byte Spill
; WIN64-NEXT: .seh_savexmm %xmm8, 0
; WIN64-NEXT: .seh_endprologue
-; WIN64-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; WIN64-NEXT: vmovsd {{.*#+}} xmm8 = [1.0E+0,0.0E+0]
; WIN64-NEXT: vaddsd %xmm0, %xmm8, %xmm0
; WIN64-NEXT: callq test_argRetDouble
; WIN64-NEXT: vaddsd %xmm0, %xmm8, %xmm0
@@ -440,7 +440,7 @@ define dso_local x86_regcallcc double @test_CallargRetDouble(double %a) {
; LINUXOSX64-NEXT: vmovaps %xmm8, (%rsp) # 16-byte Spill
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 32
; LINUXOSX64-NEXT: .cfi_offset %xmm8, -32
-; LINUXOSX64-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; LINUXOSX64-NEXT: vmovsd {{.*#+}} xmm8 = [1.0E+0,0.0E+0]
; LINUXOSX64-NEXT: vaddsd %xmm0, %xmm8, %xmm0
; LINUXOSX64-NEXT: callq test_argRetDouble
; LINUXOSX64-NEXT: vaddsd %xmm0, %xmm8, %xmm0
diff --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
index e4c62fca5bd57aa..973f4ee21564920 100644
--- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -1434,8 +1434,8 @@ define <4 x i32> @zext_bool_logic(<4 x i64> %cond1, <4 x i64> %cond2, <4 x i32>
define void @half_vec_compare(ptr %x, ptr %y) {
; KNL-LABEL: half_vec_compare:
; KNL: ## %bb.0: ## %entry
-; KNL-NEXT: vmovd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
-; KNL-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; KNL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; KNL-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
; KNL-NEXT: vpsrld $16, %xmm0, %xmm1 ## encoding: [0xc5,0xf1,0x72,0xd0,0x10]
; KNL-NEXT: vpextrw $0, %xmm1, %eax ## encoding: [0xc5,0xf9,0xc5,0xc1,0x00]
; KNL-NEXT: movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
@@ -1466,8 +1466,8 @@ define void @half_vec_compare(ptr %x, ptr %y) {
;
; AVX512BW-LABEL: half_vec_compare:
; AVX512BW: ## %bb.0: ## %entry
-; AVX512BW-NEXT: vmovd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
-; AVX512BW-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BW-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1 ## encoding: [0xc5,0xf1,0x72,0xd0,0x10]
; AVX512BW-NEXT: vpextrw $0, %xmm1, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc1,0x00]
; AVX512BW-NEXT: movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
@@ -1498,8 +1498,8 @@ define void @half_vec_compare(ptr %x, ptr %y) {
;
; SKX-LABEL: half_vec_compare:
; SKX: ## %bb.0: ## %entry
-; SKX-NEXT: vmovd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
-; SKX-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; SKX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SKX-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
; SKX-NEXT: vpsrld $16, %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x72,0xd0,0x10]
; SKX-NEXT: vpextrw $0, %xmm1, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc1,0x00]
; SKX-NEXT: movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
diff --git a/llvm/test/CodeGen/X86/bc-extract.ll b/llvm/test/CodeGen/X86/bc-extract.ll
index 506ba906800a6df..23091a2da9c5893 100644
--- a/llvm/test/CodeGen/X86/bc-extract.ll
+++ b/llvm/test/CodeGen/X86/bc-extract.ll
@@ -10,7 +10,7 @@ define float @extractFloat1() nounwind {
;
; X64-LABEL: extractFloat1:
; X64: # %bb.0: # %entry
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: retq
entry:
%tmp0 = bitcast <1 x double> <double 0x000000003F800000> to <2 x float>
diff --git a/llvm/test/CodeGen/X86/bfloat.ll b/llvm/test/CodeGen/X86/bfloat.ll
index 9d2ef51b0a8fbe3..f2d3c4fb34199e9 100644
--- a/llvm/test/CodeGen/X86/bfloat.ll
+++ b/llvm/test/CodeGen/X86/bfloat.ll
@@ -1007,11 +1007,11 @@ define <32 x bfloat> @pr63017_2() nounwind {
; SSE2-NEXT: movzwl (%rax), %eax
; SSE2-NEXT: shll $16, %eax
; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: jmp .LBB12_3
; SSE2-NEXT: .LBB12_1:
-; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm1 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; SSE2-NEXT: .LBB12_3: # %else
diff --git a/llvm/test/CodeGen/X86/buildvec-insertvec.ll b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
index 3fdfde8576f7777..ae70b6a5a466566 100644
--- a/llvm/test/CodeGen/X86/buildvec-insertvec.ll
+++ b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
@@ -50,7 +50,7 @@ define <4 x float> @test_negative_zero_1(<4 x float> %A) {
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm1 = [-0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/cmov-fp.ll b/llvm/test/CodeGen/X86/cmov-fp.ll
index 749b96e25b4ca56..26e720ffcebccd0 100644
--- a/llvm/test/CodeGen/X86/cmov-fp.ll
+++ b/llvm/test/CodeGen/X86/cmov-fp.ll
@@ -19,7 +19,7 @@ define double @test1(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp .LBB0_3
; SSE-NEXT: .LBB0_1:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [9.9E+1,0.0E+0]
; SSE-NEXT: .LBB0_3:
; SSE-NEXT: movsd %xmm0, (%esp)
; SSE-NEXT: fldl (%esp)
@@ -82,7 +82,7 @@ define double @test2(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp .LBB1_3
; SSE-NEXT: .LBB1_1:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [9.9E+1,0.0E+0]
; SSE-NEXT: .LBB1_3:
; SSE-NEXT: movsd %xmm0, (%esp)
; SSE-NEXT: fldl (%esp)
@@ -145,7 +145,7 @@ define double @test3(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp .LBB2_3
; SSE-NEXT: .LBB2_1:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [9.9E+1,0.0E+0]
; SSE-NEXT: .LBB2_3:
; SSE-NEXT: movsd %xmm0, (%esp)
; SSE-NEXT: fldl (%esp)
@@ -208,7 +208,7 @@ define double @test4(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp .LBB3_3
; SSE-NEXT: .LBB3_1:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [9.9E+1,0.0E+0]
; SSE-NEXT: .LBB3_3:
; SSE-NEXT: movsd %xmm0, (%esp)
; SSE-NEXT: fldl (%esp)
@@ -271,7 +271,7 @@ define double @test5(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp .LBB4_3
; SSE-NEXT: .LBB4_1:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [9.9E+1,0.0E+0]
; SSE-NEXT: .LBB4_3:
; SSE-NEXT: movsd %xmm0, (%esp)
; SSE-NEXT: fldl (%esp)
@@ -338,7 +338,7 @@ define double @test6(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp .LBB5_3
; SSE-NEXT: .LBB5_1:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [9.9E+1,0.0E+0]
; SSE-NEXT: .LBB5_3:
; SSE-NEXT: movsd %xmm0, (%esp)
; SSE-NEXT: fldl (%esp)
@@ -405,7 +405,7 @@ define double @test7(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp .LBB6_3
; SSE-NEXT: .LBB6_1:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [9.9E+1,0.0E+0]
; SSE-NEXT: .LBB6_3:
; SSE-NEXT: movsd %xmm0, (%esp)
; SSE-NEXT: fldl (%esp)
@@ -472,7 +472,7 @@ define double @test8(i32 %a, i32 %b, double %x) nounwind {
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp .LBB7_3
; SSE-NEXT: .LBB7_1:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [9.9E+1,0.0E+0]
; SSE-NEXT: .LBB7_3:
; SSE-NEXT: movsd %xmm0, (%esp)
; SSE-NEXT: fldl (%esp)
@@ -536,7 +536,7 @@ define float @test9(i32 %a, i32 %b, float %x) nounwind {
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: jmp .LBB8_3
; SSE-NEXT: .LBB8_1:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: .LBB8_3:
; SSE-NEXT: movss %xmm0, (%esp)
; SSE-NEXT: flds (%esp)
@@ -553,7 +553,7 @@ define float @test9(i32 %a, i32 %b, float %x) nounwind {
; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; NOSSE2-NEXT: jmp .LBB8_3
; NOSSE2-NEXT: .LBB8_1:
-; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; NOSSE2-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; NOSSE2-NEXT: .LBB8_3:
; NOSSE2-NEXT: movss %xmm0, (%esp)
; NOSSE2-NEXT: flds (%esp)
@@ -601,7 +601,7 @@ define float @test10(i32 %a, i32 %b, float %x) nounwind {
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: jmp .LBB9_3
; SSE-NEXT: .LBB9_1:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: .LBB9_3:
; SSE-NEXT: movss %xmm0, (%esp)
; SSE-NEXT: flds (%esp)
@@ -618,7 +618,7 @@ define float @test10(i32 %a, i32 %b, float %x) nounwind {
; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; NOSSE2-NEXT: jmp .LBB9_3
; NOSSE2-NEXT: .LBB9_1:
-; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; NOSSE2-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; NOSSE2-NEXT: .LBB9_3:
; NOSSE2-NEXT: movss %xmm0, (%esp)
; NOSSE2-NEXT: flds (%esp)
@@ -666,7 +666,7 @@ define float @test11(i32 %a, i32 %b, float %x) nounwind {
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: jmp .LBB10_3
; SSE-NEXT: .LBB10_1:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: .LBB10_3:
; SSE-NEXT: movss %xmm0, (%esp)
; SSE-NEXT: flds (%esp)
@@ -683,7 +683,7 @@ define float @test11(i32 %a, i32 %b, float %x) nounwind {
; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; NOSSE2-NEXT: jmp .LBB10_3
; NOSSE2-NEXT: .LBB10_1:
-; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; NOSSE2-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; NOSSE2-NEXT: .LBB10_3:
; NOSSE2-NEXT: movss %xmm0, (%esp)
; NOSSE2-NEXT: flds (%esp)
@@ -731,7 +731,7 @@ define float @test12(i32 %a, i32 %b, float %x) nounwind {
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: jmp .LBB11_3
; SSE-NEXT: .LBB11_1:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: .LBB11_3:
; SSE-NEXT: movss %xmm0, (%esp)
; SSE-NEXT: flds (%esp)
@@ -748,7 +748,7 @@ define float @test12(i32 %a, i32 %b, float %x) nounwind {
; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; NOSSE2-NEXT: jmp .LBB11_3
; NOSSE2-NEXT: .LBB11_1:
-; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; NOSSE2-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; NOSSE2-NEXT: .LBB11_3:
; NOSSE2-NEXT: movss %xmm0, (%esp)
; NOSSE2-NEXT: flds (%esp)
@@ -796,7 +796,7 @@ define float @test13(i32 %a, i32 %b, float %x) nounwind {
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: jmp .LBB12_3
; SSE-NEXT: .LBB12_1:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: .LBB12_3:
; SSE-NEXT: movss %xmm0, (%esp)
; SSE-NEXT: flds (%esp)
@@ -813,7 +813,7 @@ define float @test13(i32 %a, i32 %b, float %x) nounwind {
; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; NOSSE2-NEXT: jmp .LBB12_3
; NOSSE2-NEXT: .LBB12_1:
-; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; NOSSE2-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; NOSSE2-NEXT: .LBB12_3:
; NOSSE2-NEXT: movss %xmm0, (%esp)
; NOSSE2-NEXT: flds (%esp)
@@ -863,7 +863,7 @@ define float @test14(i32 %a, i32 %b, float %x) nounwind {
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: jmp .LBB13_3
; SSE-NEXT: .LBB13_1:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: .LBB13_3:
; SSE-NEXT: movss %xmm0, (%esp)
; SSE-NEXT: flds (%esp)
@@ -880,7 +880,7 @@ define float @test14(i32 %a, i32 %b, float %x) nounwind {
; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; NOSSE2-NEXT: jmp .LBB13_3
; NOSSE2-NEXT: .LBB13_1:
-; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; NOSSE2-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; NOSSE2-NEXT: .LBB13_3:
; NOSSE2-NEXT: movss %xmm0, (%esp)
; NOSSE2-NEXT: flds (%esp)
@@ -930,7 +930,7 @@ define float @test15(i32 %a, i32 %b, float %x) nounwind {
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: jmp .LBB14_3
; SSE-NEXT: .LBB14_1:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: .LBB14_3:
; SSE-NEXT: movss %xmm0, (%esp)
; SSE-NEXT: flds (%esp)
@@ -947,7 +947,7 @@ define float @test15(i32 %a, i32 %b, float %x) nounwind {
; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; NOSSE2-NEXT: jmp .LBB14_3
; NOSSE2-NEXT: .LBB14_1:
-; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; NOSSE2-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; NOSSE2-NEXT: .LBB14_3:
; NOSSE2-NEXT: movss %xmm0, (%esp)
; NOSSE2-NEXT: flds (%esp)
@@ -997,7 +997,7 @@ define float @test16(i32 %a, i32 %b, float %x) nounwind {
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: jmp .LBB15_3
; SSE-NEXT: .LBB15_1:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: .LBB15_3:
; SSE-NEXT: movss %xmm0, (%esp)
; SSE-NEXT: flds (%esp)
@@ -1014,7 +1014,7 @@ define float @test16(i32 %a, i32 %b, float %x) nounwind {
; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; NOSSE2-NEXT: jmp .LBB15_3
; NOSSE2-NEXT: .LBB15_1:
-; NOSSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; NOSSE2-NEXT: movss {{.*#+}} xmm0 = [9.9E+1,0.0E+0,0.0E+0,0.0E+0]
; NOSSE2-NEXT: .LBB15_3:
; NOSSE2-NEXT: movss %xmm0, (%esp)
; NOSSE2-NEXT: flds (%esp)
diff --git a/llvm/test/CodeGen/X86/cmovcmov.ll b/llvm/test/CodeGen/X86/cmovcmov.ll
index ab863dee69010e5..d2d1c4db4608d91 100644
--- a/llvm/test/CodeGen/X86/cmovcmov.ll
+++ b/llvm/test/CodeGen/X86/cmovcmov.ll
@@ -217,7 +217,7 @@ define dso_local float @test_zext_fcmp_une(float %a, float %b) nounwind {
; CMOV-LABEL: test_zext_fcmp_une:
; CMOV: # %bb.0: # %entry
; CMOV-NEXT: cmpneqss %xmm1, %xmm0
-; CMOV-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CMOV-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CMOV-NEXT: andps %xmm1, %xmm0
; CMOV-NEXT: retq
;
@@ -255,7 +255,7 @@ define dso_local float @test_zext_fcmp_oeq(float %a, float %b) nounwind {
; CMOV-LABEL: test_zext_fcmp_oeq:
; CMOV: # %bb.0: # %entry
; CMOV-NEXT: cmpeqss %xmm1, %xmm0
-; CMOV-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CMOV-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CMOV-NEXT: andps %xmm1, %xmm0
; CMOV-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/combine-fabs.ll b/llvm/test/CodeGen/X86/combine-fabs.ll
index e668c87003202e6..a862ea16a748f6c 100644
--- a/llvm/test/CodeGen/X86/combine-fabs.ll
+++ b/llvm/test/CodeGen/X86/combine-fabs.ll
@@ -11,12 +11,12 @@
define float @combine_fabs_constant() {
; SSE-LABEL: combine_fabs_constant:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_fabs_constant:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: retq
%1 = call float @llvm.fabs.f32(float -2.0)
ret float %1
diff --git a/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll b/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
index f98e38d92099a67..2d6ce2017dc0dc8 100644
--- a/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
+++ b/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
@@ -18,7 +18,7 @@ define void @TestvMeth(i32 %0, i64 %1) gc "statepoint-example" !prof !1 {
; CHECK-NEXT: movl $400, %ecx # imm = 0x190
; CHECK-NEXT: callq newarray at PLT
; CHECK-NEXT: .Ltmp0:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.5E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: addss (%rax), %xmm0
; CHECK-NEXT: movdqu (%rax), %xmm1
; CHECK-NEXT: pcmpeqd %xmm2, %xmm2
diff --git a/llvm/test/CodeGen/X86/cvtv2f32.ll b/llvm/test/CodeGen/X86/cvtv2f32.ll
index b4b63caf4e91c34..3875b72d5d68ae8 100644
--- a/llvm/test/CodeGen/X86/cvtv2f32.ll
+++ b/llvm/test/CodeGen/X86/cvtv2f32.ll
@@ -9,7 +9,7 @@ define <2 x float> @uitofp_2i32_cvt_buildvector(i32 %x, i32 %y, <2 x float> %v)
; X86-LABEL: uitofp_2i32_cvt_buildvector:
; X86: # %bb.0:
; X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; X86-NEXT: movsd {{.*#+}} xmm2 = [4.503599627370496E+15,0.0E+0]
; X86-NEXT: orpd %xmm2, %xmm1
; X86-NEXT: subsd %xmm2, %xmm1
; X86-NEXT: cvtsd2ss %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/dagcombine-select.ll b/llvm/test/CodeGen/X86/dagcombine-select.ll
index d29f161c76fb831..1380c02663ee0e1 100644
--- a/llvm/test/CodeGen/X86/dagcombine-select.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-select.ll
@@ -279,10 +279,10 @@ define double @fsub_constant_sel_constants(i1 %cond) {
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne .LBB20_1
; CHECK-NEXT: # %bb.2:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-1.8200000000000003E+1,0.0E+0]
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB20_1:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [9.0999999999999996E+0,0.0E+0]
; CHECK-NEXT: retq
%sel = select i1 %cond, double -4.0, double 23.3
%bo = fsub double 5.1, %sel
@@ -295,10 +295,10 @@ define double @fdiv_constant_sel_constants(i1 %cond) {
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne .LBB21_1
; CHECK-NEXT: # %bb.2:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [2.188841201716738E-1,0.0E+0]
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB21_1:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [-1.2749999999999999E+0,0.0E+0]
; CHECK-NEXT: retq
%sel = select i1 %cond, double -4.0, double 23.3
%bo = fdiv double 5.1, %sel
@@ -311,10 +311,10 @@ define double @frem_constant_sel_constants(i1 %cond) {
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne .LBB22_1
; CHECK-NEXT: # %bb.2:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [5.0999999999999996E+0,0.0E+0]
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB22_1:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0999999999999996E+0,0.0E+0]
; CHECK-NEXT: retq
%sel = select i1 %cond, double -4.0, double 23.3
%bo = frem double 5.1, %sel
diff --git a/llvm/test/CodeGen/X86/deopt-intrinsic.ll b/llvm/test/CodeGen/X86/deopt-intrinsic.ll
index b99482f0fb03869..d610cc859f1ce89 100644
--- a/llvm/test/CodeGen/X86/deopt-intrinsic.ll
+++ b/llvm/test/CodeGen/X86/deopt-intrinsic.ll
@@ -27,7 +27,7 @@ define i8 @caller_1() {
; CHECK-NEXT: ##{{.+}}
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: {{.+cfi.+}}
-; CHECK-NEXT: movss {{[a-zA-Z0-9_]+}}(%rip), %xmm0 ## xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{[a-zA-Z0-9_]+}}(%rip), %xmm0 ## xmm0 = [5.0E+2,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movl $42, %edi
; CHECK-NEXT: callq ___llvm_deoptimize
; CHECK-NEXT: {{Ltmp[0-9]+}}:
diff --git a/llvm/test/CodeGen/X86/extract-fp.ll b/llvm/test/CodeGen/X86/extract-fp.ll
index 58aaab9c83f95ae..fd4f2171807bf21 100644
--- a/llvm/test/CodeGen/X86/extract-fp.ll
+++ b/llvm/test/CodeGen/X86/extract-fp.ll
@@ -16,7 +16,7 @@ define float @ext_fsub_v4f32(<4 x float> %x) {
; CHECK-LABEL: ext_fsub_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: subss %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -51,7 +51,7 @@ define float @ext_fdiv_v4f32_constant_op0(<4 x float> %x) {
; CHECK-LABEL: ext_fdiv_v4f32_constant_op0:
; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -64,7 +64,7 @@ define float @ext_frem_v4f32(<4 x float> %x) {
; CHECK-LABEL: ext_frem_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: jmp fmodf at PLT # TAILCALL
%bo = frem <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 42.0>
%ext = extractelement <4 x float> %bo, i32 2
@@ -76,7 +76,7 @@ define float @ext_frem_v4f32_constant_op0(<4 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: jmp fmodf@PLT # TAILCALL
%bo = frem <4 x float> <float 1.0, float 2.0, float 3.0, float 42.0>, %x
%ext = extractelement <4 x float> %bo, i32 1
@@ -109,7 +109,7 @@ define double @ext_maximum_v4f64(<2 x double> %x) nounwind {
; CHECK-LABEL: ext_maximum_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.3E+1,0.0E+0]
; CHECK-NEXT: maxsd %xmm0, %xmm1
; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -122,7 +122,7 @@ define float @ext_minimum_v4f32(<4 x float> %x) nounwind {
; CHECK-LABEL: ext_minimum_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: minss %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
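
Each of these comments also spells out the lanes zeroed by the instruction itself: [1.0E+0,0.0E+0,0.0E+0,0.0E+0] reflects that movss writes the scalar to lane 0 and zeroes lanes 1-3 (movsd does the same for lane 1). A small intrinsics sketch of that behaviour (illustration only, assuming an SSE target):

  #include <immintrin.h>
  #include <cstdio>

  int main() {
    float Mem = 2.0f;
    __m128 V = _mm_load_ss(&Mem);   // movss: xmm = mem[0],zero,zero,zero
    float Out[4];
    _mm_storeu_ps(Out, V);
    std::printf("[%.1E,%.1E,%.1E,%.1E]\n", Out[0], Out[1], Out[2], Out[3]);
    // prints [2.0E+00,0.0E+00,0.0E+00,0.0E+00]
  }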
diff --git a/llvm/test/CodeGen/X86/extractelement-fp.ll b/llvm/test/CodeGen/X86/extractelement-fp.ll
index 5bc6d00022e2353..38162f676e7ee38 100644
--- a/llvm/test/CodeGen/X86/extractelement-fp.ll
+++ b/llvm/test/CodeGen/X86/extractelement-fp.ll
@@ -320,7 +320,7 @@ define <3 x double> @extvselectsetcc_crash(<2 x double> %x) {
; X64-LABEL: extvselectsetcc_crash:
; X64: # %bb.0:
; X64-NEXT: vcmpeqpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; X64-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; X64-NEXT: vmovsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
; X64-NEXT: vandpd %xmm2, %xmm1, %xmm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,3,3]
@@ -329,7 +329,7 @@ define <3 x double> @extvselectsetcc_crash(<2 x double> %x) {
; X86-LABEL: extvselectsetcc_crash:
; X86: # %bb.0:
; X86-NEXT: vcmpeqpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
-; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; X86-NEXT: vmovsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
; X86-NEXT: vandpd %xmm2, %xmm1, %xmm1
; X86-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,3,3]
diff --git a/llvm/test/CodeGen/X86/extractelement-load.ll b/llvm/test/CodeGen/X86/extractelement-load.ll
index 538b8ed10f25b96..9d573ef2a8fad98 100644
--- a/llvm/test/CodeGen/X86/extractelement-load.ll
+++ b/llvm/test/CodeGen/X86/extractelement-load.ll
@@ -139,7 +139,7 @@ define float @t6(ptr%a0) {
; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X32-SSE2-NEXT: xorps %xmm1, %xmm1
; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X32-SSE2-NEXT: andps %xmm1, %xmm2
; X32-SSE2-NEXT: andnps %xmm0, %xmm1
; X32-SSE2-NEXT: orps %xmm2, %xmm1
@@ -154,7 +154,7 @@ define float @t6(ptr%a0) {
; X64-SSSE3-NEXT: movshdup {{.*#+}} xmm1 = mem[1,1,3,3]
; X64-SSSE3-NEXT: xorps %xmm0, %xmm0
; X64-SSSE3-NEXT: cmpeqss %xmm1, %xmm0
-; X64-SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-SSSE3-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-SSSE3-NEXT: andps %xmm0, %xmm2
; X64-SSSE3-NEXT: andnps %xmm1, %xmm0
; X64-SSSE3-NEXT: orps %xmm2, %xmm0
@@ -239,7 +239,7 @@ define float @PR43971_1(ptr%a0) nounwind {
; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X32-SSE2-NEXT: xorps %xmm1, %xmm1
; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X32-SSE2-NEXT: andps %xmm1, %xmm2
; X32-SSE2-NEXT: andnps %xmm0, %xmm1
; X32-SSE2-NEXT: orps %xmm2, %xmm1
@@ -253,7 +253,7 @@ define float @PR43971_1(ptr%a0) nounwind {
; X64-SSSE3-NEXT: movshdup {{.*#+}} xmm1 = mem[1,1,3,3]
; X64-SSSE3-NEXT: xorps %xmm0, %xmm0
; X64-SSSE3-NEXT: cmpeqss %xmm1, %xmm0
-; X64-SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-SSSE3-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-SSSE3-NEXT: andps %xmm0, %xmm2
; X64-SSSE3-NEXT: andnps %xmm1, %xmm0
; X64-SSSE3-NEXT: orps %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/fadd-combines.ll b/llvm/test/CodeGen/X86/fadd-combines.ll
index 15512e997d14d89..1082177e3da190c 100644
--- a/llvm/test/CodeGen/X86/fadd-combines.ll
+++ b/llvm/test/CodeGen/X86/fadd-combines.ll
@@ -236,7 +236,7 @@ define <4 x float> @fadd_fadd_x_x_fadd_x_x_4f32(<4 x float> %x) #0 {
define float @fadd_const_multiuse_attr(float %x) #0 {
; CHECK-LABEL: fadd_const_multiuse_attr:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: addss %xmm0, %xmm1
; CHECK-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: addss %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/fast-isel-constpool.ll b/llvm/test/CodeGen/X86/fast-isel-constpool.ll
index 9e4cbb61308eaa2..dc746e1daac5080 100644
--- a/llvm/test/CodeGen/X86/fast-isel-constpool.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-constpool.ll
@@ -17,7 +17,7 @@
define float @constpool_float(float %x) {
; CHECK-LABEL: constpool_float:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.65E+2,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: addss %xmm1, %xmm0
; CHECK-NEXT: retq
;
@@ -39,7 +39,7 @@ define float @constpool_float(float %x) {
;
; AVX-LABEL: constpool_float:
; AVX: ## %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.65E+2,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
@@ -53,8 +53,8 @@ define float @constpool_float(float %x) {
; X86-LARGE: ## %bb.0:
; X86-LARGE-NEXT: pushl %eax ## encoding: [0x50]
; X86-LARGE-NEXT: .cfi_def_cfa_offset 8
-; X86-LARGE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xf3,0x0f,0x10,0x44,0x24,0x08]
-; X86-LARGE-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; X86-LARGE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-LARGE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x44,0x24,0x08]
; X86-LARGE-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ## encoding: [0xf3,0x0f,0x58,0x05,A,A,A,A]
; X86-LARGE-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-LARGE-NEXT: movss %xmm0, (%esp) ## encoding: [0xf3,0x0f,0x11,0x04,0x24]
@@ -69,7 +69,7 @@ define float @constpool_float(float %x) {
define double @constpool_double(double %x) nounwind {
; CHECK-LABEL: constpool_double:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [8.4999999999999998E-1,0.0E+0]
; CHECK-NEXT: addsd %xmm1, %xmm0
; CHECK-NEXT: retq
;
@@ -91,7 +91,7 @@ define double @constpool_double(double %x) nounwind {
;
; AVX-LABEL: constpool_double:
; AVX: ## %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [8.4999999999999998E-1,0.0E+0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
@@ -104,8 +104,8 @@ define double @constpool_double(double %x) nounwind {
; X86-LARGE-LABEL: constpool_double:
; X86-LARGE: ## %bb.0:
; X86-LARGE-NEXT: subl $12, %esp ## encoding: [0x83,0xec,0x0c]
-; X86-LARGE-NEXT: movsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xf2,0x0f,0x10,0x44,0x24,0x10]
-; X86-LARGE-NEXT: ## xmm0 = mem[0],zero
+; X86-LARGE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-LARGE-NEXT: ## encoding: [0xf2,0x0f,0x10,0x44,0x24,0x10]
; X86-LARGE-NEXT: addsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ## encoding: [0xf2,0x0f,0x58,0x05,A,A,A,A]
; X86-LARGE-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-LARGE-NEXT: movsd %xmm0, (%esp) ## encoding: [0xf2,0x0f,0x11,0x04,0x24]
@@ -120,7 +120,7 @@ define double @constpool_double(double %x) nounwind {
define void @constpool_float_no_fp_args(ptr %x) nounwind {
; CHECK-LABEL: constpool_float_no_fp_args:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.65E+2,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: addss (%rdi), %xmm0
; CHECK-NEXT: movss %xmm0, (%rdi)
; CHECK-NEXT: retq
@@ -147,7 +147,7 @@ define void @constpool_float_no_fp_args(ptr %x) nounwind {
;
; AVX-LABEL: constpool_float_no_fp_args:
; AVX: ## %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.65E+2,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vaddss (%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovss %xmm0, (%rdi)
; AVX-NEXT: retq
@@ -163,9 +163,9 @@ define void @constpool_float_no_fp_args(ptr %x) nounwind {
; X86-LARGE-LABEL: constpool_float_no_fp_args:
; X86-LARGE: ## %bb.0:
; X86-LARGE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-LARGE-NEXT: movss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ## encoding: [0xf3,0x0f,0x10,0x05,A,A,A,A]
+; X86-LARGE-NEXT: movss {{.*#+}} xmm0 = [1.65E+2,0.0E+0,0.0E+0,0.0E+0]
+; X86-LARGE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x05,A,A,A,A]
; X86-LARGE-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
-; X86-LARGE-NEXT: ## xmm0 = mem[0],zero,zero,zero
; X86-LARGE-NEXT: addss (%eax), %xmm0 ## encoding: [0xf3,0x0f,0x58,0x00]
; X86-LARGE-NEXT: movss %xmm0, (%eax) ## encoding: [0xf3,0x0f,0x11,0x00]
; X86-LARGE-NEXT: retl ## encoding: [0xc3]
@@ -178,7 +178,7 @@ define void @constpool_float_no_fp_args(ptr %x) nounwind {
define void @constpool_double_no_fp_args(ptr %x) nounwind {
; CHECK-LABEL: constpool_double_no_fp_args:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [8.4999999999999998E-1,0.0E+0]
; CHECK-NEXT: addsd (%rdi), %xmm0
; CHECK-NEXT: movsd %xmm0, (%rdi)
; CHECK-NEXT: retq
@@ -205,7 +205,7 @@ define void @constpool_double_no_fp_args(ptr %x) nounwind {
;
; AVX-LABEL: constpool_double_no_fp_args:
; AVX: ## %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [8.4999999999999998E-1,0.0E+0]
; AVX-NEXT: vaddsd (%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovsd %xmm0, (%rdi)
; AVX-NEXT: retq
@@ -221,9 +221,9 @@ define void @constpool_double_no_fp_args(ptr %x) nounwind {
; X86-LARGE-LABEL: constpool_double_no_fp_args:
; X86-LARGE: ## %bb.0:
; X86-LARGE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-LARGE-NEXT: movsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ## encoding: [0xf2,0x0f,0x10,0x05,A,A,A,A]
+; X86-LARGE-NEXT: movsd {{.*#+}} xmm0 = [8.4999999999999998E-1,0.0E+0]
+; X86-LARGE-NEXT: ## encoding: [0xf2,0x0f,0x10,0x05,A,A,A,A]
; X86-LARGE-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
-; X86-LARGE-NEXT: ## xmm0 = mem[0],zero
; X86-LARGE-NEXT: addsd (%eax), %xmm0 ## encoding: [0xf2,0x0f,0x58,0x00]
; X86-LARGE-NEXT: movsd %xmm0, (%eax) ## encoding: [0xf2,0x0f,0x11,0x00]
; X86-LARGE-NEXT: retl ## encoding: [0xc3]
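
In this file the comment is only upgraded where the load source is a constant pool; the X86-LARGE stack loads keep the plain mem[0],... form because there is no constant payload to decode. Where the pool entry is known, the comment is simply the decoded bits; a sketch of that decoding (illustration only; the 0x3FEB333333333333 bit pattern for 0.85 is stated by hand, treat it as an assumption):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    uint64_t Bits = 0x3FEB333333333333ULL;  // assumed IEEE-754 bits of 0.85
    double D;
    std::memcpy(&D, &Bits, sizeof D);       // reinterpret the pool payload
    std::printf("[%.16E,0.0E+0]\n", D);
    // prints [8.4999999999999998E-01,0.0E+0]
  }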
diff --git a/llvm/test/CodeGen/X86/fdiv-combine-vec.ll b/llvm/test/CodeGen/X86/fdiv-combine-vec.ll
index 5e119207ccf1bc2..5c25d0214114fa7 100644
--- a/llvm/test/CodeGen/X86/fdiv-combine-vec.ll
+++ b/llvm/test/CodeGen/X86/fdiv-combine-vec.ll
@@ -5,7 +5,7 @@
define <2 x double> @splat_fdiv_v2f64(<2 x double> %x, double %y) {
; SSE-LABEL: splat_fdiv_v2f64:
; SSE: # %bb.0:
-; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
; SSE-NEXT: divsd %xmm1, %xmm2
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0,0]
; SSE-NEXT: mulpd %xmm2, %xmm0
@@ -13,7 +13,7 @@ define <2 x double> @splat_fdiv_v2f64(<2 x double> %x, double %y) {
;
; AVX-LABEL: splat_fdiv_v2f64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
; AVX-NEXT: vdivsd %xmm1, %xmm2, %xmm1
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm0
@@ -27,7 +27,7 @@ define <2 x double> @splat_fdiv_v2f64(<2 x double> %x, double %y) {
define <4 x double> @splat_fdiv_v4f64(<4 x double> %x, double %y) {
; SSE-LABEL: splat_fdiv_v4f64:
; SSE: # %bb.0:
-; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = [1.0E+0,0.0E+0]
; SSE-NEXT: divsd %xmm2, %xmm3
; SSE-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0,0]
; SSE-NEXT: mulpd %xmm3, %xmm0
@@ -36,7 +36,7 @@ define <4 x double> @splat_fdiv_v4f64(<4 x double> %x, double %y) {
;
; AVX-LABEL: splat_fdiv_v4f64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
; AVX-NEXT: vdivsd %xmm1, %xmm2, %xmm1
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
@@ -51,7 +51,7 @@ define <4 x double> @splat_fdiv_v4f64(<4 x double> %x, double %y) {
define <4 x float> @splat_fdiv_v4f32(<4 x float> %x, float %y) {
; SSE-LABEL: splat_fdiv_v4f32:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: divss %xmm1, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT: mulps %xmm2, %xmm0
@@ -59,7 +59,7 @@ define <4 x float> @splat_fdiv_v4f32(<4 x float> %x, float %y) {
;
; AVX-LABEL: splat_fdiv_v4f32:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
@@ -73,7 +73,7 @@ define <4 x float> @splat_fdiv_v4f32(<4 x float> %x, float %y) {
define <8 x float> @splat_fdiv_v8f32(<8 x float> %x, float %y) {
; SSE-LABEL: splat_fdiv_v8f32:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: divss %xmm2, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-NEXT: mulps %xmm3, %xmm0
@@ -82,7 +82,7 @@ define <8 x float> @splat_fdiv_v8f32(<8 x float> %x, float %y) {
;
; AVX-LABEL: splat_fdiv_v8f32:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
@@ -99,7 +99,7 @@ define <4 x float> @splat_fdiv_v4f32_estimate(<4 x float> %x, float %y) #0 {
; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm1
-; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: subss %xmm1, %xmm3
; SSE-NEXT: mulss %xmm2, %xmm3
; SSE-NEXT: addss %xmm2, %xmm3
@@ -111,7 +111,7 @@ define <4 x float> @splat_fdiv_v4f32_estimate(<4 x float> %x, float %y) #0 {
; AVX: # %bb.0:
; AVX-NEXT: vrcpss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsubss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm2, %xmm1
@@ -129,7 +129,7 @@ define <8 x float> @splat_fdiv_v8f32_estimate(<8 x float> %x, float %y) #0 {
; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm2, %xmm3
; SSE-NEXT: mulss %xmm3, %xmm2
-; SSE-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm4 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: subss %xmm2, %xmm4
; SSE-NEXT: mulss %xmm3, %xmm4
; SSE-NEXT: addss %xmm3, %xmm4
@@ -142,7 +142,7 @@ define <8 x float> @splat_fdiv_v8f32_estimate(<8 x float> %x, float %y) #0 {
; AVX: # %bb.0:
; AVX-NEXT: vrcpss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsubss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm2, %xmm1
diff --git a/llvm/test/CodeGen/X86/fdiv-combine.ll b/llvm/test/CodeGen/X86/fdiv-combine.ll
index a97f0891e002b66..322ef6b393922da 100644
--- a/llvm/test/CodeGen/X86/fdiv-combine.ll
+++ b/llvm/test/CodeGen/X86/fdiv-combine.ll
@@ -20,7 +20,7 @@ define float @div1_arcp(float %x, float %y, float %z) {
define float @div2_arcp_all(float %x, float %y, float %z) {
; CHECK-LABEL: div2_arcp_all:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss %xmm2, %xmm3
; CHECK-NEXT: mulss %xmm3, %xmm0
; CHECK-NEXT: mulss %xmm1, %xmm0
@@ -67,7 +67,7 @@ define float @div2_arcp_partial2(float %x, float %y, float %z) {
define float @div2_arcp_partial3(float %x, float %y, float %z) {
; CHECK-LABEL: div2_arcp_partial3:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss %xmm2, %xmm3
; CHECK-NEXT: mulss %xmm3, %xmm0
; CHECK-NEXT: mulss %xmm1, %xmm0
@@ -85,7 +85,7 @@ define float @div2_arcp_partial3(float %x, float %y, float %z) {
define double @div3_arcp(double %x, double %y, double %z) {
; CHECK-LABEL: div3_arcp:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
; CHECK-NEXT: divsd %xmm1, %xmm2
; CHECK-NEXT: mulsd %xmm2, %xmm0
; CHECK-NEXT: addsd %xmm2, %xmm0
@@ -102,10 +102,10 @@ define float @div_select_constant_fold(i1 zeroext %arg) {
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jne .LBB6_1
; CHECK-NEXT: # %bb.2:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB6_1:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: retq
%tmp = select i1 %arg, float 5.000000e+00, float 6.000000e+00
%B2 = fdiv nnan float %tmp, 2.000000e+00
@@ -115,7 +115,7 @@ define float @div_select_constant_fold(i1 zeroext %arg) {
define float @div_select_constant_fold_zero(i1 zeroext %arg) {
; CHECK-LABEL: div_select_constant_fold_zero:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [+Inf,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: retq
%tmp = select i1 %arg, float 5.000000e+00, float 6.000000e+00
%B2 = fdiv float %tmp, 0.000000e+00
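
The +Inf entry above is genuine constant output rather than a placeholder: both select arms are positive, so dividing either by +0.0 folds to the same +Inf, the select disappears, and a single movss loads the infinity. A quick runtime check of that folding (illustration only):

  #include <cstdio>

  int main() {
    float A = 5.0f, B = 6.0f, Z = 0.0f;
    std::printf("%f %f\n", A / Z, B / Z);  // inf inf on IEEE-754 hardware
  }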
diff --git a/llvm/test/CodeGen/X86/fma-intrinsics-canonical.ll b/llvm/test/CodeGen/X86/fma-intrinsics-canonical.ll
index 1de8fb15c740313..e420215121728b2 100644
--- a/llvm/test/CodeGen/X86/fma-intrinsics-canonical.ll
+++ b/llvm/test/CodeGen/X86/fma-intrinsics-canonical.ll
@@ -71,8 +71,8 @@ define <4 x float> @test_mm_fmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-FMA-WIN-LABEL: test_mm_fmadd_ss:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -101,8 +101,8 @@ define <2 x double> @test_mm_fmadd_sd(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-FMA-WIN-LABEL: test_mm_fmadd_sd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -183,8 +183,8 @@ define <4 x float> @test_mm_fmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-FMA-WIN-LABEL: test_mm_fmsub_ss:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -214,8 +214,8 @@ define <2 x double> @test_mm_fmsub_sd(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-FMA-WIN-LABEL: test_mm_fmsub_sd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -297,8 +297,8 @@ define <4 x float> @test_mm_fnmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float
; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_ss:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -328,8 +328,8 @@ define <2 x double> @test_mm_fnmadd_sd(<2 x double> %a, <2 x double> %b, <2 x do
; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_sd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -413,8 +413,8 @@ define <4 x float> @test_mm_fnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float
; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_ss:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -445,8 +445,8 @@ define <2 x double> @test_mm_fnmsub_sd(<2 x double> %a, <2 x double> %b, <2 x do
; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_sd:
; CHECK-FMA-WIN: # %bb.0: # %entry
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
diff --git a/llvm/test/CodeGen/X86/fma-intrinsics-x86-upgrade.ll b/llvm/test/CodeGen/X86/fma-intrinsics-x86-upgrade.ll
index 726de36023ce607..7d99b6a610f670f 100644
--- a/llvm/test/CodeGen/X86/fma-intrinsics-x86-upgrade.ll
+++ b/llvm/test/CodeGen/X86/fma-intrinsics-x86-upgrade.ll
@@ -20,8 +20,8 @@ define <4 x float> @test_x86_fma_vfmadd_ss(<4 x float> %a0, <4 x float> %a1, <4
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -47,8 +47,8 @@ define <4 x float> @test_x86_fma_vfmadd_bac_ss(<4 x float> %a0, <4 x float> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x99,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -73,8 +73,8 @@ define <2 x double> @test_x86_fma_vfmadd_sd(<2 x double> %a0, <2 x double> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -100,8 +100,8 @@ define <2 x double> @test_x86_fma_vfmadd_bac_sd(<2 x double> %a0, <2 x double> %
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x99,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -227,8 +227,8 @@ define <4 x float> @test_x86_fma_vfmsub_ss(<4 x float> %a0, <4 x float> %a1, <4
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -254,8 +254,8 @@ define <4 x float> @test_x86_fma_vfmsub_bac_ss(<4 x float> %a0, <4 x float> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9b,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -280,8 +280,8 @@ define <2 x double> @test_x86_fma_vfmsub_sd(<2 x double> %a0, <2 x double> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -307,8 +307,8 @@ define <2 x double> @test_x86_fma_vfmsub_bac_sd(<2 x double> %a0, <2 x double> %
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9b,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -434,8 +434,8 @@ define <4 x float> @test_x86_fma_vfnmadd_ss(<4 x float> %a0, <4 x float> %a1, <4
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -461,8 +461,8 @@ define <4 x float> @test_x86_fma_vfnmadd_bac_ss(<4 x float> %a0, <4 x float> %a1
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9d,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -487,8 +487,8 @@ define <2 x double> @test_x86_fma_vfnmadd_sd(<2 x double> %a0, <2 x double> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -514,8 +514,8 @@ define <2 x double> @test_x86_fma_vfnmadd_bac_sd(<2 x double> %a0, <2 x double>
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9d,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -641,8 +641,8 @@ define <4 x float> @test_x86_fma_vfnmsub_ss(<4 x float> %a0, <4 x float> %a1, <4
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -668,8 +668,8 @@ define <4 x float> @test_x86_fma_vfnmsub_bac_ss(<4 x float> %a0, <4 x float> %a1
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9f,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -694,8 +694,8 @@ define <2 x double> @test_x86_fma_vfnmsub_sd(<2 x double> %a0, <2 x double> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -721,8 +721,8 @@ define <2 x double> @test_x86_fma_vfnmsub_bac_sd(<2 x double> %a0, <2 x double>
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9f,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
diff --git a/llvm/test/CodeGen/X86/fma-intrinsics-x86.ll b/llvm/test/CodeGen/X86/fma-intrinsics-x86.ll
index eb351ec81d383b3..94229c9370e2a40 100644
--- a/llvm/test/CodeGen/X86/fma-intrinsics-x86.ll
+++ b/llvm/test/CodeGen/X86/fma-intrinsics-x86.ll
@@ -20,8 +20,8 @@ define <4 x float> @test_x86_fma_vfmadd_ss(<4 x float> %a0, <4 x float> %a1, <4
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -51,8 +51,8 @@ define <4 x float> @test_x86_fma_vfmadd_bac_ss(<4 x float> %a0, <4 x float> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x99,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -82,8 +82,8 @@ define <4 x float> @test_x86_fma_vfmadd_ss_231(<4 x float> %a0, <4 x float> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ss_231:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%r8), %xmm0 # encoding: [0xc4,0xc1,0x78,0x28,0x00]
-; CHECK-FMA-WIN-NEXT: vmovss (%rcx), %xmm1 # encoding: [0xc5,0xfa,0x10,0x09]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc5,0xfa,0x10,0x09]
; CHECK-FMA-WIN-NEXT: vfmadd231ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xb9,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * mem) + xmm0
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -111,8 +111,8 @@ define <2 x double> @test_x86_fma_vfmadd_sd(<2 x double> %a0, <2 x double> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x99,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -142,8 +142,8 @@ define <2 x double> @test_x86_fma_vfmadd_bac_sd(<2 x double> %a0, <2 x double> %
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmadd132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x99,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -268,8 +268,8 @@ define <4 x float> @test_x86_fma_vfmsub_ss(<4 x float> %a0, <4 x float> %a1, <4
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -300,8 +300,8 @@ define <4 x float> @test_x86_fma_vfmsub_bac_ss(<4 x float> %a0, <4 x float> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9b,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -330,8 +330,8 @@ define <2 x double> @test_x86_fma_vfmsub_sd(<2 x double> %a0, <2 x double> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9b,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -362,8 +362,8 @@ define <2 x double> @test_x86_fma_vfmsub_bac_sd(<2 x double> %a0, <2 x double> %
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfmsub132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9b,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -493,8 +493,8 @@ define <4 x float> @test_x86_fma_vfnmadd_ss(<4 x float> %a0, <4 x float> %a1, <4
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -525,8 +525,8 @@ define <4 x float> @test_x86_fma_vfnmadd_bac_ss(<4 x float> %a0, <4 x float> %a1
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9d,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -555,8 +555,8 @@ define <2 x double> @test_x86_fma_vfnmadd_sd(<2 x double> %a0, <2 x double> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9d,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -587,8 +587,8 @@ define <2 x double> @test_x86_fma_vfnmadd_bac_sd(<2 x double> %a0, <2 x double>
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmadd132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9d,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) + xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -718,8 +718,8 @@ define <4 x float> @test_x86_fma_vfnmsub_ss(<4 x float> %a0, <4 x float> %a1, <4
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132ss (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -751,8 +751,8 @@ define <4 x float> @test_x86_fma_vfnmsub_bac_ss(<4 x float> %a0, <4 x float> %a1
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_ss:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovss (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7a,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132ss (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x9f,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -782,8 +782,8 @@ define <2 x double> @test_x86_fma_vfnmsub_sd(<2 x double> %a0, <2 x double> %a1,
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132sd (%rdx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9f,0x02]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
@@ -815,8 +815,8 @@ define <2 x double> @test_x86_fma_vfnmsub_bac_sd(<2 x double> %a0, <2 x double>
; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_bac_sd:
; CHECK-FMA-WIN: # %bb.0:
; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
-; CHECK-FMA-WIN-NEXT: vmovsd (%r8), %xmm1 # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
-; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: # encoding: [0xc4,0xc1,0x7b,0x10,0x08]
; CHECK-FMA-WIN-NEXT: vfnmsub132sd (%rcx), %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0x9f,0x01]
; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm0 * mem) - xmm1
; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
diff --git a/llvm/test/CodeGen/X86/fma-scalar-memfold.ll b/llvm/test/CodeGen/X86/fma-scalar-memfold.ll
index 508bb4b299b7e71..e81d80a457928e8 100644
--- a/llvm/test/CodeGen/X86/fma-scalar-memfold.ll
+++ b/llvm/test/CodeGen/X86/fma-scalar-memfold.ll
@@ -17,8 +17,8 @@ declare <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double>, <2 x double>, <2 x d
define void @fmadd_aab_ss(ptr %a, ptr %b) {
; AVX2-LABEL: fmadd_aab_ss:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; AVX2-NEXT: vfmadd213ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xa9,0x06]
; AVX2-NEXT: # xmm0 = (xmm0 * xmm0) + mem
; AVX2-NEXT: vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
@@ -26,8 +26,8 @@ define void @fmadd_aab_ss(ptr %a, ptr %b) {
;
; AVX512-LABEL: fmadd_aab_ss:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; AVX512-NEXT: vfmadd213ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0x06]
; AVX512-NEXT: # xmm0 = (xmm0 * xmm0) + mem
; AVX512-NEXT: vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
@@ -54,8 +54,8 @@ define void @fmadd_aab_ss(ptr %a, ptr %b) {
define void @fmadd_aba_ss(ptr %a, ptr %b) {
; AVX2-LABEL: fmadd_aba_ss:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; AVX2-NEXT: vfmadd231ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xb9,0x06]
; AVX2-NEXT: # xmm0 = (xmm0 * mem) + xmm0
; AVX2-NEXT: vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
@@ -63,8 +63,8 @@ define void @fmadd_aba_ss(ptr %a, ptr %b) {
;
; AVX512-LABEL: fmadd_aba_ss:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; AVX512-NEXT: vfmadd231ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xb9,0x06]
; AVX512-NEXT: # xmm0 = (xmm0 * mem) + xmm0
; AVX512-NEXT: vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
@@ -91,8 +91,8 @@ define void @fmadd_aba_ss(ptr %a, ptr %b) {
define void @fmsub_aab_ss(ptr %a, ptr %b) {
; AVX2-LABEL: fmsub_aab_ss:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; AVX2-NEXT: vfmsub213ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xab,0x06]
; AVX2-NEXT: # xmm0 = (xmm0 * xmm0) - mem
; AVX2-NEXT: vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
@@ -100,8 +100,8 @@ define void @fmsub_aab_ss(ptr %a, ptr %b) {
;
; AVX512-LABEL: fmsub_aab_ss:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; AVX512-NEXT: vfmsub213ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xab,0x06]
; AVX512-NEXT: # xmm0 = (xmm0 * xmm0) - mem
; AVX512-NEXT: vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
@@ -128,8 +128,8 @@ define void @fmsub_aab_ss(ptr %a, ptr %b) {
define void @fmsub_aba_ss(ptr %a, ptr %b) {
; AVX2-LABEL: fmsub_aba_ss:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; AVX2-NEXT: vfmsub231ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xbb,0x06]
; AVX2-NEXT: # xmm0 = (xmm0 * mem) - xmm0
; AVX2-NEXT: vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
@@ -137,8 +137,8 @@ define void @fmsub_aba_ss(ptr %a, ptr %b) {
;
; AVX512-LABEL: fmsub_aba_ss:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; AVX512-NEXT: vfmsub231ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xbb,0x06]
; AVX512-NEXT: # xmm0 = (xmm0 * mem) - xmm0
; AVX512-NEXT: vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
@@ -165,8 +165,8 @@ define void @fmsub_aba_ss(ptr %a, ptr %b) {
define void @fnmadd_aab_ss(ptr %a, ptr %b) {
; AVX2-LABEL: fnmadd_aab_ss:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; AVX2-NEXT: vfnmadd213ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xad,0x06]
; AVX2-NEXT: # xmm0 = -(xmm0 * xmm0) + mem
; AVX2-NEXT: vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
@@ -174,8 +174,8 @@ define void @fnmadd_aab_ss(ptr %a, ptr %b) {
;
; AVX512-LABEL: fnmadd_aab_ss:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; AVX512-NEXT: vfnmadd213ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xad,0x06]
; AVX512-NEXT: # xmm0 = -(xmm0 * xmm0) + mem
; AVX512-NEXT: vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
@@ -202,8 +202,8 @@ define void @fnmadd_aab_ss(ptr %a, ptr %b) {
define void @fnmadd_aba_ss(ptr %a, ptr %b) {
; AVX2-LABEL: fnmadd_aba_ss:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; AVX2-NEXT: vfnmadd231ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xbd,0x06]
; AVX2-NEXT: # xmm0 = -(xmm0 * mem) + xmm0
; AVX2-NEXT: vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
@@ -211,8 +211,8 @@ define void @fnmadd_aba_ss(ptr %a, ptr %b) {
;
; AVX512-LABEL: fnmadd_aba_ss:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; AVX512-NEXT: vfnmadd231ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xbd,0x06]
; AVX512-NEXT: # xmm0 = -(xmm0 * mem) + xmm0
; AVX512-NEXT: vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
@@ -239,8 +239,8 @@ define void @fnmadd_aba_ss(ptr %a, ptr %b) {
define void @fnmsub_aab_ss(ptr %a, ptr %b) {
; AVX2-LABEL: fnmsub_aab_ss:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; AVX2-NEXT: vfnmsub213ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xaf,0x06]
; AVX2-NEXT: # xmm0 = -(xmm0 * xmm0) - mem
; AVX2-NEXT: vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
@@ -248,8 +248,8 @@ define void @fnmsub_aab_ss(ptr %a, ptr %b) {
;
; AVX512-LABEL: fnmsub_aab_ss:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; AVX512-NEXT: vfnmsub213ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaf,0x06]
; AVX512-NEXT: # xmm0 = -(xmm0 * xmm0) - mem
; AVX512-NEXT: vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
@@ -276,8 +276,8 @@ define void @fnmsub_aab_ss(ptr %a, ptr %b) {
define void @fnmsub_aba_ss(ptr %a, ptr %b) {
; AVX2-LABEL: fnmsub_aba_ss:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; AVX2-NEXT: vfnmsub231ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xbf,0x06]
; AVX2-NEXT: # xmm0 = -(xmm0 * mem) - xmm0
; AVX2-NEXT: vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
@@ -285,8 +285,8 @@ define void @fnmsub_aba_ss(ptr %a, ptr %b) {
;
; AVX512-LABEL: fnmsub_aba_ss:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; AVX512-NEXT: vfnmsub231ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xbf,0x06]
; AVX512-NEXT: # xmm0 = -(xmm0 * mem) - xmm0
; AVX512-NEXT: vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
@@ -313,8 +313,8 @@ define void @fnmsub_aba_ss(ptr %a, ptr %b) {
define void @fmadd_aab_sd(ptr %a, ptr %b) {
; AVX2-LABEL: fmadd_aab_sd:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; AVX2-NEXT: vfmadd213sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xa9,0x06]
; AVX2-NEXT: # xmm0 = (xmm0 * xmm0) + mem
; AVX2-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
@@ -322,8 +322,8 @@ define void @fmadd_aab_sd(ptr %a, ptr %b) {
;
; AVX512-LABEL: fmadd_aab_sd:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; AVX512-NEXT: vfmadd213sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0x06]
; AVX512-NEXT: # xmm0 = (xmm0 * xmm0) + mem
; AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
@@ -346,8 +346,8 @@ define void @fmadd_aab_sd(ptr %a, ptr %b) {
define void @fmadd_aba_sd(ptr %a, ptr %b) {
; AVX2-LABEL: fmadd_aba_sd:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; AVX2-NEXT: vfmadd231sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xb9,0x06]
; AVX2-NEXT: # xmm0 = (xmm0 * mem) + xmm0
; AVX2-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
@@ -355,8 +355,8 @@ define void @fmadd_aba_sd(ptr %a, ptr %b) {
;
; AVX512-LABEL: fmadd_aba_sd:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; AVX512-NEXT: vfmadd231sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xb9,0x06]
; AVX512-NEXT: # xmm0 = (xmm0 * mem) + xmm0
; AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
@@ -379,8 +379,8 @@ define void @fmadd_aba_sd(ptr %a, ptr %b) {
define void @fmsub_aab_sd(ptr %a, ptr %b) {
; AVX2-LABEL: fmsub_aab_sd:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; AVX2-NEXT: vfmsub213sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xab,0x06]
; AVX2-NEXT: # xmm0 = (xmm0 * xmm0) - mem
; AVX2-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
@@ -388,8 +388,8 @@ define void @fmsub_aab_sd(ptr %a, ptr %b) {
;
; AVX512-LABEL: fmsub_aab_sd:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; AVX512-NEXT: vfmsub213sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xab,0x06]
; AVX512-NEXT: # xmm0 = (xmm0 * xmm0) - mem
; AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
@@ -412,8 +412,8 @@ define void @fmsub_aab_sd(ptr %a, ptr %b) {
define void @fmsub_aba_sd(ptr %a, ptr %b) {
; AVX2-LABEL: fmsub_aba_sd:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; AVX2-NEXT: vfmsub231sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xbb,0x06]
; AVX2-NEXT: # xmm0 = (xmm0 * mem) - xmm0
; AVX2-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
@@ -421,8 +421,8 @@ define void @fmsub_aba_sd(ptr %a, ptr %b) {
;
; AVX512-LABEL: fmsub_aba_sd:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; AVX512-NEXT: vfmsub231sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xbb,0x06]
; AVX512-NEXT: # xmm0 = (xmm0 * mem) - xmm0
; AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
@@ -445,8 +445,8 @@ define void @fmsub_aba_sd(ptr %a, ptr %b) {
define void @fnmadd_aab_sd(ptr %a, ptr %b) {
; AVX2-LABEL: fnmadd_aab_sd:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; AVX2-NEXT: vfnmadd213sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xad,0x06]
; AVX2-NEXT: # xmm0 = -(xmm0 * xmm0) + mem
; AVX2-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
@@ -454,8 +454,8 @@ define void @fnmadd_aab_sd(ptr %a, ptr %b) {
;
; AVX512-LABEL: fnmadd_aab_sd:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; AVX512-NEXT: vfnmadd213sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xad,0x06]
; AVX512-NEXT: # xmm0 = -(xmm0 * xmm0) + mem
; AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
@@ -478,8 +478,8 @@ define void @fnmadd_aab_sd(ptr %a, ptr %b) {
define void @fnmadd_aba_sd(ptr %a, ptr %b) {
; AVX2-LABEL: fnmadd_aba_sd:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; AVX2-NEXT: vfnmadd231sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xbd,0x06]
; AVX2-NEXT: # xmm0 = -(xmm0 * mem) + xmm0
; AVX2-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
@@ -487,8 +487,8 @@ define void @fnmadd_aba_sd(ptr %a, ptr %b) {
;
; AVX512-LABEL: fnmadd_aba_sd:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; AVX512-NEXT: vfnmadd231sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xbd,0x06]
; AVX512-NEXT: # xmm0 = -(xmm0 * mem) + xmm0
; AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
@@ -511,8 +511,8 @@ define void @fnmadd_aba_sd(ptr %a, ptr %b) {
define void @fnmsub_aab_sd(ptr %a, ptr %b) {
; AVX2-LABEL: fnmsub_aab_sd:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; AVX2-NEXT: vfnmsub213sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xaf,0x06]
; AVX2-NEXT: # xmm0 = -(xmm0 * xmm0) - mem
; AVX2-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
@@ -520,8 +520,8 @@ define void @fnmsub_aab_sd(ptr %a, ptr %b) {
;
; AVX512-LABEL: fnmsub_aab_sd:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; AVX512-NEXT: vfnmsub213sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaf,0x06]
; AVX512-NEXT: # xmm0 = -(xmm0 * xmm0) - mem
; AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
@@ -544,8 +544,8 @@ define void @fnmsub_aab_sd(ptr %a, ptr %b) {
define void @fnmsub_aba_sd(ptr %a, ptr %b) {
; AVX2-LABEL: fnmsub_aba_sd:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; AVX2-NEXT: # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; AVX2-NEXT: vfnmsub231sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xbf,0x06]
; AVX2-NEXT: # xmm0 = -(xmm0 * mem) - xmm0
; AVX2-NEXT: vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
@@ -553,8 +553,8 @@ define void @fnmsub_aba_sd(ptr %a, ptr %b) {
;
; AVX512-LABEL: fnmsub_aba_sd:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; AVX512-NEXT: # xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; AVX512-NEXT: vfnmsub231sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xbf,0x06]
; AVX512-NEXT: # xmm0 = -(xmm0 * mem) - xmm0
; AVX512-NEXT: vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
diff --git a/llvm/test/CodeGen/X86/fma.ll b/llvm/test/CodeGen/X86/fma.ll
index 03de1533e1d64c3..c55f50e97786a24 100644
--- a/llvm/test/CodeGen/X86/fma.ll
+++ b/llvm/test/CodeGen/X86/fma.ll
@@ -12,10 +12,10 @@ define float @test_f32(float %a, float %b, float %c) #0 {
; FMA32-LABEL: test_f32:
; FMA32: ## %bb.0:
; FMA32-NEXT: pushl %eax ## encoding: [0x50]
-; FMA32-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
-; FMA32-NEXT: ## xmm0 = mem[0],zero,zero,zero
-; FMA32-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
-; FMA32-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; FMA32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA32-NEXT: ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
+; FMA32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA32-NEXT: ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
; FMA32-NEXT: vfmadd213ss {{[0-9]+}}(%esp), %xmm0, %xmm1 ## encoding: [0xc4,0xe2,0x79,0xa9,0x4c,0x24,0x10]
; FMA32-NEXT: ## xmm1 = (xmm0 * xmm1) + mem
; FMA32-NEXT: vmovss %xmm1, (%esp) ## encoding: [0xc5,0xfa,0x11,0x0c,0x24]
@@ -60,10 +60,10 @@ define float @test_f32_reassoc(float %a, float %b, float %c) #0 {
; FMA32-LABEL: test_f32_reassoc:
; FMA32: ## %bb.0:
; FMA32-NEXT: pushl %eax ## encoding: [0x50]
-; FMA32-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
-; FMA32-NEXT: ## xmm0 = mem[0],zero,zero,zero
-; FMA32-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
-; FMA32-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; FMA32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA32-NEXT: ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
+; FMA32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA32-NEXT: ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
; FMA32-NEXT: vfmadd213ss {{[0-9]+}}(%esp), %xmm0, %xmm1 ## encoding: [0xc4,0xe2,0x79,0xa9,0x4c,0x24,0x10]
; FMA32-NEXT: ## xmm1 = (xmm0 * xmm1) + mem
; FMA32-NEXT: vmovss %xmm1, (%esp) ## encoding: [0xc5,0xfa,0x11,0x0c,0x24]
@@ -74,8 +74,8 @@ define float @test_f32_reassoc(float %a, float %b, float %c) #0 {
; FMACALL32-LABEL: test_f32_reassoc:
; FMACALL32: ## %bb.0:
; FMACALL32-NEXT: pushl %eax ## encoding: [0x50]
-; FMACALL32-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
-; FMACALL32-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; FMACALL32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMACALL32-NEXT: ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
; FMACALL32-NEXT: vmulss {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x59,0x44,0x24,0x0c]
; FMACALL32-NEXT: vaddss {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x58,0x44,0x24,0x10]
; FMACALL32-NEXT: vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
@@ -114,10 +114,10 @@ define double @test_f64(double %a, double %b, double %c) #0 {
; FMA32-LABEL: test_f64:
; FMA32: ## %bb.0: ## %entry
; FMA32-NEXT: subl $12, %esp ## encoding: [0x83,0xec,0x0c]
-; FMA32-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x10]
-; FMA32-NEXT: ## xmm0 = mem[0],zero
-; FMA32-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x18]
-; FMA32-NEXT: ## xmm1 = mem[0],zero
+; FMA32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA32-NEXT: ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x10]
+; FMA32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMA32-NEXT: ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x18]
; FMA32-NEXT: vfmadd213sd {{[0-9]+}}(%esp), %xmm0, %xmm1 ## encoding: [0xc4,0xe2,0xf9,0xa9,0x4c,0x24,0x20]
; FMA32-NEXT: ## xmm1 = (xmm0 * xmm1) + mem
; FMA32-NEXT: vmovsd %xmm1, (%esp) ## encoding: [0xc5,0xfb,0x11,0x0c,0x24]
@@ -263,30 +263,30 @@ define float @test_f32_cst() #0 {
;
; FMA64-LABEL: test_f32_cst:
; FMA64: ## %bb.0: ## %entry
-; FMA64-NEXT: vmovss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
+; FMA64-NEXT: vmovss {{.*#+}} xmm0 = [1.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; FMA64-NEXT: ## encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; FMA64-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
-; FMA64-NEXT: ## xmm0 = mem[0],zero,zero,zero
; FMA64-NEXT: retq ## encoding: [0xc3]
;
; FMACALL64-LABEL: test_f32_cst:
; FMACALL64: ## %bb.0: ## %entry
-; FMACALL64-NEXT: movss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ## encoding: [0xf3,0x0f,0x10,0x05,A,A,A,A]
+; FMACALL64-NEXT: movss {{.*#+}} xmm0 = [1.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; FMACALL64-NEXT: ## encoding: [0xf3,0x0f,0x10,0x05,A,A,A,A]
; FMACALL64-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
-; FMACALL64-NEXT: ## xmm0 = mem[0],zero,zero,zero
; FMACALL64-NEXT: retq ## encoding: [0xc3]
;
; AVX512-LABEL: test_f32_cst:
; AVX512: ## %bb.0: ## %entry
-; AVX512-NEXT: vmovss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = [1.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
-; AVX512-NEXT: ## xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: retq ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_f32_cst:
; AVX512VL: ## %bb.0: ## %entry
-; AVX512VL-NEXT: vmovss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
+; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = [1.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX512VL-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x05,A,A,A,A]
; AVX512VL-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
-; AVX512VL-NEXT: ## xmm0 = mem[0],zero,zero,zero
; AVX512VL-NEXT: retq ## encoding: [0xc3]
entry:
%call = call float @llvm.fma.f32(float 3.0, float 3.0, float 3.0)
@@ -450,8 +450,8 @@ define <4 x float> @test_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #
; FMACALL32_BDVER2-NEXT: calll _fmaf ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
; FMACALL32_BDVER2-NEXT: fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x10]
-; FMACALL32_BDVER2-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x1c]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x1c]
; FMACALL32_BDVER2-NEXT: vinsertps $16, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x18,0x10]
; FMACALL32_BDVER2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
; FMACALL32_BDVER2-NEXT: vinsertps $32, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x14,0x20]
@@ -778,12 +778,12 @@ define <8 x float> @test_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #
; FMACALL32_BDVER2-NEXT: calll _fmaf ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
; FMACALL32_BDVER2-NEXT: fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x10]
-; FMACALL32_BDVER2-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x2c]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x2c]
; FMACALL32_BDVER2-NEXT: vinsertps $16, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x28,0x10]
; FMACALL32_BDVER2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
-; FMACALL32_BDVER2-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x1c]
-; FMACALL32_BDVER2-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x1c]
; FMACALL32_BDVER2-NEXT: vinsertps $16, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x18,0x10]
; FMACALL32_BDVER2-NEXT: ## xmm1 = xmm1[0],mem[0],xmm1[2,3]
; FMACALL32_BDVER2-NEXT: vinsertps $32, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x24,0x20]
@@ -1382,16 +1382,16 @@ define <16 x float> @test_v16f32(<16 x float> %a, <16 x float> %b, <16 x float>
; FMACALL32_BDVER2-NEXT: calll _fmaf ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
; FMACALL32_BDVER2-NEXT: fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x40]
-; FMACALL32_BDVER2-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x3c]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x3c]
; FMACALL32_BDVER2-NEXT: vinsertps $16, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x38,0x10]
; FMACALL32_BDVER2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
-; FMACALL32_BDVER2-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x2c]
-; FMACALL32_BDVER2-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x2c]
; FMACALL32_BDVER2-NEXT: vinsertps $16, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x28,0x10]
; FMACALL32_BDVER2-NEXT: ## xmm1 = xmm1[0],mem[0],xmm1[2,3]
-; FMACALL32_BDVER2-NEXT: vmovss {{[0-9]+}}(%esp), %xmm2 ## encoding: [0xc5,0xfa,0x10,0x54,0x24,0x4c]
-; FMACALL32_BDVER2-NEXT: ## xmm2 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfa,0x10,0x54,0x24,0x4c]
; FMACALL32_BDVER2-NEXT: vinsertps $16, {{[0-9]+}}(%esp), %xmm2, %xmm2 ## encoding: [0xc4,0xe3,0x69,0x21,0x54,0x24,0x48,0x10]
; FMACALL32_BDVER2-NEXT: ## xmm2 = xmm2[0],mem[0],xmm2[2,3]
; FMACALL32_BDVER2-NEXT: vinsertps $32, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x34,0x20]
@@ -1407,8 +1407,8 @@ define <16 x float> @test_v16f32(<16 x float> %a, <16 x float> %b, <16 x float>
; FMACALL32_BDVER2-NEXT: vinsertps $48, {{[0-9]+}}(%esp), %xmm2, %xmm2 ## encoding: [0xc4,0xe3,0x69,0x21,0x54,0x24,0x40,0x30]
; FMACALL32_BDVER2-NEXT: ## xmm2 = xmm2[0,1,2],mem[0]
; FMACALL32_BDVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
-; FMACALL32_BDVER2-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x5c]
-; FMACALL32_BDVER2-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x5c]
; FMACALL32_BDVER2-NEXT: vinsertps $16, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x58,0x10]
; FMACALL32_BDVER2-NEXT: ## xmm1 = xmm1[0],mem[0],xmm1[2,3]
; FMACALL32_BDVER2-NEXT: vinsertps $32, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x54,0x20]
@@ -1512,8 +1512,8 @@ define <2 x double> @test_v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
; FMACALL32_BDVER2-NEXT: fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x20]
-; FMACALL32_BDVER2-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x28]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x28]
; FMACALL32_BDVER2-NEXT: vmovhps {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x16,0x44,0x24,0x20]
; FMACALL32_BDVER2-NEXT: ## xmm0 = xmm0[0,1],mem[0,1]
; FMACALL32_BDVER2-NEXT: addl $108, %esp ## encoding: [0x83,0xc4,0x6c]
@@ -1733,10 +1733,10 @@ define <4 x double> @test_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
; FMACALL32_BDVER2-NEXT: fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x18]
-; FMACALL32_BDVER2-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x30]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
-; FMACALL32_BDVER2-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x20]
-; FMACALL32_BDVER2-NEXT: ## xmm1 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x30]
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x20]
; FMACALL32_BDVER2-NEXT: vmovhps {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x16,0x44,0x24,0x28]
; FMACALL32_BDVER2-NEXT: ## xmm0 = xmm0[0,1],mem[0,1]
; FMACALL32_BDVER2-NEXT: vmovhps {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x16,0x4c,0x24,0x18]
@@ -1928,8 +1928,8 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMACALL32_BDVER2-NEXT: ## imm = 0x160
; FMACALL32_BDVER2-NEXT: vmovaps %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 32-byte Spill
; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfc,0x29,0x84,0x24,0xe0,0x00,0x00,0x00]
-; FMACALL32_BDVER2-NEXT: vmovsd 56(%ebp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x45,0x38]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x45,0x38]
; FMACALL32_BDVER2-NEXT: vmovaps %ymm2, {{[-0-9]+}}(%e{{[sb]}}p) ## 32-byte Spill
; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfc,0x29,0x94,0x24,0x00,0x01,0x00,0x00]
; FMACALL32_BDVER2-NEXT: vextractf128 $1, %ymm3, %xmm2 ## encoding: [0xc4,0xe3,0x7d,0x19,0xda,0x01]
@@ -1949,8 +1949,8 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMACALL32_BDVER2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
-; FMACALL32_BDVER2-NEXT: vmovsd 48(%ebp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x45,0x30]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x45,0x30]
; FMACALL32_BDVER2-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) ## 10-byte Folded Spill
; FMACALL32_BDVER2-NEXT: ## encoding: [0xdb,0xbc,0x24,0x94,0x00,0x00,0x00]
; FMACALL32_BDVER2-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfb,0x11,0x44,0x24,0x10]
@@ -1963,8 +1963,8 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMACALL32_BDVER2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
-; FMACALL32_BDVER2-NEXT: vmovsd 40(%ebp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x45,0x28]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x45,0x28]
; FMACALL32_BDVER2-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) ## 10-byte Folded Spill
; FMACALL32_BDVER2-NEXT: ## encoding: [0xdb,0xbc,0x24,0x88,0x00,0x00,0x00]
; FMACALL32_BDVER2-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfb,0x11,0x44,0x24,0x10]
@@ -1977,8 +1977,8 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMACALL32_BDVER2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
-; FMACALL32_BDVER2-NEXT: vmovsd 32(%ebp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x45,0x20]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x45,0x20]
; FMACALL32_BDVER2-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) ## 10-byte Folded Spill
; FMACALL32_BDVER2-NEXT: ## encoding: [0xdb,0xbc,0x24,0xc0,0x00,0x00,0x00]
; FMACALL32_BDVER2-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfb,0x11,0x44,0x24,0x10]
@@ -1998,8 +1998,8 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMACALL32_BDVER2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
-; FMACALL32_BDVER2-NEXT: vmovsd 24(%ebp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x45,0x18]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x45,0x18]
; FMACALL32_BDVER2-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) ## 10-byte Folded Spill
; FMACALL32_BDVER2-NEXT: ## encoding: [0xdb,0xbc,0x24,0xa0,0x00,0x00,0x00]
; FMACALL32_BDVER2-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfb,0x11,0x44,0x24,0x10]
@@ -2011,8 +2011,8 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMACALL32_BDVER2-NEXT: vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
-; FMACALL32_BDVER2-NEXT: vmovsd 16(%ebp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x45,0x10]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x45,0x10]
; FMACALL32_BDVER2-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) ## 10-byte Folded Spill
; FMACALL32_BDVER2-NEXT: ## encoding: [0xdb,0x7c,0x24,0x30]
; FMACALL32_BDVER2-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfb,0x11,0x44,0x24,0x10]
@@ -2025,8 +2025,8 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMACALL32_BDVER2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
-; FMACALL32_BDVER2-NEXT: vmovsd 8(%ebp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x45,0x08]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x45,0x08]
; FMACALL32_BDVER2-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) ## 10-byte Folded Spill
; FMACALL32_BDVER2-NEXT: ## encoding: [0xdb,0x7c,0x24,0x20]
; FMACALL32_BDVER2-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfb,0x11,0x44,0x24,0x10]
@@ -2039,8 +2039,8 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMACALL32_BDVER2-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
-; FMACALL32_BDVER2-NEXT: vmovsd 64(%ebp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x45,0x40]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x45,0x40]
; FMACALL32_BDVER2-NEXT: vmovsd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfb,0x11,0x44,0x24,0x10]
; FMACALL32_BDVER2-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0x30,0x01,0x00,0x00]
@@ -2070,21 +2070,21 @@ define <8 x double> @test_v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %
; FMACALL32_BDVER2-NEXT: calll _fma ## encoding: [0xe8,A,A,A,A]
; FMACALL32_BDVER2-NEXT: ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
; FMACALL32_BDVER2-NEXT: fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x68]
-; FMACALL32_BDVER2-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x60]
-; FMACALL32_BDVER2-NEXT: ## xmm0 = mem[0],zero
-; FMACALL32_BDVER2-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x50]
-; FMACALL32_BDVER2-NEXT: ## xmm1 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x60]
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x50]
; FMACALL32_BDVER2-NEXT: vmovhps {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x16,0x44,0x24,0x58]
; FMACALL32_BDVER2-NEXT: ## xmm0 = xmm0[0,1],mem[0,1]
; FMACALL32_BDVER2-NEXT: vmovhps {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x16,0x4c,0x24,0x48]
; FMACALL32_BDVER2-NEXT: ## xmm1 = xmm1[0,1],mem[0,1]
-; FMACALL32_BDVER2-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm2 ## encoding: [0xc5,0xfb,0x10,0x54,0x24,0x70]
-; FMACALL32_BDVER2-NEXT: ## xmm2 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x54,0x24,0x70]
; FMACALL32_BDVER2-NEXT: vmovhps {{[0-9]+}}(%esp), %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x16,0x54,0x24,0x68]
; FMACALL32_BDVER2-NEXT: ## xmm2 = xmm2[0,1],mem[0,1]
; FMACALL32_BDVER2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
-; FMACALL32_BDVER2-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfb,0x10,0x8c,0x24,0x80,0x00,0x00,0x00]
-; FMACALL32_BDVER2-NEXT: ## xmm1 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMACALL32_BDVER2-NEXT: ## encoding: [0xc5,0xfb,0x10,0x8c,0x24,0x80,0x00,0x00,0x00]
; FMACALL32_BDVER2-NEXT: vmovhps {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x16,0x4c,0x24,0x78]
; FMACALL32_BDVER2-NEXT: ## xmm1 = xmm1[0,1],mem[0,1]
; FMACALL32_BDVER2-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 ## encoding: [0xc4,0xe3,0x75,0x18,0xca,0x01]
diff --git a/llvm/test/CodeGen/X86/fma_patterns.ll b/llvm/test/CodeGen/X86/fma_patterns.ll
index aa99672b8fc6ae4..0ffcb8c46cef95d 100644
--- a/llvm/test/CodeGen/X86/fma_patterns.ll
+++ b/llvm/test/CodeGen/X86/fma_patterns.ll
@@ -1269,7 +1269,7 @@ define <4 x float> @test_v4f32_mul_y_sub_x_negone_undefs(<4 x float> %x, <4 x fl
define float @test_f32_interp(float %x, float %y, float %t) {
; FMA-INFS-LABEL: test_f32_interp:
; FMA-INFS: # %bb.0:
-; FMA-INFS-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; FMA-INFS-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA-INFS-NEXT: vsubss %xmm2, %xmm3, %xmm3
; FMA-INFS-NEXT: vmulss %xmm3, %xmm1, %xmm1
; FMA-INFS-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
@@ -1277,7 +1277,7 @@ define float @test_f32_interp(float %x, float %y, float %t) {
;
; FMA4-INFS-LABEL: test_f32_interp:
; FMA4-INFS: # %bb.0:
-; FMA4-INFS-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; FMA4-INFS-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA4-INFS-NEXT: vsubss %xmm2, %xmm3, %xmm3
; FMA4-INFS-NEXT: vmulss %xmm3, %xmm1, %xmm1
; FMA4-INFS-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1
@@ -1285,7 +1285,7 @@ define float @test_f32_interp(float %x, float %y, float %t) {
;
; AVX512-INFS-LABEL: test_f32_interp:
; AVX512-INFS: # %bb.0:
-; AVX512-INFS-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX512-INFS-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX512-INFS-NEXT: vsubss %xmm2, %xmm3, %xmm3
; AVX512-INFS-NEXT: vmulss %xmm3, %xmm1, %xmm1
; AVX512-INFS-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
@@ -1416,7 +1416,7 @@ define <8 x float> @test_v8f32_interp(<8 x float> %x, <8 x float> %y, <8 x float
define double @test_f64_interp(double %x, double %y, double %t) {
; FMA-INFS-LABEL: test_f64_interp:
; FMA-INFS: # %bb.0:
-; FMA-INFS-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; FMA-INFS-NEXT: vmovsd {{.*#+}} xmm3 = [1.0E+0,0.0E+0]
; FMA-INFS-NEXT: vsubsd %xmm2, %xmm3, %xmm3
; FMA-INFS-NEXT: vmulsd %xmm3, %xmm1, %xmm1
; FMA-INFS-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
@@ -1424,7 +1424,7 @@ define double @test_f64_interp(double %x, double %y, double %t) {
;
; FMA4-INFS-LABEL: test_f64_interp:
; FMA4-INFS: # %bb.0:
-; FMA4-INFS-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; FMA4-INFS-NEXT: vmovsd {{.*#+}} xmm3 = [1.0E+0,0.0E+0]
; FMA4-INFS-NEXT: vsubsd %xmm2, %xmm3, %xmm3
; FMA4-INFS-NEXT: vmulsd %xmm3, %xmm1, %xmm1
; FMA4-INFS-NEXT: vfmaddsd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1
@@ -1432,7 +1432,7 @@ define double @test_f64_interp(double %x, double %y, double %t) {
;
; AVX512-INFS-LABEL: test_f64_interp:
; AVX512-INFS: # %bb.0:
-; AVX512-INFS-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512-INFS-NEXT: vmovsd {{.*#+}} xmm3 = [1.0E+0,0.0E+0]
; AVX512-INFS-NEXT: vsubsd %xmm2, %xmm3, %xmm3
; AVX512-INFS-NEXT: vmulsd %xmm3, %xmm1, %xmm1
; AVX512-INFS-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm2 * xmm0) + xmm1
diff --git a/llvm/test/CodeGen/X86/fmf-flags.ll b/llvm/test/CodeGen/X86/fmf-flags.ll
index daaf643201e92ce..24dabfc18b9e3f2 100644
--- a/llvm/test/CodeGen/X86/fmf-flags.ll
+++ b/llvm/test/CodeGen/X86/fmf-flags.ll
@@ -51,7 +51,7 @@ define dso_local float @fast_fmuladd_opts(float %a , float %b , float %c) {
define dso_local double @not_so_fast_mul_add(double %x) {
; X64-LABEL: not_so_fast_mul_add:
; X64: # %bb.0:
-; X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm1 = [4.2000000000000002E+0,0.0E+0]
; X64-NEXT: mulsd %xmm0, %xmm1
; X64-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: movsd %xmm1, mul1(%rip)
diff --git a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
index b927f92897a20bc..55502f37da1a627 100644
--- a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
+++ b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
@@ -119,12 +119,12 @@ define <4 x float> @test_fmaximum_scalarize(<4 x float> %x, <4 x float> %y) "no-
define float @test_fmaximum_nan0(float %x, float %y) {
; SSE2-LABEL: test_fmaximum_nan0:
; SSE2: # %bb.0:
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fmaximum_nan0:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: retq
;
; X86-LABEL: test_fmaximum_nan0:
@@ -138,12 +138,12 @@ define float @test_fmaximum_nan0(float %x, float %y) {
define float @test_fmaximum_nan1(float %x, float %y) {
; SSE2-LABEL: test_fmaximum_nan1:
; SSE2: # %bb.0:
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fmaximum_nan1:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: retq
;
; X86-LABEL: test_fmaximum_nan1:
@@ -608,12 +608,12 @@ define <2 x double> @test_fminimum_scalarize(<2 x double> %x, <2 x double> %y) "
define float @test_fminimum_nan0(float %x, float %y) {
; SSE2-LABEL: test_fminimum_nan0:
; SSE2: # %bb.0:
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fminimum_nan0:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: retq
;
; X86-LABEL: test_fminimum_nan0:
@@ -627,12 +627,12 @@ define float @test_fminimum_nan0(float %x, float %y) {
define float @test_fminimum_nan1(float %x, float %y) {
; SSE2-LABEL: test_fminimum_nan1:
; SSE2: # %bb.0:
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fminimum_nan1:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: retq
;
; X86-LABEL: test_fminimum_nan1:
@@ -816,12 +816,12 @@ define double @test_fminimum_zero1(double %x, double %y) nounwind {
define double @test_fminimum_zero2(double %x, double %y) {
; SSE2-LABEL: test_fminimum_zero2:
; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fminimum_zero2:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
; AVX-NEXT: retq
;
; X86-LABEL: test_fminimum_zero2:
@@ -1187,7 +1187,7 @@ define <4 x float> @test_fmaximum_vector_non_zero(<4 x float> %x) {
define <2 x double> @test_fminimum_vector_nan(<2 x double> %x) {
; SSE2-LABEL: test_fminimum_vector_nan:
; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = [NaN,0.0E+0]
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: minpd %xmm0, %xmm1
@@ -1197,7 +1197,7 @@ define <2 x double> @test_fminimum_vector_nan(<2 x double> %x) {
;
; AVX-LABEL: test_fminimum_vector_nan:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [NaN,0.0E+0]
; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; AVX-NEXT: vminpd %xmm0, %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
index 063182fcecf3e43..abaefaee33ed607 100644
--- a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
+++ b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
@@ -49,26 +49,26 @@ define <4 x float> @fmul_pow2_ldexp_4xfloat(<4 x i32> %i) {
; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
; CHECK-SSE-NEXT: movd %xmm1, %edi
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-SSE-NEXT: # xmm0 = mem[2,3,2,3]
; CHECK-SSE-NEXT: movd %xmm0, %edi
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-SSE-NEXT: movd %xmm0, %edi
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-SSE-NEXT: # xmm0 = mem[1,1,1,1]
; CHECK-SSE-NEXT: movd %xmm0, %edi
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -85,26 +85,26 @@ define <4 x float> @fmul_pow2_ldexp_4xfloat(<4 x i32> %i) {
; CHECK-AVX-NEXT: .cfi_def_cfa_offset 48
; CHECK-AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX-NEXT: vextractps $1, %xmm0, %edi
-; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = [9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX-NEXT: callq ldexpf@PLT
; CHECK-AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-AVX-NEXT: vmovd %xmm0, %edi
-; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = [9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX-NEXT: callq ldexpf@PLT
; CHECK-AVX-NEXT: vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; CHECK-AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
; CHECK-AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-AVX-NEXT: vextractps $2, %xmm0, %edi
-; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = [9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX-NEXT: callq ldexpf@PLT
; CHECK-AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-AVX-NEXT: vextractps $3, %xmm0, %edi
-; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = [9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX-NEXT: callq ldexpf@PLT
; CHECK-AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
@@ -407,13 +407,13 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-SSE-NEXT: .cfi_def_cfa_offset 80
; CHECK-SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-SSE-NEXT: pextrw $7, %xmm0, %edi
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-SSE-NEXT: pextrw $6, %xmm0, %edi
-; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
@@ -421,13 +421,13 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-SSE-NEXT: pextrw $5, %xmm0, %edi
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-SSE-NEXT: pextrw $4, %xmm0, %edi
-; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
@@ -437,13 +437,13 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-SSE-NEXT: pextrw $3, %xmm0, %edi
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-SSE-NEXT: pextrw $2, %xmm0, %edi
-; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
@@ -451,14 +451,14 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-SSE-NEXT: pextrw $1, %xmm0, %edi
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-SSE-NEXT: movd %xmm0, %eax
; CHECK-SSE-NEXT: movzwl %ax, %edi
-; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: callq ldexpf@PLT
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
@@ -477,13 +477,13 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX2-NEXT: .cfi_def_cfa_offset 80
; CHECK-AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-AVX2-NEXT: vpextrw $7, %xmm0, %edi
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: callq ldexpf@PLT
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX2-NEXT: vpextrw $6, %xmm0, %edi
-; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: callq ldexpf@PLT
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
@@ -491,13 +491,13 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX2-NEXT: vpextrw $5, %xmm0, %edi
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: callq ldexpf@PLT
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX2-NEXT: vpextrw $4, %xmm0, %edi
-; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: callq ldexpf@PLT
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
@@ -507,13 +507,13 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX2-NEXT: vpextrw $3, %xmm0, %edi
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: callq ldexpf@PLT
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX2-NEXT: vpextrw $2, %xmm0, %edi
-; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: callq ldexpf@PLT
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
@@ -521,14 +521,14 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX2-NEXT: vpextrw $1, %xmm0, %edi
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: callq ldexpf@PLT
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX2-NEXT: vmovd %xmm0, %eax
; CHECK-AVX2-NEXT: movzwl %ax, %edi
-; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: callq ldexpf@PLT
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
@@ -547,7 +547,7 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX512F-NEXT: .cfi_def_cfa_offset 80
; CHECK-AVX512F-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-AVX512F-NEXT: vpextrw $7, %xmm0, %edi
-; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX512F-NEXT: callq ldexpf@PLT
; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax
@@ -555,7 +555,7 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX512F-NEXT: vpextrw $6, %xmm0, %edi
-; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX512F-NEXT: callq ldexpf@PLT
; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax
@@ -565,7 +565,7 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX512F-NEXT: vpextrw $5, %xmm0, %edi
-; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX512F-NEXT: callq ldexpf@PLT
; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax
@@ -573,7 +573,7 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX512F-NEXT: vpextrw $4, %xmm0, %edi
-; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX512F-NEXT: callq ldexpf@PLT
; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax
@@ -585,7 +585,7 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX512F-NEXT: vpextrw $3, %xmm0, %edi
-; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX512F-NEXT: callq ldexpf@PLT
; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax
@@ -593,7 +593,7 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX512F-NEXT: vpextrw $2, %xmm0, %edi
-; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX512F-NEXT: callq ldexpf@PLT
; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax
@@ -603,7 +603,7 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX512F-NEXT: vpextrw $1, %xmm0, %edi
-; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX512F-NEXT: callq ldexpf@PLT
; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax
@@ -612,7 +612,7 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax
; CHECK-AVX512F-NEXT: movzwl %ax, %edi
-; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX512F-NEXT: callq ldexpf@PLT
; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax
@@ -1270,7 +1270,7 @@ define float @fdiv_pow_shl_cnt_fail_maybe_z(i64 %cnt) nounwind {
; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1
; CHECK-SSE-NEXT: addss %xmm1, %xmm1
; CHECK-SSE-NEXT: .LBB22_3:
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [-9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: divss %xmm1, %xmm0
; CHECK-SSE-NEXT: retq
;
@@ -1290,7 +1290,7 @@ define float @fdiv_pow_shl_cnt_fail_maybe_z(i64 %cnt) nounwind {
; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0
; CHECK-AVX2-NEXT: .LBB22_3:
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = [-9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-AVX2-NEXT: retq
;
@@ -1301,7 +1301,7 @@ define float @fdiv_pow_shl_cnt_fail_maybe_z(i64 %cnt) nounwind {
; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax
; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %rax, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [-9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-NO-FASTFMA-NEXT: retq
;
@@ -1310,7 +1310,7 @@ define float @fdiv_pow_shl_cnt_fail_maybe_z(i64 %cnt) nounwind {
; CHECK-FMA-NEXT: movl $8, %eax
; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax
; CHECK-FMA-NEXT: vcvtusi2ss %rax, %xmm0, %xmm0
-; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [-9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-FMA-NEXT: retq
%shl = shl i64 8, %cnt
@@ -1327,7 +1327,7 @@ define float @fdiv_pow_shl_cnt_fail_neg_int(i64 %cnt) nounwind {
; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-SSE-NEXT: shlq %cl, %rax
; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [-9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: divss %xmm1, %xmm0
; CHECK-SSE-NEXT: retq
;
@@ -1338,7 +1338,7 @@ define float @fdiv_pow_shl_cnt_fail_neg_int(i64 %cnt) nounwind {
; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-AVX2-NEXT: shlq %cl, %rax
; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = [-9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-AVX2-NEXT: retq
;
@@ -1349,7 +1349,7 @@ define float @fdiv_pow_shl_cnt_fail_neg_int(i64 %cnt) nounwind {
; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax
; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [-9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-NO-FASTFMA-NEXT: retq
;
@@ -1358,7 +1358,7 @@ define float @fdiv_pow_shl_cnt_fail_neg_int(i64 %cnt) nounwind {
; CHECK-FMA-NEXT: movl $8, %eax
; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax
; CHECK-FMA-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
-; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [-9.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-FMA-NEXT: retq
%shl = shl i64 8, %cnt
@@ -1376,7 +1376,7 @@ define float @fdiv_pow_shl_cnt(i64 %cnt_in) nounwind {
; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-SSE-NEXT: shlq %cl, %rax
; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [-5.0E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: divss %xmm1, %xmm0
; CHECK-SSE-NEXT: retq
;
@@ -1388,7 +1388,7 @@ define float @fdiv_pow_shl_cnt(i64 %cnt_in) nounwind {
; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-AVX2-NEXT: shlq %cl, %rax
; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = [-5.0E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-AVX2-NEXT: retq
;
@@ -1400,7 +1400,7 @@ define float @fdiv_pow_shl_cnt(i64 %cnt_in) nounwind {
; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax
; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [-5.0E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-NO-FASTFMA-NEXT: retq
;
@@ -1410,7 +1410,7 @@ define float @fdiv_pow_shl_cnt(i64 %cnt_in) nounwind {
; CHECK-FMA-NEXT: movl $8, %eax
; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax
; CHECK-FMA-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
-; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [-5.0E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-FMA-NEXT: retq
%cnt = and i64 %cnt_in, 31
@@ -1431,7 +1431,7 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind {
; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm0
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: callq __extendhfsf2@PLT
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: divss %xmm0, %xmm1
; CHECK-SSE-NEXT: movaps %xmm1, %xmm0
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
@@ -1448,7 +1448,7 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind {
; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: popq %rax
@@ -1464,7 +1464,7 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind {
; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vmovd %xmm0, %eax
@@ -1479,7 +1479,7 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind {
; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0
-; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [8.192E+3,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-FMA-NEXT: vmovd %xmm0, %eax
@@ -1547,7 +1547,7 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind {
; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
; CHECK-SSE-NEXT: callq __extendhfsf2@PLT
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: divss %xmm0, %xmm1
; CHECK-SSE-NEXT: movaps %xmm1, %xmm0
; CHECK-SSE-NEXT: callq __truncsfhf2@PLT
@@ -1565,7 +1565,7 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind {
; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT
; CHECK-AVX2-NEXT: popq %rax
@@ -1582,7 +1582,7 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind {
; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NO-FASTFMA-NEXT: vmovd %xmm0, %eax
@@ -1598,7 +1598,7 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind {
; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0
-; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-FMA-NEXT: vmovd %xmm0, %eax
@@ -1642,7 +1642,7 @@ define float @fdiv_pow_shl_cnt32_out_of_bounds2(i32 %cnt) nounwind {
; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx
; CHECK-SSE-NEXT: shll %cl, %eax
; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1
-; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = [1.00974148E-28,0.0E+0,0.0E+0,0.0E+0]
; CHECK-SSE-NEXT: divss %xmm1, %xmm0
; CHECK-SSE-NEXT: retq
;
@@ -1653,7 +1653,7 @@ define float @fdiv_pow_shl_cnt32_out_of_bounds2(i32 %cnt) nounwind {
; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
; CHECK-AVX2-NEXT: shll %cl, %eax
; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = [1.00974148E-28,0.0E+0,0.0E+0,0.0E+0]
; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-AVX2-NEXT: retq
;
@@ -1664,7 +1664,7 @@ define float @fdiv_pow_shl_cnt32_out_of_bounds2(i32 %cnt) nounwind {
; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx
; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax
; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = [1.00974148E-28,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-NO-FASTFMA-NEXT: retq
;
@@ -1673,7 +1673,7 @@ define float @fdiv_pow_shl_cnt32_out_of_bounds2(i32 %cnt) nounwind {
; CHECK-FMA-NEXT: movl $1, %eax
; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax
; CHECK-FMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0
-; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = [1.00974148E-28,0.0E+0,0.0E+0,0.0E+0]
; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0
; CHECK-FMA-NEXT: retq
%shl = shl nuw i32 1, %cnt
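
The pattern in all of these updated checks is the same: a scalar movss/movsd load from the constant pool now prints the loaded value followed by an explicit 0.0E+0 for every lane the instruction zeroes (three upper f32 lanes for movss, one upper f64 lane for movsd). A rough C++ sketch of that formatting step, assuming a helper that receives the already-printed scalar (the real logic lives in addConstantComments and handles more cases):

  #include <sstream>
  #include <string>

  // NumLanes is 4 for movss (f32 elements) and 2 for movsd (f64 elements);
  // ScalarStr is the already-formatted constant, e.g. "3.5E+0".
  static std::string buildScalarLoadComment(const std::string &ScalarStr,
                                            unsigned NumLanes) {
    std::ostringstream OS;
    OS << '[' << ScalarStr;
    for (unsigned Lane = 1; Lane != NumLanes; ++Lane)
      OS << ",0.0E+0"; // upper elements are zeroed by the scalar load
    OS << ']';
    return OS.str();
  }

buildScalarLoadComment("3.5E+0", 4) yields "[3.5E+0,0.0E+0,0.0E+0,0.0E+0]", the comment shown on the vmovss checks below.
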
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll b/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll
index 2253d7cbaf8b67e..71d49481ebb8e71 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll
@@ -385,7 +385,7 @@ entry:
define float @f15() #0 {
; NOFMA-LABEL: f15:
; NOFMA: # %bb.0: # %entry
-; NOFMA-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; NOFMA-NEXT: movss {{.*#+}} xmm1 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; NOFMA-NEXT: movaps %xmm1, %xmm0
; NOFMA-NEXT: mulss %xmm1, %xmm0
; NOFMA-NEXT: addss %xmm1, %xmm0
@@ -393,13 +393,13 @@ define float @f15() #0 {
;
; FMA-LABEL: f15:
; FMA: # %bb.0: # %entry
-; FMA-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA-NEXT: vmovss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: f15:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA4-NEXT: retq
entry:
@@ -417,7 +417,7 @@ entry:
define double @f16() #0 {
; NOFMA-LABEL: f16:
; NOFMA: # %bb.0: # %entry
-; NOFMA-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; NOFMA-NEXT: movsd {{.*#+}} xmm1 = [4.2100000000000001E+1,0.0E+0]
; NOFMA-NEXT: movapd %xmm1, %xmm0
; NOFMA-NEXT: mulsd %xmm1, %xmm0
; NOFMA-NEXT: addsd %xmm1, %xmm0
@@ -425,13 +425,13 @@ define double @f16() #0 {
;
; FMA-LABEL: f16:
; FMA: # %bb.0: # %entry
-; FMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; FMA-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: f16:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; FMA4-NEXT: vfmaddsd {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA4-NEXT: retq
entry:
@@ -451,7 +451,7 @@ define float @f17() #0 {
; NOFMA: # %bb.0: # %entry
; NOFMA-NEXT: pushq %rax
; NOFMA-NEXT: .cfi_def_cfa_offset 16
-; NOFMA-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; NOFMA-NEXT: movss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; NOFMA-NEXT: movaps %xmm0, %xmm1
; NOFMA-NEXT: movaps %xmm0, %xmm2
; NOFMA-NEXT: callq fmaf@PLT
@@ -461,13 +461,13 @@ define float @f17() #0 {
;
; FMA-LABEL: f17:
; FMA: # %bb.0: # %entry
-; FMA-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA-NEXT: vmovss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: f17:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA4-NEXT: vfmaddss {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA4-NEXT: retq
entry:
@@ -487,7 +487,7 @@ define double @f18() #0 {
; NOFMA: # %bb.0: # %entry
; NOFMA-NEXT: pushq %rax
; NOFMA-NEXT: .cfi_def_cfa_offset 16
-; NOFMA-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; NOFMA-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; NOFMA-NEXT: movaps %xmm0, %xmm1
; NOFMA-NEXT: movaps %xmm0, %xmm2
; NOFMA-NEXT: callq fma@PLT
@@ -497,13 +497,13 @@ define double @f18() #0 {
;
; FMA-LABEL: f18:
; FMA: # %bb.0: # %entry
-; FMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; FMA-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA-NEXT: retq
;
; FMA4-LABEL: f18:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; FMA4-NEXT: vfmaddsd {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA4-NEXT: retq
entry:
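
A note on the digit counts in this file: double constants are printed with 17 significant digits so the decimal string round-trips to the exact bit pattern, which is why 42.1 shows up as 4.2100000000000001E+1 rather than 4.21E+1. That string can be reproduced with plain printf (assuming IEEE-754 binary64; printf pads the exponent to two digits where the comment prints E+1):

  #include <cstdio>

  int main() {
    // 42.1 is not exactly representable; the nearest double is
    // 42.100000000000001421..., hence the 17-digit spelling above.
    std::printf("%.16E\n", 42.1); // prints 4.2100000000000001E+01
    return 0;
  }
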
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index 5f77e2cb46cbf06..d2b45ee1e03e639 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -26,7 +26,7 @@ define double @f1() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X86-SSE-NEXT: divsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp)
@@ -37,13 +37,13 @@ define double @f1() #0 {
;
; SSE-LABEL: f1:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; SSE-NEXT: divsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f1:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; AVX-NEXT: vdivsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -130,7 +130,7 @@ define double @f3(double %a, double %b) #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
; X86-SSE-NEXT: movapd %xmm0, %xmm1
; X86-SSE-NEXT: subsd {{[0-9]+}}(%esp), %xmm1
; X86-SSE-NEXT: mulsd {{[0-9]+}}(%esp), %xmm1
@@ -144,7 +144,7 @@ define double @f3(double %a, double %b) #0 {
;
; SSE-LABEL: f3:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = [-0.0E+0,0.0E+0]
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: subsd %xmm0, %xmm3
; SSE-NEXT: mulsd %xmm1, %xmm3
@@ -154,7 +154,7 @@ define double @f3(double %a, double %b) #0 {
;
; AVX-LABEL: f3:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = [-0.0E+0,0.0E+0]
; AVX-NEXT: vsubsd %xmm0, %xmm2, %xmm0
; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vsubsd %xmm0, %xmm2, %xmm0
@@ -264,7 +264,7 @@ define double @f5() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; X86-SSE-NEXT: sqrtsd %xmm0, %xmm0
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp)
@@ -275,13 +275,13 @@ define double @f5() #0 {
;
; SSE-LABEL: f5:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: sqrtsd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f5:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -311,9 +311,9 @@ define double @f6() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $28, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 32
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [3.0E+0,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll pow
; X86-SSE-NEXT: addl $28, %esp
@@ -324,8 +324,8 @@ define double @f6() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; SSE-NEXT: callq pow@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -335,8 +335,8 @@ define double @f6() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -368,7 +368,7 @@ define double @f7() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: movl $3, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: calll __powidf2
@@ -380,7 +380,7 @@ define double @f7() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; SSE-NEXT: movl $3, %edi
; SSE-NEXT: callq __powidf2@PLT
; SSE-NEXT: popq %rax
@@ -391,7 +391,7 @@ define double @f7() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: popq %rax
@@ -423,7 +423,7 @@ define double @f8() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll sin
; X86-SSE-NEXT: addl $12, %esp
@@ -434,7 +434,7 @@ define double @f8() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: callq sin@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -444,7 +444,7 @@ define double @f8() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -474,7 +474,7 @@ define double @f9() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll cos
; X86-SSE-NEXT: addl $12, %esp
@@ -485,7 +485,7 @@ define double @f9() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: callq cos@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -495,7 +495,7 @@ define double @f9() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -525,7 +525,7 @@ define double @f10() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll exp
; X86-SSE-NEXT: addl $12, %esp
@@ -536,7 +536,7 @@ define double @f10() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: callq exp@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -546,7 +546,7 @@ define double @f10() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -576,7 +576,7 @@ define double @f11() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll exp2
; X86-SSE-NEXT: addl $12, %esp
@@ -587,7 +587,7 @@ define double @f11() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; SSE-NEXT: callq exp2@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -597,7 +597,7 @@ define double @f11() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -627,7 +627,7 @@ define double @f12() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll log
; X86-SSE-NEXT: addl $12, %esp
@@ -638,7 +638,7 @@ define double @f12() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: callq log@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -648,7 +648,7 @@ define double @f12() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -678,7 +678,7 @@ define double @f13() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll log10
; X86-SSE-NEXT: addl $12, %esp
@@ -689,7 +689,7 @@ define double @f13() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: callq log10@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -699,7 +699,7 @@ define double @f13() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -729,7 +729,7 @@ define double @f14() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll log2
; X86-SSE-NEXT: addl $12, %esp
@@ -740,7 +740,7 @@ define double @f14() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: callq log2@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -750,7 +750,7 @@ define double @f14() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log2@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -780,7 +780,7 @@ define double @f15() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll rint
; X86-SSE-NEXT: addl $12, %esp
@@ -791,7 +791,7 @@ define double @f15() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; SSE-NEXT: callq rint@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -799,7 +799,7 @@ define double @f15() #0 {
;
; AVX-LABEL: f15:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -828,7 +828,7 @@ define double @f16() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll nearbyint
; X86-SSE-NEXT: addl $12, %esp
@@ -839,7 +839,7 @@ define double @f16() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; SSE-NEXT: callq nearbyint@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -847,7 +847,7 @@ define double @f16() #0 {
;
; AVX-LABEL: f16:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -877,9 +877,9 @@ define double @f19() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $28, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 32
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+1,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: calll fmod
; X86-SSE-NEXT: addl $28, %esp
@@ -890,8 +890,8 @@ define double @f19() #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; SSE-NEXT: callq fmod@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
@@ -901,8 +901,8 @@ define double @f19() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -1301,7 +1301,7 @@ define i32 @f20u(double %x) #0 {
; X86-SSE-LABEL: f20u:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm2 = [2.147483648E+9,0.0E+0]
; X86-SSE-NEXT: comisd %xmm0, %xmm2
; X86-SSE-NEXT: xorpd %xmm1, %xmm1
; X86-SSE-NEXT: ja .LBB24_2
@@ -1378,7 +1378,7 @@ define i64 @f20u64(double %x) #0 {
; X86-SSE-NEXT: subl $20, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 24
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; X86-SSE-NEXT: comisd %xmm0, %xmm1
; X86-SSE-NEXT: jbe .LBB25_2
; X86-SSE-NEXT: # %bb.1: # %entry
@@ -1406,7 +1406,7 @@ define i64 @f20u64(double %x) #0 {
;
; SSE-LABEL: f20u64:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; SSE-NEXT: comisd %xmm2, %xmm0
; SSE-NEXT: xorpd %xmm1, %xmm1
; SSE-NEXT: jb .LBB25_2
@@ -1423,7 +1423,7 @@ define i64 @f20u64(double %x) #0 {
;
; AVX1-LABEL: f20u64:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX1-NEXT: vcomisd %xmm1, %xmm0
; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: jb .LBB25_2
@@ -1537,7 +1537,7 @@ define float @f21() #0 {
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: pushl %eax
; X86-SSE-NEXT: .cfi_def_cfa_offset 8
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; X86-SSE-NEXT: cvtsd2ss %xmm0, %xmm0
; X86-SSE-NEXT: movss %xmm0, (%esp)
; X86-SSE-NEXT: flds (%esp)
@@ -1548,13 +1548,13 @@ define float @f21() #0 {
;
; SSE-LABEL: f21:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; SSE-NEXT: cvtsd2ss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f21:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/fp-logic.ll b/llvm/test/CodeGen/X86/fp-logic.ll
index 7fef4269f656501..522a1589caf09f4 100644
--- a/llvm/test/CodeGen/X86/fp-logic.ll
+++ b/llvm/test/CodeGen/X86/fp-logic.ll
@@ -99,7 +99,7 @@ define float @f6(float %x, i32 %y) {
define float @f7(float %x) {
; CHECK-LABEL: f7:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.20389539E-45,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast float %x to i32
@@ -113,7 +113,7 @@ define float @f7(float %x) {
define float @f8(float %x) {
; CHECK-LABEL: f8:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [5.60519386E-45,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast float %x to i32
@@ -177,7 +177,7 @@ define float @xor(float %x, float %y) {
define float @f7_or(float %x) {
; CHECK-LABEL: f7_or:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.20389539E-45,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: orps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast float %x to i32
@@ -189,7 +189,7 @@ define float @f7_or(float %x) {
define float @f7_xor(float %x) {
; CHECK-LABEL: f7_xor:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.20389539E-45,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: xorps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast float %x to i32
@@ -215,7 +215,7 @@ define double @doubles(double %x, double %y) {
define double @f7_double(double %x) {
; CHECK-LABEL: f7_double:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.4821969375237396E-323,0.0E+0]
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast double %x to i64
@@ -231,7 +231,7 @@ define double @f7_double(double %x) {
define float @movmsk(float %x) {
; CHECK-LABEL: movmsk:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [-0.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
%bc1 = bitcast float %x to i32
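
The tiny denormals in this file are not arbitrary float literals: fp-logic.ll tests integer bitwise ops applied to FP values, so each constant is a small integer mask reinterpreted as a float. 4.20389539E-45 is the i32 value 3, 5.60519386E-45 is 4, and 1.4821969375237396E-323 in f7_double is the i64 value 3. A minimal check (assuming IEEE-754 binary32):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    uint32_t Bits = 3; // the i32 mask used by f7's andps
    float F;
    std::memcpy(&F, &Bits, sizeof(F)); // reinterpret the bits, no conversion
    std::printf("%.8E\n", F);          // prints 4.20389539E-45
    return 0;
  }
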
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint-fp16.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint-fp16.ll
index fac14d8f14e8a17..bd3cb377bfce628 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint-fp16.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint-fp16.ll
@@ -347,7 +347,7 @@ define i64 @fptoui_f16toi64(half %x) #0 {
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: callq __extendhfsf2 at PLT
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: comiss %xmm2, %xmm0
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: jb .LBB9_2
@@ -369,7 +369,7 @@ define i64 @fptoui_f16toi64(half %x) #0 {
; F16C-NEXT: movzwl %ax, %eax
; F16C-NEXT: vmovd %eax, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; F16C-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; F16C-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; F16C-NEXT: vcomiss %xmm1, %xmm0
; F16C-NEXT: vxorps %xmm2, %xmm2, %xmm2
; F16C-NEXT: jb .LBB9_2
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
index 25a946465ff3fa2..ecdc507a882c31e 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
@@ -447,7 +447,7 @@ define i32 @fptoui_f32toi32(float %x) #0 {
; SSE-X86-LABEL: fptoui_f32toi32:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-X86-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-X86-NEXT: movss {{.*#+}} xmm2 = [2.14748365E+9,0.0E+0,0.0E+0,0.0E+0]
; SSE-X86-NEXT: comiss %xmm0, %xmm2
; SSE-X86-NEXT: xorps %xmm1, %xmm1
; SSE-X86-NEXT: ja .LBB8_2
@@ -543,7 +543,7 @@ define i64 @fptoui_f32toi64(float %x) #0 {
; SSE-X86-NEXT: andl $-8, %esp
; SSE-X86-NEXT: subl $16, %esp
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-X86-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-X86-NEXT: comiss %xmm0, %xmm1
; SSE-X86-NEXT: jbe .LBB9_2
; SSE-X86-NEXT: # %bb.1:
@@ -572,7 +572,7 @@ define i64 @fptoui_f32toi64(float %x) #0 {
;
; SSE-X64-LABEL: fptoui_f32toi64:
; SSE-X64: # %bb.0:
-; SSE-X64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-X64-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-X64-NEXT: comiss %xmm2, %xmm0
; SSE-X64-NEXT: xorps %xmm1, %xmm1
; SSE-X64-NEXT: jb .LBB9_2
@@ -597,7 +597,7 @@ define i64 @fptoui_f32toi64(float %x) #0 {
; AVX1-X86-NEXT: andl $-8, %esp
; AVX1-X86-NEXT: subl $8, %esp
; AVX1-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-X86-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX1-X86-NEXT: vcomiss %xmm0, %xmm1
; AVX1-X86-NEXT: jbe .LBB9_2
; AVX1-X86-NEXT: # %bb.1:
@@ -620,7 +620,7 @@ define i64 @fptoui_f32toi64(float %x) #0 {
;
; AVX1-X64-LABEL: fptoui_f32toi64:
; AVX1-X64: # %bb.0:
-; AVX1-X64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-X64-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX1-X64-NEXT: vcomiss %xmm1, %xmm0
; AVX1-X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX1-X64-NEXT: jb .LBB9_2
@@ -645,7 +645,7 @@ define i64 @fptoui_f32toi64(float %x) #0 {
; AVX512-X86-NEXT: andl $-8, %esp
; AVX512-X86-NEXT: subl $8, %esp
; AVX512-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512-X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-X86-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX512-X86-NEXT: xorl %edx, %edx
; AVX512-X86-NEXT: vcomiss %xmm0, %xmm1
; AVX512-X86-NEXT: setbe %dl
@@ -1087,7 +1087,7 @@ define i32 @fptoui_f64toi32(double %x) #0 {
; SSE-X86-LABEL: fptoui_f64toi32:
; SSE-X86: # %bb.0:
; SSE-X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-X86-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-X86-NEXT: movsd {{.*#+}} xmm2 = [2.147483648E+9,0.0E+0]
; SSE-X86-NEXT: comisd %xmm0, %xmm2
; SSE-X86-NEXT: xorpd %xmm1, %xmm1
; SSE-X86-NEXT: ja .LBB17_2
@@ -1183,7 +1183,7 @@ define i64 @fptoui_f64toi64(double %x) #0 {
; SSE-X86-NEXT: andl $-8, %esp
; SSE-X86-NEXT: subl $16, %esp
; SSE-X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-X86-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-X86-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; SSE-X86-NEXT: comisd %xmm0, %xmm1
; SSE-X86-NEXT: jbe .LBB18_2
; SSE-X86-NEXT: # %bb.1:
@@ -1212,7 +1212,7 @@ define i64 @fptoui_f64toi64(double %x) #0 {
;
; SSE-X64-LABEL: fptoui_f64toi64:
; SSE-X64: # %bb.0:
-; SSE-X64-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-X64-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; SSE-X64-NEXT: comisd %xmm2, %xmm0
; SSE-X64-NEXT: xorpd %xmm1, %xmm1
; SSE-X64-NEXT: jb .LBB18_2
@@ -1237,7 +1237,7 @@ define i64 @fptoui_f64toi64(double %x) #0 {
; AVX1-X86-NEXT: andl $-8, %esp
; AVX1-X86-NEXT: subl $8, %esp
; AVX1-X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-X86-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1-X86-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX1-X86-NEXT: vcomisd %xmm0, %xmm1
; AVX1-X86-NEXT: jbe .LBB18_2
; AVX1-X86-NEXT: # %bb.1:
@@ -1260,7 +1260,7 @@ define i64 @fptoui_f64toi64(double %x) #0 {
;
; AVX1-X64-LABEL: fptoui_f64toi64:
; AVX1-X64: # %bb.0:
-; AVX1-X64-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1-X64-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX1-X64-NEXT: vcomisd %xmm1, %xmm0
; AVX1-X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX1-X64-NEXT: jb .LBB18_2
@@ -1285,7 +1285,7 @@ define i64 @fptoui_f64toi64(double %x) #0 {
; AVX512-X86-NEXT: andl $-8, %esp
; AVX512-X86-NEXT: subl $8, %esp
; AVX512-X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX512-X86-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-X86-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX512-X86-NEXT: xorl %edx, %edx
; AVX512-X86-NEXT: vcomisd %xmm0, %xmm1
; AVX512-X86-NEXT: setbe %dl
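
Every unsigned-conversion test above loads the same magic number: 9.22337203E+18 (float) and 9.2233720368547758E+18 (double) are 2^63, the pivot the compare/subtract sequence uses to emulate fptoui on top of the signed cvttss2si/cvttsd2si instructions. The new comments make that recognizable at a glance; a quick way to see where the value comes from:

  #include <cmath>
  #include <cstdio>

  int main() {
    // 2^63 is exactly representable in both float and double.
    std::printf("%.8E\n", std::ldexp(1.0f, 63));  // 9.22337203E+18
    std::printf("%.16E\n", std::ldexp(1.0, 63));  // 9.2233720368547758E+18
    return 0;
  }
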
diff --git a/llvm/test/CodeGen/X86/fp-undef.ll b/llvm/test/CodeGen/X86/fp-undef.ll
index 2ae51c6c97e9bc1..227f0073e103b48 100644
--- a/llvm/test/CodeGen/X86/fp-undef.ll
+++ b/llvm/test/CodeGen/X86/fp-undef.ll
@@ -8,7 +8,7 @@
define float @fadd_undef_op0(float %x) {
; ANY-LABEL: fadd_undef_op0:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fadd float undef, %x
ret float %r
@@ -17,7 +17,7 @@ define float @fadd_undef_op0(float %x) {
define float @fadd_undef_op1(float %x) {
; ANY-LABEL: fadd_undef_op1:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fadd float %x, undef
ret float %r
@@ -26,7 +26,7 @@ define float @fadd_undef_op1(float %x) {
define float @fsub_undef_op0(float %x) {
; ANY-LABEL: fsub_undef_op0:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fsub float undef, %x
ret float %r
@@ -35,7 +35,7 @@ define float @fsub_undef_op0(float %x) {
define float @fsub_undef_op1(float %x) {
; ANY-LABEL: fsub_undef_op1:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fsub float %x, undef
ret float %r
@@ -44,7 +44,7 @@ define float @fsub_undef_op1(float %x) {
define float @fmul_undef_op0(float %x) {
; ANY-LABEL: fmul_undef_op0:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fmul float undef, %x
ret float %r
@@ -53,7 +53,7 @@ define float @fmul_undef_op0(float %x) {
define float @fmul_undef_op1(float %x) {
; ANY-LABEL: fmul_undef_op1:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fmul float %x, undef
ret float %r
@@ -62,7 +62,7 @@ define float @fmul_undef_op1(float %x) {
define float @fdiv_undef_op0(float %x) {
; ANY-LABEL: fdiv_undef_op0:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fdiv float undef, %x
ret float %r
@@ -71,7 +71,7 @@ define float @fdiv_undef_op0(float %x) {
define float @fdiv_undef_op1(float %x) {
; ANY-LABEL: fdiv_undef_op1:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fdiv float %x, undef
ret float %r
@@ -80,7 +80,7 @@ define float @fdiv_undef_op1(float %x) {
define float @frem_undef_op0(float %x) {
; ANY-LABEL: frem_undef_op0:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = frem float undef, %x
ret float %r
@@ -89,7 +89,7 @@ define float @frem_undef_op0(float %x) {
define float @frem_undef_op1(float %x) {
; ANY-LABEL: frem_undef_op1:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = frem float %x, undef
ret float %r
@@ -232,7 +232,7 @@ define float @fadd_undef_op0_nnan_constant(float %x) {
define float @fadd_undef_op1_constant(float %x) {
; ANY-LABEL: fadd_undef_op1_constant:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fadd float 2.0, undef
ret float %r
@@ -249,7 +249,7 @@ define float @fsub_undef_op0_fast_constant(float %x) {
define float @fsub_undef_op1_constant(float %x) {
; ANY-LABEL: fsub_undef_op1_constant:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fsub float 4.0, undef
ret float %r
@@ -266,7 +266,7 @@ define float @fmul_undef_op0_nnan_constant(float %x) {
define float @fmul_undef_op1_constant(float %x) {
; ANY-LABEL: fmul_undef_op1_constant:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fmul float 6.0, undef
ret float %r
@@ -283,7 +283,7 @@ define float @fdiv_undef_op0_fast_constant(float %x) {
define float @fdiv_undef_op1_constant(float %x) {
; ANY-LABEL: fdiv_undef_op1_constant:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = fdiv float 8.0, undef
ret float %r
@@ -300,7 +300,7 @@ define float @frem_undef_op0_nnan_constant(float %x) {
define float @frem_undef_op1_constant(float %x) {
; ANY-LABEL: frem_undef_op1_constant:
; ANY: # %bb.0:
-; ANY-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ANY-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; ANY-NEXT: retq
%r = frem float 10.0, undef
ret float %r
@@ -311,7 +311,7 @@ define float @frem_undef_op1_constant(float %x) {
define double @fadd_undef_op0_constant_nan(double %x) {
; ANY-LABEL: fadd_undef_op0_constant_nan:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = fadd double undef, 0x7FF8000000000000
ret double %r
@@ -328,7 +328,7 @@ define double @fadd_undef_op1_fast_constant_nan(double %x) {
define double @fsub_undef_op0_constant_nan(double %x) {
; ANY-LABEL: fsub_undef_op0_constant_nan:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = fsub double undef, 0xFFF8000000000010
ret double %r
@@ -345,7 +345,7 @@ define double @fsub_undef_op1_nnan_constant_nan(double %x) {
define double @fmul_undef_op0_constant_nan(double %x) {
; ANY-LABEL: fmul_undef_op0_constant_nan:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = fmul double undef, 0x7FF8000000000100
ret double %r
@@ -362,7 +362,7 @@ define double @fmul_undef_op1_fast_constant_nan(double %x) {
define double @fdiv_undef_op0_constant_nan(double %x) {
; ANY-LABEL: fdiv_undef_op0_constant_nan:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = fdiv double undef, 0xFFF8000000000110
ret double %r
@@ -379,7 +379,7 @@ define double @fdiv_undef_op1_nnan_constant_nan(double %x) {
define double @frem_undef_op0_constant_nan(double %x) {
; ANY-LABEL: frem_undef_op0_constant_nan:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = frem double undef, 0x7FF8000000001000
ret double %r
@@ -398,7 +398,7 @@ define double @frem_undef_op1_fast_constant_nan(double %x) {
define double @fadd_undef_op0_constant_inf(double %x) {
; ANY-LABEL: fadd_undef_op0_constant_inf:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = fadd double undef, 0x7FF0000000000000
ret double %r
@@ -415,7 +415,7 @@ define double @fadd_undef_op1_fast_constant_inf(double %x) {
define double @fsub_undef_op0_constant_inf(double %x) {
; ANY-LABEL: fsub_undef_op0_constant_inf:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = fsub double undef, 0xFFF0000000000000
ret double %r
@@ -432,7 +432,7 @@ define double @fsub_undef_op1_ninf_constant_inf(double %x) {
define double @fmul_undef_op0_constant_inf(double %x) {
; ANY-LABEL: fmul_undef_op0_constant_inf:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = fmul double undef, 0x7FF0000000000000
ret double %r
@@ -449,7 +449,7 @@ define double @fmul_undef_op1_fast_constant_inf(double %x) {
define double @fdiv_undef_op0_constant_inf(double %x) {
; ANY-LABEL: fdiv_undef_op0_constant_inf:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = fdiv double undef, 0xFFF0000000000000
ret double %r
@@ -466,7 +466,7 @@ define double @fdiv_undef_op1_ninf_constant_inf(double %x) {
define double @frem_undef_op0_constant_inf(double %x) {
; ANY-LABEL: frem_undef_op0_constant_inf:
; ANY: # %bb.0:
-; ANY-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ANY-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; ANY-NEXT: retq
%r = frem double undef, 0x7FF0000000000000
ret double %r
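
fp-undef.ll folds arithmetic on undef operands down to a NaN constant-pool load, so the comment printer needs a non-numeric spelling: NaN lanes print as the literal token NaN rather than scientific notation. A hedged sketch of that special case (the helper name is illustrative, not the upstream printConstant):

  #include <cmath>
  #include <cstdio>
  #include <string>

  // Format one lane the way these comments do: "NaN" for NaNs, otherwise
  // scientific notation (modulo printf's two-digit exponent padding).
  static std::string formatLane(double V) {
    if (std::isnan(V))
      return "NaN";
    char Buf[64];
    std::snprintf(Buf, sizeof(Buf), "%.16E", V);
    return std::string(Buf);
  }

  int main() {
    std::printf("%s\n", formatLane(std::nan("")).c_str()); // NaN
    return 0;
  }
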
diff --git a/llvm/test/CodeGen/X86/fpclamptosat.ll b/llvm/test/CodeGen/X86/fpclamptosat.ll
index 2564e7f974cd8e1..3f5ec7b530fe00d 100644
--- a/llvm/test/CodeGen/X86/fpclamptosat.ll
+++ b/llvm/test/CodeGen/X86/fpclamptosat.ll
@@ -217,9 +217,9 @@ entry:
define i16 @stest_f64i16(double %x) nounwind {
; CHECK-LABEL: stest_f64i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-3.2768E+4,0.0E+0]
; CHECK-NEXT: maxsd %xmm0, %xmm1
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [3.2767E+4,0.0E+0]
; CHECK-NEXT: minsd %xmm1, %xmm0
; CHECK-NEXT: cvttsd2si %xmm0, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
@@ -276,9 +276,9 @@ entry:
define i16 @stest_f32i16(float %x) nounwind {
; CHECK-LABEL: stest_f32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [-3.2768E+4,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: maxss %xmm0, %xmm1
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [3.2767E+4,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: minss %xmm1, %xmm0
; CHECK-NEXT: cvttss2si %xmm0, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
@@ -815,9 +815,9 @@ entry:
define i16 @stest_f64i16_mm(double %x) nounwind {
; CHECK-LABEL: stest_f64i16_mm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-3.2768E+4,0.0E+0]
; CHECK-NEXT: maxsd %xmm0, %xmm1
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [3.2767E+4,0.0E+0]
; CHECK-NEXT: minsd %xmm1, %xmm0
; CHECK-NEXT: cvttsd2si %xmm0, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
@@ -869,9 +869,9 @@ entry:
define i16 @stest_f32i16_mm(float %x) nounwind {
; CHECK-LABEL: stest_f32i16_mm:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [-3.2768E+4,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: maxss %xmm0, %xmm1
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [3.2767E+4,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: minss %xmm1, %xmm0
; CHECK-NEXT: cvttss2si %xmm0, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
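
Here the payoff is readability: -3.2768E+4 and 3.2767E+4 are INT16_MIN and INT16_MAX, so the maxsd/minsd (and maxss/minss) pairs read directly as a signed-i16 saturation clamp ahead of the truncating convert. Reference semantics of the scalar sequence, sketched in C++ (ignoring the NaN-ordering quirks of the min/max instructions):

  #include <algorithm>
  #include <cstdint>

  // What stest_f32i16 computes: clamp to [INT16_MIN, INT16_MAX], truncate.
  static int16_t stest_f32i16_ref(float X) {
    float Clamped = std::min(32767.0f, std::max(-32768.0f, X));
    return static_cast<int16_t>(Clamped); // cvttss2si, then narrow to ax
  }
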
diff --git a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
index 78ccc983d1637a5..42d6e8139c6aa32 100644
--- a/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/X86/fpclamptosat_vec.ll
@@ -79,7 +79,7 @@ entry:
define <2 x i32> @utest_f64i32(<2 x double> %x) nounwind {
; SSE-LABEL: utest_f64i32:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: subsd %xmm1, %xmm2
; SSE-NEXT: cvttsd2si %xmm2, %rax
@@ -115,7 +115,7 @@ define <2 x i32> @utest_f64i32(<2 x double> %x) nounwind {
;
; AVX2-LABEL: utest_f64i32:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX2-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vcvttsd2si %xmm2, %rax
; AVX2-NEXT: vcvttsd2si %xmm0, %rcx
@@ -348,7 +348,7 @@ entry:
define <4 x i32> @utest_f32i32(<4 x float> %x) nounwind {
; SSE-LABEL: utest_f32i32:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
; SSE-NEXT: cvttss2si %xmm1, %rax
@@ -419,7 +419,7 @@ define <4 x i32> @utest_f32i32(<4 x float> %x) nounwind {
; AVX2-LABEL: utest_f32i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
@@ -854,7 +854,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
-; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
@@ -2737,7 +2737,7 @@ entry:
define <2 x i32> @utest_f64i32_mm(<2 x double> %x) nounwind {
; SSE-LABEL: utest_f64i32_mm:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: subsd %xmm1, %xmm2
; SSE-NEXT: cvttsd2si %xmm2, %rax
@@ -2773,7 +2773,7 @@ define <2 x i32> @utest_f64i32_mm(<2 x double> %x) nounwind {
;
; AVX2-LABEL: utest_f64i32_mm:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX2-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vcvttsd2si %xmm2, %rax
; AVX2-NEXT: vcvttsd2si %xmm0, %rcx
@@ -3001,7 +3001,7 @@ entry:
define <4 x i32> @utest_f32i32_mm(<4 x float> %x) nounwind {
; SSE-LABEL: utest_f32i32_mm:
; SSE: # %bb.0: # %entry
-; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
; SSE-NEXT: cvttss2si %xmm1, %rax
@@ -3072,7 +3072,7 @@ define <4 x i32> @utest_f32i32_mm(<4 x float> %x) nounwind {
; AVX2-LABEL: utest_f32i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
@@ -3502,7 +3502,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
-; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
diff --git a/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll b/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
index 2eb351c8fac9e46..04fce7badb95146 100644
--- a/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
+++ b/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
@@ -72,7 +72,7 @@ define i1 @test_signed_i1_f32(float %f) nounwind {
;
; X86-SSE-LABEL: test_signed_i1_f32:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: maxss {{[0-9]+}}(%esp), %xmm0
; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: minss %xmm0, %xmm1
@@ -82,7 +82,7 @@ define i1 @test_signed_i1_f32(float %f) nounwind {
;
; X64-LABEL: test_signed_i1_f32:
; X64: # %bb.0:
-; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm1 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: maxss %xmm0, %xmm1
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: minss %xmm1, %xmm0
@@ -143,9 +143,9 @@ define i8 @test_signed_i8_f32(float %f) nounwind {
;
; X86-SSE-LABEL: test_signed_i8_f32:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = [-1.28E+2,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: maxss {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = [1.27E+2,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: minss %xmm0, %xmm1
; X86-SSE-NEXT: cvttss2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -153,9 +153,9 @@ define i8 @test_signed_i8_f32(float %f) nounwind {
;
; X64-LABEL: test_signed_i8_f32:
; X64: # %bb.0:
-; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm1 = [-1.28E+2,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: maxss %xmm0, %xmm1
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [1.27E+2,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: minss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
@@ -215,9 +215,9 @@ define i13 @test_signed_i13_f32(float %f) nounwind {
;
; X86-SSE-LABEL: test_signed_i13_f32:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = [-4.096E+3,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: maxss {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = [4.095E+3,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: minss %xmm0, %xmm1
; X86-SSE-NEXT: cvttss2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
@@ -225,9 +225,9 @@ define i13 @test_signed_i13_f32(float %f) nounwind {
;
; X64-LABEL: test_signed_i13_f32:
; X64: # %bb.0:
-; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm1 = [-4.096E+3,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: maxss %xmm0, %xmm1
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [4.095E+3,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: minss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
@@ -287,9 +287,9 @@ define i16 @test_signed_i16_f32(float %f) nounwind {
;
; X86-SSE-LABEL: test_signed_i16_f32:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = [-3.2768E+4,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: maxss {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = [3.2767E+4,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: minss %xmm0, %xmm1
; X86-SSE-NEXT: cvttss2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
@@ -297,9 +297,9 @@ define i16 @test_signed_i16_f32(float %f) nounwind {
;
; X64-LABEL: test_signed_i16_f32:
; X64: # %bb.0:
-; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm1 = [-3.2768E+4,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: maxss %xmm0, %xmm1
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [3.2767E+4,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: minss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
@@ -1092,7 +1092,7 @@ define i1 @test_signed_i1_f64(double %f) nounwind {
;
; X86-SSE-LABEL: test_signed_i1_f64:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [-1.0E+0,0.0E+0]
; X86-SSE-NEXT: maxsd {{[0-9]+}}(%esp), %xmm0
; X86-SSE-NEXT: xorpd %xmm1, %xmm1
; X86-SSE-NEXT: minsd %xmm0, %xmm1
@@ -1102,7 +1102,7 @@ define i1 @test_signed_i1_f64(double %f) nounwind {
;
; X64-LABEL: test_signed_i1_f64:
; X64: # %bb.0:
-; X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm1 = [-1.0E+0,0.0E+0]
; X64-NEXT: maxsd %xmm0, %xmm1
; X64-NEXT: xorpd %xmm0, %xmm0
; X64-NEXT: minsd %xmm1, %xmm0
@@ -1163,9 +1163,9 @@ define i8 @test_signed_i8_f64(double %f) nounwind {
;
; X86-SSE-LABEL: test_signed_i8_f64:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [-1.28E+2,0.0E+0]
; X86-SSE-NEXT: maxsd {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = [1.27E+2,0.0E+0]
; X86-SSE-NEXT: minsd %xmm0, %xmm1
; X86-SSE-NEXT: cvttsd2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -1173,9 +1173,9 @@ define i8 @test_signed_i8_f64(double %f) nounwind {
;
; X64-LABEL: test_signed_i8_f64:
; X64: # %bb.0:
-; X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm1 = [-1.28E+2,0.0E+0]
; X64-NEXT: maxsd %xmm0, %xmm1
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [1.27E+2,0.0E+0]
; X64-NEXT: minsd %xmm1, %xmm0
; X64-NEXT: cvttsd2si %xmm0, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
@@ -1235,9 +1235,9 @@ define i13 @test_signed_i13_f64(double %f) nounwind {
;
; X86-SSE-LABEL: test_signed_i13_f64:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [-4.096E+3,0.0E+0]
; X86-SSE-NEXT: maxsd {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = [4.095E+3,0.0E+0]
; X86-SSE-NEXT: minsd %xmm0, %xmm1
; X86-SSE-NEXT: cvttsd2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
@@ -1245,9 +1245,9 @@ define i13 @test_signed_i13_f64(double %f) nounwind {
;
; X64-LABEL: test_signed_i13_f64:
; X64: # %bb.0:
-; X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm1 = [-4.096E+3,0.0E+0]
; X64-NEXT: maxsd %xmm0, %xmm1
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [4.095E+3,0.0E+0]
; X64-NEXT: minsd %xmm1, %xmm0
; X64-NEXT: cvttsd2si %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
@@ -1307,9 +1307,9 @@ define i16 @test_signed_i16_f64(double %f) nounwind {
;
; X86-SSE-LABEL: test_signed_i16_f64:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [-3.2768E+4,0.0E+0]
; X86-SSE-NEXT: maxsd {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = [3.2767E+4,0.0E+0]
; X86-SSE-NEXT: minsd %xmm0, %xmm1
; X86-SSE-NEXT: cvttsd2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
@@ -1317,9 +1317,9 @@ define i16 @test_signed_i16_f64(double %f) nounwind {
;
; X64-LABEL: test_signed_i16_f64:
; X64: # %bb.0:
-; X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm1 = [-3.2768E+4,0.0E+0]
; X64-NEXT: maxsd %xmm0, %xmm1
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [3.2767E+4,0.0E+0]
; X64-NEXT: minsd %xmm1, %xmm0
; X64-NEXT: cvttsd2si %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
diff --git a/llvm/test/CodeGen/X86/fptosi-sat-vector-128.ll b/llvm/test/CodeGen/X86/fptosi-sat-vector-128.ll
index 2856cfa01fad15d..eaa1293ed2f981e 100644
--- a/llvm/test/CodeGen/X86/fptosi-sat-vector-128.ll
+++ b/llvm/test/CodeGen/X86/fptosi-sat-vector-128.ll
@@ -17,7 +17,7 @@ define <4 x i1> @test_signed_v4i1_v4f32(<4 x float> %f) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ucomiss %xmm1, %xmm1
; CHECK-NEXT: maxss %xmm2, %xmm1
@@ -60,10 +60,10 @@ define <4 x i1> @test_signed_v4i1_v4f32(<4 x float> %f) nounwind {
define <4 x i8> @test_signed_v4i8_v4f32(<4 x float> %f) nounwind {
; CHECK-LABEL: test_signed_v4i8_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [-1.28E+2,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movaps %xmm1, %xmm3
; CHECK-NEXT: maxss %xmm0, %xmm3
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [1.27E+2,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movaps %xmm2, %xmm4
; CHECK-NEXT: minss %xmm3, %xmm4
; CHECK-NEXT: cvttss2si %xmm4, %eax
@@ -105,10 +105,10 @@ define <4 x i16> @test_signed_v4i16_v4f32(<4 x float> %f) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [-3.2768E+4,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movaps %xmm2, %xmm3
; CHECK-NEXT: maxss %xmm1, %xmm3
-; CHECK-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm4 = [3.2767E+4,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movaps %xmm4, %xmm1
; CHECK-NEXT: minss %xmm3, %xmm1
; CHECK-NEXT: cvttss2si %xmm1, %eax
@@ -144,7 +144,7 @@ define <4 x i32> @test_signed_v4i32_v4f32(<4 x float> %f) nounwind {
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; CHECK-NEXT: cvttss2si %xmm1, %edx
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [2.14748352E+9,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: ucomiss %xmm2, %xmm1
; CHECK-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
; CHECK-NEXT: cmoval %eax, %edx
@@ -186,7 +186,7 @@ define <4 x i64> @test_signed_v4i64_v4f32(<4 x float> %f) nounwind {
; CHECK-LABEL: test_signed_v4i64_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: cvttss2si %xmm0, %rdx
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [9.22337149E+18,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: ucomiss %xmm1, %xmm0
; CHECK-NEXT: movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
; CHECK-NEXT: cmovaq %rax, %rdx
@@ -347,7 +347,7 @@ declare <2 x i128> @llvm.fptosi.sat.v2i128.v2f64(<2 x double>)
define <2 x i1> @test_signed_v2i1_v2f64(<2 x double> %f) nounwind {
; CHECK-LABEL: test_signed_v2i1_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [-1.0E+0,0.0E+0]
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: maxsd %xmm2, %xmm1
; CHECK-NEXT: xorpd %xmm3, %xmm3
@@ -374,10 +374,10 @@ define <2 x i1> @test_signed_v2i1_v2f64(<2 x double> %f) nounwind {
define <2 x i8> @test_signed_v2i8_v2f64(<2 x double> %f) nounwind {
; CHECK-LABEL: test_signed_v2i8_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-1.28E+2,0.0E+0]
; CHECK-NEXT: movapd %xmm1, %xmm2
; CHECK-NEXT: maxsd %xmm0, %xmm2
-; CHECK-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm3 = [1.27E+2,0.0E+0]
; CHECK-NEXT: movapd %xmm3, %xmm4
; CHECK-NEXT: minsd %xmm2, %xmm4
; CHECK-NEXT: cvttsd2si %xmm4, %eax
@@ -397,12 +397,12 @@ define <2 x i8> @test_signed_v2i8_v2f64(<2 x double> %f) nounwind {
define <2 x i16> @test_signed_v2i16_v2f64(<2 x double> %f) nounwind {
; CHECK-LABEL: test_signed_v2i16_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-3.2768E+4,0.0E+0]
; CHECK-NEXT: movapd %xmm1, %xmm2
; CHECK-NEXT: maxsd %xmm0, %xmm1
; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; CHECK-NEXT: maxsd %xmm0, %xmm2
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [3.2767E+4,0.0E+0]
; CHECK-NEXT: movapd %xmm0, %xmm3
; CHECK-NEXT: minsd %xmm2, %xmm3
; CHECK-NEXT: cvttsd2si %xmm3, %eax
@@ -418,10 +418,10 @@ define <2 x i16> @test_signed_v2i16_v2f64(<2 x double> %f) nounwind {
define <2 x i32> @test_signed_v2i32_v2f64(<2 x double> %f) nounwind {
; CHECK-LABEL: test_signed_v2i32_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [-2.147483648E+9,0.0E+0]
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: maxsd %xmm2, %xmm1
-; CHECK-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm3 = [2.147483647E+9,0.0E+0]
; CHECK-NEXT: minsd %xmm3, %xmm1
; CHECK-NEXT: cvttsd2si %xmm1, %eax
; CHECK-NEXT: xorl %ecx, %ecx
@@ -446,7 +446,7 @@ define <2 x i64> @test_signed_v2i64_v2f64(<2 x double> %f) nounwind {
; CHECK-LABEL: test_signed_v2i64_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: cvttsd2si %xmm0, %rax
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547748E+18,0.0E+0]
; CHECK-NEXT: ucomisd %xmm2, %xmm0
; CHECK-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
; CHECK-NEXT: cmovaq %rcx, %rax
diff --git a/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll b/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll
index e334af71397ff4f..fefc92c313511b1 100644
--- a/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll
+++ b/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll
@@ -62,7 +62,7 @@ define i1 @test_unsigned_i1_f32(float %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: xorps %xmm0, %xmm0
; X86-SSE-NEXT: maxss {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: minss %xmm0, %xmm1
; X86-SSE-NEXT: cvttss2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -72,7 +72,7 @@ define i1 @test_unsigned_i1_f32(float %f) nounwind {
; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: maxss %xmm0, %xmm1
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: minss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
@@ -125,7 +125,7 @@ define i8 @test_unsigned_i8_f32(float %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: xorps %xmm0, %xmm0
; X86-SSE-NEXT: maxss {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: minss %xmm0, %xmm1
; X86-SSE-NEXT: cvttss2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -135,7 +135,7 @@ define i8 @test_unsigned_i8_f32(float %f) nounwind {
; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: maxss %xmm0, %xmm1
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: minss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
@@ -187,7 +187,7 @@ define i13 @test_unsigned_i13_f32(float %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: xorps %xmm0, %xmm0
; X86-SSE-NEXT: maxss {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = [8.191E+3,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: minss %xmm0, %xmm1
; X86-SSE-NEXT: cvttss2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
@@ -197,7 +197,7 @@ define i13 @test_unsigned_i13_f32(float %f) nounwind {
; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: maxss %xmm0, %xmm1
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [8.191E+3,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: minss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
@@ -249,7 +249,7 @@ define i16 @test_unsigned_i16_f32(float %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: xorps %xmm0, %xmm0
; X86-SSE-NEXT: maxss {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = [6.5535E+4,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: minss %xmm0, %xmm1
; X86-SSE-NEXT: cvttss2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
@@ -259,7 +259,7 @@ define i16 @test_unsigned_i16_f32(float %f) nounwind {
; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: maxss %xmm0, %xmm1
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [6.5535E+4,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: minss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
@@ -474,7 +474,7 @@ define i50 @test_unsigned_i50_f32(float %f) nounwind {
; X86-SSE-NEXT: pushl %esi
; X86-SSE-NEXT: subl $16, %esp
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: ucomiss %xmm0, %xmm2
; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: jbe .LBB6_2
@@ -598,7 +598,7 @@ define i64 @test_unsigned_i64_f32(float %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: subl $20, %esp
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: ucomiss %xmm0, %xmm2
; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: jbe .LBB7_2
@@ -997,7 +997,7 @@ define i1 @test_unsigned_i1_f64(double %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: xorpd %xmm0, %xmm0
; X86-SSE-NEXT: maxsd {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
; X86-SSE-NEXT: minsd %xmm0, %xmm1
; X86-SSE-NEXT: cvttsd2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -1007,7 +1007,7 @@ define i1 @test_unsigned_i1_f64(double %f) nounwind {
; X64: # %bb.0:
; X64-NEXT: xorpd %xmm1, %xmm1
; X64-NEXT: maxsd %xmm0, %xmm1
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X64-NEXT: minsd %xmm1, %xmm0
; X64-NEXT: cvttsd2si %xmm0, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
@@ -1060,7 +1060,7 @@ define i8 @test_unsigned_i8_f64(double %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: xorpd %xmm0, %xmm0
; X86-SSE-NEXT: maxsd {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = [2.55E+2,0.0E+0]
; X86-SSE-NEXT: minsd %xmm0, %xmm1
; X86-SSE-NEXT: cvttsd2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -1070,7 +1070,7 @@ define i8 @test_unsigned_i8_f64(double %f) nounwind {
; X64: # %bb.0:
; X64-NEXT: xorpd %xmm1, %xmm1
; X64-NEXT: maxsd %xmm0, %xmm1
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [2.55E+2,0.0E+0]
; X64-NEXT: minsd %xmm1, %xmm0
; X64-NEXT: cvttsd2si %xmm0, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
@@ -1122,7 +1122,7 @@ define i13 @test_unsigned_i13_f64(double %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: xorpd %xmm0, %xmm0
; X86-SSE-NEXT: maxsd {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = [8.191E+3,0.0E+0]
; X86-SSE-NEXT: minsd %xmm0, %xmm1
; X86-SSE-NEXT: cvttsd2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
@@ -1132,7 +1132,7 @@ define i13 @test_unsigned_i13_f64(double %f) nounwind {
; X64: # %bb.0:
; X64-NEXT: xorpd %xmm1, %xmm1
; X64-NEXT: maxsd %xmm0, %xmm1
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [8.191E+3,0.0E+0]
; X64-NEXT: minsd %xmm1, %xmm0
; X64-NEXT: cvttsd2si %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
@@ -1184,7 +1184,7 @@ define i16 @test_unsigned_i16_f64(double %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: xorpd %xmm0, %xmm0
; X86-SSE-NEXT: maxsd {{[0-9]+}}(%esp), %xmm0
-; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = [6.5535E+4,0.0E+0]
; X86-SSE-NEXT: minsd %xmm0, %xmm1
; X86-SSE-NEXT: cvttsd2si %xmm1, %eax
; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
@@ -1194,7 +1194,7 @@ define i16 @test_unsigned_i16_f64(double %f) nounwind {
; X64: # %bb.0:
; X64-NEXT: xorpd %xmm1, %xmm1
; X64-NEXT: maxsd %xmm0, %xmm1
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [6.5535E+4,0.0E+0]
; X64-NEXT: minsd %xmm1, %xmm0
; X64-NEXT: cvttsd2si %xmm0, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
@@ -1320,7 +1320,7 @@ define i32 @test_unsigned_i32_f64(double %f) nounwind {
; X64: # %bb.0:
; X64-NEXT: xorpd %xmm1, %xmm1
; X64-NEXT: maxsd %xmm0, %xmm1
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [4.294967295E+9,0.0E+0]
; X64-NEXT: minsd %xmm1, %xmm0
; X64-NEXT: cvttsd2si %xmm0, %rax
; X64-NEXT: # kill: def $eax killed $eax killed $rax
@@ -1402,7 +1402,7 @@ define i50 @test_unsigned_i50_f64(double %f) nounwind {
; X86-SSE-NEXT: pushl %esi
; X86-SSE-NEXT: subl $16, %esp
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; X86-SSE-NEXT: ucomisd %xmm0, %xmm2
; X86-SSE-NEXT: xorpd %xmm1, %xmm1
; X86-SSE-NEXT: jbe .LBB16_2
@@ -1522,7 +1522,7 @@ define i64 @test_unsigned_i64_f64(double %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: subl $20, %esp
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; X86-SSE-NEXT: ucomisd %xmm0, %xmm2
; X86-SSE-NEXT: xorpd %xmm1, %xmm1
; X86-SSE-NEXT: jbe .LBB17_2
@@ -2455,7 +2455,7 @@ define i50 @test_unsigned_i50_f16(half %f) nounwind {
; X86-SSE-NEXT: calll __extendhfsf2
; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: ucomiss %xmm2, %xmm0
; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: jae .LBB26_2
@@ -2596,7 +2596,7 @@ define i64 @test_unsigned_i64_f16(half %f) nounwind {
; X86-SSE-NEXT: calll __extendhfsf2
; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: ucomiss %xmm2, %xmm0
; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: jae .LBB27_2
diff --git a/llvm/test/CodeGen/X86/fptoui-sat-vector-128.ll b/llvm/test/CodeGen/X86/fptoui-sat-vector-128.ll
index 0cced636dddbabe..4305886168abed6 100644
--- a/llvm/test/CodeGen/X86/fptoui-sat-vector-128.ll
+++ b/llvm/test/CodeGen/X86/fptoui-sat-vector-128.ll
@@ -19,7 +19,7 @@ define <4 x i1> @test_unsigned_v4i1_v4f32(<4 x float> %f) nounwind {
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; CHECK-NEXT: xorps %xmm2, %xmm2
; CHECK-NEXT: maxss %xmm2, %xmm1
-; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: minss %xmm3, %xmm1
; CHECK-NEXT: cvttss2si %xmm1, %eax
; CHECK-NEXT: movd %eax, %xmm1
@@ -54,7 +54,7 @@ define <4 x i8> @test_unsigned_v4i8_v4f32(<4 x float> %f) nounwind {
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: xorps %xmm3, %xmm3
; CHECK-NEXT: maxss %xmm0, %xmm3
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movaps %xmm2, %xmm4
; CHECK-NEXT: minss %xmm3, %xmm4
; CHECK-NEXT: cvttss2si %xmm4, %eax
@@ -99,7 +99,7 @@ define <4 x i16> @test_unsigned_v4i16_v4f32(<4 x float> %f) nounwind {
; CHECK-NEXT: xorps %xmm2, %xmm2
; CHECK-NEXT: xorps %xmm3, %xmm3
; CHECK-NEXT: maxss %xmm1, %xmm3
-; CHECK-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm4 = [6.5535E+4,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movaps %xmm4, %xmm1
; CHECK-NEXT: minss %xmm3, %xmm1
; CHECK-NEXT: cvttss2si %xmm1, %eax
@@ -139,7 +139,7 @@ define <4 x i32> @test_unsigned_v4i32_v4f32(<4 x float> %f) nounwind {
; CHECK-NEXT: xorps %xmm2, %xmm2
; CHECK-NEXT: ucomiss %xmm2, %xmm1
; CHECK-NEXT: cmovbl %eax, %edx
-; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm3 = [4.29496704E+9,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: ucomiss %xmm3, %xmm1
; CHECK-NEXT: movl $-1, %ecx
; CHECK-NEXT: cmoval %ecx, %edx
@@ -177,7 +177,7 @@ define <4 x i32> @test_unsigned_v4i32_v4f32(<4 x float> %f) nounwind {
define <4 x i64> @test_unsigned_v4i64_v4f32(<4 x float> %f) nounwind {
; CHECK-LABEL: test_unsigned_v4i64_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movaps %xmm0, %xmm2
; CHECK-NEXT: subss %xmm1, %xmm2
; CHECK-NEXT: cvttss2si %xmm2, %rax
@@ -190,7 +190,7 @@ define <4 x i64> @test_unsigned_v4i64_v4f32(<4 x float> %f) nounwind {
; CHECK-NEXT: xorps %xmm3, %xmm3
; CHECK-NEXT: ucomiss %xmm3, %xmm0
; CHECK-NEXT: cmovbq %rax, %rdx
-; CHECK-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm4 = [1.8446743E+19,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: ucomiss %xmm4, %xmm0
; CHECK-NEXT: movq $-1, %rcx
; CHECK-NEXT: cmovaq %rcx, %rdx
@@ -352,7 +352,7 @@ define <2 x i1> @test_unsigned_v2i1_v2f64(<2 x double> %f) nounwind {
; CHECK-NEXT: xorpd %xmm2, %xmm2
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: maxsd %xmm2, %xmm1
-; CHECK-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm3 = [1.0E+0,0.0E+0]
; CHECK-NEXT: minsd %xmm3, %xmm1
; CHECK-NEXT: cvttsd2si %xmm1, %rax
; CHECK-NEXT: movq %rax, %xmm1
@@ -374,7 +374,7 @@ define <2 x i8> @test_unsigned_v2i8_v2f64(<2 x double> %f) nounwind {
; CHECK-NEXT: xorpd %xmm1, %xmm1
; CHECK-NEXT: xorpd %xmm2, %xmm2
; CHECK-NEXT: maxsd %xmm0, %xmm2
-; CHECK-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm3 = [2.55E+2,0.0E+0]
; CHECK-NEXT: movapd %xmm3, %xmm4
; CHECK-NEXT: minsd %xmm2, %xmm4
; CHECK-NEXT: cvttsd2si %xmm4, %eax
@@ -399,7 +399,7 @@ define <2 x i16> @test_unsigned_v2i16_v2f64(<2 x double> %f) nounwind {
; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; CHECK-NEXT: xorpd %xmm2, %xmm2
; CHECK-NEXT: maxsd %xmm0, %xmm2
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [6.5535E+4,0.0E+0]
; CHECK-NEXT: movapd %xmm0, %xmm3
; CHECK-NEXT: minsd %xmm2, %xmm3
; CHECK-NEXT: cvttsd2si %xmm3, %eax
@@ -418,7 +418,7 @@ define <2 x i32> @test_unsigned_v2i32_v2f64(<2 x double> %f) nounwind {
; CHECK-NEXT: xorpd %xmm2, %xmm2
; CHECK-NEXT: xorpd %xmm1, %xmm1
; CHECK-NEXT: maxsd %xmm0, %xmm1
-; CHECK-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm3 = [4.294967295E+9,0.0E+0]
; CHECK-NEXT: movapd %xmm3, %xmm4
; CHECK-NEXT: minsd %xmm1, %xmm4
; CHECK-NEXT: cvttsd2si %xmm4, %rax
@@ -438,7 +438,7 @@ define <2 x i32> @test_unsigned_v2i32_v2f64(<2 x double> %f) nounwind {
define <2 x i64> @test_unsigned_v2i64_v2f64(<2 x double> %f) nounwind {
; CHECK-LABEL: test_unsigned_v2i64_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: subsd %xmm2, %xmm1
; CHECK-NEXT: cvttsd2si %xmm1, %rax
@@ -451,7 +451,7 @@ define <2 x i64> @test_unsigned_v2i64_v2f64(<2 x double> %f) nounwind {
; CHECK-NEXT: xorpd %xmm3, %xmm3
; CHECK-NEXT: ucomisd %xmm3, %xmm0
; CHECK-NEXT: cmovbq %rax, %rdx
-; CHECK-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm4 = [1.844674407370955E+19,0.0E+0]
; CHECK-NEXT: ucomisd %xmm4, %xmm0
; CHECK-NEXT: movq $-1, %rcx
; CHECK-NEXT: cmovaq %rcx, %rdx
diff --git a/llvm/test/CodeGen/X86/ftrunc.ll b/llvm/test/CodeGen/X86/ftrunc.ll
index 08705e9cdc59c27..da44b5ec1371e6d 100644
--- a/llvm/test/CodeGen/X86/ftrunc.ll
+++ b/llvm/test/CodeGen/X86/ftrunc.ll
@@ -122,7 +122,7 @@ define <4 x float> @trunc_unsigned_v4f32(<4 x float> %x) #0 {
define <2 x double> @trunc_unsigned_v2f64(<2 x double> %x) #0 {
; SSE2-LABEL: trunc_unsigned_v2f64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; SSE2-NEXT: movapd %xmm0, %xmm1
; SSE2-NEXT: subsd %xmm2, %xmm1
; SSE2-NEXT: cvttsd2si %xmm1, %rax
@@ -170,7 +170,7 @@ define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) #0 {
; SSE2-LABEL: trunc_unsigned_v4f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movapd %xmm1, %xmm2
-; SSE2-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE2-NEXT: movsd {{.*#+}} xmm3 = [9.2233720368547758E+18,0.0E+0]
; SSE2-NEXT: subsd %xmm3, %xmm1
; SSE2-NEXT: cvttsd2si %xmm1, %rax
; SSE2-NEXT: cvttsd2si %xmm2, %rcx
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index 7225257203161b2..b42f6fdea34b65f 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -334,7 +334,7 @@ define i64 @test_fptoui_i64(ptr %p) #0 {
; CHECK-I686-NEXT: calll __extendhfsf2
; CHECK-I686-NEXT: fstps {{[0-9]+}}(%esp)
; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-I686-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; CHECK-I686-NEXT: ucomiss %xmm1, %xmm0
; CHECK-I686-NEXT: jae .LBB9_2
; CHECK-I686-NEXT: # %bb.1:
@@ -1066,12 +1066,12 @@ define void @main.158() #0 {
; CHECK-LIBCALL-NEXT: xorps %xmm0, %xmm0
; CHECK-LIBCALL-NEXT: callq __truncsfhf2@PLT
; CHECK-LIBCALL-NEXT: callq __extendhfsf2@PLT
-; CHECK-LIBCALL-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-LIBCALL-NEXT: movss {{.*#+}} xmm1 = [8.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-LIBCALL-NEXT: ucomiss %xmm0, %xmm1
; CHECK-LIBCALL-NEXT: xorps %xmm0, %xmm0
; CHECK-LIBCALL-NEXT: jae .LBB20_2
; CHECK-LIBCALL-NEXT: # %bb.1: # %entry
-; CHECK-LIBCALL-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-LIBCALL-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; CHECK-LIBCALL-NEXT: .LBB20_2: # %entry
; CHECK-LIBCALL-NEXT: callq __truncsfhf2@PLT
; CHECK-LIBCALL-NEXT: pextrw $0, %xmm0, %eax
@@ -1085,11 +1085,11 @@ define void @main.158() #0 {
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; BWON-F16C-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; BWON-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
-; BWON-F16C-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; BWON-F16C-NEXT: vmovss {{.*#+}} xmm2 = [8.0E+0,0.0E+0,0.0E+0,0.0E+0]
; BWON-F16C-NEXT: vucomiss %xmm1, %xmm2
; BWON-F16C-NEXT: jae .LBB20_2
; BWON-F16C-NEXT: # %bb.1: # %entry
-; BWON-F16C-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; BWON-F16C-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; BWON-F16C-NEXT: .LBB20_2: # %entry
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; BWON-F16C-NEXT: vmovd %xmm0, %eax
@@ -1105,12 +1105,12 @@ define void @main.158() #0 {
; CHECK-I686-NEXT: movw %ax, (%esp)
; CHECK-I686-NEXT: calll __extendhfsf2
; CHECK-I686-NEXT: fstps {{[0-9]+}}(%esp)
-; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = [8.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-I686-NEXT: ucomiss {{[0-9]+}}(%esp), %xmm0
; CHECK-I686-NEXT: xorps %xmm0, %xmm0
; CHECK-I686-NEXT: jae .LBB20_2
; CHECK-I686-NEXT: # %bb.1: # %entry
-; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-I686-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; CHECK-I686-NEXT: .LBB20_2: # %entry
; CHECK-I686-NEXT: movss %xmm0, (%esp)
; CHECK-I686-NEXT: calll __truncsfhf2
diff --git a/llvm/test/CodeGen/X86/insertelement-ones.ll b/llvm/test/CodeGen/X86/insertelement-ones.ll
index 5470fae5fd5812f..ed7cbb0a8430d93 100644
--- a/llvm/test/CodeGen/X86/insertelement-ones.ll
+++ b/llvm/test/CodeGen/X86/insertelement-ones.ll
@@ -150,7 +150,7 @@ define <4 x i32> @insert_v4i32_01x3(<4 x i32> %a) {
define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
; SSE2-LABEL: insert_v8i32_x12345x7:
; SSE2: # %bb.0:
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm2 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT: movl $-1, %eax
; SSE2-NEXT: movd %eax, %xmm2
@@ -160,7 +160,7 @@ define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
;
; SSE3-LABEL: insert_v8i32_x12345x7:
; SSE3: # %bb.0:
-; SSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE3-NEXT: movss {{.*#+}} xmm2 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE3-NEXT: movl $-1, %eax
; SSE3-NEXT: movd %eax, %xmm2
@@ -170,7 +170,7 @@ define <8 x i32> @insert_v8i32_x12345x7(<8 x i32> %a) {
;
; SSSE3-LABEL: insert_v8i32_x12345x7:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movss {{.*#+}} xmm2 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSSE3-NEXT: movl $-1, %eax
; SSSE3-NEXT: movd %eax, %xmm2
diff --git a/llvm/test/CodeGen/X86/ldexp.ll b/llvm/test/CodeGen/X86/ldexp.ll
index ec128fc6686c85b..2be5dec156690ed 100644
--- a/llvm/test/CodeGen/X86/ldexp.ll
+++ b/llvm/test/CodeGen/X86/ldexp.ll
@@ -5,7 +5,7 @@
define float @ldexp_f32(i8 zeroext %x) {
; X64-LABEL: ldexp_f32:
; X64: # %bb.0:
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: jmp ldexpf@PLT # TAILCALL
;
; WIN32-LABEL: ldexp_f32:
@@ -86,7 +86,7 @@ define float @ldexp_f32(i8 zeroext %x) {
define double @ldexp_f64(i8 zeroext %x) {
; X64-LABEL: ldexp_f64:
; X64: # %bb.0:
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; X64-NEXT: jmp ldexp@PLT # TAILCALL
;
; WIN32-LABEL: ldexp_f64:
diff --git a/llvm/test/CodeGen/X86/load-scalar-as-vector.ll b/llvm/test/CodeGen/X86/load-scalar-as-vector.ll
index 5ebcde3053a7b3b..3edbcd1fe18ebd2 100644
--- a/llvm/test/CodeGen/X86/load-scalar-as-vector.ll
+++ b/llvm/test/CodeGen/X86/load-scalar-as-vector.ll
@@ -587,13 +587,13 @@ define <2 x double> @fsub_op1_constant(ptr %p) nounwind {
define <4 x float> @fsub_op0_constant(ptr %p) nounwind {
; SSE-LABEL: fsub_op0_constant:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: subss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fsub_op0_constant:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsubss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%x = load float, ptr %p
@@ -641,13 +641,13 @@ define <2 x double> @fdiv_op1_constant(ptr %p) nounwind {
define <4 x float> @fdiv_op0_constant(ptr %p) nounwind {
; SSE-LABEL: fdiv_op0_constant:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: divss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fdiv_op0_constant:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%x = load float, ptr %p
@@ -661,7 +661,7 @@ define <4 x float> @frem_op1_constant(ptr %p) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: pushq %rax
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: callq fmodf@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: retq
@@ -670,7 +670,7 @@ define <4 x float> @frem_op1_constant(ptr %p) nounwind {
; AVX: # %bb.0:
; AVX-NEXT: pushq %rax
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fmodf@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: retq
@@ -685,7 +685,7 @@ define <2 x double> @frem_op0_constant(ptr %p) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: pushq %rax
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: callq fmod@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: retq
@@ -694,7 +694,7 @@ define <2 x double> @frem_op0_constant(ptr %p) nounwind {
; AVX: # %bb.0:
; AVX-NEXT: pushq %rax
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/logical-load-fold.ll b/llvm/test/CodeGen/X86/logical-load-fold.ll
index 3890c1869419655..1c3f209fc1e66d1 100644
--- a/llvm/test/CodeGen/X86/logical-load-fold.ll
+++ b/llvm/test/CodeGen/X86/logical-load-fold.ll
@@ -14,14 +14,14 @@ define double @load_double_no_fold(double %x, double %y) {
; SSE2-LABEL: load_double_no_fold:
; SSE2: # %bb.0:
; SSE2-NEXT: cmplesd %xmm0, %xmm1
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; SSE2-NEXT: andpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: load_double_no_fold:
; AVX: # %bb.0:
; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -35,14 +35,14 @@ define float @load_float_no_fold(float %x, float %y) {
; SSE2-LABEL: load_float_no_fold:
; SSE2: # %bb.0:
; SSE2-NEXT: cmpless %xmm0, %xmm1
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: andps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: load_float_no_fold:
; AVX: # %bb.0:
; AVX-NEXT: vcmpless %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/lsr-static-addr.ll b/llvm/test/CodeGen/X86/lsr-static-addr.ll
index 67d6e6d812e1143..a06ead491bf7df5 100644
--- a/llvm/test/CodeGen/X86/lsr-static-addr.ll
+++ b/llvm/test/CodeGen/X86/lsr-static-addr.ll
@@ -11,7 +11,7 @@ define void @foo(i64 %n) nounwind {
; CHECK-NEXT: jle .LBB0_3
; CHECK-NEXT: # %bb.1: # %for.body.preheader
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [2.2999999999999998E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -30,7 +30,7 @@ define void @foo(i64 %n) nounwind {
; ATOM-NEXT: jle .LBB0_3
; ATOM-NEXT: # %bb.1: # %for.body.preheader
; ATOM-NEXT: xorl %eax, %eax
-; ATOM-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; ATOM-NEXT: movsd {{.*#+}} xmm0 = [2.2999999999999998E+0,0.0E+0]
; ATOM-NEXT: .p2align 4, 0x90
; ATOM-NEXT: .LBB0_2: # %for.body
; ATOM-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll b/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll
index 8b626a937ed3dd8..5828f06bf1c39bf 100644
--- a/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll
+++ b/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll
@@ -19,7 +19,7 @@ define void @PR24199(i32 %a0) {
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je .LBB0_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: jmp .LBB0_3
; CHECK-NEXT: .LBB0_2: # %if.then
; CHECK-NEXT: xorps %xmm0, %xmm0
@@ -30,7 +30,7 @@ define void @PR24199(i32 %a0) {
; CHECK-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Reload
; CHECK-NEXT: # xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: mulss %xmm0, %xmm2
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: addss %xmm1, %xmm0
; CHECK-NEXT: addss %xmm2, %xmm0
; CHECK-NEXT: movss %xmm0, (%rax)
diff --git a/llvm/test/CodeGen/X86/masked-iv-safe.ll b/llvm/test/CodeGen/X86/masked-iv-safe.ll
index f5190cff4471468..4e4ad3a0161e038 100644
--- a/llvm/test/CodeGen/X86/masked-iv-safe.ll
+++ b/llvm/test/CodeGen/X86/masked-iv-safe.ll
@@ -8,9 +8,9 @@ define void @count_up(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: count_up:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-80, %rax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -54,9 +54,9 @@ define void @count_down(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: count_down:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $80, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB1_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -100,9 +100,9 @@ define void @count_up_signed(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: count_up_signed:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-80, %rax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB2_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -148,9 +148,9 @@ define void @count_down_signed(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: count_down_signed:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $80, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB3_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -196,9 +196,9 @@ define void @another_count_up(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: another_count_up:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-8, %rax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB4_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -246,9 +246,9 @@ define void @another_count_down(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: another_count_down:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-2040, %rax # imm = 0xF808
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: movq %rdi, %rcx
; CHECK-NEXT: movq %rdi, %rdx
; CHECK-NEXT: .p2align 4, 0x90
@@ -300,9 +300,9 @@ define void @another_count_up_signed(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: another_count_up_signed:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-8, %rax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB6_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -348,9 +348,9 @@ define void @another_count_down_signed(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: another_count_down_signed:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $8, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB7_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/X86/masked-iv-unsafe.ll b/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
index 045c42627a397ee..f10db3424c2ea06 100644
--- a/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
+++ b/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
@@ -8,9 +8,9 @@ define void @count_up(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: count_up:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $10, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -61,9 +61,9 @@ define void @count_down(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: count_down:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $10, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB1_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -117,9 +117,9 @@ define void @count_up_signed(ptr %d, i64 %n) nounwind {
; CHECK-NEXT: movl $10, %eax
; CHECK-NEXT: movl $167772160, %ecx # imm = 0xA000000
; CHECK-NEXT: movl $2560, %edx # imm = 0xA00
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB2_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -177,9 +177,9 @@ define void @count_down_signed(ptr %d, i64 %n) nounwind {
; CHECK-NEXT: movq $-10, %rax
; CHECK-NEXT: movl $167772160, %ecx # imm = 0xA000000
; CHECK-NEXT: movl $2560, %edx # imm = 0xA00
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB3_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -235,9 +235,9 @@ define void @another_count_up(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: another_count_up:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB4_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -288,9 +288,9 @@ return:
define void @another_count_down(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: another_count_down:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB5_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -342,9 +342,9 @@ define void @another_count_up_signed(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: another_count_up_signed:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: movq %rdi, %rdx
; CHECK-NEXT: .p2align 4, 0x90
@@ -406,9 +406,9 @@ define void @another_count_down_signed(ptr %d, i64 %n) nounwind {
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: shlq $24, %rcx
; CHECK-NEXT: shlq $8, %rsi
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB7_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -464,9 +464,9 @@ define void @yet_another_count_down(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: yet_another_count_down:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-2040, %rax # imm = 0xF808
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: movq %rdi, %rcx
; CHECK-NEXT: movq %rdi, %rdx
; CHECK-NEXT: .p2align 4, 0x90
@@ -518,9 +518,9 @@ define void @yet_another_count_up(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: yet_another_count_up:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB9_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -572,9 +572,9 @@ define void @still_another_count_down(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: still_another_count_down:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $10, %eax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB10_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -626,9 +626,9 @@ define void @yet_another_count_up_signed(ptr %d, i64 %n) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-10, %rax
; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB11_1: # %loop
@@ -687,9 +687,9 @@ define void @yet_another_count_down_signed(ptr %d, i64 %n) nounwind {
; CHECK-NEXT: movl $10, %eax
; CHECK-NEXT: movl $167772160, %ecx # imm = 0xA000000
; CHECK-NEXT: movl $2560, %edx # imm = 0xA00
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB12_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
index 1e31ee7ad6b590b..595f8491b405c9e 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -1074,7 +1074,7 @@ define void @merge_4i32_i32_combine(ptr %dst, ptr %src) {
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: movss {{.*#+}} xmm1 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE1-NEXT: andps %xmm0, %xmm1
; X86-SSE1-NEXT: movaps %xmm1, (%eax)
; X86-SSE1-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/neg_fp.ll b/llvm/test/CodeGen/X86/neg_fp.ll
index 0d50ebd475f0104..802098250981983 100644
--- a/llvm/test/CodeGen/X86/neg_fp.ll
+++ b/llvm/test/CodeGen/X86/neg_fp.ll
@@ -30,7 +30,7 @@ define double @negation_propagation(ptr %arg, double %arg1, double %arg2) nounwi
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: andl $-8, %esp
; CHECK-NEXT: subl $8, %esp
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; CHECK-NEXT: divsd 12(%ebp), %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: mulsd %xmm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/nontemporal-4.ll b/llvm/test/CodeGen/X86/nontemporal-4.ll
index 0f42a9a9cb7191b..6a8df2445690ab6 100644
--- a/llvm/test/CodeGen/X86/nontemporal-4.ll
+++ b/llvm/test/CodeGen/X86/nontemporal-4.ll
@@ -78,7 +78,7 @@ define void @test_constant_v2i64_align1(ptr %dst) nounwind {
;
; SSE4A-LABEL: test_constant_v2i64_align1:
; SSE4A: # %bb.0:
-; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [4.9406564584124654E-324,0.0E+0]
; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
; SSE4A-NEXT: xorl %eax, %eax
; SSE4A-NEXT: movntiq %rax, (%rdi)
@@ -340,7 +340,7 @@ define void @test_constant_v4i64_align1(ptr %dst) nounwind {
;
; SSE4A-LABEL: test_constant_v4i64_align1:
; SSE4A: # %bb.0:
-; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
; SSE4A-NEXT: xorl %eax, %eax
; SSE4A-NEXT: movntiq %rax, (%rdi)
@@ -905,7 +905,7 @@ define void @test_constant_v8i64_align1(ptr %dst) nounwind {
;
; SSE4A-LABEL: test_constant_v8i64_align1:
; SSE4A: # %bb.0:
-; SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE4A-NEXT: movsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; SSE4A-NEXT: movntsd %xmm0, 8(%rdi)
; SSE4A-NEXT: xorl %eax, %eax
; SSE4A-NEXT: movntiq %rax, (%rdi)
diff --git a/llvm/test/CodeGen/X86/oss-fuzz-25184.ll b/llvm/test/CodeGen/X86/oss-fuzz-25184.ll
index 8e6b343ab0e0b03..87e20e566ae7733 100644
--- a/llvm/test/CodeGen/X86/oss-fuzz-25184.ll
+++ b/llvm/test/CodeGen/X86/oss-fuzz-25184.ll
@@ -6,7 +6,7 @@
define <2 x double> @test_fpext() {
; CHECK-LABEL: test_fpext:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.9406564584124654E-324,0.0E+0]
; CHECK-NEXT: retq
%tmp12 = insertelement <4 x float> undef, float 0.000000e+00, i32 3
%tmp5 = fpext <4 x float> %tmp12 to <4 x double>
diff --git a/llvm/test/CodeGen/X86/peep-test-0.ll b/llvm/test/CodeGen/X86/peep-test-0.ll
index d31d2620bf9fe9c..71dadf7edea3afa 100644
--- a/llvm/test/CodeGen/X86/peep-test-0.ll
+++ b/llvm/test/CodeGen/X86/peep-test-0.ll
@@ -7,7 +7,7 @@ define void @loop(i64 %n, ptr nocapture %d) nounwind {
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: shlq $4, %rax
; CHECK-NEXT: addq %rsi, %rax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [3.0E+0,0.0E+0]
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_1: # %bb
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/X86/pow.ll b/llvm/test/CodeGen/X86/pow.ll
index fc1593005debc8e..b3a5268460aa3f8 100644
--- a/llvm/test/CodeGen/X86/pow.ll
+++ b/llvm/test/CodeGen/X86/pow.ll
@@ -87,7 +87,7 @@ define <2 x double> @pow_v2f64_one_fourth_fmf(<2 x double> %x) nounwind {
define float @pow_f32_one_fourth_not_enough_fmf(float %x) nounwind {
; CHECK-LABEL: pow_f32_one_fourth_not_enough_fmf:
; CHECK: # %bb.0:
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [2.5E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: jmp powf@PLT # TAILCALL
%r = call afn ninf float @llvm.pow.f32(float %x, float 2.5e-01)
ret float %r
@@ -96,7 +96,7 @@ define float @pow_f32_one_fourth_not_enough_fmf(float %x) nounwind {
define double @pow_f64_one_fourth_not_enough_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_one_fourth_not_enough_fmf:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.5E-1,0.0E+0]
 ; CHECK-NEXT: jmp pow@PLT # TAILCALL
%r = call nsz ninf double @llvm.pow.f64(double %x, double 2.5e-01)
ret double %r
@@ -108,23 +108,23 @@ define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind
; CHECK-NEXT: subq $56, %rsp
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [2.5E-1,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq powf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [2.5E-1,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq powf@PLT
; CHECK-NEXT: unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [2.5E-1,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq powf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [2.5E-1,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq powf@PLT
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -142,12 +142,12 @@ define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwi
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.5E-1,0.0E+0]
 ; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [2.5E-1,0.0E+0]
 ; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -205,7 +205,7 @@ define x86_fp80 @pow_f80_one_third_fmf(x86_fp80 %x) nounwind {
define double @pow_f64_not_exactly_one_third_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_not_exactly_one_third_fmf:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.3333333333333337E-1,0.0E+0]
 ; CHECK-NEXT: jmp pow@PLT # TAILCALL
%r = call nsz nnan ninf afn double @llvm.pow.f64(double %x, double 0x3fd5555555555556)
ret double %r
@@ -216,7 +216,7 @@ define double @pow_f64_not_exactly_one_third_fmf(double %x) nounwind {
define double @pow_f64_not_enough_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_not_enough_fmf:
; CHECK: # %bb.0:
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.3333333333333331E-1,0.0E+0]
 ; CHECK-NEXT: jmp pow@PLT # TAILCALL
%r = call nsz ninf afn double @llvm.pow.f64(double %x, double 0x3fd5555555555555)
ret double %r
diff --git a/llvm/test/CodeGen/X86/powi-int32min.ll b/llvm/test/CodeGen/X86/powi-int32min.ll
index 3a1304c61c49ac7..b3093a08c496faf 100644
--- a/llvm/test/CodeGen/X86/powi-int32min.ll
+++ b/llvm/test/CodeGen/X86/powi-int32min.ll
@@ -5,7 +5,7 @@ define float @test_powi(ptr %p) nounwind {
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-COUNT-31: mulss %xmm1, %xmm1
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss %xmm1, %xmm0
; CHECK-NEXT: retq
bb:
diff --git a/llvm/test/CodeGen/X86/pr23103.ll b/llvm/test/CodeGen/X86/pr23103.ll
index 2146d32f8cc1812..2142ae8657aebc3 100644
--- a/llvm/test/CodeGen/X86/pr23103.ll
+++ b/llvm/test/CodeGen/X86/pr23103.ll
@@ -14,7 +14,7 @@ define <1 x double> @pr23103(ptr align 8 %Vp) {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT: callq foo@PLT
-; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [NaN,0.0E+0]
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/pr37879.ll b/llvm/test/CodeGen/X86/pr37879.ll
index 06bf74d13ffaeca..60ca7c5b6d22b22 100644
--- a/llvm/test/CodeGen/X86/pr37879.ll
+++ b/llvm/test/CodeGen/X86/pr37879.ll
@@ -7,7 +7,7 @@ define double @foo(ptr nocapture readonly) #0 {
; CHECK-NEXT: movq (%rax), %rax
; CHECK-NEXT: vcvtsi2sd %rax, %xmm0, %xmm1
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; CHECK-NEXT: retq
%2 = load i64, ptr undef, align 8
diff --git a/llvm/test/CodeGen/X86/pr40539.ll b/llvm/test/CodeGen/X86/pr40539.ll
index f92d4a90bf8525f..56d80a025fa084a 100644
--- a/llvm/test/CodeGen/X86/pr40539.ll
+++ b/llvm/test/CodeGen/X86/pr40539.ll
@@ -40,10 +40,10 @@ define zeroext i1 @_Z8test_cosv() {
; CHECK-NEXT: subl $8, %esp
; CHECK-NEXT: .cfi_def_cfa_offset 12
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [8.70000004E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [8.60000014E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: #APP
; CHECK-NEXT: fcos
diff --git a/llvm/test/CodeGen/X86/pr44749.ll b/llvm/test/CodeGen/X86/pr44749.ll
index cc9963dc2d8cc67..9df2a4b14dcf4ad 100644
--- a/llvm/test/CodeGen/X86/pr44749.ll
+++ b/llvm/test/CodeGen/X86/pr44749.ll
@@ -15,10 +15,10 @@ define i32 @a() {
; CHECK-NEXT: setne %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: cvtsi2sd %eax, %xmm0
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [1.0E+2,0.0E+0]
; CHECK-NEXT: subsd %xmm2, %xmm0
-; CHECK-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm3 = [1.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [3.1400000000000001E+0,0.0E+0]
; CHECK-NEXT: cmplesd %xmm1, %xmm0
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: andpd %xmm3, %xmm1
diff --git a/llvm/test/CodeGen/X86/pr59258.ll b/llvm/test/CodeGen/X86/pr59258.ll
index 61ddb24eaaf87d4..e5f5ca71739df30 100644
--- a/llvm/test/CodeGen/X86/pr59258.ll
+++ b/llvm/test/CodeGen/X86/pr59258.ll
@@ -90,14 +90,14 @@ define <8 x half> @cvt_and_clamp2(<8 x float>) nounwind {
 ; CHECK-NEXT: callq fmaxf@PLT
 ; CHECK-NEXT: callq __truncsfhf2@PLT
 ; CHECK-NEXT: callq __extendhfsf2@PLT
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq fminf@PLT
 ; CHECK-NEXT: callq __truncsfhf2@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: callq __extendhfsf2@PLT
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq fminf@PLT
 ; CHECK-NEXT: callq __truncsfhf2@PLT
; CHECK-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
@@ -106,14 +106,14 @@ define <8 x half> @cvt_and_clamp2(<8 x float>) nounwind {
; CHECK-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: callq __extendhfsf2@PLT
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq fminf@PLT
 ; CHECK-NEXT: callq __truncsfhf2@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movd (%rsp), %xmm0 # 4-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: callq __extendhfsf2@PLT
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq fminf@PLT
 ; CHECK-NEXT: callq __truncsfhf2@PLT
; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
@@ -124,14 +124,14 @@ define <8 x half> @cvt_and_clamp2(<8 x float>) nounwind {
; CHECK-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: callq __extendhfsf2@PLT
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq fminf@PLT
 ; CHECK-NEXT: callq __truncsfhf2@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: callq __extendhfsf2@PLT
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq fminf@PLT
 ; CHECK-NEXT: callq __truncsfhf2@PLT
; CHECK-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
@@ -140,14 +140,14 @@ define <8 x half> @cvt_and_clamp2(<8 x float>) nounwind {
; CHECK-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: callq __extendhfsf2@PLT
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq fminf@PLT
 ; CHECK-NEXT: callq __truncsfhf2@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: callq __extendhfsf2@PLT
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
 ; CHECK-NEXT: callq fminf@PLT
 ; CHECK-NEXT: callq __truncsfhf2@PLT
; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
diff --git a/llvm/test/CodeGen/X86/pr59305.ll b/llvm/test/CodeGen/X86/pr59305.ll
index cb98e9d5dda8e93..c2f6d21a41d4dc4 100644
--- a/llvm/test/CodeGen/X86/pr59305.ll
+++ b/llvm/test/CodeGen/X86/pr59305.ll
@@ -8,17 +8,17 @@ define double @foo(double %0) #0 {
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movl $1024, %edi # imm = 0x400
 ; CHECK-NEXT: callq fesetround@PLT
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
; CHECK-NEXT: divsd (%rsp), %xmm1 # 8-byte Folded Reload
; CHECK-NEXT: movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movl $1024, %edi # imm = 0x400
 ; CHECK-NEXT: callq fesetround@PLT
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
; CHECK-NEXT: divsd (%rsp), %xmm0 # 8-byte Folded Reload
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movl $1024, %edi # imm = 0x400
 ; CHECK-NEXT: callq fesetround@PLT
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
; CHECK-NEXT: divsd (%rsp), %xmm2 # 8-byte Folded Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
diff --git a/llvm/test/CodeGen/X86/pseudo_cmov_lower2.ll b/llvm/test/CodeGen/X86/pseudo_cmov_lower2.ll
index 6a009ec0efed72f..f2c1269143fbc54 100644
--- a/llvm/test/CodeGen/X86/pseudo_cmov_lower2.ll
+++ b/llvm/test/CodeGen/X86/pseudo_cmov_lower2.ll
@@ -29,7 +29,7 @@ define double @foo2(float %p1, double %p2, double %p3) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm3, %xmm3
; CHECK-NEXT: ucomiss %xmm3, %xmm0
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.25E+0,0.0E+0]
; CHECK-NEXT: jae .LBB1_1
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: addsd %xmm0, %xmm2
@@ -122,7 +122,7 @@ define double @foo5(float %p1, double %p2, double %p3) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm3, %xmm3
; CHECK-NEXT: ucomiss %xmm3, %xmm0
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.25E+0,0.0E+0]
; CHECK-NEXT: jae .LBB4_1
; CHECK-NEXT: # %bb.2: # %select.false
; CHECK-NEXT: addsd %xmm2, %xmm0
@@ -156,7 +156,7 @@ define double @foo6(float %p1, double %p2, double %p3) nounwind {
; CHECK-NEXT: movaps %xmm0, %xmm3
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: ucomiss %xmm0, %xmm3
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.25E+0,0.0E+0]
; CHECK-NEXT: jae .LBB5_1
; CHECK-NEXT: # %bb.2: # %select.false
; CHECK-NEXT: addsd %xmm2, %xmm0
@@ -203,7 +203,7 @@ define double @foo1_g(float %p1, double %p2, double %p3) nounwind !dbg !4 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm3, %xmm3
; CHECK-NEXT: ucomiss %xmm3, %xmm0
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.25E+0,0.0E+0]
; CHECK-NEXT: jae .LBB6_1
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: addsd %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/recip-fastmath.ll b/llvm/test/CodeGen/X86/recip-fastmath.ll
index 7e9bbc555642484..bb8c074f97e5efb 100644
--- a/llvm/test/CodeGen/X86/recip-fastmath.ll
+++ b/llvm/test/CodeGen/X86/recip-fastmath.ll
@@ -21,14 +21,14 @@
define float @f32_no_estimate(float %x) #0 {
; SSE-LABEL: f32_no_estimate:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: divss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f32_no_estimate:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%div = fdiv fast float 1.0, %x
@@ -40,7 +40,7 @@ define float @f32_one_step(float %x) #1 {
; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm0
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: subss %xmm0, %xmm1
; SSE-NEXT: mulss %xmm2, %xmm1
; SSE-NEXT: addss %xmm2, %xmm1
@@ -51,7 +51,7 @@ define float @f32_one_step(float %x) #1 {
; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-RECIP-NEXT: vsubss %xmm0, %xmm2, %xmm0
; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
@@ -73,7 +73,7 @@ define float @f32_one_step(float %x) #1 {
;
; BTVER2-LABEL: f32_one_step:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0
; BTVER2-NEXT: vsubss %xmm0, %xmm2, %xmm0
@@ -85,7 +85,7 @@ define float @f32_one_step(float %x) #1 {
; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0
; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0
; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0
@@ -102,7 +102,7 @@ define float @f32_one_step(float %x) #1 {
; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm2, %xmm0
; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0
; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0
@@ -211,7 +211,7 @@ define float @f32_two_step(float %x) #2 {
; SSE-NEXT: rcpss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: mulss %xmm2, %xmm3
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: subss %xmm3, %xmm4
; SSE-NEXT: mulss %xmm2, %xmm4
@@ -227,7 +227,7 @@ define float @f32_two_step(float %x) #2 {
; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-RECIP-NEXT: vsubss %xmm2, %xmm3, %xmm2
; AVX-RECIP-NEXT: vmulss %xmm2, %xmm1, %xmm2
; AVX-RECIP-NEXT: vaddss %xmm2, %xmm1, %xmm1
@@ -240,7 +240,7 @@ define float @f32_two_step(float %x) #2 {
; FMA-RECIP-LABEL: f32_two_step:
; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
; FMA-RECIP-NEXT: vfmsub213ss {{.*#+}} xmm3 = (xmm0 * xmm3) - xmm2
; FMA-RECIP-NEXT: vfnmadd132ss {{.*#+}} xmm3 = -(xmm3 * xmm1) + xmm1
@@ -251,7 +251,7 @@ define float @f32_two_step(float %x) #2 {
; BDVER2-LABEL: f32_two_step:
; BDVER2: # %bb.0:
; BDVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; BDVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; BDVER2-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; BDVER2-NEXT: vfmsubss {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm2
; BDVER2-NEXT: vfnmaddss {{.*#+}} xmm1 = -(xmm1 * xmm3) + xmm1
; BDVER2-NEXT: vfmsubss {{.*#+}} xmm0 = (xmm0 * xmm1) - xmm2
@@ -260,7 +260,7 @@ define float @f32_two_step(float %x) #2 {
;
; BTVER2-LABEL: f32_two_step:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; BTVER2-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm2
; BTVER2-NEXT: vsubss %xmm2, %xmm3, %xmm2
@@ -276,7 +276,7 @@ define float @f32_two_step(float %x) #2 {
; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SANDY-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SANDY-NEXT: vsubss %xmm2, %xmm3, %xmm2
; SANDY-NEXT: vmulss %xmm2, %xmm1, %xmm2
; SANDY-NEXT: vaddss %xmm2, %xmm1, %xmm1
@@ -289,7 +289,7 @@ define float @f32_two_step(float %x) #2 {
; HASWELL-LABEL: f32_two_step:
; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NEXT: vmovaps %xmm1, %xmm3
; HASWELL-NEXT: vfmsub213ss {{.*#+}} xmm3 = (xmm0 * xmm3) - xmm2
; HASWELL-NEXT: vfnmadd132ss {{.*#+}} xmm3 = -(xmm3 * xmm1) + xmm1
@@ -301,7 +301,7 @@ define float @f32_two_step(float %x) #2 {
; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NO-FMA-NEXT: vsubss %xmm2, %xmm3, %xmm2
; HASWELL-NO-FMA-NEXT: vmulss %xmm2, %xmm1, %xmm2
; HASWELL-NO-FMA-NEXT: vaddss %xmm2, %xmm1, %xmm1
@@ -314,7 +314,7 @@ define float @f32_two_step(float %x) #2 {
; AVX512-LABEL: f32_two_step:
; AVX512: # %bb.0:
; AVX512-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX512-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX512-NEXT: vmovaps %xmm1, %xmm3
; AVX512-NEXT: vfmsub213ss {{.*#+}} xmm3 = (xmm0 * xmm3) - xmm2
; AVX512-NEXT: vfnmadd132ss {{.*#+}} xmm3 = -(xmm3 * xmm1) + xmm1
diff --git a/llvm/test/CodeGen/X86/recip-fastmath2.ll b/llvm/test/CodeGen/X86/recip-fastmath2.ll
index 2a5e46bba2c0098..042069703af21d3 100644
--- a/llvm/test/CodeGen/X86/recip-fastmath2.ll
+++ b/llvm/test/CodeGen/X86/recip-fastmath2.ll
@@ -32,7 +32,7 @@ define float @f32_one_step_2(float %x) #1 {
; SSE-LABEL: f32_one_step_2:
; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm2
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: mulss %xmm1, %xmm3
; SSE-NEXT: mulss %xmm3, %xmm0
@@ -45,7 +45,7 @@ define float @f32_one_step_2(float %x) #1 {
; AVX-RECIP-LABEL: f32_one_step_2:
; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; AVX-RECIP-NEXT: vmulss %xmm2, %xmm1, %xmm3
; AVX-RECIP-NEXT: vmulss %xmm3, %xmm0, %xmm0
; AVX-RECIP-NEXT: vsubss %xmm0, %xmm2, %xmm0
@@ -56,7 +56,7 @@ define float @f32_one_step_2(float %x) #1 {
; FMA-RECIP-LABEL: f32_one_step_2:
; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; FMA-RECIP-NEXT: vmulss %xmm2, %xmm1, %xmm3
; FMA-RECIP-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm3 * xmm0) - xmm2
; FMA-RECIP-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm3
@@ -65,7 +65,7 @@ define float @f32_one_step_2(float %x) #1 {
; BDVER2-LABEL: f32_one_step_2:
; BDVER2: # %bb.0:
; BDVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; BDVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; BDVER2-NEXT: vmovss {{.*#+}} xmm2 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; BDVER2-NEXT: vmulss %xmm2, %xmm1, %xmm3
; BDVER2-NEXT: vfmsubss {{.*#+}} xmm0 = (xmm0 * xmm3) - xmm2
; BDVER2-NEXT: vfnmaddss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm3
@@ -73,7 +73,7 @@ define float @f32_one_step_2(float %x) #1 {
;
; BTVER2-LABEL: f32_one_step_2:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; BTVER2-NEXT: vmulss %xmm2, %xmm1, %xmm3
; BTVER2-NEXT: vmulss %xmm3, %xmm0, %xmm0
@@ -85,7 +85,7 @@ define float @f32_one_step_2(float %x) #1 {
; SANDY-LABEL: f32_one_step_2:
; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; SANDY-NEXT: vmulss %xmm2, %xmm1, %xmm3
; SANDY-NEXT: vmulss %xmm3, %xmm0, %xmm0
; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0
@@ -96,7 +96,7 @@ define float @f32_one_step_2(float %x) #1 {
; HASWELL-LABEL: f32_one_step_2:
; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NEXT: vmulss %xmm2, %xmm1, %xmm3
; HASWELL-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm3 * xmm0) - xmm2
; HASWELL-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm3
@@ -105,7 +105,7 @@ define float @f32_one_step_2(float %x) #1 {
; HASWELL-NO-FMA-LABEL: f32_one_step_2:
; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NO-FMA-NEXT: vmulss %xmm2, %xmm1, %xmm3
; HASWELL-NO-FMA-NEXT: vmulss %xmm3, %xmm0, %xmm0
; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm2, %xmm0
@@ -116,7 +116,7 @@ define float @f32_one_step_2(float %x) #1 {
; AVX512-LABEL: f32_one_step_2:
; AVX512: # %bb.0:
; AVX512-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX512-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm2 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; AVX512-NEXT: vmulss %xmm2, %xmm1, %xmm3
; AVX512-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm3 * xmm0) - xmm2
; AVX512-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm3
@@ -130,11 +130,11 @@ define float @f32_one_step_2_divs(float %x) #1 {
; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm0, %xmm1
; SSE-NEXT: mulss %xmm1, %xmm0
-; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: subss %xmm0, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: addss %xmm1, %xmm2
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [3.456E+3,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: mulss %xmm2, %xmm0
; SSE-NEXT: mulss %xmm2, %xmm0
; SSE-NEXT: retq
@@ -143,7 +143,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-RECIP-NEXT: vsubss %xmm0, %xmm2, %xmm0
; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
@@ -171,7 +171,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
;
; BTVER2-LABEL: f32_one_step_2_divs:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0
; BTVER2-NEXT: vsubss %xmm0, %xmm2, %xmm0
@@ -185,7 +185,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0
; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0
; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0
@@ -206,7 +206,7 @@ define float @f32_one_step_2_divs(float %x) #1 {
; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm2, %xmm0
; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0
; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0
@@ -233,11 +233,11 @@ define float @f32_two_step_2(float %x) #2 {
; SSE-NEXT: rcpss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2
-; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: subss %xmm2, %xmm3
; SSE-NEXT: mulss %xmm1, %xmm3
; SSE-NEXT: addss %xmm1, %xmm3
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [6.789E+3,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm3, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm0
@@ -251,11 +251,11 @@ define float @f32_two_step_2(float %x) #2 {
; AVX-RECIP: # %bb.0:
; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-RECIP-NEXT: vsubss %xmm2, %xmm3, %xmm2
; AVX-RECIP-NEXT: vmulss %xmm2, %xmm1, %xmm2
; AVX-RECIP-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = [6.789E+3,0.0E+0,0.0E+0,0.0E+0]
; AVX-RECIP-NEXT: vmulss %xmm2, %xmm1, %xmm3
; AVX-RECIP-NEXT: vmulss %xmm3, %xmm0, %xmm0
; AVX-RECIP-NEXT: vsubss %xmm0, %xmm2, %xmm0
@@ -266,10 +266,10 @@ define float @f32_two_step_2(float %x) #2 {
; FMA-RECIP-LABEL: f32_two_step_2:
; FMA-RECIP: # %bb.0:
; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; FMA-RECIP-NEXT: vfmsub231ss {{.*#+}} xmm2 = (xmm0 * xmm1) - xmm2
; FMA-RECIP-NEXT: vfnmadd132ss {{.*#+}} xmm2 = -(xmm2 * xmm1) + xmm1
-; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm1 = [6.789E+3,0.0E+0,0.0E+0,0.0E+0]
; FMA-RECIP-NEXT: vmulss %xmm1, %xmm2, %xmm3
; FMA-RECIP-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm3 * xmm0) - xmm1
; FMA-RECIP-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm2 * xmm0) + xmm3
@@ -279,7 +279,7 @@ define float @f32_two_step_2(float %x) #2 {
; BDVER2: # %bb.0:
; BDVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; BDVER2-NEXT: vfmsubss {{.*#+}} xmm2 = (xmm0 * xmm1) - mem
-; BDVER2-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; BDVER2-NEXT: vmovss {{.*#+}} xmm4 = [6.789E+3,0.0E+0,0.0E+0,0.0E+0]
; BDVER2-NEXT: vfnmaddss {{.*#+}} xmm1 = -(xmm1 * xmm2) + xmm1
; BDVER2-NEXT: vmulss %xmm4, %xmm1, %xmm3
; BDVER2-NEXT: vfmsubss {{.*#+}} xmm0 = (xmm0 * xmm3) - xmm4
@@ -288,9 +288,9 @@ define float @f32_two_step_2(float %x) #2 {
;
; BTVER2-LABEL: f32_two_step_2:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; BTVER2-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; BTVER2-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; BTVER2-NEXT: vmovss {{.*#+}} xmm4 = [6.789E+3,0.0E+0,0.0E+0,0.0E+0]
; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm2
; BTVER2-NEXT: vsubss %xmm2, %xmm3, %xmm2
; BTVER2-NEXT: vmulss %xmm2, %xmm1, %xmm2
@@ -306,11 +306,11 @@ define float @f32_two_step_2(float %x) #2 {
; SANDY: # %bb.0:
; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SANDY-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SANDY-NEXT: vsubss %xmm2, %xmm3, %xmm2
; SANDY-NEXT: vmulss %xmm2, %xmm1, %xmm2
; SANDY-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SANDY-NEXT: vmovss {{.*#+}} xmm2 = [6.789E+3,0.0E+0,0.0E+0,0.0E+0]
; SANDY-NEXT: vmulss %xmm2, %xmm1, %xmm3
; SANDY-NEXT: vmulss %xmm3, %xmm0, %xmm0
; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0
@@ -321,10 +321,10 @@ define float @f32_two_step_2(float %x) #2 {
; HASWELL-LABEL: f32_two_step_2:
; HASWELL: # %bb.0:
; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NEXT: vfmsub231ss {{.*#+}} xmm2 = (xmm0 * xmm1) - xmm2
; HASWELL-NEXT: vfnmadd132ss {{.*#+}} xmm2 = -(xmm2 * xmm1) + xmm1
-; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; HASWELL-NEXT: vmovss {{.*#+}} xmm1 = [6.789E+3,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NEXT: vmulss %xmm1, %xmm2, %xmm3
; HASWELL-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm3 * xmm0) - xmm1
; HASWELL-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm2 * xmm0) + xmm3
@@ -334,11 +334,11 @@ define float @f32_two_step_2(float %x) #2 {
; HASWELL-NO-FMA: # %bb.0:
; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm3 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NO-FMA-NEXT: vsubss %xmm2, %xmm3, %xmm2
; HASWELL-NO-FMA-NEXT: vmulss %xmm2, %xmm1, %xmm2
; HASWELL-NO-FMA-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = [6.789E+3,0.0E+0,0.0E+0,0.0E+0]
; HASWELL-NO-FMA-NEXT: vmulss %xmm2, %xmm1, %xmm3
; HASWELL-NO-FMA-NEXT: vmulss %xmm3, %xmm0, %xmm0
; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm2, %xmm0
@@ -349,10 +349,10 @@ define float @f32_two_step_2(float %x) #2 {
; AVX512-LABEL: f32_two_step_2:
; AVX512: # %bb.0:
; AVX512-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX512-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX512-NEXT: vfmsub231ss {{.*#+}} xmm2 = (xmm0 * xmm1) - xmm2
; AVX512-NEXT: vfnmadd132ss {{.*#+}} xmm2 = -(xmm2 * xmm1) + xmm1
-; AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm1 = [6.789E+3,0.0E+0,0.0E+0,0.0E+0]
; AVX512-NEXT: vmulss %xmm1, %xmm2, %xmm3
; AVX512-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm3 * xmm0) - xmm1
; AVX512-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm2 * xmm0) + xmm3
diff --git a/llvm/test/CodeGen/X86/recip-pic.ll b/llvm/test/CodeGen/X86/recip-pic.ll
index c04aa394c309ef3..d01ecc1e2ce1e18 100644
--- a/llvm/test/CodeGen/X86/recip-pic.ll
+++ b/llvm/test/CodeGen/X86/recip-pic.ll
@@ -11,7 +11,7 @@ define fastcc float @foo(float %x) unnamed_addr #0 {
; CHECK-NEXT: .cfi_adjust_cfa_offset -4
; CHECK-NEXT: .Ltmp0:
; CHECK-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %eax
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: movss %xmm1, (%eax)
diff --git a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
index 718b8b558c9b2a2..86d4be9cb7af651 100644
--- a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
+++ b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
@@ -68,7 +68,7 @@ define i64 @f_to_u64(float %a) nounwind {
; X86-AVX512F-WIN-NEXT: andl $-8, %esp
; X86-AVX512F-WIN-NEXT: subl $8, %esp
; X86-AVX512F-WIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX512F-WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX512F-WIN-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-AVX512F-WIN-NEXT: xorl %edx, %edx
; X86-AVX512F-WIN-NEXT: vucomiss %xmm0, %xmm1
; X86-AVX512F-WIN-NEXT: setbe %dl
@@ -89,7 +89,7 @@ define i64 @f_to_u64(float %a) nounwind {
; X86-AVX512F-LIN: # %bb.0:
; X86-AVX512F-LIN-NEXT: subl $12, %esp
; X86-AVX512F-LIN-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX512F-LIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX512F-LIN-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-AVX512F-LIN-NEXT: xorl %edx, %edx
; X86-AVX512F-LIN-NEXT: vucomiss %xmm0, %xmm1
; X86-AVX512F-LIN-NEXT: setbe %dl
@@ -112,7 +112,7 @@ define i64 @f_to_u64(float %a) nounwind {
; X86-SSE3-WIN-NEXT: andl $-8, %esp
; X86-SSE3-WIN-NEXT: subl $8, %esp
; X86-SSE3-WIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE3-WIN-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE3-WIN-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE3-WIN-NEXT: ucomiss %xmm0, %xmm1
; X86-SSE3-WIN-NEXT: jbe LBB0_2
; X86-SSE3-WIN-NEXT: # %bb.1:
@@ -135,7 +135,7 @@ define i64 @f_to_u64(float %a) nounwind {
; X86-SSE3-LIN: # %bb.0:
; X86-SSE3-LIN-NEXT: subl $12, %esp
; X86-SSE3-LIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE3-LIN-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE3-LIN-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE3-LIN-NEXT: ucomiss %xmm0, %xmm1
; X86-SSE3-LIN-NEXT: jbe .LBB0_2
; X86-SSE3-LIN-NEXT: # %bb.1:
@@ -182,7 +182,7 @@ define i64 @f_to_u64(float %a) nounwind {
; X86-SSE2-WIN-NEXT: andl $-8, %esp
; X86-SSE2-WIN-NEXT: subl $16, %esp
; X86-SSE2-WIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE2-WIN-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE2-WIN-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE2-WIN-NEXT: ucomiss %xmm0, %xmm1
; X86-SSE2-WIN-NEXT: jbe LBB0_2
; X86-SSE2-WIN-NEXT: # %bb.1:
@@ -211,7 +211,7 @@ define i64 @f_to_u64(float %a) nounwind {
; X86-SSE2-LIN: # %bb.0:
; X86-SSE2-LIN-NEXT: subl $20, %esp
; X86-SSE2-LIN-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE2-LIN-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE2-LIN-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE2-LIN-NEXT: ucomiss %xmm0, %xmm1
; X86-SSE2-LIN-NEXT: jbe .LBB0_2
; X86-SSE2-LIN-NEXT: # %bb.1:
@@ -501,7 +501,7 @@ define i64 @d_to_u64(double %a) nounwind {
; X86-AVX512F-WIN-NEXT: andl $-8, %esp
; X86-AVX512F-WIN-NEXT: subl $8, %esp
; X86-AVX512F-WIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX512F-WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX512F-WIN-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; X86-AVX512F-WIN-NEXT: xorl %edx, %edx
; X86-AVX512F-WIN-NEXT: vucomisd %xmm0, %xmm1
; X86-AVX512F-WIN-NEXT: setbe %dl
@@ -522,7 +522,7 @@ define i64 @d_to_u64(double %a) nounwind {
; X86-AVX512F-LIN: # %bb.0:
; X86-AVX512F-LIN-NEXT: subl $12, %esp
; X86-AVX512F-LIN-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX512F-LIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX512F-LIN-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; X86-AVX512F-LIN-NEXT: xorl %edx, %edx
; X86-AVX512F-LIN-NEXT: vucomisd %xmm0, %xmm1
; X86-AVX512F-LIN-NEXT: setbe %dl
@@ -545,7 +545,7 @@ define i64 @d_to_u64(double %a) nounwind {
; X86-SSE3-WIN-NEXT: andl $-8, %esp
; X86-SSE3-WIN-NEXT: subl $8, %esp
; X86-SSE3-WIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE3-WIN-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE3-WIN-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; X86-SSE3-WIN-NEXT: ucomisd %xmm0, %xmm1
; X86-SSE3-WIN-NEXT: jbe LBB2_2
; X86-SSE3-WIN-NEXT: # %bb.1:
@@ -568,7 +568,7 @@ define i64 @d_to_u64(double %a) nounwind {
; X86-SSE3-LIN: # %bb.0:
; X86-SSE3-LIN-NEXT: subl $12, %esp
; X86-SSE3-LIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE3-LIN-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE3-LIN-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; X86-SSE3-LIN-NEXT: ucomisd %xmm0, %xmm1
; X86-SSE3-LIN-NEXT: jbe .LBB2_2
; X86-SSE3-LIN-NEXT: # %bb.1:
@@ -615,7 +615,7 @@ define i64 @d_to_u64(double %a) nounwind {
; X86-SSE2-WIN-NEXT: andl $-8, %esp
; X86-SSE2-WIN-NEXT: subl $16, %esp
; X86-SSE2-WIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-WIN-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE2-WIN-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; X86-SSE2-WIN-NEXT: ucomisd %xmm0, %xmm1
; X86-SSE2-WIN-NEXT: jbe LBB2_2
; X86-SSE2-WIN-NEXT: # %bb.1:
@@ -644,7 +644,7 @@ define i64 @d_to_u64(double %a) nounwind {
; X86-SSE2-LIN: # %bb.0:
; X86-SSE2-LIN-NEXT: subl $20, %esp
; X86-SSE2-LIN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-LIN-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE2-LIN-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; X86-SSE2-LIN-NEXT: ucomisd %xmm0, %xmm1
; X86-SSE2-LIN-NEXT: jbe .LBB2_2
; X86-SSE2-LIN-NEXT: # %bb.1:
diff --git a/llvm/test/CodeGen/X86/scalarize-fp.ll b/llvm/test/CodeGen/X86/scalarize-fp.ll
index 8379d20b603e32a..ef04437c7951fcf 100644
--- a/llvm/test/CodeGen/X86/scalarize-fp.ll
+++ b/llvm/test/CodeGen/X86/scalarize-fp.ll
@@ -38,14 +38,14 @@ define <4 x float> @load_fadd_op1_constant_v4f32(ptr %p) nounwind {
define <4 x float> @fsub_op0_constant_v4f32(float %x) nounwind {
; SSE-LABEL: fsub_op0_constant_v4f32:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: subss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fsub_op0_constant_v4f32:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%v = insertelement <4 x float> undef, float %x, i32 0
@@ -56,13 +56,13 @@ define <4 x float> @fsub_op0_constant_v4f32(float %x) nounwind {
define <4 x float> @load_fsub_op0_constant_v4f32(ptr %p) nounwind {
; SSE-LABEL: load_fsub_op0_constant_v4f32:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: subss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: load_fsub_op0_constant_v4f32:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsubss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%x = load float, ptr %p
@@ -140,14 +140,14 @@ define <4 x float> @load_fdiv_op1_constant_v4f32(ptr %p) nounwind {
define <4 x float> @fdiv_op0_constant_v4f32(float %x) nounwind {
; SSE-LABEL: fdiv_op0_constant_v4f32:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: divss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fdiv_op0_constant_v4f32:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%v = insertelement <4 x float> undef, float %x, i32 0
@@ -158,13 +158,13 @@ define <4 x float> @fdiv_op0_constant_v4f32(float %x) nounwind {
define <4 x float> @load_fdiv_op0_constant_v4f32(ptr %p) nounwind {
; SSE-LABEL: load_fdiv_op0_constant_v4f32:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: divss (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: load_fdiv_op0_constant_v4f32:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%x = load float, ptr %p
@@ -209,14 +209,14 @@ define <4 x double> @load_fadd_op1_constant_v4f64(ptr %p) nounwind {
define <4 x double> @fsub_op0_constant_v4f64(double %x) nounwind {
; SSE-LABEL: fsub_op0_constant_v4f64:
; SSE: # %bb.0:
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; SSE-NEXT: subsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fsub_op0_constant_v4f64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%v = insertelement <4 x double> undef, double %x, i32 0
@@ -227,13 +227,13 @@ define <4 x double> @fsub_op0_constant_v4f64(double %x) nounwind {
define <4 x double> @load_fsub_op0_constant_v4f64(ptr %p) nounwind {
; SSE-LABEL: load_fsub_op0_constant_v4f64:
; SSE: # %bb.0:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: subsd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: load_fsub_op0_constant_v4f64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: vsubsd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%x = load double, ptr %p
@@ -311,14 +311,14 @@ define <4 x double> @load_fdiv_op1_constant_v4f64(ptr %p) nounwind {
define <4 x double> @fdiv_op0_constant_v4f64(double %x) nounwind {
; SSE-LABEL: fdiv_op0_constant_v4f64:
; SSE: # %bb.0:
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; SSE-NEXT: divsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fdiv_op0_constant_v4f64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%v = insertelement <4 x double> undef, double %x, i32 0
@@ -329,13 +329,13 @@ define <4 x double> @fdiv_op0_constant_v4f64(double %x) nounwind {
define <4 x double> @load_fdiv_op0_constant_v4f64(ptr %p) nounwind {
; SSE-LABEL: load_fdiv_op0_constant_v4f64:
; SSE: # %bb.0:
-; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; SSE-NEXT: divsd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: load_fdiv_op0_constant_v4f64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: vdivsd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%x = load double, ptr %p
@@ -524,7 +524,7 @@ define <2 x double> @fadd_splat_const_op1_v2f64(<2 x double> %vx) {
define <4 x double> @fsub_const_op0_splat_v4f64(double %x) {
; SSE-LABEL: fsub_const_op0_splat_v4f64:
; SSE: # %bb.0:
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [8.0E+0,0.0E+0]
; SSE-NEXT: subsd %xmm0, %xmm1
; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0,0]
; SSE-NEXT: movapd %xmm1, %xmm0
@@ -532,7 +532,7 @@ define <4 x double> @fsub_const_op0_splat_v4f64(double %x) {
;
; AVX-LABEL: fsub_const_op0_splat_v4f64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [8.0E+0,0.0E+0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -565,7 +565,7 @@ define <4 x float> @fmul_splat_const_op1_v4f32(<4 x float> %vx, <4 x float> %vy)
define <8 x float> @fdiv_splat_const_op0_v8f32(<8 x float> %vy) {
; SSE-LABEL: fdiv_splat_const_op0_v8f32:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [4.5E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: divss %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE-NEXT: movaps %xmm1, %xmm0
@@ -573,7 +573,7 @@ define <8 x float> @fdiv_splat_const_op0_v8f32(<8 x float> %vy) {
;
; AVX-LABEL: fdiv_splat_const_op0_v8f32:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -700,7 +700,7 @@ define <2 x double> @splat0_fadd_const_op1_v2f64(<2 x double> %vx) {
define <4 x double> @splat0_fsub_const_op0_v4f64(double %x) {
; SSE-LABEL: splat0_fsub_const_op0_v4f64:
; SSE: # %bb.0:
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [-4.2E+1,0.0E+0]
; SSE-NEXT: subsd %xmm0, %xmm1
; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0,0]
; SSE-NEXT: movapd %xmm1, %xmm0
@@ -708,7 +708,7 @@ define <4 x double> @splat0_fsub_const_op0_v4f64(double %x) {
;
; AVX-LABEL: splat0_fsub_const_op0_v4f64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [-4.2E+1,0.0E+0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -756,7 +756,7 @@ define <8 x float> @splat0_fdiv_const_op1_v8f32(<8 x float> %vx) {
define <8 x float> @splat0_fdiv_const_op0_v8f32(<8 x float> %vx) {
; SSE-LABEL: splat0_fdiv_const_op0_v8f32:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: divss %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE-NEXT: movaps %xmm1, %xmm0
@@ -764,7 +764,7 @@ define <8 x float> @splat0_fdiv_const_op0_v8f32(<8 x float> %vx) {
;
; AVX-LABEL: splat0_fdiv_const_op0_v8f32:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/select-of-fp-constants.ll b/llvm/test/CodeGen/X86/select-of-fp-constants.ll
index 3ddeeee1bce040f..76b8ea8e2b8a2b4 100644
--- a/llvm/test/CodeGen/X86/select-of-fp-constants.ll
+++ b/llvm/test/CodeGen/X86/select-of-fp-constants.ll
@@ -42,7 +42,7 @@ define float @icmp_select_fp_constants(i32 %x) nounwind readnone {
define float @fcmp_select_fp_constants(float %x) nounwind readnone {
; X86-SSE-LABEL: fcmp_select_fp_constants:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = [-4.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-SSE-NEXT: cmpneqss {{[0-9]+}}(%esp), %xmm0
; X86-SSE-NEXT: movd %xmm0, %eax
; X86-SSE-NEXT: andl $1, %eax
@@ -51,7 +51,7 @@ define float @fcmp_select_fp_constants(float %x) nounwind readnone {
;
; X86-AVX2-LABEL: fcmp_select_fp_constants:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT: vmovss {{.*#+}} xmm0 = [-4.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-AVX2-NEXT: vcmpneqss {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
; X86-AVX2-NEXT: andl $1, %eax
@@ -85,7 +85,7 @@ define float @fcmp_select_fp_constants(float %x) nounwind readnone {
; X64-AVX512F-LABEL: fcmp_select_fp_constants:
; X64-AVX512F: # %bb.0:
; X64-AVX512F-NEXT: vcmpneqss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
-; X64-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = [2.3E+1,0.0E+0,0.0E+0,0.0E+0]
; X64-AVX512F-NEXT: vmovss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1}
; X64-AVX512F-NEXT: retq
%c = fcmp une float %x, -4.0
diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll
index 6d2b73e2108ba22..ca5558561a65b9e 100644
--- a/llvm/test/CodeGen/X86/select.ll
+++ b/llvm/test/CodeGen/X86/select.ll
@@ -177,7 +177,7 @@ entry:
define signext i8 @test4(ptr nocapture %P, double %F) nounwind readonly {
; CHECK-LABEL: test4:
; CHECK: ## %bb.0: ## %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ucomisd %xmm0, %xmm1
; CHECK-NEXT: seta %al
diff --git a/llvm/test/CodeGen/X86/select_const.ll b/llvm/test/CodeGen/X86/select_const.ll
index eba22036701b4fe..d604923b48a11a8 100644
--- a/llvm/test/CodeGen/X86/select_const.ll
+++ b/llvm/test/CodeGen/X86/select_const.ll
@@ -958,7 +958,7 @@ define float @select_undef_fp(float %x) {
;
; X64-LABEL: select_undef_fp:
; X64: # %bb.0:
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [4.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: retq
%f = select i1 undef, float 4.0, float %x
ret float %f
diff --git a/llvm/test/CodeGen/X86/setcc-combine.ll b/llvm/test/CodeGen/X86/setcc-combine.ll
index 780a769bc9e2b57..0745881b2f3a326 100644
--- a/llvm/test/CodeGen/X86/setcc-combine.ll
+++ b/llvm/test/CodeGen/X86/setcc-combine.ll
@@ -488,7 +488,7 @@ define double @ogt_no_zero(double %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0]
; CHECK-NEXT: xorpd %xmm0, %xmm1
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
; CHECK-NEXT: cmpltsd %xmm0, %xmm2
; CHECK-NEXT: andpd %xmm2, %xmm0
; CHECK-NEXT: andnpd %xmm1, %xmm2
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll
index 6d6a7b897c33265..9f2071ff14b8747 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll
@@ -14,7 +14,7 @@ define float @f32_no_daz(float %f) #0 {
; NHM-NEXT: rsqrtss %xmm0, %xmm1
; NHM-NEXT: movaps %xmm0, %xmm2
; NHM-NEXT: mulss %xmm1, %xmm2
-; NHM-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; NHM-NEXT: movss {{.*#+}} xmm3 = [-5.0E-1,0.0E+0,0.0E+0,0.0E+0]
; NHM-NEXT: mulss %xmm2, %xmm3
; NHM-NEXT: mulss %xmm1, %xmm2
; NHM-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
@@ -221,7 +221,7 @@ define float @f32_daz(float %f) #1 {
; NHM-NEXT: rsqrtss %xmm0, %xmm1
; NHM-NEXT: movaps %xmm0, %xmm2
; NHM-NEXT: mulss %xmm1, %xmm2
-; NHM-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; NHM-NEXT: movss {{.*#+}} xmm3 = [-5.0E-1,0.0E+0,0.0E+0,0.0E+0]
; NHM-NEXT: mulss %xmm2, %xmm3
; NHM-NEXT: mulss %xmm1, %xmm2
; NHM-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-tunecpu-attr.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-tunecpu-attr.ll
index 99b45f83ac90948..85f7733e671a761 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-tunecpu-attr.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-tunecpu-attr.ll
@@ -7,7 +7,7 @@ define float @f32_tune_nhm(float %f) #0 {
; CHECK-NEXT: rsqrtss %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm0, %xmm2
; CHECK-NEXT: mulss %xmm1, %xmm2
-; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm3 = [-5.0E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: mulss %xmm2, %xmm3
; CHECK-NEXT: mulss %xmm1, %xmm2
; CHECK-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
@@ -44,7 +44,7 @@ define float @f32_tune_x86_64(float %f) #3 {
; CHECK-NEXT: rsqrtss %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm0, %xmm2
; CHECK-NEXT: mulss %xmm1, %xmm2
-; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm3 = [-5.0E-1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: mulss %xmm2, %xmm3
; CHECK-NEXT: mulss %xmm1, %xmm2
; CHECK-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath.ll b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
index c0ad1a31c7d8ddf..af2f66d1e9bd039 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
@@ -289,14 +289,14 @@ define float @f32_no_estimate(float %x) #0 {
; SSE-LABEL: f32_no_estimate:
; SSE: # %bb.0:
; SSE-NEXT: sqrtss %xmm0, %xmm1
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: divss %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f32_no_estimate:
; AVX: # %bb.0:
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%sqrt = tail call float @llvm.sqrt.f32(float %x)
@@ -940,7 +940,7 @@ define double @sqrt_simplify_before_recip(double %x, ptr %p) nounwind {
; SSE-LABEL: sqrt_simplify_before_recip:
; SSE: # %bb.0:
; SSE-NEXT: sqrtsd %xmm0, %xmm0
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
; SSE-NEXT: divsd %xmm0, %xmm1
; SSE-NEXT: movsd %xmm1, (%rdi)
; SSE-NEXT: retq
@@ -948,7 +948,7 @@ define double @sqrt_simplify_before_recip(double %x, ptr %p) nounwind {
; AVX-LABEL: sqrt_simplify_before_recip:
; AVX: # %bb.0:
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm1
; AVX-NEXT: vmovsd %xmm1, (%rdi)
; AVX-NEXT: retq
@@ -987,7 +987,7 @@ define double @sqrt_simplify_before_recip_order(double %x, ptr %p) nounwind {
; SSE-LABEL: sqrt_simplify_before_recip_order:
; SSE: # %bb.0:
; SSE-NEXT: sqrtsd %xmm0, %xmm0
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; SSE-NEXT: divsd %xmm0, %xmm1
; SSE-NEXT: movsd %xmm1, (%rdi)
; SSE-NEXT: retq
@@ -995,7 +995,7 @@ define double @sqrt_simplify_before_recip_order(double %x, ptr %p) nounwind {
; AVX-LABEL: sqrt_simplify_before_recip_order:
; AVX: # %bb.0:
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm1
; AVX-NEXT: vmovsd %xmm1, (%rdi)
; AVX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/sse-fcopysign.ll b/llvm/test/CodeGen/X86/sse-fcopysign.ll
index 59150d41e38e84a..3eadcad145b65d2 100644
--- a/llvm/test/CodeGen/X86/sse-fcopysign.ll
+++ b/llvm/test/CodeGen/X86/sse-fcopysign.ll
@@ -188,7 +188,7 @@ define float @cst1() nounwind {
;
; X64-LABEL: cst1:
; X64: # %bb.0:
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: retq
%tmp = tail call float @llvm.copysign.f32( float 1.0, float -2.0 )
ret float %tmp
@@ -203,7 +203,7 @@ define double @cst2() nounwind {
;
; X64-LABEL: cst2:
; X64: # %bb.0:
-; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
; X64-NEXT: retq
%tmp1 = fadd float -1.0, -1.0
%tmp2 = fpext float %tmp1 to double
diff --git a/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
index 7ada1c04626f35f..db2f78bf5eedc58 100644
--- a/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -1199,8 +1199,8 @@ define <4 x float> @test_mm_load_ps1(float* %a0) nounwind {
; X86-SSE-LABEL: test_mm_load_ps1:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movss (%eax), %xmm0 # encoding: [0xf3,0x0f,0x10,0x00]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x00]
; X86-SSE-NEXT: shufps $0, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x00]
; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
@@ -1219,8 +1219,8 @@ define <4 x float> @test_mm_load_ps1(float* %a0) nounwind {
;
; X64-SSE-LABEL: test_mm_load_ps1:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movss (%rdi), %xmm0 # encoding: [0xf3,0x0f,0x10,0x07]
-; X64-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x07]
; X64-SSE-NEXT: shufps $0, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x00]
; X64-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
; X64-SSE-NEXT: retq # encoding: [0xc3]
@@ -1246,40 +1246,40 @@ define <4 x float> @test_mm_load_ss(float* %a0) nounwind {
; X86-SSE-LABEL: test_mm_load_ss:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movss (%eax), %xmm0 # encoding: [0xf3,0x0f,0x10,0x00]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x00]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_load_ss:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX1-NEXT: vmovss (%eax), %xmm0 # encoding: [0xc5,0xfa,0x10,0x00]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x00]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_load_ss:
; X86-AVX512: # %bb.0:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT: vmovss (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x00]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x00]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_load_ss:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movss (%rdi), %xmm0 # encoding: [0xf3,0x0f,0x10,0x07]
-; X64-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x07]
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_load_ss:
; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; X64-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_load_ss:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; X64-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
%ld = load float, float* %a0, align 1
%res0 = insertelement <4 x float> undef, float %ld, i32 0
@@ -1293,8 +1293,8 @@ define <4 x float> @test_mm_load1_ps(float* %a0) nounwind {
; X86-SSE-LABEL: test_mm_load1_ps:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movss (%eax), %xmm0 # encoding: [0xf3,0x0f,0x10,0x00]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x00]
; X86-SSE-NEXT: shufps $0, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x00]
; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
@@ -1313,8 +1313,8 @@ define <4 x float> @test_mm_load1_ps(float* %a0) nounwind {
;
; X64-SSE-LABEL: test_mm_load1_ps:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movss (%rdi), %xmm0 # encoding: [0xf3,0x0f,0x10,0x07]
-; X64-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x07]
; X64-SSE-NEXT: shufps $0, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x00]
; X64-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
; X64-SSE-NEXT: retq # encoding: [0xc3]
@@ -2002,16 +2002,16 @@ define void @test_MM_SET_FLUSH_ZERO_MODE(i32 %a0) nounwind {
define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) nounwind {
; X86-SSE-LABEL: test_mm_set_ps:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
; X86-SSE-NEXT: unpcklps %xmm1, %xmm0 # encoding: [0x0f,0x14,0xc1]
; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x08]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x04]
-; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x08]
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x04]
; X86-SSE-NEXT: unpcklps %xmm2, %xmm1 # encoding: [0x0f,0x14,0xca]
; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
@@ -2020,36 +2020,36 @@ define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) n
;
; X86-AVX1-LABEL: test_mm_set_ps:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x10]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
-; X86-AVX1-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x10]
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
; X86-AVX1-NEXT: vinsertps $16, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x10]
; X86-AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x08]
-; X86-AVX1-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x08]
; X86-AVX1-NEXT: vinsertps $32, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x20]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
-; X86-AVX1-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX1-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,1,2],xmm1[0]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_ps:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x10]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
-; X86-AVX512-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x10]
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
; X86-AVX512-NEXT: vinsertps $16, %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x10]
; X86-AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x08]
-; X86-AVX512-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x08]
; X86-AVX512-NEXT: vinsertps $32, %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x20]
; X86-AVX512-NEXT: # xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
-; X86-AVX512-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT: vinsertps $48, %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x30]
; X86-AVX512-NEXT: # xmm0 = xmm0[0,1,2],xmm1[0]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
@@ -2094,24 +2094,24 @@ define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) n
define <4 x float> @test_mm_set_ps1(float %a0) nounwind {
; X86-SSE-LABEL: test_mm_set_ps1:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT: shufps $0, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x00]
; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_ps1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT: vshufps $0, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0xc6,0xc0,0x00]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_ps1:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT: vbroadcastss %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc0]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
@@ -2206,8 +2206,8 @@ define void @test_MM_SET_ROUNDING_MODE(i32 %a0) nounwind {
define <4 x float> @test_mm_set_ss(float %a0) nounwind {
; X86-SSE-LABEL: test_mm_set_ss:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
; X86-SSE-NEXT: xorps %xmm0, %xmm0 # encoding: [0x0f,0x57,0xc0]
; X86-SSE-NEXT: movss %xmm1, %xmm0 # encoding: [0xf3,0x0f,0x10,0xc1]
; X86-SSE-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
@@ -2215,8 +2215,8 @@ define <4 x float> @test_mm_set_ss(float %a0) nounwind {
;
; X86-AVX1-LABEL: test_mm_set_ss:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf0,0x57,0xc9]
; X86-AVX1-NEXT: vblendps $1, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x01]
; X86-AVX1-NEXT: # xmm0 = xmm0[0],xmm1[1,2,3]
@@ -2224,8 +2224,8 @@ define <4 x float> @test_mm_set_ss(float %a0) nounwind {
;
; X86-AVX512-LABEL: test_mm_set_ss:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf0,0x57,0xc9]
; X86-AVX512-NEXT: vblendps $1, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x01]
; X86-AVX512-NEXT: # xmm0 = xmm0[0],xmm1[1,2,3]
@@ -2255,24 +2255,24 @@ define <4 x float> @test_mm_set_ss(float %a0) nounwind {
define <4 x float> @test_mm_set1_ps(float %a0) nounwind {
; X86-SSE-LABEL: test_mm_set1_ps:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT: shufps $0, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x00]
; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set1_ps:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT: vshufps $0, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0xc6,0xc0,0x00]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set1_ps:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT: vbroadcastss %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc0]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
@@ -2335,16 +2335,16 @@ define void @test_mm_setcsr(i32 %a0) nounwind {
define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3) nounwind {
; X86-SSE-LABEL: test_mm_setr_ps:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
; X86-SSE-NEXT: unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
-; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT: unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
@@ -2353,14 +2353,14 @@ define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3)
;
; X86-AVX1-LABEL: test_mm_setr_ps:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x10]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
-; X86-AVX1-NEXT: # xmm1 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xc5,0xfa,0x10,0x54,0x24,0x08]
-; X86-AVX1-NEXT: # xmm2 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm3 # encoding: [0xc5,0xfa,0x10,0x5c,0x24,0x04]
-; X86-AVX1-NEXT: # xmm3 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x10]
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x54,0x24,0x08]
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x5c,0x24,0x04]
; X86-AVX1-NEXT: vinsertps $16, %xmm2, %xmm3, %xmm2 # encoding: [0xc4,0xe3,0x61,0x21,0xd2,0x10]
; X86-AVX1-NEXT: # xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; X86-AVX1-NEXT: vinsertps $32, %xmm1, %xmm2, %xmm1 # encoding: [0xc4,0xe3,0x69,0x21,0xc9,0x20]
@@ -2371,14 +2371,14 @@ define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3)
;
; X86-AVX512-LABEL: test_mm_setr_ps:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x10]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
-; X86-AVX512-NEXT: # xmm1 = mem[0],zero,zero,zero
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm2 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x54,0x24,0x08]
-; X86-AVX512-NEXT: # xmm2 = mem[0],zero,zero,zero
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x5c,0x24,0x04]
-; X86-AVX512-NEXT: # xmm3 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x10]
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x54,0x24,0x08]
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x5c,0x24,0x04]
; X86-AVX512-NEXT: vinsertps $16, %xmm2, %xmm3, %xmm2 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x61,0x21,0xd2,0x10]
; X86-AVX512-NEXT: # xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; X86-AVX512-NEXT: vinsertps $32, %xmm1, %xmm2, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x69,0x21,0xc9,0x20]
@@ -2520,8 +2520,8 @@ define float @test_mm_sqrt_ss_scalar(float %a0) {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pushl %eax # encoding: [0x50]
; X86-SSE-NEXT: .cfi_def_cfa_offset 8
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x08]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x08]
; X86-SSE-NEXT: sqrtss %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x51,0xc0]
; X86-SSE-NEXT: movss %xmm0, (%esp) # encoding: [0xf3,0x0f,0x11,0x04,0x24]
; X86-SSE-NEXT: flds (%esp) # encoding: [0xd9,0x04,0x24]
@@ -2533,8 +2533,8 @@ define float @test_mm_sqrt_ss_scalar(float %a0) {
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %eax # encoding: [0x50]
; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
; X86-AVX1-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x51,0xc0]
; X86-AVX1-NEXT: vmovss %xmm0, (%esp) # encoding: [0xc5,0xfa,0x11,0x04,0x24]
; X86-AVX1-NEXT: flds (%esp) # encoding: [0xd9,0x04,0x24]
@@ -2546,8 +2546,8 @@ define float @test_mm_sqrt_ss_scalar(float %a0) {
; X86-AVX512: # %bb.0:
; X86-AVX512-NEXT: pushl %eax # encoding: [0x50]
; X86-AVX512-NEXT: .cfi_def_cfa_offset 8
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x08]
; X86-AVX512-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x51,0xc0]
; X86-AVX512-NEXT: vmovss %xmm0, (%esp) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x04,0x24]
; X86-AVX512-NEXT: flds (%esp) # encoding: [0xd9,0x04,0x24]
diff --git a/llvm/test/CodeGen/X86/sse-load-ret.ll b/llvm/test/CodeGen/X86/sse-load-ret.ll
index f1c7c9014cadbf6..0e9b24360e2f876 100644
--- a/llvm/test/CodeGen/X86/sse-load-ret.ll
+++ b/llvm/test/CodeGen/X86/sse-load-ret.ll
@@ -33,10 +33,10 @@ define double @test3(i1 %B) {
; CHECK-NEXT: testb $1, 8(%ebp)
; CHECK-NEXT: jne .LBB2_1
; CHECK-NEXT: # %bb.2:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [5.2301123123000002E+2,0.0E+0]
; CHECK-NEXT: jmp .LBB2_3
; CHECK-NEXT: .LBB2_1:
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.2341200000000001E+2,0.0E+0]
; CHECK-NEXT: .LBB2_3:
; CHECK-NEXT: movsd %xmm0, (%esp)
; CHECK-NEXT: fldl (%esp)
diff --git a/llvm/test/CodeGen/X86/sse-minmax.ll b/llvm/test/CodeGen/X86/sse-minmax.ll
index 93edca04b775b4e..1c14b7400a35859 100644
--- a/llvm/test/CodeGen/X86/sse-minmax.ll
+++ b/llvm/test/CodeGen/X86/sse-minmax.ll
@@ -690,7 +690,7 @@ define double @olt_y(double %x) {
define double @ogt_inverse_y(double %x) {
; STRICT-LABEL: ogt_inverse_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
@@ -702,7 +702,7 @@ define double @ogt_inverse_y(double %x) {
;
; FINITE-LABEL: ogt_inverse_y:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -714,7 +714,7 @@ define double @ogt_inverse_y(double %x) {
define double @olt_inverse_y(double %x) {
; STRICT-LABEL: olt_inverse_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
@@ -726,7 +726,7 @@ define double @olt_inverse_y(double %x) {
;
; FINITE-LABEL: olt_inverse_y:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -738,7 +738,7 @@ define double @olt_inverse_y(double %x) {
define double @oge_y(double %x) {
; STRICT-LABEL: oge_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmplesd %xmm0, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm0
@@ -758,7 +758,7 @@ define double @oge_y(double %x) {
define double @ole_y(double %x) {
; STRICT-LABEL: ole_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmplesd %xmm1, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm0
@@ -778,7 +778,7 @@ define double @ole_y(double %x) {
define double @oge_inverse_y(double %x) {
; STRICT-LABEL: oge_inverse_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm2 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: movapd %xmm2, %xmm1
; STRICT-NEXT: cmplesd %xmm0, %xmm1
; STRICT-NEXT: andpd %xmm1, %xmm2
@@ -794,7 +794,7 @@ define double @oge_inverse_y(double %x) {
;
; FINITE-LABEL: oge_inverse_y:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -806,7 +806,7 @@ define double @oge_inverse_y(double %x) {
define double @ole_inverse_y(double %x) {
; STRICT-LABEL: ole_inverse_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm2 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmplesd %xmm2, %xmm1
; STRICT-NEXT: andpd %xmm1, %xmm2
@@ -822,7 +822,7 @@ define double @ole_inverse_y(double %x) {
;
; FINITE-LABEL: ole_inverse_y:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -834,7 +834,7 @@ define double @ole_inverse_y(double %x) {
define double @ugt_y(double %x) {
; STRICT-LABEL: ugt_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: movapd %xmm0, %xmm2
; STRICT-NEXT: cmpnlesd %xmm1, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm0
@@ -854,7 +854,7 @@ define double @ugt_y(double %x) {
define double @ult_y(double %x) {
; STRICT-LABEL: ult_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: movapd %xmm1, %xmm2
; STRICT-NEXT: cmpnlesd %xmm0, %xmm2
; STRICT-NEXT: andpd %xmm2, %xmm0
@@ -874,7 +874,7 @@ define double @ult_y(double %x) {
define double @ugt_inverse_y(double %x) {
; STRICT-LABEL: ugt_inverse_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm2 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: movapd %xmm0, %xmm1
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
; STRICT-NEXT: andpd %xmm1, %xmm2
@@ -890,7 +890,7 @@ define double @ugt_inverse_y(double %x) {
;
; FINITE-LABEL: ugt_inverse_y:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -902,7 +902,7 @@ define double @ugt_inverse_y(double %x) {
define double @ult_inverse_y(double %x) {
; STRICT-LABEL: ult_inverse_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm2 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: movapd %xmm2, %xmm1
; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
; STRICT-NEXT: andpd %xmm1, %xmm2
@@ -918,7 +918,7 @@ define double @ult_inverse_y(double %x) {
;
; FINITE-LABEL: ult_inverse_y:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -930,7 +930,7 @@ define double @ult_inverse_y(double %x) {
define double @uge_y(double %x) {
; STRICT-LABEL: uge_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
@@ -947,7 +947,7 @@ define double @uge_y(double %x) {
define double @ule_y(double %x) {
; STRICT-LABEL: ule_y:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
@@ -974,7 +974,7 @@ define double @uge_inverse_y(double %x) {
;
; FINITE-LABEL: uge_inverse_y:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -996,7 +996,7 @@ define double @ule_inverse_y(double %x) {
;
; FINITE-LABEL: ule_inverse_y:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -1010,7 +1010,7 @@ define double @ule_inverse_y(double %x) {
define double @clampTo3k_a(double %x) {
; STRICT-LABEL: clampTo3k_a:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
@@ -1022,7 +1022,7 @@ define double @clampTo3k_a(double %x) {
;
; FINITE-LABEL: clampTo3k_a:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -1044,7 +1044,7 @@ define double @clampTo3k_b(double %x) {
;
; FINITE-LABEL: clampTo3k_b:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -1056,7 +1056,7 @@ define double @clampTo3k_b(double %x) {
define double @clampTo3k_c(double %x) {
; STRICT-LABEL: clampTo3k_c:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
@@ -1068,7 +1068,7 @@ define double @clampTo3k_c(double %x) {
;
; FINITE-LABEL: clampTo3k_c:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -1090,7 +1090,7 @@ define double @clampTo3k_d(double %x) {
;
; FINITE-LABEL: clampTo3k_d:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -1102,7 +1102,7 @@ define double @clampTo3k_d(double %x) {
define double @clampTo3k_e(double %x) {
; STRICT-LABEL: clampTo3k_e:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; STRICT-NEXT: maxsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
@@ -1114,7 +1114,7 @@ define double @clampTo3k_e(double %x) {
;
; FINITE-LABEL: clampTo3k_e:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -1136,7 +1136,7 @@ define double @clampTo3k_f(double %x) {
;
; FINITE-LABEL: clampTo3k_f:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; FINITE-NEXT: maxsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -1148,7 +1148,7 @@ define double @clampTo3k_f(double %x) {
define double @clampTo3k_g(double %x) {
; STRICT-LABEL: clampTo3k_g:
; STRICT: # %bb.0:
-; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; STRICT-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; STRICT-NEXT: minsd %xmm0, %xmm1
; STRICT-NEXT: movapd %xmm1, %xmm0
; STRICT-NEXT: retq
@@ -1160,7 +1160,7 @@ define double @clampTo3k_g(double %x) {
;
; FINITE-LABEL: clampTo3k_g:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -1182,7 +1182,7 @@ define double @clampTo3k_h(double %x) {
;
; FINITE-LABEL: clampTo3k_h:
; FINITE: # %bb.0:
-; FINITE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; FINITE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
; FINITE-NEXT: minsd %xmm0, %xmm1
; FINITE-NEXT: movapd %xmm1, %xmm0
; FINITE-NEXT: retq
@@ -1344,7 +1344,7 @@ define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) {
define float @ossfuzz13838(float %x) {
; ALL-LABEL: ossfuzz13838:
; ALL: # %bb.0: # %bb
-; ALL-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ALL-NEXT: movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
; ALL-NEXT: retq
bb:
%cmp2 = fcmp fast olt float %x, 2.550000e+02
diff --git a/llvm/test/CodeGen/X86/sse1.ll b/llvm/test/CodeGen/X86/sse1.ll
index f048002bf56f25a..8ac86d11d89e6cc 100644
--- a/llvm/test/CodeGen/X86/sse1.ll
+++ b/llvm/test/CodeGen/X86/sse1.ll
@@ -52,17 +52,17 @@ define <4 x float> @vselect(ptr%p, <4 x i32> %q) {
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT: jne .LBB1_5
; X86-NEXT: .LBB1_4:
-; X86-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT: movss {{.*#+}} xmm2 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT: jne .LBB1_8
; X86-NEXT: .LBB1_7:
-; X86-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X86-NEXT: movss {{.*#+}} xmm3 = [4.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT: je .LBB1_10
; X86-NEXT: jmp .LBB1_11
; X86-NEXT: .LBB1_1:
-; X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: movss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT: je .LBB1_4
; X86-NEXT: .LBB1_5: # %entry
@@ -75,7 +75,7 @@ define <4 x float> @vselect(ptr%p, <4 x i32> %q) {
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT: jne .LBB1_11
; X86-NEXT: .LBB1_10:
-; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X86-NEXT: .LBB1_11: # %entry
; X86-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -91,17 +91,17 @@ define <4 x float> @vselect(ptr%p, <4 x i32> %q) {
; X64-NEXT: testl %ecx, %ecx
; X64-NEXT: jne .LBB1_5
; X64-NEXT: .LBB1_4:
-; X64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm2 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: testl %r8d, %r8d
; X64-NEXT: jne .LBB1_8
; X64-NEXT: .LBB1_7:
-; X64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm3 = [4.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT: testl %esi, %esi
; X64-NEXT: je .LBB1_10
; X64-NEXT: jmp .LBB1_11
; X64-NEXT: .LBB1_1:
-; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm1 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: testl %ecx, %ecx
; X64-NEXT: je .LBB1_4
; X64-NEXT: .LBB1_5: # %entry
@@ -114,7 +114,7 @@ define <4 x float> @vselect(ptr%p, <4 x i32> %q) {
; X64-NEXT: testl %esi, %esi
; X64-NEXT: jne .LBB1_11
; X64-NEXT: .LBB1_10:
-; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; X64-NEXT: .LBB1_11: # %entry
; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
diff --git a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
index eba390733794eba..adf4fc28208e788 100644
--- a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -1679,20 +1679,20 @@ define <2 x double> @test_mm_cvtsi32_sd(<2 x double> %a0, i32 %a1) nounwind {
define <2 x i64> @test_mm_cvtsi32_si128(i32 %a0) nounwind {
; X86-SSE-LABEL: test_mm_cvtsi32_si128:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_cvtsi32_si128:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_cvtsi32_si128:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_cvtsi32_si128:
@@ -2000,58 +2000,58 @@ define <2 x double> @test_mm_load_sd(ptr %a0) nounwind {
; X86-SSE-LABEL: test_mm_load_sd:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movsd (%eax), %xmm0 # encoding: [0xf2,0x0f,0x10,0x00]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x00]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_load_sd:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX1-NEXT: vmovsd (%eax), %xmm0 # encoding: [0xc5,0xfb,0x10,0x00]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_load_sd:
; X86-AVX512: # %bb.0:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT: vmovsd (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_load_sd:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movsd (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x10,0x07]
-; X64-SSE-NEXT: # xmm0 = mem[0],zero
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x07]
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_load_sd:
; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; X64-AVX1-NEXT: # xmm0 = mem[0],zero
+; X64-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_load_sd:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; X64-AVX512-NEXT: # xmm0 = mem[0],zero
+; X64-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_load_sd:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movsd (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x10,0x07]
-; X32-SSE-NEXT: # xmm0 = mem[0],zero
+; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE-NEXT: # encoding: [0x67,0xf2,0x0f,0x10,0x07]
; X32-SSE-NEXT: retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_load_sd:
; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vmovsd (%edi), %xmm0 # encoding: [0x67,0xc5,0xfb,0x10,0x07]
-; X32-AVX1-NEXT: # xmm0 = mem[0],zero
+; X32-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX1-NEXT: # encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX1-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_load_sd:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: vmovsd (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
-; X32-AVX512-NEXT: # xmm0 = mem[0],zero
+; X32-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX512-NEXT: retq # encoding: [0xc3]
%ld = load double, ptr %a0, align 1
%res0 = insertelement <2 x double> undef, double %ld, i32 0
@@ -2115,8 +2115,8 @@ define <2 x double> @test_mm_load1_pd(ptr %a0) nounwind {
; X86-SSE-LABEL: test_mm_load1_pd:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movsd (%eax), %xmm0 # encoding: [0xf2,0x0f,0x10,0x00]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x00]
; X86-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X86-SSE-NEXT: # xmm0 = xmm0[0,0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
@@ -2137,8 +2137,8 @@ define <2 x double> @test_mm_load1_pd(ptr %a0) nounwind {
;
; X64-SSE-LABEL: test_mm_load1_pd:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movsd (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x10,0x07]
-; X64-SSE-NEXT: # xmm0 = mem[0],zero
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x07]
; X64-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X64-SSE-NEXT: # xmm0 = xmm0[0,0]
; X64-SSE-NEXT: retq # encoding: [0xc3]
@@ -2157,8 +2157,8 @@ define <2 x double> @test_mm_load1_pd(ptr %a0) nounwind {
;
; X32-SSE-LABEL: test_mm_load1_pd:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movsd (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x10,0x07]
-; X32-SSE-NEXT: # xmm0 = mem[0],zero
+; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE-NEXT: # encoding: [0x67,0xf2,0x0f,0x10,0x07]
; X32-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X32-SSE-NEXT: # xmm0 = xmm0[0,0]
; X32-SSE-NEXT: retq # encoding: [0xc3]
@@ -2246,58 +2246,58 @@ define <2 x i64> @test_mm_loadl_epi64(<2 x i64> %a0, ptr %a1) nounwind {
; X86-SSE-LABEL: test_mm_loadl_epi64:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movsd (%eax), %xmm0 # encoding: [0xf2,0x0f,0x10,0x00]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x00]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadl_epi64:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX1-NEXT: vmovsd (%eax), %xmm0 # encoding: [0xc5,0xfb,0x10,0x00]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadl_epi64:
; X86-AVX512: # %bb.0:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT: vmovsd (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadl_epi64:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movsd (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x10,0x07]
-; X64-SSE-NEXT: # xmm0 = mem[0],zero
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x07]
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadl_epi64:
; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; X64-AVX1-NEXT: # xmm0 = mem[0],zero
+; X64-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadl_epi64:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; X64-AVX512-NEXT: # xmm0 = mem[0],zero
+; X64-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadl_epi64:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movsd (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x10,0x07]
-; X32-SSE-NEXT: # xmm0 = mem[0],zero
+; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE-NEXT: # encoding: [0x67,0xf2,0x0f,0x10,0x07]
; X32-SSE-NEXT: retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadl_epi64:
; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vmovsd (%edi), %xmm0 # encoding: [0x67,0xc5,0xfb,0x10,0x07]
-; X32-AVX1-NEXT: # xmm0 = mem[0],zero
+; X32-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX1-NEXT: # encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX1-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadl_epi64:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: vmovsd (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
-; X32-AVX512-NEXT: # xmm0 = mem[0],zero
+; X32-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX512-NEXT: retq # encoding: [0xc3]
%ld = load i64, ptr %a1, align 1
%res0 = insertelement <2 x i64> undef, i64 %ld, i32 0
@@ -2540,58 +2540,58 @@ define <2 x i64> @test_mm_loadu_si64(ptr nocapture readonly %A) {
; X86-SSE-LABEL: test_mm_loadu_si64:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movsd (%eax), %xmm0 # encoding: [0xf2,0x0f,0x10,0x00]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x00]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadu_si64:
; X86-AVX1: # %bb.0: # %entry
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX1-NEXT: vmovsd (%eax), %xmm0 # encoding: [0xc5,0xfb,0x10,0x00]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadu_si64:
; X86-AVX512: # %bb.0: # %entry
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT: vmovsd (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadu_si64:
; X64-SSE: # %bb.0: # %entry
-; X64-SSE-NEXT: movsd (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x10,0x07]
-; X64-SSE-NEXT: # xmm0 = mem[0],zero
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x07]
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadu_si64:
; X64-AVX1: # %bb.0: # %entry
-; X64-AVX1-NEXT: vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
-; X64-AVX1-NEXT: # xmm0 = mem[0],zero
+; X64-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadu_si64:
; X64-AVX512: # %bb.0: # %entry
-; X64-AVX512-NEXT: vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
-; X64-AVX512-NEXT: # xmm0 = mem[0],zero
+; X64-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadu_si64:
; X32-SSE: # %bb.0: # %entry
-; X32-SSE-NEXT: movsd (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x10,0x07]
-; X32-SSE-NEXT: # xmm0 = mem[0],zero
+; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE-NEXT: # encoding: [0x67,0xf2,0x0f,0x10,0x07]
; X32-SSE-NEXT: retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadu_si64:
; X32-AVX1: # %bb.0: # %entry
-; X32-AVX1-NEXT: vmovsd (%edi), %xmm0 # encoding: [0x67,0xc5,0xfb,0x10,0x07]
-; X32-AVX1-NEXT: # xmm0 = mem[0],zero
+; X32-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX1-NEXT: # encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX1-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadu_si64:
; X32-AVX512: # %bb.0: # %entry
-; X32-AVX512-NEXT: vmovsd (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
-; X32-AVX512-NEXT: # xmm0 = mem[0],zero
+; X32-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX512-NEXT: retq # encoding: [0xc3]
entry:
%0 = load i64, ptr %A, align 1
@@ -2603,58 +2603,58 @@ define <2 x i64> @test_mm_loadu_si32(ptr nocapture readonly %A) {
; X86-SSE-LABEL: test_mm_loadu_si32:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movss (%eax), %xmm0 # encoding: [0xf3,0x0f,0x10,0x00]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x00]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadu_si32:
; X86-AVX1: # %bb.0: # %entry
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX1-NEXT: vmovss (%eax), %xmm0 # encoding: [0xc5,0xfa,0x10,0x00]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x00]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadu_si32:
; X86-AVX512: # %bb.0: # %entry
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT: vmovss (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x00]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x00]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadu_si32:
; X64-SSE: # %bb.0: # %entry
-; X64-SSE-NEXT: movss (%rdi), %xmm0 # encoding: [0xf3,0x0f,0x10,0x07]
-; X64-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x07]
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadu_si32:
; X64-AVX1: # %bb.0: # %entry
-; X64-AVX1-NEXT: vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
-; X64-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x07]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadu_si32:
; X64-AVX512: # %bb.0: # %entry
-; X64-AVX512-NEXT: vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; X64-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadu_si32:
; X32-SSE: # %bb.0: # %entry
-; X32-SSE-NEXT: movss (%edi), %xmm0 # encoding: [0x67,0xf3,0x0f,0x10,0x07]
-; X32-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X32-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE-NEXT: # encoding: [0x67,0xf3,0x0f,0x10,0x07]
; X32-SSE-NEXT: retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadu_si32:
; X32-AVX1: # %bb.0: # %entry
-; X32-AVX1-NEXT: vmovss (%edi), %xmm0 # encoding: [0x67,0xc5,0xfa,0x10,0x07]
-; X32-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X32-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-AVX1-NEXT: # encoding: [0x67,0xc5,0xfa,0x10,0x07]
; X32-AVX1-NEXT: retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadu_si32:
; X32-AVX512: # %bb.0: # %entry
-; X32-AVX512-NEXT: vmovss (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfa,0x10,0x07]
-; X32-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X32-AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfa,0x10,0x07]
; X32-AVX512-NEXT: retq # encoding: [0xc3]
entry:
%0 = load i32, ptr %A, align 1
@@ -3937,16 +3937,16 @@ define <2 x i64> @test_mm_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4,
define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
; X86-SSE-LABEL: test_mm_set_epi32:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x08]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x08]
; X86-SSE-NEXT: unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x0c]
-; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x0c]
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
; X86-SSE-NEXT: unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
@@ -3955,8 +3955,8 @@ define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind
;
; X86-AVX1-LABEL: test_mm_set_epi32:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x10]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x10]
; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x01]
; X86-AVX1-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x02]
; X86-AVX1-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x03]
@@ -3964,8 +3964,8 @@ define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind
;
; X86-AVX512-LABEL: test_mm_set_epi32:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x10]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x10]
; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x01]
; X86-AVX512-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x02]
; X86-AVX512-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x03]
@@ -4043,16 +4043,16 @@ define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind
define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
; X86-SSE-LABEL: test_mm_set_epi64x:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x08]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x08]
; X86-SSE-NEXT: unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x0c]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x10]
-; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x0c]
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x10]
; X86-SSE-NEXT: unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
@@ -4061,8 +4061,8 @@ define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
;
; X86-AVX1-LABEL: test_mm_set_epi64x:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x0c]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x0c]
; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x01]
; X86-AVX1-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x02]
; X86-AVX1-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x03]
@@ -4070,8 +4070,8 @@ define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
;
; X86-AVX512-LABEL: test_mm_set_epi64x:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x0c]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x0c]
; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x01]
; X86-AVX512-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x02]
; X86-AVX512-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x03]
@@ -4132,30 +4132,30 @@ define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
define <2 x double> @test_mm_set_pd(double %a0, double %a1) nounwind {
; X86-SSE-LABEL: test_mm_set_pd:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x0c]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
-; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf2,0x0f,0x10,0x4c,0x24,0x04]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x0c]
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x4c,0x24,0x04]
; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_pd:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero
-; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 # encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
-; X86-AVX1-NEXT: # xmm1 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
; X86-AVX1-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
; X86-AVX1-NEXT: # xmm0 = xmm0[0],xmm1[0]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_pd:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero
-; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
-; X86-AVX512-NEXT: # xmm1 = mem[0],zero
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT: vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
; X86-AVX512-NEXT: # xmm0 = xmm0[0],xmm1[0]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
@@ -4205,24 +4205,24 @@ define <2 x double> @test_mm_set_pd(double %a0, double %a1) nounwind {
define <2 x double> @test_mm_set_pd1(double %a0) nounwind {
; X86-SSE-LABEL: test_mm_set_pd1:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X86-SSE-NEXT: # xmm0 = xmm0[0,0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_pd1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_pd1:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
@@ -4270,24 +4270,24 @@ define <2 x double> @test_mm_set_pd1(double %a0) nounwind {
define <2 x double> @test_mm_set_sd(double %a0) nounwind {
; X86-SSE-LABEL: test_mm_set_sd:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movq {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x7e,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
+; X86-SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x7e,0x44,0x24,0x04]
; X86-SSE-NEXT: movq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x7e,0xc0]
; X86-SSE-NEXT: # xmm0 = xmm0[0],zero
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_sd:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovq {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x7e,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x7e,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x7e,0xc0]
; X86-AVX1-NEXT: # xmm0 = xmm0[0],zero
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_sd:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovq {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero
+; X86-AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
; X86-AVX512-NEXT: # xmm0 = xmm0[0],zero
; X86-AVX512-NEXT: retl # encoding: [0xc3]
@@ -4513,16 +4513,16 @@ define <2 x i64> @test_mm_set1_epi16(i16 %a0) nounwind {
define <2 x i64> @test_mm_set1_epi32(i32 %a0) nounwind {
; X86-SSE-LABEL: test_mm_set1_epi32:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movd {{[0-9]+}}(%esp), %xmm0 # encoding: [0x66,0x0f,0x6e,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0x66,0x0f,0x6e,0x44,0x24,0x04]
; X86-SSE-NEXT: pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X86-SSE-NEXT: # xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set1_epi32:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT: vshufps $0, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0xc6,0xc0,0x00]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,0,0,0]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
@@ -4583,10 +4583,10 @@ define <2 x i64> @test_mm_set1_epi32(i32 %a0) nounwind {
define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
; X86-SSE-LABEL: test_mm_set1_epi64x:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movd {{[0-9]+}}(%esp), %xmm0 # encoding: [0x66,0x0f,0x6e,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movd {{[0-9]+}}(%esp), %xmm1 # encoding: [0x66,0x0f,0x6e,0x4c,0x24,0x08]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0x66,0x0f,0x6e,0x44,0x24,0x04]
+; X86-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0x66,0x0f,0x6e,0x4c,0x24,0x08]
; X86-SSE-NEXT: punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE-NEXT: pshufd $68, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x44]
@@ -4595,8 +4595,8 @@ define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
;
; X86-AVX1-LABEL: test_mm_set1_epi64x:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX1-NEXT: vpshufd $68, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x44]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,1,0,1]
@@ -4604,8 +4604,8 @@ define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
;
; X86-AVX512-LABEL: test_mm_set1_epi64x:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX512-NEXT: vpbroadcastq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xc0]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
@@ -4655,24 +4655,24 @@ define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
define <2 x double> @test_mm_set1_pd(double %a0) nounwind {
; X86-SSE-LABEL: test_mm_set1_pd:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT: movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X86-SSE-NEXT: # xmm0 = xmm0[0,0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set1_pd:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT: # xmm0 = xmm0[0,0]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set1_pd:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT: # xmm0 = xmm0[0,0]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
@@ -5328,16 +5328,16 @@ define <2 x i64> @test_mm_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4
define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
; X86-SSE-LABEL: test_mm_setr_epi32:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
; X86-SSE-NEXT: unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
-; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT: unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
@@ -5346,8 +5346,8 @@ define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwin
;
; X86-AVX1-LABEL: test_mm_setr_epi32:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX1-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
; X86-AVX1-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
@@ -5355,8 +5355,8 @@ define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwin
;
; X86-AVX512-LABEL: test_mm_setr_epi32:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX512-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
; X86-AVX512-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
@@ -5434,16 +5434,16 @@ define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwin
define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
; X86-SSE-LABEL: test_mm_setr_epi64x:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
; X86-SSE-NEXT: unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
; X86-SSE-NEXT: # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm2 # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
-; X86-SSE-NEXT: # xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
+; X86-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
; X86-SSE-NEXT: unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
@@ -5452,8 +5452,8 @@ define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
;
; X86-AVX1-LABEL: test_mm_setr_epi64x:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX1-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX1-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
; X86-AVX1-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
@@ -5461,8 +5461,8 @@ define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
;
; X86-AVX512-LABEL: test_mm_setr_epi64x:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX512-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX512-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
; X86-AVX512-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
@@ -5523,30 +5523,30 @@ define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
define <2 x double> @test_mm_setr_pd(double %a0, double %a1) nounwind {
; X86-SSE-LABEL: test_mm_setr_pd:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm1 # encoding: [0xf2,0x0f,0x10,0x4c,0x24,0x0c]
-; X86-SSE-NEXT: # xmm1 = mem[0],zero
-; X86-SSE-NEXT: movsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x4c,0x24,0x0c]
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT: movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X86-SSE-NEXT: # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT: retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_setr_pd:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero
-; X86-AVX1-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 # encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
-; X86-AVX1-NEXT: # xmm1 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
; X86-AVX1-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x16,0xc0]
; X86-AVX1-NEXT: # xmm0 = xmm1[0],xmm0[0]
; X86-AVX1-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_setr_pd:
; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero
-; X86-AVX512-NEXT: vmovsd {{[0-9]+}}(%esp), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
-; X86-AVX512-NEXT: # xmm1 = mem[0],zero
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
; X86-AVX512-NEXT: # xmm0 = xmm1[0],xmm0[0]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
@@ -5925,8 +5925,8 @@ define double @test_mm_sqrt_sd_scalar(double %a0) nounwind {
; X86-SSE-NEXT: movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-SSE-NEXT: andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
; X86-SSE-NEXT: subl $8, %esp # encoding: [0x83,0xec,0x08]
-; X86-SSE-NEXT: movsd 8(%ebp), %xmm0 # encoding: [0xf2,0x0f,0x10,0x45,0x08]
-; X86-SSE-NEXT: # xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: # encoding: [0xf2,0x0f,0x10,0x45,0x08]
; X86-SSE-NEXT: sqrtsd %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x51,0xc0]
; X86-SSE-NEXT: movsd %xmm0, (%esp) # encoding: [0xf2,0x0f,0x11,0x04,0x24]
; X86-SSE-NEXT: fldl (%esp) # encoding: [0xdd,0x04,0x24]
@@ -5940,8 +5940,8 @@ define double @test_mm_sqrt_sd_scalar(double %a0) nounwind {
; X86-AVX1-NEXT: movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-AVX1-NEXT: andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
; X86-AVX1-NEXT: subl $8, %esp # encoding: [0x83,0xec,0x08]
-; X86-AVX1-NEXT: vmovsd 8(%ebp), %xmm0 # encoding: [0xc5,0xfb,0x10,0x45,0x08]
-; X86-AVX1-NEXT: # xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: # encoding: [0xc5,0xfb,0x10,0x45,0x08]
; X86-AVX1-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x51,0xc0]
; X86-AVX1-NEXT: vmovsd %xmm0, (%esp) # encoding: [0xc5,0xfb,0x11,0x04,0x24]
; X86-AVX1-NEXT: fldl (%esp) # encoding: [0xdd,0x04,0x24]
@@ -5955,8 +5955,8 @@ define double @test_mm_sqrt_sd_scalar(double %a0) nounwind {
; X86-AVX512-NEXT: movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-AVX512-NEXT: andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
; X86-AVX512-NEXT: subl $8, %esp # encoding: [0x83,0xec,0x08]
-; X86-AVX512-NEXT: vmovsd 8(%ebp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x45,0x08]
-; X86-AVX512-NEXT: # xmm0 = mem[0],zero
+; X86-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x45,0x08]
; X86-AVX512-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
; X86-AVX512-NEXT: vmovsd %xmm0, (%esp) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x04,0x24]
; X86-AVX512-NEXT: fldl (%esp) # encoding: [0xdd,0x04,0x24]
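
[Note on the hunks above: the "mem[0],zero,zero,zero" / "mem[0],zero" text that moves onto the instruction line is the scalar-load shuffle comment - movss/movsd/movd/movq write lane 0 and zero every upper lane. A minimal standalone C++ sketch of that semantic, illustrative only (the helper name is made up, this is not code from the patch):

  #include <array>

  // movss from memory: lane 0 takes the scalar, lanes 1-3 are zeroed,
  // which is exactly what the "mem[0],zero,zero,zero" comment spells out.
  std::array<float, 4> movss_load(const float *Mem) {
    return {Mem[0], 0.0f, 0.0f, 0.0f};
  }
]
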
diff --git a/llvm/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll b/llvm/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
index 21dcddf2a05cfc3..f6b0df153c260ca 100644
--- a/llvm/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
+++ b/llvm/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
@@ -711,8 +711,8 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, ptr %p1) {
; X86-SSE-LABEL: test_x86_sse2_cvtss2sd_load:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movss (%eax), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x08]
-; X86-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x08]
; X86-SSE-NEXT: cvtss2sd %xmm1, %xmm1 ## encoding: [0xf3,0x0f,0x5a,0xc9]
; X86-SSE-NEXT: movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
; X86-SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1]
@@ -721,8 +721,8 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, ptr %p1) {
; X86-AVX1-LABEL: test_x86_sse2_cvtss2sd_load:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX1-NEXT: vmovss (%eax), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x08]
-; X86-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: ## encoding: [0xc5,0xfa,0x10,0x08]
; X86-AVX1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf2,0x5a,0xc9]
; X86-AVX1-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; X86-AVX1-NEXT: ## xmm0 = xmm1[0,1],xmm0[2,3]
@@ -731,8 +731,8 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, ptr %p1) {
; X86-AVX512-LABEL: test_x86_sse2_cvtss2sd_load:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT: vmovss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
-; X86-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
; X86-AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0xc9]
; X86-AVX512-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; X86-AVX512-NEXT: ## xmm0 = xmm1[0,1],xmm0[2,3]
@@ -740,8 +740,8 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, ptr %p1) {
;
; X64-SSE-LABEL: test_x86_sse2_cvtss2sd_load:
; X64-SSE: ## %bb.0:
-; X64-SSE-NEXT: movss (%rdi), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x0f]
-; X64-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x0f]
; X64-SSE-NEXT: cvtss2sd %xmm1, %xmm1 ## encoding: [0xf3,0x0f,0x5a,0xc9]
; X64-SSE-NEXT: movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
; X64-SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1]
@@ -749,8 +749,8 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, ptr %p1) {
;
; X64-AVX1-LABEL: test_x86_sse2_cvtss2sd_load:
; X64-AVX1: ## %bb.0:
-; X64-AVX1-NEXT: vmovss (%rdi), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x0f]
-; X64-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT: ## encoding: [0xc5,0xfa,0x10,0x0f]
; X64-AVX1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf2,0x5a,0xc9]
; X64-AVX1-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; X64-AVX1-NEXT: ## xmm0 = xmm1[0,1],xmm0[2,3]
@@ -758,8 +758,8 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, ptr %p1) {
;
; X64-AVX512-LABEL: test_x86_sse2_cvtss2sd_load:
; X64-AVX512: ## %bb.0:
-; X64-AVX512-NEXT: vmovss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
-; X64-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
; X64-AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0xc9]
; X64-AVX512-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; X64-AVX512-NEXT: ## xmm0 = xmm1[0,1],xmm0[2,3]
diff --git a/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll b/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll
index f94dd2e97dc1a67..a88510591ceb888 100644
--- a/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll
+++ b/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll
@@ -439,7 +439,7 @@ define <4 x float> @test15(<4 x float> %A, <4 x float> %B) {
define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test16:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm3 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm3, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm4
@@ -460,7 +460,7 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
;
; AVX-LABEL: test16:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsubss %xmm2, %xmm0, %xmm3
; AVX-NEXT: vshufpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-NEXT: vshufpd {{.*#+}} xmm5 = xmm1[1,0]
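
[Where the load source is a constant pool entry, as in test16 above, the comment now prints the decoded constant instead of the opaque shuffle form. A hedged sketch of just the comment layout - the in-tree logic lives in addConstantComments() and reads the actual constant pool; the helper below is purely illustrative and its name is made up:

  #include <cstdio>

  // Render "reg = [<elt>,0.0E+0,...]" for a scalar constant load that
  // zeroes the upper elements, e.g. movss (4 x f32) or movsd (2 x f64).
  static void printScalarConstComment(const char *Reg, const char *Elt,
                                      unsigned NumElts) {
    std::printf("%s = [%s", Reg, Elt);
    for (unsigned I = 1; I != NumElts; ++I)
      std::printf(",0.0E+0");
    std::printf("]\n");
  }

  int main() {
    printScalarConstComment("xmm3", "4.2E+1", 4); // movss of float 42.0
    printScalarConstComment("xmm1", "9.2233720368547758E+18", 2); // movsd
    return 0;
  }
]
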
diff --git a/llvm/test/CodeGen/X86/sse41.ll b/llvm/test/CodeGen/X86/sse41.ll
index f1ae16a7c96b10f..703834729205a70 100644
--- a/llvm/test/CodeGen/X86/sse41.ll
+++ b/llvm/test/CodeGen/X86/sse41.ll
@@ -343,24 +343,24 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) noun
define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind {
; X86-SSE-LABEL: blendps_not_insertps_1:
; X86-SSE: ## %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
-; X86-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
; X86-SSE-NEXT: blendps $1, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x01]
; X86-SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: blendps_not_insertps_1:
; X86-AVX1: ## %bb.0:
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
-; X86-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX1-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; X86-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: blendps_not_insertps_1:
; X86-AVX512: ## %bb.0:
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
-; X86-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; X86-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
@@ -386,24 +386,24 @@ define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind
define <4 x float> @insertps_or_blendps(<4 x float> %t1, float %t2) minsize nounwind {
; X86-SSE-LABEL: insertps_or_blendps:
; X86-SSE: ## %bb.0:
-; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
-; X86-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss {{[0-9]+}}(%esp), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
; X86-SSE-NEXT: movss %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x10,0xc1]
; X86-SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: insertps_or_blendps:
; X86-AVX1: ## %bb.0:
-; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
-; X86-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX1-NEXT: vmovss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x10,0xc1]
; X86-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: insertps_or_blendps:
; X86-AVX512: ## %bb.0:
-; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
-; X86-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; X86-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
@@ -1639,8 +1639,8 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
-; X86-SSE-NEXT: movss (%ecx,%eax,4), %xmm4 ## encoding: [0xf3,0x0f,0x10,0x24,0x81]
-; X86-SSE-NEXT: ## xmm4 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss (%ecx,%eax,4), %xmm4 ## xmm4 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x24,0x81]
; X86-SSE-NEXT: insertps $48, %xmm4, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc4,0x30]
; X86-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm4[0]
; X86-SSE-NEXT: insertps $48, %xmm4, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x21,0xcc,0x30]
@@ -1692,8 +1692,8 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
;
; X64-SSE-LABEL: insertps_from_broadcast_multiple_use:
; X64-SSE: ## %bb.0:
-; X64-SSE-NEXT: movss (%rdi,%rsi,4), %xmm4 ## encoding: [0xf3,0x0f,0x10,0x24,0xb7]
-; X64-SSE-NEXT: ## xmm4 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss (%rdi,%rsi,4), %xmm4 ## xmm4 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x24,0xb7]
; X64-SSE-NEXT: insertps $48, %xmm4, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc4,0x30]
; X64-SSE-NEXT: ## xmm0 = xmm0[0,1,2],xmm4[0]
; X64-SSE-NEXT: insertps $48, %xmm4, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x21,0xcc,0x30]
@@ -1758,8 +1758,8 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, ptr %b) {
; X86-SSE-LABEL: insertps_with_undefs:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-SSE-NEXT: movss (%eax), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x08]
-; X86-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movss (%eax), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x08]
; X86-SSE-NEXT: movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
; X86-SSE-NEXT: ## xmm1 = xmm1[0],xmm0[0]
; X86-SSE-NEXT: movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
@@ -1768,8 +1768,8 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, ptr %b) {
; X86-AVX1-LABEL: insertps_with_undefs:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX1-NEXT: vmovss (%eax), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x08]
-; X86-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vmovss (%eax), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: ## encoding: [0xc5,0xfa,0x10,0x08]
; X86-AVX1-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
; X86-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[0]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
@@ -1777,16 +1777,16 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, ptr %b) {
; X86-AVX512-LABEL: insertps_with_undefs:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; X86-AVX512-NEXT: vmovss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
-; X86-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: vmovss (%eax), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
; X86-AVX512-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
; X86-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[0]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: insertps_with_undefs:
; X64-SSE: ## %bb.0:
-; X64-SSE-NEXT: movss (%rdi), %xmm1 ## encoding: [0xf3,0x0f,0x10,0x0f]
-; X64-SSE-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movss (%rdi), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: ## encoding: [0xf3,0x0f,0x10,0x0f]
; X64-SSE-NEXT: movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
; X64-SSE-NEXT: ## xmm1 = xmm1[0],xmm0[0]
; X64-SSE-NEXT: movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
@@ -1794,16 +1794,16 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, ptr %b) {
;
; X64-AVX1-LABEL: insertps_with_undefs:
; X64-AVX1: ## %bb.0:
-; X64-AVX1-NEXT: vmovss (%rdi), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x0f]
-; X64-AVX1-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT: vmovss (%rdi), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX1-NEXT: ## encoding: [0xc5,0xfa,0x10,0x0f]
; X64-AVX1-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc0]
; X64-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[0]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: insertps_with_undefs:
; X64-AVX512: ## %bb.0:
-; X64-AVX512-NEXT: vmovss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
-; X64-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT: vmovss (%rdi), %xmm1 ## xmm1 = mem[0],zero,zero,zero
+; X64-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
; X64-AVX512-NEXT: vmovlhps %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
; X64-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[0]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avx2.ll b/llvm/test/CodeGen/X86/stack-folding-int-avx2.ll
index e7302ea1d5351d9..5a55eb247b68853 100644
--- a/llvm/test/CodeGen/X86/stack-folding-int-avx2.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-int-avx2.ll
@@ -17,7 +17,7 @@ define <4 x double> @stack_fold_broadcastsd_ymm(<2 x double> %a0) {
; CHECK-NEXT: nop
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
-; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = [4.9406564584124654E-324,0.0E+0]
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
@@ -35,7 +35,7 @@ define <4 x float> @stack_fold_broadcastss(<4 x float> %a0) {
; CHECK-NEXT: nop
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: vbroadcastss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
@@ -53,7 +53,7 @@ define <8 x float> @stack_fold_broadcastss_ymm(<4 x float> %a0) {
; CHECK-NEXT: nop
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: vbroadcastss {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
-; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
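
[The 4.9406564584124654E-324 decoded in stack_fold_broadcastsd_ymm above is the i64 bit pattern 0x1 reinterpreted as an f64, i.e. the smallest positive subnormal (2^-1074) - a value the old "mem[0],zero" comment hid. A quick standalone check, illustrative only and not part of the patch (C++20 for std::bit_cast):

  #include <bit>
  #include <cassert>
  #include <cstdint>

  int main() {
    // Bit pattern 0x1 as a double is the minimal positive subnormal.
    static_assert(std::bit_cast<double>(std::uint64_t{1}) > 0.0);
    assert(std::bit_cast<double>(std::uint64_t{1}) ==
           4.9406564584124654E-324);
    return 0;
  }
]
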
diff --git a/llvm/test/CodeGen/X86/swifterror.ll b/llvm/test/CodeGen/X86/swifterror.ll
index 8fff6405d0d893d..75252309790b1b6 100644
--- a/llvm/test/CodeGen/X86/swifterror.ll
+++ b/llvm/test/CodeGen/X86/swifterror.ll
@@ -17,7 +17,7 @@ define float @foo(ptr swifterror %error_ptr_ref) {
; CHECK-APPLE-NEXT: movl $16, %edi
; CHECK-APPLE-NEXT: callq _malloc
; CHECK-APPLE-NEXT: movb $1, 8(%rax)
-; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-APPLE-NEXT: movq %rax, %r12
; CHECK-APPLE-NEXT: popq %rax
; CHECK-APPLE-NEXT: retq
@@ -30,7 +30,7 @@ define float @foo(ptr swifterror %error_ptr_ref) {
; CHECK-O0-NEXT: callq _malloc
; CHECK-O0-NEXT: movq %rax, %r12
; CHECK-O0-NEXT: movb $1, 8(%rax)
-; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-O0-NEXT: popq %rax
; CHECK-O0-NEXT: retq
;
@@ -83,7 +83,7 @@ define float @caller(ptr %error_ref) {
; CHECK-APPLE-NEXT: movb %al, (%rbx)
; CHECK-APPLE-NEXT: LBB1_2: ## %handler
; CHECK-APPLE-NEXT: callq _free
-; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-APPLE-NEXT: addq $8, %rsp
; CHECK-APPLE-NEXT: popq %rbx
; CHECK-APPLE-NEXT: popq %r12
@@ -112,7 +112,7 @@ define float @caller(ptr %error_ref) {
; CHECK-O0-NEXT: LBB1_2: ## %handler
; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi ## 8-byte Reload
; CHECK-O0-NEXT: callq _free
-; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-O0-NEXT: addq $32, %rsp
; CHECK-O0-NEXT: popq %r12
; CHECK-O0-NEXT: retq
@@ -187,7 +187,7 @@ define float @caller2(ptr %error_ref) {
; CHECK-APPLE-NEXT: LBB2_4: ## %handler
; CHECK-APPLE-NEXT: movq %r12, %rdi
; CHECK-APPLE-NEXT: callq _free
-; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-APPLE-NEXT: addq $8, %rsp
; CHECK-APPLE-NEXT: popq %rbx
; CHECK-APPLE-NEXT: popq %r12
@@ -215,7 +215,7 @@ define float @caller2(ptr %error_ref) {
; CHECK-O0-NEXT: ## in Loop: Header=BB2_1 Depth=1
; CHECK-O0-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 4-byte Reload
; CHECK-O0-NEXT: ## xmm0 = mem[0],zero,zero,zero
-; CHECK-O0-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-O0-NEXT: ucomiss %xmm1, %xmm0
; CHECK-O0-NEXT: jbe LBB2_1
; CHECK-O0-NEXT: ## %bb.3: ## %bb_end
@@ -226,7 +226,7 @@ define float @caller2(ptr %error_ref) {
; CHECK-O0-NEXT: LBB2_4: ## %handler
; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi ## 8-byte Reload
; CHECK-O0-NEXT: callq _free
-; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-O0-NEXT: addq $32, %rsp
; CHECK-O0-NEXT: popq %r12
; CHECK-O0-NEXT: retq
@@ -310,7 +310,7 @@ define float @foo_if(ptr swifterror %error_ptr_ref, i32 %cc) {
; CHECK-APPLE-NEXT: movl $16, %edi
; CHECK-APPLE-NEXT: callq _malloc
; CHECK-APPLE-NEXT: movb $1, 8(%rax)
-; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-APPLE-NEXT: movq %rax, %r12
; CHECK-APPLE-NEXT: popq %rax
; CHECK-APPLE-NEXT: retq
@@ -330,7 +330,7 @@ define float @foo_if(ptr swifterror %error_ptr_ref, i32 %cc) {
; CHECK-O0-NEXT: callq _malloc
; CHECK-O0-NEXT: movq %rax, %r12
; CHECK-O0-NEXT: movb $1, 8(%rax)
-; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-O0-NEXT: popq %rax
; CHECK-O0-NEXT: retq
; CHECK-O0-NEXT: LBB3_2: ## %normal
@@ -444,7 +444,7 @@ define float @foo_loop(ptr swifterror %error_ptr_ref, i32 %cc, float %cc2) {
; CHECK-O0-NEXT: ## xmm0 = mem[0],zero,zero,zero
; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax ## 8-byte Reload
; CHECK-O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
-; CHECK-O0-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: movss {{.*#+}} xmm1 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-O0-NEXT: ucomiss %xmm1, %xmm0
; CHECK-O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; CHECK-O0-NEXT: jbe LBB4_1
@@ -632,7 +632,7 @@ define float @caller3(ptr %error_ref) {
; CHECK-APPLE-NEXT: movb %al, (%rbx)
; CHECK-APPLE-NEXT: LBB6_2: ## %handler
; CHECK-APPLE-NEXT: callq _free
-; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-APPLE-NEXT: addq $40, %rsp
; CHECK-APPLE-NEXT: popq %rbx
; CHECK-APPLE-NEXT: popq %r12
@@ -663,7 +663,7 @@ define float @caller3(ptr %error_ref) {
; CHECK-O0-NEXT: LBB6_2: ## %handler
; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi ## 8-byte Reload
; CHECK-O0-NEXT: callq _free
-; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-O0-NEXT: addq $48, %rsp
; CHECK-O0-NEXT: popq %r12
; CHECK-O0-NEXT: retq
@@ -757,7 +757,7 @@ define float @caller_with_multiple_swifterror_values(ptr %error_ref, ptr %error_
; CHECK-APPLE-NEXT: movb %al, (%rbx)
; CHECK-APPLE-NEXT: LBB7_4: ## %handler2
; CHECK-APPLE-NEXT: callq _free
-; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-APPLE-NEXT: leaq -24(%rbp), %rsp
; CHECK-APPLE-NEXT: popq %rbx
; CHECK-APPLE-NEXT: popq %r12
@@ -810,7 +810,7 @@ define float @caller_with_multiple_swifterror_values(ptr %error_ref, ptr %error_
; CHECK-O0-NEXT: LBB7_4: ## %handler2
; CHECK-O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi ## 8-byte Reload
; CHECK-O0-NEXT: callq _free
-; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-O0-NEXT: leaq -8(%rbp), %rsp
; CHECK-O0-NEXT: popq %r12
; CHECK-O0-NEXT: popq %rbp
@@ -1033,7 +1033,7 @@ define swiftcc float @foo_swiftcc(ptr swifterror %error_ptr_ref) {
; CHECK-APPLE-NEXT: movl $16, %edi
; CHECK-APPLE-NEXT: callq _malloc
; CHECK-APPLE-NEXT: movb $1, 8(%rax)
-; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-APPLE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-APPLE-NEXT: movq %rax, %r12
; CHECK-APPLE-NEXT: popq %rax
; CHECK-APPLE-NEXT: retq
@@ -1046,7 +1046,7 @@ define swiftcc float @foo_swiftcc(ptr swifterror %error_ptr_ref) {
; CHECK-O0-NEXT: callq _malloc
; CHECK-O0-NEXT: movq %rax, %r12
; CHECK-O0-NEXT: movb $1, 8(%rax)
-; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-O0-NEXT: popq %rax
; CHECK-O0-NEXT: retq
;
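
[In the strict fptoui tests that follow, the constants the new comments decode are the unsigned-conversion split points: 2^63 (printed 9.2233720368547758E+18 as f64, 9.22337203E+18 as f32) and 2^31 (printed 2.147483648E+9 / 2.14748365E+9). A small sanity check, illustrative only and not code from the patch:

  #include <cassert>
  #include <cmath>

  int main() {
    // The decimal strings the new comments print round-trip to the exact
    // powers of two used as fptoui saturation thresholds.
    assert(9.2233720368547758E+18 == std::ldexp(1.0, 63));
    assert(9.22337203E+18f == std::ldexp(1.0f, 63));
    assert(2.147483648E+9 == std::ldexp(1.0, 31));
    return 0;
  }
]
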
diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
index 349d94d930651b2..0981c45e9d80337 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
@@ -222,7 +222,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
; SSE-32-NEXT: .cfi_def_cfa_register %ebp
; SSE-32-NEXT: andl $-8, %esp
; SSE-32-NEXT: subl $24, %esp
-; SSE-32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-32-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; SSE-32-NEXT: comisd %xmm1, %xmm0
; SSE-32-NEXT: movapd %xmm1, %xmm2
; SSE-32-NEXT: jae .LBB1_2
@@ -280,7 +280,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
;
; SSE-64-LABEL: strict_vector_fptoui_v2f64_to_v2i64:
; SSE-64: # %bb.0:
-; SSE-64-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-64-NEXT: movsd {{.*#+}} xmm3 = [9.2233720368547758E+18,0.0E+0]
; SSE-64-NEXT: comisd %xmm3, %xmm0
; SSE-64-NEXT: xorpd %xmm2, %xmm2
; SSE-64-NEXT: xorpd %xmm1, %xmm1
@@ -323,7 +323,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $16, %esp
; AVX-32-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-32-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX-32-NEXT: vcomisd %xmm1, %xmm2
; AVX-32-NEXT: vmovapd %xmm1, %xmm3
; AVX-32-NEXT: jae .LBB1_2
@@ -364,7 +364,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
;
; AVX-64-LABEL: strict_vector_fptoui_v2f64_to_v2i64:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-64-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX-64-NEXT: vcomisd %xmm1, %xmm0
; AVX-64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX-64-NEXT: vxorpd %xmm3, %xmm3, %xmm3
@@ -405,7 +405,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
; AVX512F-32-NEXT: andl $-8, %esp
; AVX512F-32-NEXT: subl $16, %esp
; AVX512F-32-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX512F-32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-32-NEXT: vmovsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; AVX512F-32-NEXT: xorl %eax, %eax
; AVX512F-32-NEXT: vcomisd %xmm2, %xmm1
; AVX512F-32-NEXT: setae %al
@@ -458,7 +458,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
; AVX512VL-32-NEXT: andl $-8, %esp
; AVX512VL-32-NEXT: subl $16, %esp
; AVX512VL-32-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX512VL-32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-32-NEXT: vmovsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; AVX512VL-32-NEXT: xorl %eax, %eax
; AVX512VL-32-NEXT: vcomisd %xmm2, %xmm1
; AVX512VL-32-NEXT: setae %al
@@ -893,7 +893,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
; SSE-32-NEXT: .cfi_def_cfa_register %ebp
; SSE-32-NEXT: andl $-8, %esp
; SSE-32-NEXT: subl $24, %esp
-; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-32-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-32-NEXT: comiss %xmm1, %xmm0
; SSE-32-NEXT: movaps %xmm1, %xmm2
; SSE-32-NEXT: jae .LBB4_2
@@ -951,7 +951,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
;
; SSE-64-LABEL: strict_vector_fptoui_v2f32_to_v2i64:
; SSE-64: # %bb.0:
-; SSE-64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-64-NEXT: movss {{.*#+}} xmm3 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-64-NEXT: comiss %xmm3, %xmm0
; SSE-64-NEXT: xorps %xmm2, %xmm2
; SSE-64-NEXT: xorps %xmm1, %xmm1
@@ -994,7 +994,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $16, %esp
; AVX-32-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX-32-NEXT: vcomiss %xmm1, %xmm2
; AVX-32-NEXT: vmovaps %xmm1, %xmm3
; AVX-32-NEXT: jae .LBB4_2
@@ -1035,7 +1035,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
;
; AVX-64-LABEL: strict_vector_fptoui_v2f32_to_v2i64:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX-64-NEXT: vcomiss %xmm1, %xmm0
; AVX-64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-64-NEXT: vxorps %xmm3, %xmm3, %xmm3
@@ -1076,7 +1076,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
; AVX512F-32-NEXT: andl $-8, %esp
; AVX512F-32-NEXT: subl $16, %esp
; AVX512F-32-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512F-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vmovss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX512F-32-NEXT: xorl %eax, %eax
; AVX512F-32-NEXT: vcomiss %xmm2, %xmm1
; AVX512F-32-NEXT: setae %al
@@ -1129,7 +1129,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
; AVX512VL-32-NEXT: andl $-8, %esp
; AVX512VL-32-NEXT: subl $16, %esp
; AVX512VL-32-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX512VL-32-NEXT: xorl %eax, %eax
; AVX512VL-32-NEXT: vcomiss %xmm2, %xmm1
; AVX512VL-32-NEXT: setae %al
@@ -1201,7 +1201,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64_load128(ptr %x) strictfp {
; SSE-32-NEXT: subl $24, %esp
; SSE-32-NEXT: movl 8(%ebp), %eax
; SSE-32-NEXT: movaps (%eax), %xmm0
-; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-32-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-32-NEXT: comiss %xmm1, %xmm0
; SSE-32-NEXT: movaps %xmm1, %xmm2
; SSE-32-NEXT: jae .LBB5_2
@@ -1260,7 +1260,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64_load128(ptr %x) strictfp {
; SSE-64-LABEL: strict_vector_fptoui_v2f32_to_v2i64_load128:
; SSE-64: # %bb.0:
; SSE-64-NEXT: movaps (%rdi), %xmm1
-; SSE-64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-64-NEXT: movss {{.*#+}} xmm3 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-64-NEXT: comiss %xmm3, %xmm1
; SSE-64-NEXT: xorps %xmm2, %xmm2
; SSE-64-NEXT: xorps %xmm0, %xmm0
@@ -1304,7 +1304,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64_load128(ptr %x) strictfp {
; AVX-32-NEXT: movl 8(%ebp), %eax
; AVX-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX-32-NEXT: vcomiss %xmm1, %xmm2
; AVX-32-NEXT: vmovaps %xmm1, %xmm3
; AVX-32-NEXT: jae .LBB5_2
@@ -1347,7 +1347,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64_load128(ptr %x) strictfp {
; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-64-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX-64-NEXT: vcomiss %xmm1, %xmm3
; AVX-64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-64-NEXT: vxorps %xmm4, %xmm4, %xmm4
@@ -1389,7 +1389,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64_load128(ptr %x) strictfp {
; AVX512F-32-NEXT: movl 8(%ebp), %eax
; AVX512F-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX512F-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vmovss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX512F-32-NEXT: xorl %eax, %eax
; AVX512F-32-NEXT: vcomiss %xmm2, %xmm1
; AVX512F-32-NEXT: setae %al
@@ -1443,7 +1443,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64_load128(ptr %x) strictfp {
; AVX512VL-32-NEXT: movl 8(%ebp), %eax
; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX512VL-32-NEXT: xorl %eax, %eax
; AVX512VL-32-NEXT: vcomiss %xmm2, %xmm1
; AVX512VL-32-NEXT: setae %al
@@ -1561,7 +1561,7 @@ define <2 x i32> @strict_vector_fptosi_v2f64_to_v2i32(<2 x double> %a) #0 {
define <2 x i32> @strict_vector_fptoui_v2f64_to_v2i32(<2 x double> %a) #0 {
; SSE-32-LABEL: strict_vector_fptoui_v2f64_to_v2i32:
; SSE-32: # %bb.0:
-; SSE-32-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-32-NEXT: movsd {{.*#+}} xmm3 = [2.147483648E+9,0.0E+0]
; SSE-32-NEXT: comisd %xmm3, %xmm0
; SSE-32-NEXT: xorpd %xmm2, %xmm2
; SSE-32-NEXT: xorpd %xmm1, %xmm1
@@ -1717,7 +1717,7 @@ define <2 x i32> @strict_vector_fptosi_v2f32_to_v2i32(<2 x float> %a) #0 {
define <2 x i32> @strict_vector_fptoui_v2f32_to_v2i32(<2 x float> %a) #0 {
; SSE-32-LABEL: strict_vector_fptoui_v2f32_to_v2i32:
; SSE-32: # %bb.0:
-; SSE-32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-32-NEXT: movss {{.*#+}} xmm3 = [2.14748365E+9,0.0E+0,0.0E+0,0.0E+0]
; SSE-32-NEXT: comiss %xmm3, %xmm0
; SSE-32-NEXT: xorps %xmm2, %xmm2
; SSE-32-NEXT: xorps %xmm1, %xmm1
@@ -2384,7 +2384,7 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
; SSE-32-NEXT: .cfi_def_cfa_register %ebp
; SSE-32-NEXT: andl $-8, %esp
; SSE-32-NEXT: subl $24, %esp
-; SSE-32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-32-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; SSE-32-NEXT: comisd %xmm1, %xmm0
; SSE-32-NEXT: movapd %xmm1, %xmm2
; SSE-32-NEXT: jae .LBB19_2
@@ -2442,7 +2442,7 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
;
; SSE-64-LABEL: strict_vector_fptoui_v2f64_to_v2i1:
; SSE-64: # %bb.0:
-; SSE-64-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-64-NEXT: movsd {{.*#+}} xmm3 = [9.2233720368547758E+18,0.0E+0]
; SSE-64-NEXT: comisd %xmm3, %xmm0
; SSE-64-NEXT: xorpd %xmm2, %xmm2
; SSE-64-NEXT: xorpd %xmm1, %xmm1
@@ -2485,7 +2485,7 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $16, %esp
; AVX-32-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-32-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX-32-NEXT: vcomisd %xmm1, %xmm2
; AVX-32-NEXT: vmovapd %xmm1, %xmm3
; AVX-32-NEXT: jae .LBB19_2
@@ -2526,7 +2526,7 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
;
; AVX-64-LABEL: strict_vector_fptoui_v2f64_to_v2i1:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-64-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX-64-NEXT: vcomisd %xmm1, %xmm0
; AVX-64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX-64-NEXT: vxorpd %xmm3, %xmm3, %xmm3
@@ -2758,7 +2758,7 @@ define <2 x i1> @strict_vector_fptoui_v2f32_to_v2i1(<2 x float> %a) #0 {
; SSE-32-NEXT: .cfi_def_cfa_register %ebp
; SSE-32-NEXT: andl $-8, %esp
; SSE-32-NEXT: subl $24, %esp
-; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-32-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-32-NEXT: comiss %xmm1, %xmm0
; SSE-32-NEXT: movaps %xmm1, %xmm2
; SSE-32-NEXT: jae .LBB21_2
@@ -2816,7 +2816,7 @@ define <2 x i1> @strict_vector_fptoui_v2f32_to_v2i1(<2 x float> %a) #0 {
;
; SSE-64-LABEL: strict_vector_fptoui_v2f32_to_v2i1:
; SSE-64: # %bb.0:
-; SSE-64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-64-NEXT: movss {{.*#+}} xmm3 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-64-NEXT: comiss %xmm3, %xmm0
; SSE-64-NEXT: xorps %xmm2, %xmm2
; SSE-64-NEXT: xorps %xmm1, %xmm1
@@ -2859,7 +2859,7 @@ define <2 x i1> @strict_vector_fptoui_v2f32_to_v2i1(<2 x float> %a) #0 {
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $16, %esp
; AVX-32-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX-32-NEXT: vcomiss %xmm1, %xmm2
; AVX-32-NEXT: vmovaps %xmm1, %xmm3
; AVX-32-NEXT: jae .LBB21_2
@@ -2900,7 +2900,7 @@ define <2 x i1> @strict_vector_fptoui_v2f32_to_v2i1(<2 x float> %a) #0 {
;
; AVX-64-LABEL: strict_vector_fptoui_v2f32_to_v2i1:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX-64-NEXT: vcomiss %xmm1, %xmm0
; AVX-64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-64-NEXT: vxorps %xmm3, %xmm3, %xmm3
diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
index b28211bb4388f58..cba3b0944514856 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
@@ -227,7 +227,7 @@ define <4 x i64> @strict_vector_fptoui_v4f64_to_v4i64(<4 x double> %a) #0 {
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $32, %esp
; AVX-32-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-32-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX-32-NEXT: vcomisd %xmm1, %xmm2
; AVX-32-NEXT: vmovapd %xmm1, %xmm3
; AVX-32-NEXT: jae .LBB1_2
@@ -306,7 +306,7 @@ define <4 x i64> @strict_vector_fptoui_v4f64_to_v4i64(<4 x double> %a) #0 {
; AVX-64-LABEL: strict_vector_fptoui_v4f64_to_v4i64:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX-64-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-64-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX-64-NEXT: vcomisd %xmm1, %xmm3
; AVX-64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX-64-NEXT: vxorpd %xmm4, %xmm4, %xmm4
@@ -379,7 +379,7 @@ define <4 x i64> @strict_vector_fptoui_v4f64_to_v4i64(<4 x double> %a) #0 {
; AVX512F-32-NEXT: .cfi_offset %ebx, -12
; AVX512F-32-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512F-32-NEXT: vshufpd {{.*#+}} xmm3 = xmm2[1,0]
-; AVX512F-32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-32-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX512F-32-NEXT: xorl %eax, %eax
; AVX512F-32-NEXT: vcomisd %xmm1, %xmm3
; AVX512F-32-NEXT: setae %al
@@ -472,7 +472,7 @@ define <4 x i64> @strict_vector_fptoui_v4f64_to_v4i64(<4 x double> %a) #0 {
; AVX512VL-32-NEXT: .cfi_offset %ebx, -12
; AVX512VL-32-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512VL-32-NEXT: vshufpd {{.*#+}} xmm3 = xmm2[1,0]
-; AVX512VL-32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-32-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX512VL-32-NEXT: xorl %eax, %eax
; AVX512VL-32-NEXT: vcomisd %xmm1, %xmm3
; AVX512VL-32-NEXT: setae %al
@@ -758,7 +758,7 @@ define <4 x i64> @strict_vector_fptoui_v4f32_to_v4i64(<4 x float> %a) #0 {
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $32, %esp
; AVX-32-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX-32-NEXT: vcomiss %xmm1, %xmm2
; AVX-32-NEXT: vmovaps %xmm1, %xmm3
; AVX-32-NEXT: jae .LBB3_2
@@ -837,7 +837,7 @@ define <4 x i64> @strict_vector_fptoui_v4f32_to_v4i64(<4 x float> %a) #0 {
; AVX-64-LABEL: strict_vector_fptoui_v4f32_to_v4i64:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX-64-NEXT: vcomiss %xmm1, %xmm3
; AVX-64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-64-NEXT: vxorps %xmm4, %xmm4, %xmm4
@@ -909,7 +909,7 @@ define <4 x i64> @strict_vector_fptoui_v4f32_to_v4i64(<4 x float> %a) #0 {
; AVX512F-32-NEXT: subl $40, %esp
; AVX512F-32-NEXT: .cfi_offset %ebx, -12
; AVX512F-32-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX512F-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX512F-32-NEXT: xorl %eax, %eax
; AVX512F-32-NEXT: vcomiss %xmm1, %xmm2
; AVX512F-32-NEXT: setae %al
@@ -1002,7 +1002,7 @@ define <4 x i64> @strict_vector_fptoui_v4f32_to_v4i64(<4 x float> %a) #0 {
; AVX512VL-32-NEXT: subl $40, %esp
; AVX512VL-32-NEXT: .cfi_offset %ebx, -12
; AVX512VL-32-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX512VL-32-NEXT: xorl %eax, %eax
; AVX512VL-32-NEXT: vcomiss %xmm1, %xmm2
; AVX512VL-32-NEXT: setae %al
diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
index 17c5ff7955106a2..ff00779d90e5c43 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
@@ -151,7 +151,7 @@ define <8 x i64> @strict_vector_fptoui_v8f64_to_v8i64(<8 x double> %a) #0 {
; AVX512VL-32-NEXT: .cfi_offset %ebx, -12
; AVX512VL-32-NEXT: vextractf32x4 $3, %zmm0, %xmm2
; AVX512VL-32-NEXT: vshufpd {{.*#+}} xmm3 = xmm2[1,0]
-; AVX512VL-32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-32-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX512VL-32-NEXT: xorl %eax, %eax
; AVX512VL-32-NEXT: vcomisd %xmm1, %xmm3
; AVX512VL-32-NEXT: setae %al
@@ -443,7 +443,7 @@ define <8 x i64> @strict_vector_fptoui_v8f32_to_v8i64(<8 x float> %a) #0 {
; AVX512VL-32-NEXT: .cfi_offset %ebx, -12
; AVX512VL-32-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512VL-32-NEXT: vshufps {{.*#+}} xmm3 = xmm2[3,3,3,3]
-; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512VL-32-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX512VL-32-NEXT: xorl %eax, %eax
; AVX512VL-32-NEXT: vcomiss %xmm1, %xmm3
; AVX512VL-32-NEXT: setae %al
diff --git a/llvm/test/CodeGen/X86/vec_fp_to_int.ll b/llvm/test/CodeGen/X86/vec_fp_to_int.ll
index a49f7e990976021..cb19e202e79473c 100644
--- a/llvm/test/CodeGen/X86/vec_fp_to_int.ll
+++ b/llvm/test/CodeGen/X86/vec_fp_to_int.ll
@@ -250,7 +250,7 @@ define <4 x i32> @fptosi_4f64_to_4i32(<4 x double> %a) {
define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_2i64:
; SSE: # %bb.0:
-; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: subsd %xmm2, %xmm1
; SSE-NEXT: cvttsd2si %xmm1, %rax
@@ -275,7 +275,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
;
; VEX-LABEL: fptoui_2f64_to_2i64:
; VEX: # %bb.0:
-; VEX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; VEX-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; VEX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttsd2si %xmm2, %rax
; VEX-NEXT: vcvttsd2si %xmm0, %rcx
@@ -511,7 +511,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_4i64:
; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm2
-; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = [9.2233720368547758E+18,0.0E+0]
; SSE-NEXT: subsd %xmm3, %xmm0
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: cvttsd2si %xmm2, %rcx
@@ -555,7 +555,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; AVX1-LABEL: fptoui_4f64_to_4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX1-NEXT: vsubsd %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vcvttsd2si %xmm3, %rax
; AVX1-NEXT: vcvttsd2si %xmm2, %rcx
@@ -598,7 +598,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; AVX2-LABEL: fptoui_4f64_to_4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX2-NEXT: vsubsd %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttsd2si %xmm3, %rax
; AVX2-NEXT: vcvttsd2si %xmm2, %rcx
@@ -1279,7 +1279,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptoui_2f32_to_2i64:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
; SSE-NEXT: cvttss2si %xmm1, %rax
@@ -1304,7 +1304,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
;
; VEX-LABEL: fptoui_2f32_to_2i64:
; VEX: # %bb.0:
-; VEX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; VEX-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; VEX-NEXT: vsubss %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttss2si %xmm2, %rax
; VEX-NEXT: vcvttss2si %xmm0, %rcx
@@ -1365,7 +1365,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_2i64:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
; SSE-NEXT: cvttss2si %xmm1, %rax
@@ -1391,7 +1391,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; VEX-LABEL: fptoui_4f32_to_2i64:
; VEX: # %bb.0:
; VEX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; VEX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; VEX-NEXT: vmovss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; VEX-NEXT: vsubss %xmm2, %xmm1, %xmm3
; VEX-NEXT: vcvttss2si %xmm3, %rax
; VEX-NEXT: vcvttss2si %xmm1, %rcx
@@ -1520,7 +1520,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i64:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm1, %xmm2
; SSE-NEXT: cvttss2si %xmm2, %rax
@@ -1567,7 +1567,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; AVX1-LABEL: fptoui_4f32_to_4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vcvttss2si %xmm3, %rax
; AVX1-NEXT: vcvttss2si %xmm2, %rcx
@@ -1610,7 +1610,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; AVX2-LABEL: fptoui_4f32_to_4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
@@ -1704,7 +1704,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptoui_8f32_to_4i64:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm1, %xmm2
; SSE-NEXT: cvttss2si %xmm2, %rax
@@ -1751,7 +1751,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; AVX1-LABEL: fptoui_8f32_to_4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vcvttss2si %xmm3, %rax
; AVX1-NEXT: vcvttss2si %xmm2, %rcx
@@ -1794,7 +1794,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; AVX2-LABEL: fptoui_8f32_to_4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
@@ -2684,7 +2684,7 @@ define <2 x i64> @fptoui_2f32_to_2i64_load(ptr %x) {
; SSE-LABEL: fptoui_2f32_to_2i64_load:
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: subss %xmm2, %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rax
@@ -2709,7 +2709,7 @@ define <2 x i64> @fptoui_2f32_to_2i64_load(ptr %x) {
; VEX-LABEL: fptoui_2f32_to_2i64_load:
; VEX: # %bb.0:
; VEX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; VEX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; VEX-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; VEX-NEXT: vsubss %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttss2si %xmm2, %rax
; VEX-NEXT: vcvttss2si %xmm0, %rcx
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll
index 5f39f33dd6a5caf..ff208678c9bc759 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll
@@ -4,8 +4,8 @@
define <1 x float> @constrained_vector_fma_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm1 = [5.0E-1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
; CHECK-NEXT: retq
entry:
@@ -38,14 +38,14 @@ entry:
define <3 x float> @constrained_vector_fma_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v3f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = [5.0E-1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: vmovss {{.*#+}} xmm1 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + mem
-; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: vmovss {{.*#+}} xmm2 = [5.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vfmadd213ss {{.*#+}} xmm2 = (xmm0 * xmm2) + mem
-; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: vmovss {{.*#+}} xmm3 = [4.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: vfmadd213ss {{.*#+}} xmm3 = (xmm0 * xmm3) + mem
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0],xmm3[0],xmm2[2,3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
@@ -63,8 +63,8 @@ entry:
define <3 x double> @constrained_vector_fma_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v3f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = [5.0E-1,0.0E+0]
+; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = [3.5E+0,0.0E+0]
; CHECK-NEXT: vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + mem
; CHECK-NEXT: vmovapd {{.*#+}} xmm0 = [2.5E+0,1.5E+0]
; CHECK-NEXT: vmovapd {{.*#+}} xmm2 = [5.5E+0,4.5E+0]
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index cdabd7fab081cbe..acf45fc4bbeba4f 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -7,13 +7,13 @@
define <1 x float> @constrained_vector_fdiv_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fdiv_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -49,12 +49,12 @@ entry:
define <3 x float> @constrained_vector_fdiv_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v3f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss %xmm1, %xmm2
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss %xmm1, %xmm0
-; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm3 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: divss %xmm1, %xmm3
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
@@ -62,12 +62,12 @@ define <3 x float> @constrained_vector_fdiv_v3f32() #0 {
;
; AVX-LABEL: constrained_vector_fdiv_v3f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm0, %xmm2, %xmm2
-; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm3 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vdivss %xmm0, %xmm3, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
@@ -86,7 +86,7 @@ define <3 x double> @constrained_vector_fdiv_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: divsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movapd %xmm0, %xmm1
@@ -97,7 +97,7 @@ define <3 x double> @constrained_vector_fdiv_v3f64() #0 {
;
; AVX-LABEL: constrained_vector_fdiv_v3f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [3.0E+0,0.0E+0]
; AVX-NEXT: vdivsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovapd {{.*#+}} xmm1 = [1.0E+0,2.0E+0]
; AVX-NEXT: vdivpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -150,8 +150,8 @@ define <1 x float> @constrained_vector_frem_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fmodf@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -161,8 +161,8 @@ define <1 x float> @constrained_vector_frem_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fmodf@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -181,12 +181,12 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [2.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; CHECK-NEXT: callq fmod@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; CHECK-NEXT: callq fmod@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -198,12 +198,12 @@ define <2 x double> @constrained_vector_frem_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [2.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -224,16 +224,16 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fmodf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fmodf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fmodf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -248,16 +248,16 @@ define <3 x float> @constrained_vector_frem_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fmodf@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fmodf@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.0E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fmodf@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -280,16 +280,16 @@ define <3 x double> @constrained_vector_frem_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [2.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; CHECK-NEXT: callq fmod@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; CHECK-NEXT: callq fmod@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [3.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; CHECK-NEXT: callq fmod@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -306,18 +306,18 @@ define <3 x double> @constrained_vector_frem_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [2.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [3.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -339,22 +339,22 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [2.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; CHECK-NEXT: callq fmod@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; CHECK-NEXT: callq fmod@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; CHECK-NEXT: callq fmod@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [3.0E+0,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; CHECK-NEXT: callq fmod@PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -368,22 +368,22 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
; AVX: # %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [3.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [2.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -404,13 +404,13 @@ define <4 x double> @constrained_vector_frem_v4f64() #0 {
define <1 x float> @constrained_vector_fmul_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fmul_v1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [+Inf,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fmul_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [+Inf,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -447,10 +447,10 @@ entry:
define <3 x float> @constrained_vector_fmul_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fmul_v3f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [+Inf,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [1.0E+2,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: mulss %xmm1, %xmm2
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: mulss %xmm1, %xmm0
; CHECK-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -459,7 +459,7 @@ define <3 x float> @constrained_vector_fmul_v3f32() #0 {
;
; AVX-LABEL: constrained_vector_fmul_v3f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [+Inf,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; AVX-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -481,7 +481,7 @@ define <3 x double> @constrained_vector_fmul_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: mulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [1.7976931348623157E+308,0.0E+0]
; CHECK-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movapd %xmm0, %xmm1
@@ -492,7 +492,7 @@ define <3 x double> @constrained_vector_fmul_v3f64() #0 {
;
; AVX-LABEL: constrained_vector_fmul_v3f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.7976931348623157E+308,0.0E+0]
; AVX-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
; AVX-NEXT: # xmm1 = mem[0,0]
@@ -537,13 +537,13 @@ entry:
define <1 x float> @constrained_vector_fadd_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fadd_v1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [+Inf,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [+Inf,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -581,9 +581,9 @@ define <3 x float> @constrained_vector_fadd_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fadd_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm1, %xmm1
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: addss %xmm2, %xmm1
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: addss %xmm2, %xmm0
; CHECK-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -593,7 +593,7 @@ define <3 x float> @constrained_vector_fadd_v3f32() #0 {
; AVX-LABEL: constrained_vector_fadd_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -671,13 +671,13 @@ entry:
define <1 x float> @constrained_vector_fsub_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fsub_v1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [+Inf,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [+Inf,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsubss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -715,7 +715,7 @@ define <3 x float> @constrained_vector_fsub_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fsub_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm0, %xmm0
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movaps %xmm1, %xmm2
; CHECK-NEXT: subss %xmm0, %xmm2
; CHECK-NEXT: movaps %xmm1, %xmm0
@@ -728,7 +728,7 @@ define <3 x float> @constrained_vector_fsub_v3f32() #0 {
; AVX-LABEL: constrained_vector_fsub_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vsubss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX-NEXT: vsubss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -749,7 +749,7 @@ define <3 x double> @constrained_vector_fsub_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fsub_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorpd %xmm0, %xmm0
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [-1.7976931348623157E+308,0.0E+0]
; CHECK-NEXT: subsd %xmm0, %xmm1
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
; CHECK-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -763,7 +763,7 @@ define <3 x double> @constrained_vector_fsub_v3f64() #0 {
; AVX-LABEL: constrained_vector_fsub_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [-1.7976931348623157E+308,0.0E+0]
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
; AVX-NEXT: # xmm1 = mem[0,0]
@@ -808,13 +808,13 @@ entry:
define <1 x float> @constrained_vector_sqrt_v1f32() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: sqrtss %xmm0, %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -846,11 +846,11 @@ entry:
define <3 x float> @constrained_vector_sqrt_v3f32() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v3f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: sqrtss %xmm0, %xmm1
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: sqrtss %xmm0, %xmm0
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: sqrtss %xmm2, %xmm2
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -858,11 +858,11 @@ define <3 x float> @constrained_vector_sqrt_v3f32() #0 {
;
; AVX-LABEL: constrained_vector_sqrt_v3f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsqrtss %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vsqrtss %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
@@ -878,7 +878,7 @@ entry:
define <3 x double> @constrained_vector_sqrt_v3f64() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v3f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: sqrtsd %xmm0, %xmm1
; CHECK-NEXT: sqrtpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
@@ -890,7 +890,7 @@ define <3 x double> @constrained_vector_sqrt_v3f64() #0 {
;
; AVX-LABEL: constrained_vector_sqrt_v3f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vsqrtpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -928,8 +928,8 @@ define <1 x float> @constrained_vector_pow_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq powf@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -939,8 +939,8 @@ define <1 x float> @constrained_vector_pow_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq powf@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -959,12 +959,12 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -976,12 +976,12 @@ define <2 x double> @constrained_vector_pow_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1002,16 +1002,16 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq powf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq powf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq powf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -1026,16 +1026,16 @@ define <3 x float> @constrained_vector_pow_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq powf@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq powf@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [3.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq powf@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -1058,16 +1058,16 @@ define <3 x double> @constrained_vector_pow_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -1084,18 +1084,18 @@ define <3 x double> @constrained_vector_pow_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -1117,22 +1117,22 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2399999999999999E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; CHECK-NEXT: callq pow@PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -1146,22 +1146,22 @@ define <4 x double> @constrained_vector_pow_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2399999999999999E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
; AVX-NEXT: callq pow@PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1185,7 +1185,7 @@ define <1 x float> @constrained_vector_powi_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powisf2@PLT
; CHECK-NEXT: popq %rax
@@ -1196,7 +1196,7 @@ define <1 x float> @constrained_vector_powi_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powisf2@PLT
; AVX-NEXT: popq %rax
@@ -1216,11 +1216,11 @@ define <2 x double> @constrained_vector_powi_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
@@ -1233,11 +1233,11 @@ define <2 x double> @constrained_vector_powi_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
@@ -1259,15 +1259,15 @@ define <3 x float> @constrained_vector_powi_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powisf2@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powisf2@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powisf2@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
@@ -1283,15 +1283,15 @@ define <3 x float> @constrained_vector_powi_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powisf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powisf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powisf2@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
@@ -1315,15 +1315,15 @@ define <3 x double> @constrained_vector_powi_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
@@ -1341,17 +1341,17 @@ define <3 x double> @constrained_vector_powi_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __powidf2@PLT
@@ -1374,21 +1374,21 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2399999999999999E+1,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2@PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
@@ -1403,21 +1403,21 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2399999999999999E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
@@ -1441,7 +1441,7 @@ define <1 x float> @constrained_vector_sin_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq sinf@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -1451,7 +1451,7 @@ define <1 x float> @constrained_vector_sin_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq sinf@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -1469,10 +1469,10 @@ define <2 x double> @constrained_vector_sin_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq sin@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq sin@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1484,10 +1484,10 @@ define <2 x double> @constrained_vector_sin_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1507,13 +1507,13 @@ define <3 x float> @constrained_vector_sin_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq sinf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq sinf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq sinf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -1528,13 +1528,13 @@ define <3 x float> @constrained_vector_sin_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq sinf@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq sinf@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq sinf@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -1556,13 +1556,13 @@ define <3 x double> @constrained_vector_sin_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq sin@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq sin@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq sin@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -1579,15 +1579,15 @@ define <3 x double> @constrained_vector_sin_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -1608,18 +1608,18 @@ define <4 x double> @constrained_vector_sin_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq sin@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq sin@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: callq sin@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq sin@PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -1633,18 +1633,18 @@ define <4 x double> @constrained_vector_sin_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq sin@PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1666,7 +1666,7 @@ define <1 x float> @constrained_vector_cos_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq cosf@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -1676,7 +1676,7 @@ define <1 x float> @constrained_vector_cos_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq cosf@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -1694,10 +1694,10 @@ define <2 x double> @constrained_vector_cos_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq cos@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq cos@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1709,10 +1709,10 @@ define <2 x double> @constrained_vector_cos_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1732,13 +1732,13 @@ define <3 x float> @constrained_vector_cos_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq cosf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq cosf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq cosf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -1753,13 +1753,13 @@ define <3 x float> @constrained_vector_cos_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq cosf@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq cosf@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq cosf@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -1781,13 +1781,13 @@ define <3 x double> @constrained_vector_cos_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq cos@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq cos@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq cos@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -1804,15 +1804,15 @@ define <3 x double> @constrained_vector_cos_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -1833,18 +1833,18 @@ define <4 x double> @constrained_vector_cos_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq cos@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq cos@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: callq cos@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq cos@PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -1858,18 +1858,18 @@ define <4 x double> @constrained_vector_cos_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq cos@PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1891,7 +1891,7 @@ define <1 x float> @constrained_vector_exp_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq expf@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -1901,7 +1901,7 @@ define <1 x float> @constrained_vector_exp_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq expf@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -1919,10 +1919,10 @@ define <2 x double> @constrained_vector_exp_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq exp@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq exp@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1934,10 +1934,10 @@ define <2 x double> @constrained_vector_exp_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -1957,13 +1957,13 @@ define <3 x float> @constrained_vector_exp_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq expf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq expf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq expf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -1978,13 +1978,13 @@ define <3 x float> @constrained_vector_exp_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq expf@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq expf@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq expf@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -2006,13 +2006,13 @@ define <3 x double> @constrained_vector_exp_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq exp@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq exp@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq exp@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -2029,15 +2029,15 @@ define <3 x double> @constrained_vector_exp_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -2058,18 +2058,18 @@ define <4 x double> @constrained_vector_exp_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq exp@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq exp@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: callq exp@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq exp@PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -2083,18 +2083,18 @@ define <4 x double> @constrained_vector_exp_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq exp@PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2116,7 +2116,7 @@ define <1 x float> @constrained_vector_exp2_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq exp2f@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -2126,7 +2126,7 @@ define <1 x float> @constrained_vector_exp2_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq exp2f@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -2144,10 +2144,10 @@ define <2 x double> @constrained_vector_exp2_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq exp2@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq exp2@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2159,10 +2159,10 @@ define <2 x double> @constrained_vector_exp2_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2182,13 +2182,13 @@ define <3 x float> @constrained_vector_exp2_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq exp2f@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq exp2f@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq exp2f@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2203,13 +2203,13 @@ define <3 x float> @constrained_vector_exp2_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq exp2f@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq exp2f@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq exp2f@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -2231,13 +2231,13 @@ define <3 x double> @constrained_vector_exp2_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq exp2@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq exp2@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq exp2@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -2254,15 +2254,15 @@ define <3 x double> @constrained_vector_exp2_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -2283,18 +2283,18 @@ define <4 x double> @constrained_vector_exp2_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq exp2@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq exp2@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2399999999999999E+1,0.0E+0]
; CHECK-NEXT: callq exp2@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: callq exp2@PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -2308,18 +2308,18 @@ define <4 x double> @constrained_vector_exp2_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2399999999999999E+1,0.0E+0]
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2341,7 +2341,7 @@ define <1 x float> @constrained_vector_log_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq logf@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -2351,7 +2351,7 @@ define <1 x float> @constrained_vector_log_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq logf@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -2369,10 +2369,10 @@ define <2 x double> @constrained_vector_log_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq log@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq log@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2384,10 +2384,10 @@ define <2 x double> @constrained_vector_log_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq log@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2407,13 +2407,13 @@ define <3 x float> @constrained_vector_log_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq logf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq logf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq logf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2428,13 +2428,13 @@ define <3 x float> @constrained_vector_log_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq logf@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq logf@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq logf@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -2456,13 +2456,13 @@ define <3 x double> @constrained_vector_log_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq log@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq log@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq log@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -2479,15 +2479,15 @@ define <3 x double> @constrained_vector_log_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq log@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq log@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -2508,18 +2508,18 @@ define <4 x double> @constrained_vector_log_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq log@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq log@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: callq log@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq log@PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -2533,18 +2533,18 @@ define <4 x double> @constrained_vector_log_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; AVX-NEXT: callq log@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: callq log@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq log@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log@PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2566,7 +2566,7 @@ define <1 x float> @constrained_vector_log10_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq log10f@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -2576,7 +2576,7 @@ define <1 x float> @constrained_vector_log10_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq log10f@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -2594,10 +2594,10 @@ define <2 x double> @constrained_vector_log10_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq log10@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq log10@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2609,10 +2609,10 @@ define <2 x double> @constrained_vector_log10_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2632,13 +2632,13 @@ define <3 x float> @constrained_vector_log10_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq log10f@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq log10f@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq log10f@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2653,13 +2653,13 @@ define <3 x float> @constrained_vector_log10_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq log10f@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq log10f@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq log10f@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -2681,13 +2681,13 @@ define <3 x double> @constrained_vector_log10_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq log10@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq log10@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq log10@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -2704,15 +2704,15 @@ define <3 x double> @constrained_vector_log10_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -2733,18 +2733,18 @@ define <4 x double> @constrained_vector_log10_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq log10@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq log10@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: callq log10@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq log10@PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -2758,18 +2758,18 @@ define <4 x double> @constrained_vector_log10_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log10@PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2791,7 +2791,7 @@ define <1 x float> @constrained_vector_log2_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq log2f@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -2801,7 +2801,7 @@ define <1 x float> @constrained_vector_log2_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq log2f@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -2819,10 +2819,10 @@ define <2 x double> @constrained_vector_log2_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq log2@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq log2@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2834,10 +2834,10 @@ define <2 x double> @constrained_vector_log2_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq log2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log2@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -2857,13 +2857,13 @@ define <3 x float> @constrained_vector_log2_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq log2f@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq log2f@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq log2f@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -2878,13 +2878,13 @@ define <3 x float> @constrained_vector_log2_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq log2f@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq log2f@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq log2f@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -2906,13 +2906,13 @@ define <3 x double> @constrained_vector_log2_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq log2@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq log2@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq log2@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -2929,15 +2929,15 @@ define <3 x double> @constrained_vector_log2_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq log2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log2@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq log2@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -2958,18 +2958,18 @@ define <4 x double> @constrained_vector_log2_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq log2@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq log2@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: callq log2 at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq log2 at PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -2983,18 +2983,18 @@ define <4 x double> @constrained_vector_log2_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; AVX-NEXT: callq log2 at PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: callq log2 at PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: callq log2 at PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq log2 at PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -3016,7 +3016,7 @@ define <1 x float> @constrained_vector_rint_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq rintf at PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -3024,7 +3024,7 @@ define <1 x float> @constrained_vector_rint_v1f32() #0 {
;
; AVX-LABEL: constrained_vector_rint_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -3040,10 +3040,10 @@ define <2 x double> @constrained_vector_rint_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq rint at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq rint at PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -3068,13 +3068,13 @@ define <3 x float> @constrained_vector_rint_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq rintf at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq rintf at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq rintf at PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -3087,11 +3087,11 @@ define <3 x float> @constrained_vector_rint_v3f32() #0 {
;
; AVX-LABEL: constrained_vector_rint_v3f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $4, %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $4, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
@@ -3109,13 +3109,13 @@ define <3 x double> @constrained_vector_rint_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq rint at PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq rint at PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq rint at PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -3130,7 +3130,7 @@ define <3 x double> @constrained_vector_rint_v3f64() #0 {
;
; AVX-LABEL: constrained_vector_rint_v3f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $4, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -3148,18 +3148,18 @@ define <4 x double> @constrained_vector_rint_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq rint at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq rint at PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2399999999999999E+1,0.0E+0]
; CHECK-NEXT: callq rint at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: callq rint at PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -3187,7 +3187,7 @@ define <1 x float> @constrained_vector_nearbyint_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq nearbyintf at PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -3195,7 +3195,7 @@ define <1 x float> @constrained_vector_nearbyint_v1f32() #0 {
;
; AVX-LABEL: constrained_vector_nearbyint_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -3211,10 +3211,10 @@ define <2 x double> @constrained_vector_nearbyint_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq nearbyint at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq nearbyint at PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -3239,13 +3239,13 @@ define <3 x float> @constrained_vector_nearbyint_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq nearbyintf at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq nearbyintf at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq nearbyintf at PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -3258,11 +3258,11 @@ define <3 x float> @constrained_vector_nearbyint_v3f32() #0 {
;
; AVX-LABEL: constrained_vector_nearbyint_v3f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $12, %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $12, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
@@ -3280,13 +3280,13 @@ define <3 x double> @constrained_vector_nearby_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq nearbyint at PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq nearbyint at PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq nearbyint at PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -3301,7 +3301,7 @@ define <3 x double> @constrained_vector_nearby_v3f64() #0 {
;
; AVX-LABEL: constrained_vector_nearby_v3f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $12, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -3319,18 +3319,18 @@ define <4 x double> @constrained_vector_nearbyint_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: callq nearbyint at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: callq nearbyint at PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2399999999999999E+1,0.0E+0]
; CHECK-NEXT: callq nearbyint at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: callq nearbyint at PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -3358,8 +3358,8 @@ define <1 x float> @constrained_vector_maxnum_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.1E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fmaxf at PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -3369,8 +3369,8 @@ define <1 x float> @constrained_vector_maxnum_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.1E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fmaxf at PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -3387,12 +3387,12 @@ define <2 x double> @constrained_vector_maxnum_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; CHECK-NEXT: callq fmax at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.3E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; CHECK-NEXT: callq fmax at PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -3404,12 +3404,12 @@ define <2 x double> @constrained_vector_maxnum_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; AVX-NEXT: callq fmax at PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.3E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; AVX-NEXT: callq fmax at PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -3429,16 +3429,16 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.5E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fmaxf at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.1E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fmaxf at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fmaxf at PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -3453,16 +3453,16 @@ define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.5E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fmaxf at PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.1E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fmaxf at PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fmaxf at PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -3484,16 +3484,16 @@ define <3 x double> @constrained_vector_max_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.4E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; CHECK-NEXT: callq fmax at PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.3E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; CHECK-NEXT: callq fmax at PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.5E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq fmax at PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -3510,18 +3510,18 @@ define <3 x double> @constrained_vector_max_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.4E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; AVX-NEXT: callq fmax at PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.3E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; AVX-NEXT: callq fmax at PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.5E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq fmax at PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -3542,22 +3542,22 @@ define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.5E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; CHECK-NEXT: callq fmax at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.4E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; CHECK-NEXT: callq fmax at PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.7E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.3E+1,0.0E+0]
; CHECK-NEXT: callq fmax at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.6E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq fmax at PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -3571,22 +3571,22 @@ define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.7E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.3E+1,0.0E+0]
; AVX-NEXT: callq fmax at PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.6E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq fmax at PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.5E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; AVX-NEXT: callq fmax at PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.4E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; AVX-NEXT: callq fmax at PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -3609,8 +3609,8 @@ define <1 x float> @constrained_vector_minnum_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.1E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fminf at PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -3620,8 +3620,8 @@ define <1 x float> @constrained_vector_minnum_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.1E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fminf at PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -3638,12 +3638,12 @@ define <2 x double> @constrained_vector_minnum_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; CHECK-NEXT: callq fmin at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.3E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; CHECK-NEXT: callq fmin at PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -3655,12 +3655,12 @@ define <2 x double> @constrained_vector_minnum_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; AVX-NEXT: callq fmin at PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.3E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; AVX-NEXT: callq fmin at PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -3680,16 +3680,16 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.5E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fminf at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.1E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fminf at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq fminf at PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -3704,16 +3704,16 @@ define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.5E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fminf at PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.1E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fminf at PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq fminf at PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -3735,16 +3735,16 @@ define <3 x double> @constrained_vector_min_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.4E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; CHECK-NEXT: callq fmin at PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.3E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; CHECK-NEXT: callq fmin at PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.5E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq fmin at PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -3761,18 +3761,18 @@ define <3 x double> @constrained_vector_min_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.4E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; AVX-NEXT: callq fmin at PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.3E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; AVX-NEXT: callq fmin at PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.5E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq fmin at PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -3793,22 +3793,22 @@ define <4 x double> @constrained_vector_minnum_v4f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.5E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; CHECK-NEXT: callq fmin at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.4E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; CHECK-NEXT: callq fmin at PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.7E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.3E+1,0.0E+0]
; CHECK-NEXT: callq fmin at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.6E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; CHECK-NEXT: callq fmin at PLT
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -3822,22 +3822,22 @@ define <4 x double> @constrained_vector_minnum_v4f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.7E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.3E+1,0.0E+0]
; AVX-NEXT: callq fmin at PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.6E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; AVX-NEXT: callq fmin at PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.5E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.1E+1,0.0E+0]
; AVX-NEXT: callq fmin at PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.4E+1,0.0E+0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.0E+1,0.0E+0]
; AVX-NEXT: callq fmin at PLT
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -4478,8 +4478,8 @@ entry:
define <1 x i64> @constrained_vector_fptoui_v1i64_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v1i64_v1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm0, %xmm2
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: ja .LBB115_2
@@ -4496,8 +4496,8 @@ define <1 x i64> @constrained_vector_fptoui_v1i64_v1f32() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v1i64_v1f32:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX1-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm0, %xmm1
; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX1-NEXT: ja .LBB115_2
@@ -4526,8 +4526,8 @@ entry:
define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v2i64_v2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm2, %xmm1
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: xorps %xmm3, %xmm3
@@ -4542,7 +4542,7 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() #0 {
; CHECK-NEXT: shlq $63, %rcx
; CHECK-NEXT: xorq %rax, %rcx
; CHECK-NEXT: movq %rcx, %xmm2
-; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm3 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm3, %xmm1
; CHECK-NEXT: ja .LBB116_4
; CHECK-NEXT: # %bb.3: # %entry
@@ -4560,8 +4560,8 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v2i64_v2f32:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm2 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX1-NEXT: vmovss {{.*#+}} xmm0 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm2, %xmm0
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
@@ -4576,7 +4576,7 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() #0 {
; AVX1-NEXT: shlq $63, %rcx
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm2
-; AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm3 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm3, %xmm0
; AVX1-NEXT: ja .LBB116_4
; AVX1-NEXT: # %bb.3: # %entry
@@ -4617,8 +4617,8 @@ entry:
define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v3i64_v3f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm2, %xmm1
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: xorps %xmm3, %xmm3
@@ -4632,7 +4632,7 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() #0 {
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: shlq $63, %rax
; CHECK-NEXT: xorq %rcx, %rax
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm2, %xmm1
; CHECK-NEXT: xorps %xmm3, %xmm3
; CHECK-NEXT: ja .LBB117_4
@@ -4645,7 +4645,7 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() #0 {
; CHECK-NEXT: movzbl %dl, %edx
; CHECK-NEXT: shlq $63, %rdx
; CHECK-NEXT: xorq %rcx, %rdx
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm2, %xmm1
; CHECK-NEXT: ja .LBB117_6
; CHECK-NEXT: # %bb.5: # %entry
@@ -4661,8 +4661,8 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v3i64_v3f32:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm2 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX1-NEXT: vmovss {{.*#+}} xmm0 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm2, %xmm0
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
@@ -4677,7 +4677,7 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() #0 {
; AVX1-NEXT: shlq $63, %rcx
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm2
-; AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm3 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm3, %xmm0
; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4
; AVX1-NEXT: ja .LBB117_4
@@ -4692,7 +4692,7 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() #0 {
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm3
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
-; AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm3 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm3, %xmm0
; AVX1-NEXT: ja .LBB117_6
; AVX1-NEXT: # %bb.5: # %entry
@@ -4730,8 +4730,8 @@ entry:
define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i64_v4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm0, %xmm2
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: xorps %xmm3, %xmm3
@@ -4745,7 +4745,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: shlq $63, %rax
; CHECK-NEXT: xorq %rcx, %rax
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm0, %xmm2
; CHECK-NEXT: xorps %xmm4, %xmm4
; CHECK-NEXT: ja .LBB118_4
@@ -4760,7 +4760,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
; CHECK-NEXT: shlq $63, %rcx
; CHECK-NEXT: xorq %rax, %rcx
; CHECK-NEXT: movq %rcx, %xmm0
-; CHECK-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm4 = [4.5E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm4, %xmm2
; CHECK-NEXT: xorps %xmm5, %xmm5
; CHECK-NEXT: ja .LBB118_6
@@ -4775,7 +4775,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
; CHECK-NEXT: shlq $63, %rcx
; CHECK-NEXT: xorq %rax, %rcx
; CHECK-NEXT: movq %rcx, %xmm3
-; CHECK-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm4 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: comiss %xmm4, %xmm2
; CHECK-NEXT: ja .LBB118_8
; CHECK-NEXT: # %bb.7: # %entry
@@ -4793,8 +4793,8 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v4i64_v4f32:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm2 = [4.5E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX1-NEXT: vmovss {{.*#+}} xmm0 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm2, %xmm0
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
@@ -4808,7 +4808,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
; AVX1-NEXT: movzbl %al, %eax
; AVX1-NEXT: shlq $63, %rax
; AVX1-NEXT: xorq %rcx, %rax
-; AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm3 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm3, %xmm0
; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4
; AVX1-NEXT: ja .LBB118_4
@@ -4823,7 +4823,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
; AVX1-NEXT: shlq $63, %rcx
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm3
-; AVX1-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm4 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm4, %xmm0
; AVX1-NEXT: vxorps %xmm5, %xmm5, %xmm5
; AVX1-NEXT: ja .LBB118_6
@@ -4838,7 +4838,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
; AVX1-NEXT: shlq $63, %rcx
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm3
-; AVX1-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; AVX1-NEXT: vmovss {{.*#+}} xmm4 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX1-NEXT: vcomiss %xmm4, %xmm0
; AVX1-NEXT: ja .LBB118_8
; AVX1-NEXT: # %bb.7: # %entry
@@ -5032,8 +5032,8 @@ entry:
define <1 x i64> @constrained_vector_fptoui_v1i64_v1f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v1i64_v1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; CHECK-NEXT: comisd %xmm0, %xmm2
; CHECK-NEXT: xorpd %xmm1, %xmm1
; CHECK-NEXT: ja .LBB123_2
@@ -5050,8 +5050,8 @@ define <1 x i64> @constrained_vector_fptoui_v1i64_v1f64() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v1i64_v1f64:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX1-NEXT: vcomisd %xmm0, %xmm1
; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: ja .LBB123_2
@@ -5080,8 +5080,8 @@ entry:
define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v2i64_v2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.2200000000000003E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; CHECK-NEXT: comisd %xmm2, %xmm1
; CHECK-NEXT: xorpd %xmm0, %xmm0
; CHECK-NEXT: xorpd %xmm3, %xmm3
@@ -5096,7 +5096,7 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() #0 {
; CHECK-NEXT: shlq $63, %rcx
; CHECK-NEXT: xorq %rax, %rcx
; CHECK-NEXT: movq %rcx, %xmm2
-; CHECK-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm3 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: comisd %xmm3, %xmm1
; CHECK-NEXT: ja .LBB124_4
; CHECK-NEXT: # %bb.3: # %entry
@@ -5114,8 +5114,8 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v2i64_v2f64:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm2 = [4.2200000000000003E+1,0.0E+0]
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = [9.2233720368547758E+18,0.0E+0]
; AVX1-NEXT: vcomisd %xmm2, %xmm0
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vxorpd %xmm3, %xmm3, %xmm3
@@ -5130,7 +5130,7 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() #0 {
; AVX1-NEXT: shlq $63, %rcx
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm2
-; AVX1-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm3 = [4.2100000000000001E+1,0.0E+0]
; AVX1-NEXT: vcomisd %xmm3, %xmm0
; AVX1-NEXT: ja .LBB124_4
; AVX1-NEXT: # %bb.3: # %entry
@@ -5172,8 +5172,8 @@ entry:
define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v3i64_v3f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.2100000000000001E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; CHECK-NEXT: comisd %xmm2, %xmm1
; CHECK-NEXT: xorpd %xmm0, %xmm0
; CHECK-NEXT: xorpd %xmm3, %xmm3
@@ -5187,7 +5187,7 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() #0 {
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: shlq $63, %rax
; CHECK-NEXT: xorq %rcx, %rax
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: comisd %xmm2, %xmm1
; CHECK-NEXT: xorpd %xmm3, %xmm3
; CHECK-NEXT: ja .LBB125_4
@@ -5200,7 +5200,7 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() #0 {
; CHECK-NEXT: movzbl %dl, %edx
; CHECK-NEXT: shlq $63, %rdx
; CHECK-NEXT: xorq %rcx, %rdx
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: comisd %xmm2, %xmm1
; CHECK-NEXT: ja .LBB125_6
; CHECK-NEXT: # %bb.5: # %entry
@@ -5216,8 +5216,8 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v3i64_v3f64:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm2 = [4.2200000000000003E+1,0.0E+0]
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = [9.2233720368547758E+18,0.0E+0]
; AVX1-NEXT: vcomisd %xmm2, %xmm0
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vxorpd %xmm3, %xmm3, %xmm3
@@ -5232,7 +5232,7 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() #0 {
; AVX1-NEXT: shlq $63, %rcx
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm2
-; AVX1-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm3 = [4.2100000000000001E+1,0.0E+0]
; AVX1-NEXT: vcomisd %xmm3, %xmm0
; AVX1-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: ja .LBB125_4
@@ -5247,7 +5247,7 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() #0 {
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm3
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
-; AVX1-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm3 = [4.2299999999999997E+1,0.0E+0]
; AVX1-NEXT: vcomisd %xmm3, %xmm0
; AVX1-NEXT: ja .LBB125_6
; AVX1-NEXT: # %bb.5: # %entry
@@ -5285,8 +5285,8 @@ entry:
define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i64_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; CHECK-NEXT: comisd %xmm0, %xmm2
; CHECK-NEXT: xorpd %xmm1, %xmm1
; CHECK-NEXT: xorpd %xmm3, %xmm3
@@ -5300,7 +5300,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: shlq $63, %rax
; CHECK-NEXT: xorq %rcx, %rax
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: comisd %xmm0, %xmm2
; CHECK-NEXT: xorpd %xmm4, %xmm4
; CHECK-NEXT: ja .LBB126_4
@@ -5315,7 +5315,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
; CHECK-NEXT: shlq $63, %rcx
; CHECK-NEXT: xorq %rax, %rcx
; CHECK-NEXT: movq %rcx, %xmm0
-; CHECK-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm4 = [4.2399999999999999E+1,0.0E+0]
; CHECK-NEXT: comisd %xmm4, %xmm2
; CHECK-NEXT: xorpd %xmm5, %xmm5
; CHECK-NEXT: ja .LBB126_6
@@ -5330,7 +5330,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
; CHECK-NEXT: shlq $63, %rcx
; CHECK-NEXT: xorq %rax, %rcx
; CHECK-NEXT: movq %rcx, %xmm3
-; CHECK-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm4 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: comisd %xmm4, %xmm2
; CHECK-NEXT: ja .LBB126_8
; CHECK-NEXT: # %bb.7: # %entry
@@ -5348,8 +5348,8 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v4i64_v4f64:
; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm2 = [4.2399999999999999E+1,0.0E+0]
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = [9.2233720368547758E+18,0.0E+0]
; AVX1-NEXT: vcomisd %xmm2, %xmm0
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vxorpd %xmm3, %xmm3, %xmm3
@@ -5363,7 +5363,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
; AVX1-NEXT: movzbl %al, %eax
; AVX1-NEXT: shlq $63, %rax
; AVX1-NEXT: xorq %rcx, %rax
-; AVX1-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm3 = [4.2299999999999997E+1,0.0E+0]
; AVX1-NEXT: vcomisd %xmm3, %xmm0
; AVX1-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: ja .LBB126_4
@@ -5378,7 +5378,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
; AVX1-NEXT: shlq $63, %rcx
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm3
-; AVX1-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm4 = [4.2200000000000003E+1,0.0E+0]
; AVX1-NEXT: vcomisd %xmm4, %xmm0
; AVX1-NEXT: vxorpd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: ja .LBB126_6
@@ -5393,7 +5393,7 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
; AVX1-NEXT: shlq $63, %rcx
; AVX1-NEXT: xorq %rax, %rcx
; AVX1-NEXT: vmovq %rcx, %xmm3
-; AVX1-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm4 = [4.2100000000000001E+1,0.0E+0]
; AVX1-NEXT: vcomisd %xmm4, %xmm0
; AVX1-NEXT: ja .LBB126_8
; AVX1-NEXT: # %bb.7: # %entry
@@ -5443,13 +5443,13 @@ entry:
define <1 x float> @constrained_vector_fptrunc_v1f64() #0 {
; CHECK-LABEL: constrained_vector_fptrunc_v1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptrunc_v1f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -5481,24 +5481,24 @@ entry:
define <3 x float> @constrained_vector_fptrunc_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptrunc_v3f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm1
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = [4.2299999999999997E+1,0.0E+0]
; CHECK-NEXT: cvtsd2ss %xmm1, %xmm1
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptrunc_v3f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2100000000000001E+1,0.0E+0]
; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2299999999999997E+1,0.0E+0]
; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; AVX-NEXT: retq
@@ -5535,13 +5535,13 @@ entry:
define <1 x double> @constrained_vector_fpext_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fpext_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -5571,11 +5571,11 @@ entry:
define <3 x double> @constrained_vector_fpext_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v3f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: cvtss2sd %xmm0, %xmm1
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
-; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: cvtss2sd %xmm2, %xmm2
; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
@@ -5584,12 +5584,12 @@ define <3 x double> @constrained_vector_fpext_v3f32() #0 {
;
; AVX-LABEL: constrained_vector_fpext_v3f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
@@ -5625,7 +5625,7 @@ define <1 x float> @constrained_vector_ceil_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq ceilf at PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -5633,7 +5633,7 @@ define <1 x float> @constrained_vector_ceil_v1f32() #0 {
;
; AVX-LABEL: constrained_vector_ceil_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -5648,10 +5648,10 @@ define <2 x double> @constrained_vector_ceil_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; CHECK-NEXT: callq ceil at PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; CHECK-NEXT: callq ceil at PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -5675,13 +5675,13 @@ define <3 x float> @constrained_vector_ceil_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq ceilf at PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq ceilf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq ceilf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -5694,11 +5694,11 @@ define <3 x float> @constrained_vector_ceil_v3f32() #0 {
;
; AVX-LABEL: constrained_vector_ceil_v3f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $10, %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $10, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
@@ -5715,13 +5715,13 @@ define <3 x double> @constrained_vector_ceil_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; CHECK-NEXT: callq ceil@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; CHECK-NEXT: callq ceil@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.5E+0,0.0E+0]
; CHECK-NEXT: callq ceil@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -5736,7 +5736,7 @@ define <3 x double> @constrained_vector_ceil_v3f64() #0 {
;
; AVX-LABEL: constrained_vector_ceil_v3f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.5E+0,0.0E+0]
; AVX-NEXT: vroundsd $10, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $10, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -5753,7 +5753,7 @@ define <1 x float> @constrained_vector_floor_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq floorf@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -5761,7 +5761,7 @@ define <1 x float> @constrained_vector_floor_v1f32() #0 {
;
; AVX-LABEL: constrained_vector_floor_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -5777,10 +5777,10 @@ define <2 x double> @constrained_vector_floor_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; CHECK-NEXT: callq floor@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; CHECK-NEXT: callq floor@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -5804,13 +5804,13 @@ define <3 x float> @constrained_vector_floor_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq floorf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq floorf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq floorf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -5823,11 +5823,11 @@ define <3 x float> @constrained_vector_floor_v3f32() #0 {
;
; AVX-LABEL: constrained_vector_floor_v3f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $9, %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $9, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
@@ -5844,13 +5844,13 @@ define <3 x double> @constrained_vector_floor_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; CHECK-NEXT: callq floor@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; CHECK-NEXT: callq floor@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.5E+0,0.0E+0]
; CHECK-NEXT: callq floor@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -5865,7 +5865,7 @@ define <3 x double> @constrained_vector_floor_v3f64() #0 {
;
; AVX-LABEL: constrained_vector_floor_v3f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.5E+0,0.0E+0]
; AVX-NEXT: vroundsd $9, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $9, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -5882,7 +5882,7 @@ define <1 x float> @constrained_vector_round_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq roundf@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -5892,7 +5892,7 @@ define <1 x float> @constrained_vector_round_v1f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq roundf@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
@@ -5909,10 +5909,10 @@ define <2 x double> @constrained_vector_round_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; CHECK-NEXT: callq round@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; CHECK-NEXT: callq round@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -5924,10 +5924,10 @@ define <2 x double> @constrained_vector_round_v2f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; AVX-NEXT: callq round@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; AVX-NEXT: callq round@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -5946,13 +5946,13 @@ define <3 x float> @constrained_vector_round_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq roundf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq roundf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq roundf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -5967,13 +5967,13 @@ define <3 x float> @constrained_vector_round_v3f32() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq roundf@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq roundf@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: callq roundf@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
@@ -5995,13 +5995,13 @@ define <3 x double> @constrained_vector_round_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; CHECK-NEXT: callq round@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; CHECK-NEXT: callq round@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.5E+0,0.0E+0]
; CHECK-NEXT: callq round@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -6018,15 +6018,15 @@ define <3 x double> @constrained_vector_round_v3f64() #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; AVX-NEXT: callq round@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; AVX-NEXT: callq round@PLT
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.5E+0,0.0E+0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq round@PLT
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
@@ -6046,7 +6046,7 @@ define <1 x float> @constrained_vector_trunc_v1f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq truncf@PLT
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -6054,7 +6054,7 @@ define <1 x float> @constrained_vector_trunc_v1f32() #0 {
;
; AVX-LABEL: constrained_vector_trunc_v1f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
@@ -6069,10 +6069,10 @@ define <2 x double> @constrained_vector_trunc_v2f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; CHECK-NEXT: callq trunc@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; CHECK-NEXT: callq trunc@PLT
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
@@ -6096,13 +6096,13 @@ define <3 x float> @constrained_vector_trunc_v3f32() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq truncf@PLT
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq truncf@PLT
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK-NEXT: callq truncf@PLT
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -6115,11 +6115,11 @@ define <3 x float> @constrained_vector_trunc_v3f32() #0 {
;
; AVX-LABEL: constrained_vector_trunc_v3f32:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = [1.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $11, %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [2.5E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vroundss $11, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
@@ -6136,13 +6136,13 @@ define <3 x double> @constrained_vector_trunc_v3f64() #0 {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.8999999999999999E+0,0.0E+0]
; CHECK-NEXT: callq trunc@PLT
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.1000000000000001E+0,0.0E+0]
; CHECK-NEXT: callq trunc@PLT
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
-; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [1.5E+0,0.0E+0]
; CHECK-NEXT: callq trunc@PLT
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
@@ -6157,7 +6157,7 @@ define <3 x double> @constrained_vector_trunc_v3f64() #0 {
;
; AVX-LABEL: constrained_vector_trunc_v3f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.5E+0,0.0E+0]
; AVX-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $11, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
diff --git a/llvm/test/CodeGen/X86/vector-reduce-fadd.ll b/llvm/test/CodeGen/X86/vector-reduce-fadd.ll
index 9dd2a045087dad9..606beeaff750e11 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fadd.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fadd.ll
@@ -1901,17 +1901,17 @@ define double @test_v16f64_undef(<16 x double> %a0) {
define float @PR64627() {
; SSE-LABEL: PR64627:
; SSE: # %bb.0:
-; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movss {{.*#+}} xmm0 = [5.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: retq
;
; AVX-LABEL: PR64627:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [5.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: retq
;
; AVX512-LABEL: PR64627:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vmovss {{.*#+}} xmm0 = [5.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX512-NEXT: retq
%1 = bitcast i5 0 to <5 x i1>
%2 = select <5 x i1> %1, <5 x float> zeroinitializer, <5 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index be4253b6d5d1096..d0334dfb66e8af4 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -3482,7 +3482,7 @@ define void @SpinningCube() {
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movl $1065353216, (%rax) # imm = 0x3F800000
; SSE2-NEXT: movaps {{.*#+}} xmm0 = <u,u,u,1.0E+0>
-; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm1 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSE2-NEXT: movapd {{.*#+}} xmm2 = <u,u,-2.0E+0,u>
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSE2-NEXT: xorps %xmm3, %xmm3
@@ -3501,7 +3501,7 @@ define void @SpinningCube() {
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movl $1065353216, (%rax) # imm = 0x3F800000
; SSSE3-NEXT: movaps {{.*#+}} xmm0 = <u,u,u,1.0E+0>
-; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSSE3-NEXT: movapd {{.*#+}} xmm2 = <u,u,-2.0E+0,u>
; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSSE3-NEXT: xorps %xmm3, %xmm3
@@ -3521,7 +3521,7 @@ define void @SpinningCube() {
; SSE41-NEXT: movl $1065353216, (%rax) # imm = 0x3F800000
; SSE41-NEXT: movaps {{.*#+}} xmm0 = <u,u,u,1.0E+0>
; SSE41-NEXT: movaps {{.*#+}} xmm1 = <0.0E+0,0.0E+0,-2.0E+0,u>
-; SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE41-NEXT: movss {{.*#+}} xmm2 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; SSE41-NEXT: movaps %xmm1, %xmm3
; SSE41-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[0]
; SSE41-NEXT: movaps %xmm0, %xmm4
@@ -3540,7 +3540,7 @@ define void @SpinningCube() {
; AVX-NEXT: movl $1065353216, (%rax) # imm = 0x3F800000
; AVX-NEXT: vbroadcastss {{.*#+}} xmm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = <0.0E+0,0.0E+0,-2.0E+0,u>
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = [NaN,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm1[0,1,2],xmm2[0]
; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[2,3]
; AVX-NEXT: vaddps %xmm2, %xmm3, %xmm2
diff --git a/llvm/test/CodeGen/X86/vselect-zero.ll b/llvm/test/CodeGen/X86/vselect-zero.ll
index 1b576b28ce8315e..3a53a7b852233c0 100644
--- a/llvm/test/CodeGen/X86/vselect-zero.ll
+++ b/llvm/test/CodeGen/X86/vselect-zero.ll
@@ -117,7 +117,7 @@ define double @fsel_nonzero_false_val(double %x, double %y, double %z) {
; SSE: # %bb.0:
; SSE-NEXT: cmpeqsd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; SSE-NEXT: andnpd %xmm1, %xmm0
; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
@@ -133,7 +133,7 @@ define double @fsel_nonzero_false_val(double %x, double %y, double %z) {
; AVX512-LABEL: fsel_nonzero_false_val:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: retq
%cond = fcmp oeq double %x, %y
@@ -145,7 +145,7 @@ define double @fsel_nonzero_true_val(double %x, double %y, double %z) {
; SSE-LABEL: fsel_nonzero_true_val:
; SSE: # %bb.0:
; SSE-NEXT: cmpeqsd %xmm1, %xmm0
-; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; SSE-NEXT: andpd %xmm0, %xmm1
; SSE-NEXT: andnpd %xmm2, %xmm0
; SSE-NEXT: orpd %xmm1, %xmm0
@@ -188,7 +188,7 @@ define double @fsel_nonzero_constants(double %x, double %y) {
; AVX512-LABEL: fsel_nonzero_constants:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX512-NEXT: vmovsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1}
; AVX512-NEXT: retq
%cond = fcmp oeq double %x, %y
@@ -200,7 +200,7 @@ define <2 x double> @vsel_nonzero_constants(<2 x double> %x, <2 x double> %y) {
; SSE2-LABEL: vsel_nonzero_constants:
; SSE2: # %bb.0:
; SSE2-NEXT: cmplepd %xmm0, %xmm1
-; SSE2-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = [4.2E+1,0.0E+0]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: andnpd %xmm2, %xmm0
; SSE2-NEXT: andpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -210,7 +210,7 @@ define <2 x double> @vsel_nonzero_constants(<2 x double> %x, <2 x double> %y) {
; SSE42-LABEL: vsel_nonzero_constants:
; SSE42: # %bb.0:
; SSE42-NEXT: cmplepd %xmm0, %xmm1
-; SSE42-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE42-NEXT: movsd {{.*#+}} xmm2 = [4.2E+1,0.0E+0]
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE42-NEXT: movapd %xmm2, %xmm0
@@ -219,14 +219,14 @@ define <2 x double> @vsel_nonzero_constants(<2 x double> %x, <2 x double> %y) {
; AVX-LABEL: vsel_nonzero_constants:
; AVX: # %bb.0:
; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; AVX-NEXT: vblendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: vsel_nonzero_constants:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmplepd %xmm0, %xmm1, %k1
-; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX512-NEXT: vmovapd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1}
; AVX512-NEXT: retq
%cond = fcmp oge <2 x double> %x, %y
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
index 8c7535f61691389..929671d674e5e3b 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
@@ -1334,7 +1334,7 @@ define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst
; no @load_32byte_chunk_of_32byte_alloca
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; ALL: {{.*}}
-; X86-NO-SHLD: {{.*}}
-; X86-SHLD: {{.*}}
; X64-NO-SHLD: {{.*}}
; X64-SHLD: {{.*}}
+; X86-NO-SHLD: {{.*}}
+; X86-SHLD: {{.*}}
diff --git a/llvm/test/CodeGen/X86/x86-64-varargs.ll b/llvm/test/CodeGen/X86/x86-64-varargs.ll
index 884baf174d0aa9a..f947327d4c56233 100644
--- a/llvm/test/CodeGen/X86/x86-64-varargs.ll
+++ b/llvm/test/CodeGen/X86/x86-64-varargs.ll
@@ -579,9 +579,9 @@ define i32 @main() nounwind {
; CHECK-X32: # %bb.0: # %entry
; CHECK-X32-NEXT: pushq %rax
; CHECK-X32-NEXT: movl $12, (%esp)
-; CHECK-X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-X32-NEXT: movsd {{.*#+}} xmm0 = [4.5E+15,0.0E+0]
; CHECK-X32-NEXT: movabsq $123456677890, %r8 # imm = 0x1CBE976802
-; CHECK-X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-X32-NEXT: movsd {{.*#+}} xmm1 = [1.2450000047683716E+0,0.0E+0]
; CHECK-X32-NEXT: movl $1, %edi
; CHECK-X32-NEXT: movl $2, %esi
; CHECK-X32-NEXT: movl $3, %edx
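
A note on reading the new comments: the constant is printed in lane 0 and
the remaining lanes as explicit zeros, because movss/movsd (like movd/movq)
zero the upper elements when loading from memory. The sketch below only
mimics the shape of that comment string; it is not the actual
addConstantComments code, the helper name is made up, and the exact digit
strings in LLVM come from APFloat (hence values like
1.8999999999999999E+0 for 1.9):

// Illustrative sketch, not LLVM code: builds a comment shaped like
//   xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
#include <cstdio>
#include <string>

static std::string scalarLoadComment(const char *Reg, double Val,
                                     unsigned NumElts) {
  char Buf[64];
  std::snprintf(Buf, sizeof(Buf), "%s = [%G", Reg, Val); // lane 0: the constant
  std::string S = Buf;
  for (unsigned I = 1; I != NumElts; ++I)
    S += ",0.0E+0"; // upper lanes are zeroed by the scalar load
  return S + "]";
}

int main() {
  std::printf("%s\n", scalarLoadComment("xmm0", 42.0, 4).c_str()); // movss: 4 x f32
  std::printf("%s\n", scalarLoadComment("xmm0", 1.9, 2).c_str());  // movsd: 2 x f64
}

This is also why a test like PR64627 above now shows at a glance that the
whole reduction folded to the constant 5.0 rather than just "a load".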