[llvm] r365224 - [X86] Remove unnecessary isel pattern for MOVLPSmr.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 5 10:31:26 PDT 2019
Author: ctopper
Date: Fri Jul 5 10:31:25 2019
New Revision: 365224
URL: http://llvm.org/viewvc/llvm-project?rev=365224&view=rev
Log:
[X86] Remove unnecessary isel pattern for MOVLPSmr.
This was identical to a pattern for MOVPQI2QImr with a bitcast
as its input. But we should be able to turn MOVPQI2QImr into
MOVLPSmr in the execution domain fixup pass, so we shouldn't
need this pattern.
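
For context, the removed MOVLPSmr pattern matched the same store as the
MOVPQI2QImr one once the bc_v2i64 bitcast on the source is peeled off. A
minimal sketch of that equivalent pattern, assuming the usual
extractelt-of-v2i64 store form (not the exact upstream definition):

  def : Pat<(store (i64 (extractelt (v2i64 VR128:$src2), (iPTR 0))),
                   addr:$src1),
            (MOVPQI2QImr addr:$src1, VR128:$src2)>;

With only this form left, a store selected as MOVPQI2QImr can still be
rewritten to MOVLPSmr later by the execution domain fixup pass when the
packed-single domain is preferred.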
Modified:
llvm/trunk/lib/Target/X86/X86InstrSSE.td
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=365224&r1=365223&r2=365224&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Fri Jul 5 10:31:25 2019
@@ -657,11 +657,6 @@ def MOVLPDmr : PDI<0x13, MRMDestMem, (ou
} // SchedRW
let Predicates = [UseSSE1] in {
- // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
- def : Pat<(store (i64 (extractelt (bc_v2i64 (v4f32 VR128:$src2)),
- (iPTR 0))), addr:$src1),
- (MOVLPSmr addr:$src1, VR128:$src2)>;
-
// This pattern helps select MOVLPS on SSE1 only targets. With SSE2 we'll
// end up with a movsd or blend instead of shufp.
// No need for aligned load, we're only loading 64-bits.