[llvm] r364101 - [X86] Add test cases for incorrect shrinking of volatile vector loads from 128-bits to 32 or 64 bits. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 21 13:16:26 PDT 2019
Author: ctopper
Date: Fri Jun 21 13:16:26 2019
New Revision: 364101
URL: http://llvm.org/viewvc/llvm-project?rev=364101&view=rev
Log:
[X86] Add test cases for incorrect shrinking of volatile vector loads from 128-bits to 32 or 64 bits. NFC
This is caused by isel patterns that look for the vzmovl+load combination and
treat it the same as a vzload, so the full 128-bit load gets folded into a
32- or 64-bit scalar load even when it is volatile.
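
For reference, the non-volatile form of the pattern, where the fold is legal,
looks like this (a minimal IR sketch mirroring the existing tests in this
file):

  define <4 x i32> @load_zmov_4i32_to_0zzz(<4 x i32> *%ptr) {
  entry:
    %X = load <4 x i32>, <4 x i32>* %ptr
    %Y = shufflevector <4 x i32> %X, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
    ret <4 x i32> %Y
  }

The shufflevector keeps lane 0 of %X and takes every other lane from
zeroinitializer, which is the vzmovl idiom; paired with a non-volatile load it
can be selected to a single (v)movss that loads 32 bits and zero-fills the
upper lanes.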
Modified:
llvm/trunk/test/CodeGen/X86/vector-zmov.ll
Modified: llvm/trunk/test/CodeGen/X86/vector-zmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-zmov.ll?rev=364101&r1=364100&r2=364101&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-zmov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-zmov.ll Fri Jun 21 13:16:26 2019
@@ -36,3 +36,37 @@ entry:
%Y = shufflevector <2 x i64> %X, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
ret <2 x i64>%Y
}
+
+; FIXME: We shouldn't shrink the load to movss here since it is volatile.
+define <4 x i32> @load_zmov_4i32_to_0zzz_volatile(<4 x i32> *%ptr) {
+; SSE-LABEL: load_zmov_4i32_to_0zzz_volatile:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_zmov_4i32_to_0zzz_volatile:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: retq
+entry:
+ %X = load volatile <4 x i32>, <4 x i32>* %ptr
+ %Y = shufflevector <4 x i32> %X, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
+ ret <4 x i32>%Y
+}
+
+; FIXME: We shouldn't shrink the load to movsd here since it is volatile.
+define <2 x i64> @load_zmov_2i64_to_0z_volatile(<2 x i64> *%ptr) {
+; SSE-LABEL: load_zmov_2i64_to_0z_volatile:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_zmov_2i64_to_0z_volatile:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: retq
+entry:
+ %X = load volatile <2 x i64>, <2 x i64>* %ptr
+ %Y = shufflevector <2 x i64> %X, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64>%Y
+}
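
For the record, a correct lowering for the volatile cases would keep the full
128-bit load and do the zeroing in registers instead. A hypothetical SSE
sequence illustrating the intended behavior (not output from this commit):

  movaps (%rdi), %xmm1    # full 16-byte volatile load, not shrunk
  xorps %xmm0, %xmm0      # zeroed destination
  movss %xmm1, %xmm0      # xmm0 = xmm1[0],xmm0[1,2,3]
  retq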