r249142 - Make test more resilient to FastIsel changes. NFC.

Andrea Di Biagio via cfe-commits cfe-commits at lists.llvm.org
Fri Oct 2 08:10:22 PDT 2015


Author: adibiagio
Date: Fri Oct  2 10:10:22 2015
New Revision: 249142

URL: http://llvm.org/viewvc/llvm-project?rev=249142&view=rev
Log:
Make test more resilient to FastIsel changes. NFC.

Currently, FastISel does not know how to select vector bitcasts; during
instruction selection, it falls back to SelectionDAG every time it
encounters one. As a consequence, all of the 'packed vector shift by
immediate count' test cases in avx2-builtins.c are optimized by the
DAGCombiner. In particular, the DAGCombiner always folds trivial stack
loads of constant shift counts into the operands of the packed shift
builtins.
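
For illustration only (this mirrors one of the existing tests touched
below and is not part of the change itself), the folded form is exactly
what the current strict CHECK-ASM patterns pin down: the shift count
appears as a literal immediate.

  __m256i test_mm256_slli_epi16(__m256i a) {
    // CHECK: @llvm.x86.avx2.pslli.w
    // DAGCombiner folds the constant count into an immediate operand:
    //   vpsllw $3, %ymm0, %ymm0   (register numbers are illustrative)
    // CHECK-ASM: vpsllw $3, %ymm{{.*}}, %ymm{{.*}}
    return _mm256_slli_epi16(a, 3);
  }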

This behavior would start changing as soon as I reapply revision 249121,
which teaches x86 fast-isel how to select bitcasts between vector types
of the same size.

As a consequence of that change, fast-isel would fall back to
SelectionDAG less often. More importantly, the DAGCombiner would no
longer be able to simplify the code by folding the stack reload of a
constant shift count.
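
As a sketch of the relaxed patterns used in the diff below (register
numbers are illustrative), each CHECK-ASM line now accepts either the
literal immediate or an xmm register holding the reloaded constant:

  __m256i test_mm256_slli_epi16(__m256i a) {
    // CHECK: @llvm.x86.avx2.pslli.w
    // Accept either of the two possible forms:
    //   vpsllw $3, %ymm0, %ymm0      (count folded into an immediate)
    //   vpsllw %xmm1, %ymm0, %ymm0   (count reloaded into an xmm register)
    // CHECK-ASM: vpsllw {{\$3|%xmm[0-9]+}}, %ymm{{.*}}, %ymm{{.*}}
    return _mm256_slli_epi16(a, 3);
  }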

No functional change.

Modified:
    cfe/trunk/test/CodeGen/avx2-builtins.c

Modified: cfe/trunk/test/CodeGen/avx2-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx2-builtins.c?rev=249142&r1=249141&r2=249142&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx2-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx2-builtins.c Fri Oct  2 10:10:22 2015
@@ -574,7 +574,7 @@ __m256i test_mm256_bslli_epi128(__m256i
 
 __m256i test_mm256_slli_epi16(__m256i a) {
   // CHECK: @llvm.x86.avx2.pslli.w
-  // CHECK-ASM: vpsllw $3, %ymm{{.*}}, %ymm{{.*}}
+  // CHECK-ASM: vpsllw {{\$3|%xmm[0-9]+}}, %ymm{{.*}}, %ymm{{.*}}
   return _mm256_slli_epi16(a, 3);
 }
 
@@ -586,7 +586,7 @@ __m256i test_mm256_sll_epi16(__m256i a,
 
 __m256i test_mm256_slli_epi32(__m256i a) {
   // CHECK: @llvm.x86.avx2.pslli.d
-  // CHECK-ASM: vpslld $3, %ymm{{.*}}, %ymm{{.*}}
+  // CHECK-ASM: vpslld {{\$3|%xmm[0-9]+}}, %ymm{{.*}}, %ymm{{.*}}
   return _mm256_slli_epi32(a, 3);
 }
 
@@ -610,7 +610,7 @@ __m256i test_mm256_sll_epi64(__m256i a,
 
 __m256i test_mm256_srai_epi16(__m256i a) {
   // CHECK: @llvm.x86.avx2.psrai.w
-  // CHECK-ASM: vpsraw $3, %ymm{{.*}}, %ymm{{.*}}
+  // CHECK-ASM: vpsraw {{\$3|%xmm[0-9]+}}, %ymm{{.*}}, %ymm{{.*}}
   return _mm256_srai_epi16(a, 3);
 }
 
@@ -622,7 +622,7 @@ __m256i test_mm256_sra_epi16(__m256i a,
 
 __m256i test_mm256_srai_epi32(__m256i a) {
   // CHECK: @llvm.x86.avx2.psrai.d
-  // CHECK-ASM: vpsrad $3, %ymm{{.*}}, %ymm{{.*}}
+  // CHECK-ASM: vpsrad {{\$3|%xmm[0-9]+}}, %ymm{{.*}}, %ymm{{.*}}
   return _mm256_srai_epi32(a, 3);
 }
 
@@ -646,7 +646,7 @@ __m256i test_mm256_bsrli_epi128(__m256i
 
 __m256i test_mm256_srli_epi16(__m256i a) {
   // CHECK: @llvm.x86.avx2.psrli.w
-  // CHECK-ASM: vpsrlw $3, %ymm{{.*}}, %ymm{{.*}}
+  // CHECK-ASM: vpsrlw {{\$3|%xmm[0-9]+}}, %ymm{{.*}}, %ymm{{.*}}
   return _mm256_srli_epi16(a, 3);
 }
 
@@ -658,7 +658,7 @@ __m256i test_mm256_srl_epi16(__m256i a,
 
 __m256i test_mm256_srli_epi32(__m256i a) {
   // CHECK: @llvm.x86.avx2.psrli.d
-  // CHECK-ASM: vpsrld $3, %ymm{{.*}}, %ymm{{.*}}
+  // CHECK-ASM: vpsrld {{\$3|%xmm[0-9]+}}, %ymm{{.*}}, %ymm{{.*}}
   return _mm256_srli_epi32(a, 3);
 }
 



