[llvm-branch-commits] [llvm] 420d046 - clang-format, address warnings

Juneyoung Lee via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Wed Dec 30 06:11:05 PST 2020


Author: Juneyoung Lee
Date: 2020-12-30T23:05:07+09:00
New Revision: 420d046d6bdd8d950dad389a97e31f841052efb2

URL: https://github.com/llvm/llvm-project/commit/420d046d6bdd8d950dad389a97e31f841052efb2
DIFF: https://github.com/llvm/llvm-project/commit/420d046d6bdd8d950dad389a97e31f841052efb2.diff

LOG: clang-format, address warnings

Added: 
    

Modified: 
    clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
    clang/test/CodeGen/X86/avx512f-builtins.c
    clang/test/CodeGen/X86/avx512vl-builtins-constrained.c
    clang/test/CodeGen/arm64-abi-vector.c
    llvm/lib/IR/AutoUpgrade.cpp
    llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
    llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
    llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c b/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
index bb43d348205f..d32eb4b00040 100644
--- a/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
+++ b/clang/test/CodeGen/X86/avx512-reduceMinMaxIntrin.c
@@ -15,16 +15,16 @@ unsigned long long test_mm512_reduce_max_epu64(__m512i __W){
 }
 
 double test_mm512_reduce_max_pd(__m512d __W){
-// CHECK-LABEL: @test_mm512_reduce_max_pd(
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
-// CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 0, i32 1>
-// CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
-// CHECK:    call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
-// CHECK:    shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 0>
-// CHECK:    call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
-// CHECK:    extractelement <2 x double> %{{.*}}, i32 0
+  // CHECK-LABEL: @test_mm512_reduce_max_pd(
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 0, i32 1>
+  // CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
+  // CHECK:    call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK:    shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 0>
+  // CHECK:    call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK:    extractelement <2 x double> %{{.*}}, i32 0
   return _mm512_reduce_max_pd(__W); 
 }
 
@@ -41,16 +41,16 @@ unsigned long long test_mm512_reduce_min_epu64(__m512i __W){
 }
 
 double test_mm512_reduce_min_pd(__m512d __W){
-// CHECK-LABEL: @test_mm512_reduce_min_pd(
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
-// CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 0, i32 1>
-// CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
-// CHECK:    call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
-// CHECK:    shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 0>
-// CHECK:    call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
-// CHECK:    extractelement <2 x double> %{{.*}}, i32 0
+  // CHECK-LABEL: @test_mm512_reduce_min_pd(
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 0, i32 1>
+  // CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
+  // CHECK:    call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK:    shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 0>
+  // CHECK:    call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK:    extractelement <2 x double> %{{.*}}, i32 0
   return _mm512_reduce_min_pd(__W); 
 }
 
@@ -71,18 +71,18 @@ unsigned long test_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __W){
 }
 
 double test_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_max_pd(
-// CHECK:    bitcast i8 %{{.*}} to <8 x i1>
-// CHECK:    select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
-// CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 0, i32 1>
-// CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
-// CHECK:    call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
-// CHECK:    shufflevector <2 x double> %{{.*}}, <2 x double>  %{{.*}}, <2 x i32> <i32 1, i32 0>
-// CHECK:    call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
-// CHECK:    extractelement <2 x double> %{{.*}}, i32 0
+  // CHECK-LABEL: @test_mm512_mask_reduce_max_pd(
+  // CHECK:    bitcast i8 %{{.*}} to <8 x i1>
+  // CHECK:    select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 0, i32 1>
+  // CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
+  // CHECK:    call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK:    shufflevector <2 x double> %{{.*}}, <2 x double>  %{{.*}}, <2 x i32> <i32 1, i32 0>
+  // CHECK:    call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK:    extractelement <2 x double> %{{.*}}, i32 0
   return _mm512_mask_reduce_max_pd(__M, __W); 
 }
 
@@ -103,18 +103,18 @@ unsigned long long test_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __W){
 }
 
 double test_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __W){
-// CHECK-LABEL: @test_mm512_mask_reduce_min_pd(
-// CHECK:    bitcast i8 %{{.*}} to <8 x i1>
-// CHECK:    select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
-// CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 0, i32 1>
-// CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
-// CHECK:    call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
-// CHECK:    shufflevector <2 x double> %{{.*}}, <2 x double>  %{{.*}}, <2 x i32> <i32 1, i32 0>
-// CHECK:    call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
-// CHECK:    extractelement <2 x double> %{{.*}}, i32 0
+  // CHECK-LABEL: @test_mm512_mask_reduce_min_pd(
+  // CHECK:    bitcast i8 %{{.*}} to <8 x i1>
+  // CHECK:    select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 0, i32 1>
+  // CHECK:    shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
+  // CHECK:    call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK:    shufflevector <2 x double> %{{.*}}, <2 x double>  %{{.*}}, <2 x i32> <i32 1, i32 0>
+  // CHECK:    call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CHECK:    extractelement <2 x double> %{{.*}}, i32 0
   return _mm512_mask_reduce_min_pd(__M, __W); 
 }
 
@@ -131,18 +131,18 @@ unsigned int test_mm512_reduce_max_epu32(__m512i __W){
 }
 
 float test_mm512_reduce_max_ps(__m512 __W){
-// CHECK-LABEL: define float @test_mm512_reduce_max_ps(
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
-// CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
-// CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-// CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    extractelement <4 x float> %{{.*}}, i32 0
+  // CHECK-LABEL: define float @test_mm512_reduce_max_ps(
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+  // CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  // CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    extractelement <4 x float> %{{.*}}, i32 0
   return _mm512_reduce_max_ps(__W); 
 }
 
@@ -159,18 +159,18 @@ unsigned int test_mm512_reduce_min_epu32(__m512i __W){
 }
 
 float test_mm512_reduce_min_ps(__m512 __W){
-// CHECK-LABEL: define float @test_mm512_reduce_min_ps(
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
-// CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
-// CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-// CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    extractelement <4 x float> %{{.*}}, i32 0
+  // CHECK-LABEL: define float @test_mm512_reduce_min_ps(
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+  // CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  // CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    extractelement <4 x float> %{{.*}}, i32 0
   return _mm512_reduce_min_ps(__W); 
 }
 
@@ -191,20 +191,20 @@ unsigned int test_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __W){
 }
 
 float test_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: define float @test_mm512_mask_reduce_max_ps(
-// CHECK:    bitcast i16 %{{.*}} to <16 x i1>
-// CHECK:    select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
-// CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
-// CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-// CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    extractelement <4 x float> %{{.*}}, i32 0
+  // CHECK-LABEL: define float @test_mm512_mask_reduce_max_ps(
+  // CHECK:    bitcast i16 %{{.*}} to <16 x i1>
+  // CHECK:    select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+  // CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  // CHECK:    call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    extractelement <4 x float> %{{.*}}, i32 0
   return _mm512_mask_reduce_max_ps(__M, __W); 
 }
 
@@ -225,20 +225,20 @@ unsigned int test_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __W){
 }
 
 float test_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __W){
-// CHECK-LABEL: define float @test_mm512_mask_reduce_min_ps(
-// CHECK:    bitcast i16 %{{.*}} to <16 x i1>
-// CHECK:    select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
-// CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-// CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-// CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
-// CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-// CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
-// CHECK:    extractelement <4 x float> %{{.*}}, i32 0
+  // CHECK-LABEL: define float @test_mm512_mask_reduce_min_ps(
+  // CHECK:    bitcast i16 %{{.*}} to <16 x i1>
+  // CHECK:    select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK:    shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  // CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+  // CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  // CHECK:    call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CHECK:    extractelement <4 x float> %{{.*}}, i32 0
   return _mm512_mask_reduce_min_ps(__M, __W); 
 }
 

diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c
index 81bd5a040ae7..173b7267270e 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -2398,7 +2398,7 @@ __m128 test_mm512_maskz_extractf32x4_ps( __mmask8  __U,__m512 __A){
   // CHECK-LABEL:@test_mm512_maskz_extractf32x4_ps
   // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
- return _mm512_maskz_extractf32x4_ps(  __U, __A, 1);
+  return _mm512_maskz_extractf32x4_ps(__U, __A, 1);
 }
 
 __mmask16 test_mm512_cmpeq_epu32_mask(__m512i __a, __m512i __b) {

diff --git a/clang/test/CodeGen/X86/avx512vl-builtins-constrained.c b/clang/test/CodeGen/X86/avx512vl-builtins-constrained.c
index 4e6ac9d367f8..357eae1c7279 100644
--- a/clang/test/CodeGen/X86/avx512vl-builtins-constrained.c
+++ b/clang/test/CodeGen/X86/avx512vl-builtins-constrained.c
@@ -12,7 +12,7 @@ __m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
   // COMMONIR: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   // COMMONIR: bitcast <4 x i16> %{{.*}} to <4 x half>
   // UNCONSTRAINED: fpext <4 x half> %{{.*}} to <4 x float>
-  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict") 
+  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
   // COMMONIR: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_mask_cvtph_ps(__W, __U, __A);
 }
@@ -23,7 +23,7 @@ __m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
   // COMMONIR: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   // COMMONIR: bitcast <4 x i16> %{{.*}} to <4 x half>
   // UNCONSTRAINED: fpext <4 x half> %{{.*}} to <4 x float>
-  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict") 
+  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
   // COMMONIR: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_maskz_cvtph_ps(__U, __A);
 }

diff --git a/clang/test/CodeGen/arm64-abi-vector.c b/clang/test/CodeGen/arm64-abi-vector.c
index c911400a0c7b..da4af7a8cc82 100644
--- a/clang/test/CodeGen/arm64-abi-vector.c
+++ b/clang/test/CodeGen/arm64-abi-vector.c
@@ -430,10 +430,10 @@ double fixed_5i(__int5 *in) {
 }
 
 __attribute__((noinline)) double args_vec_3d(int fixed, __double3 c3) {
-// CHECK: args_vec_3d
-// CHECK: [[CAST:%.*]] = bitcast <3 x double>* {{%.*}} to <4 x double>*
-// CHECK: [[LOAD:%.*]] = load <4 x double>, <4 x double>* [[CAST]]
-// CHECK: shufflevector <4 x double> [[LOAD]], <4 x double> poison, <3 x i32> <i32 0, i32 1, i32 2>
+  // CHECK: args_vec_3d
+  // CHECK: [[CAST:%.*]] = bitcast <3 x double>* {{%.*}} to <4 x double>*
+  // CHECK: [[LOAD:%.*]] = load <4 x double>, <4 x double>* [[CAST]]
+  // CHECK: shufflevector <4 x double> [[LOAD]], <4 x double> poison, <3 x i32> <i32 0, i32 1, i32 2>
   double sum = fixed;
   sum = sum + c3.x + c3.y;
   return sum;

diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 04f8cb51eb11..4d17fc304d0c 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -2405,8 +2405,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
       for (unsigned i = 0; i != NumDstElts; ++i)
         ShuffleMask[i] = i;
 
-      Value *SV = Builder.CreateShuffleVector(CI->getArgOperand(0),
-                                              ShuffleMask);
+      Value *SV =
+          Builder.CreateShuffleVector(CI->getArgOperand(0), ShuffleMask);
 
       bool DoSext = (StringRef::npos != Name.find("pmovsx"));
       Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
@@ -2435,9 +2435,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
       if (NumSrcElts == 2)
         Rep = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 0, 1});
       else
-        Rep =
-            Builder.CreateShuffleVector(Load,
-                                        ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
+        Rep = Builder.CreateShuffleVector(
+            Load, ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
     } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
                          Name.startswith("avx512.mask.shuf.f"))) {
       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
index 63aee097c0da..5a75b82ea22e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -241,8 +241,7 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
                                             Arg.getName() + ".load");
       Arg.replaceAllUsesWith(NewVal);
     } else if (IsV3) {
-      Value *Shuf = Builder.CreateShuffleVector(Load,
-                                                ArrayRef<int>{0, 1, 2},
+      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                 Arg.getName() + ".load");
       Arg.replaceAllUsesWith(Shuf);
     } else {

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 2f456200a002..7a6874584d59 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3190,18 +3190,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   // and then apply the usual shadow combining logic.
   void handlePclmulIntrinsic(IntrinsicInst &I) {
     IRBuilder<> IRB(&I);
-    Type *ShadowTy = getShadowTy(&I);
     unsigned Width =
         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
     assert(isa<ConstantInt>(I.getArgOperand(2)) &&
            "pclmul 3rd operand must be a constant");
     unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
-    Value *Shuf0 =
-        IRB.CreateShuffleVector(getShadow(&I, 0),
-                                getPclmulMask(Width, Imm & 0x01));
-    Value *Shuf1 =
-        IRB.CreateShuffleVector(getShadow(&I, 1),
-                                getPclmulMask(Width, Imm & 0x10));
+    Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
+                                           getPclmulMask(Width, Imm & 0x01));
+    Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
+                                           getPclmulMask(Width, Imm & 0x10));
     ShadowAndOriginCombiner SOC(this, IRB);
     SOC.Add(Shuf0, getOrigin(&I, 0));
     SOC.Add(Shuf1, getOrigin(&I, 1));

diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index fb08891bc90c..191739728798 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -334,7 +334,6 @@ class LowerMatrixIntrinsics {
     Value *extractVector(unsigned I, unsigned J, unsigned NumElts,
                          IRBuilder<> &Builder) const {
       Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I);
-      Value *Undef = UndefValue::get(Vec->getType());
       return Builder.CreateShuffleVector(
           Vec, createSequentialMask(isColumnMajor() ? I : J, NumElts, 0),
           "block");
@@ -447,7 +446,6 @@ class LowerMatrixIntrinsics {
 
     // Otherwise split MatrixVal.
     SmallVector<Value *, 16> SplitVecs;
-    Value *Undef = UndefValue::get(VType);
     for (unsigned MaskStart = 0;
          MaskStart < cast<FixedVectorType>(VType)->getNumElements();
          MaskStart += SI.getStride()) {
@@ -941,7 +939,6 @@ class LowerMatrixIntrinsics {
     unsigned NumElts = cast<FixedVectorType>(Col->getType())->getNumElements();
     assert(NumElts >= BlockNumElts && "Too few elements for current block");
 
-    Value *Undef = UndefValue::get(Block->getType());
     Block = Builder.CreateShuffleVector(
         Block, createSequentialMask(0, BlockNumElts, NumElts - BlockNumElts));
 


        

