r271246 - [AVX512] Emit generic masked store intrinsics instead of using x86-specific intrinsics.
Craig Topper via cfe-commits
cfe-commits at lists.llvm.org
Mon May 30 18:50:10 PDT 2016
Author: ctopper
Date: Mon May 30 20:50:10 2016
New Revision: 271246
URL: http://llvm.org/viewvc/llvm-project?rev=271246&view=rev
Log:
[AVX512] Emit generic masked store intrinsics instead of using x86-specific intrinsics.
This will allow us to remove the x86 intrinsics from the backend.
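For illustration, a minimal C example of source that now takes the new path (the function name and mask value below are arbitrary and the snippet is not part of the change itself): a masked unaligned store intrinsic is emitted as a call to the generic @llvm.masked.store intrinsic, with the integer mask bitcast to a vector of i1, instead of an x86-specific @llvm.x86.avx512.mask.storeu.* intrinsic.

#include <immintrin.h>

/* Hypothetical example (requires -mavx512f -mavx512vl): this call is now
   emitted as @llvm.masked.store.v4i32 with alignment 1 and a <4 x i1> mask,
   rather than @llvm.x86.avx512.mask.storeu.d.128. */
void store_low_two_lanes(void *p, __m128i v) {
  _mm_mask_storeu_epi32(p, (__mmask8)0x3, v);  /* store elements 0 and 1 only */
}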
Modified:
cfe/trunk/lib/CodeGen/CGBuiltin.cpp
cfe/trunk/test/CodeGen/avx512bw-builtins.c
cfe/trunk/test/CodeGen/avx512f-builtins.c
cfe/trunk/test/CodeGen/avx512vl-builtins.c
cfe/trunk/test/CodeGen/avx512vlbw-builtins.c
Modified: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGBuiltin.cpp?rev=271246&r1=271245&r2=271246&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp Mon May 30 20:50:10 2016
@@ -6277,6 +6277,38 @@ BuildVector(ArrayRef<llvm::Value*> Ops)
return Result;
}
+static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned Align) {
+ // Cast the pointer to the right type.
+ Ops[0] = CGF.Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()));
+
+ // If the mask is all ones just emit a regular store.
+ if (const auto *C = dyn_cast<Constant>(Ops[2]))
+ if (C->isAllOnesValue())
+ return CGF.Builder.CreateAlignedStore(Ops[1], Ops[0], Align);
+
+ // Convert the mask from an integer type to a vector of i1.
+ unsigned NumElts = Ops[1]->getType()->getVectorNumElements();
+ llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
+ cast<IntegerType>(Ops[2]->getType())->getBitWidth());
+ Ops[2] = CGF.Builder.CreateBitCast(Ops[2], MaskTy);
+
+ // If we have less than 8 elements, then the starting mask was an i8 and
+ // we need to extract down to the right number of elements.
+ if (NumElts < 8) {
+ int Indices[4];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i;
+ Ops[2] = CGF.Builder.CreateShuffleVector(Ops[2], Ops[2],
+ makeArrayRef(Indices, NumElts),
+ "extract");
+ }
+
+ return CGF.Builder.CreateMaskedStore(Ops[1], Ops[0], Align, Ops[2]);
+}
+
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == X86::BI__builtin_ms_va_start ||
@@ -6500,6 +6532,42 @@ Value *CodeGenFunction::EmitX86BuiltinEx
Ops.push_back(Mlo);
return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
}
+ case X86::BI__builtin_ia32_storedqudi128_mask:
+ case X86::BI__builtin_ia32_storedqusi128_mask:
+ case X86::BI__builtin_ia32_storedquhi128_mask:
+ case X86::BI__builtin_ia32_storedquqi128_mask:
+ case X86::BI__builtin_ia32_storeupd128_mask:
+ case X86::BI__builtin_ia32_storeups128_mask:
+ case X86::BI__builtin_ia32_storedqudi256_mask:
+ case X86::BI__builtin_ia32_storedqusi256_mask:
+ case X86::BI__builtin_ia32_storedquhi256_mask:
+ case X86::BI__builtin_ia32_storedquqi256_mask:
+ case X86::BI__builtin_ia32_storeupd256_mask:
+ case X86::BI__builtin_ia32_storeups256_mask:
+ case X86::BI__builtin_ia32_storedqudi512_mask:
+ case X86::BI__builtin_ia32_storedqusi512_mask:
+ case X86::BI__builtin_ia32_storedquhi512_mask:
+ case X86::BI__builtin_ia32_storedquqi512_mask:
+ case X86::BI__builtin_ia32_storeupd512_mask:
+ case X86::BI__builtin_ia32_storeups512_mask:
+ return EmitX86MaskedStore(*this, Ops, 1);
+
+ case X86::BI__builtin_ia32_movdqa32store128_mask:
+ case X86::BI__builtin_ia32_movdqa64store128_mask:
+ case X86::BI__builtin_ia32_storeaps128_mask:
+ case X86::BI__builtin_ia32_storeapd128_mask:
+ case X86::BI__builtin_ia32_movdqa32store256_mask:
+ case X86::BI__builtin_ia32_movdqa64store256_mask:
+ case X86::BI__builtin_ia32_storeaps256_mask:
+ case X86::BI__builtin_ia32_storeapd256_mask:
+ case X86::BI__builtin_ia32_movdqa32store512_mask:
+ case X86::BI__builtin_ia32_movdqa64store512_mask:
+ case X86::BI__builtin_ia32_storeaps512_mask:
+ case X86::BI__builtin_ia32_storeapd512_mask: {
+ unsigned Align =
+ getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
+ return EmitX86MaskedStore(*this, Ops, Align);
+ }
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
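For the aligned-store builtins above, the alignment passed to EmitX86MaskedStore is the natural alignment of the vector operand (for example, 64 bytes for a 512-bit vector), while the unaligned variants pass 1. A minimal C sketch of the distinction (not part of this patch; the names are arbitrary and AVX-512F is assumed to be enabled):

#include <immintrin.h>

/* Hypothetical example: the aligned masked store is emitted as
   @llvm.masked.store.v16f32 with alignment 64 (the natural alignment of
   __m512), while the unaligned variant uses alignment 1. The pointer passed
   to the aligned form must actually be 64-byte aligned. */
void masked_stores(float *aligned_p, float *unaligned_p, __m512 v, __mmask16 m) {
  _mm512_mask_store_ps(aligned_p, m, v);    /* alignment 64 in the IR */
  _mm512_mask_storeu_ps(unaligned_p, m, v); /* alignment 1 in the IR */
}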
Modified: cfe/trunk/test/CodeGen/avx512bw-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512bw-builtins.c?rev=271246&r1=271245&r2=271246&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512bw-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512bw-builtins.c Mon May 30 20:50:10 2016
@@ -1348,7 +1348,7 @@ __m512i test_mm512_maskz_loadu_epi8(__mm
}
void test_mm512_mask_storeu_epi16(void *__P, __mmask32 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_storeu_epi16
- // CHECK: @llvm.x86.avx512.mask.storeu.w.
+ // CHECK: @llvm.masked.store.v32i16(<32 x i16> %{{.*}}, <32 x i16>* %{{.*}}, i32 1, <32 x i1> %{{.*}})
return _mm512_mask_storeu_epi16(__P, __U, __A);
}
__mmask64 test_mm512_test_epi8_mask(__m512i __A, __m512i __B) {
@@ -1359,7 +1359,7 @@ __mmask64 test_mm512_test_epi8_mask(__m5
void test_mm512_mask_storeu_epi8(void *__P, __mmask64 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_storeu_epi8
- // CHECK: @llvm.x86.avx512.mask.storeu.b.
+ // CHECK: @llvm.masked.store.v64i8(<64 x i8> %{{.*}}, <64 x i8>* %{{.*}}, i32 1, <64 x i1> %{{.*}})
return _mm512_mask_storeu_epi8(__P, __U, __A);
}
__mmask64 test_mm512_mask_test_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) {
Modified: cfe/trunk/test/CodeGen/avx512f-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512f-builtins.c?rev=271246&r1=271245&r2=271246&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512f-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512f-builtins.c Mon May 30 20:50:10 2016
@@ -119,29 +119,32 @@ __m512d test_mm512_mul_pd(__m512d a, __m
void test_mm512_storeu_si512 (void *__P, __m512i __A)
{
- // CHECK-LABEL: @test_mm512_storeu_si512
- // CHECK: @llvm.x86.avx512.mask.storeu.d.512
+ // CHECK-LABEL: @test_mm512_storeu_si512
+ // CHECK: store <16 x i32> %{{.*}}, <16 x i32>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm512_storeu_si512 ( __P,__A);
}
void test_mm512_storeu_ps(void *p, __m512 a)
{
// CHECK-LABEL: @test_mm512_storeu_ps
- // CHECK: @llvm.x86.avx512.mask.storeu.ps.512
+ // CHECK: store <16 x float> %{{.*}}, <16 x float>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm512_storeu_ps(p, a);
}
void test_mm512_storeu_pd(void *p, __m512d a)
{
// CHECK-LABEL: @test_mm512_storeu_pd
- // CHECK: @llvm.x86.avx512.mask.storeu.pd.512
+ // CHECK: store <8 x double> %{{.*}}, <8 x double>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm512_storeu_pd(p, a);
}
void test_mm512_mask_store_ps(void *p, __m512 a, __mmask16 m)
{
// CHECK-LABEL: @test_mm512_mask_store_ps
- // CHECK: @llvm.x86.avx512.mask.store.ps.512
+ // CHECK: @llvm.masked.store.v16f32(<16 x float> %{{.*}}, <16 x float>* %{{.*}}, i32 64, <16 x i1> %{{.*}})
_mm512_mask_store_ps(p, m, a);
}
@@ -192,19 +195,19 @@ void test_mm512_store_pd(void *p, __m512
void test_mm512_mask_store_pd(void *p, __m512d a, __mmask8 m)
{
// CHECK-LABEL: @test_mm512_mask_store_pd
- // CHECK: @llvm.x86.avx512.mask.store.pd.512
+ // CHECK: @llvm.masked.store.v8f64(<8 x double> %{{.*}}, <8 x double>* %{{.*}}, i32 64, <8 x i1> %{{.*}})
_mm512_mask_store_pd(p, m, a);
}
void test_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_storeu_epi32
- // CHECK: @llvm.x86.avx512.mask.storeu.d.512
+ // CHECK: @llvm.masked.store.v16i32(<16 x i32> %{{.*}}, <16 x i32>* %{{.*}}, i32 1, <16 x i1> %{{.*}})
return _mm512_mask_storeu_epi32(__P, __U, __A);
}
void test_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_storeu_epi64
- // CHECK: @llvm.x86.avx512.mask.storeu.q.512
+ // CHECK: @llvm.masked.store.v8i64(<8 x i64> %{{.*}}, <8 x i64>* %{{.*}}, i32 1, <8 x i1> %{{.*}})
return _mm512_mask_storeu_epi64(__P, __U, __A);
}
@@ -2585,13 +2588,13 @@ __m512i test_mm512_maskz_load_epi64(__mm
void test_mm512_mask_store_epi32(void *__P, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_store_epi32
- // CHECK: @llvm.x86.avx512.mask.store.d.512
+ // CHECK: @llvm.masked.store.v16i32(<16 x i32> %{{.*}}, <16 x i32>* %{{.*}}, i32 64, <16 x i1> %{{.*}})
return _mm512_mask_store_epi32(__P, __U, __A);
}
void test_mm512_mask_store_epi64(void *__P, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_store_epi64
- // CHECK: @llvm.x86.avx512.mask.store.q.512
+ // CHECK: @llvm.masked.store.v8i64(<8 x i64> %{{.*}}, <8 x i64>* %{{.*}}, i32 64, <8 x i1> %{{.*}})
return _mm512_mask_store_epi64(__P, __U, __A);
}
Modified: cfe/trunk/test/CodeGen/avx512vl-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512vl-builtins.c?rev=271246&r1=271245&r2=271246&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512vl-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512vl-builtins.c Mon May 30 20:50:10 2016
@@ -3935,13 +3935,13 @@ __m256i test_mm256_maskz_srav_epi64(__mm
void test_mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_store_epi32
- // CHECK: @llvm.x86.avx512.mask.store.d.128
+ // CHECK: @llvm.masked.store.v4i32(<4 x i32> %{{.*}}, <4 x i32>* %{{.}}, i32 16, <4 x i1> %{{.*}})
return _mm_mask_store_epi32(__P, __U, __A);
}
void test_mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_store_epi32
- // CHECK: @llvm.x86.avx512.mask.store.d.256
+ // CHECK: @llvm.masked.store.v8i32(<8 x i32> %{{.*}}, <8 x i32>* %{{.}}, i32 32, <8 x i1> %{{.*}})
return _mm256_mask_store_epi32(__P, __U, __A);
}
@@ -4043,13 +4043,13 @@ __m256i test_mm256_maskz_load_epi64(__mm
void test_mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_store_epi64
- // CHECK: @llvm.x86.avx512.mask.store.q.128
+ // CHECK: @llvm.masked.store.v2i64(<2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, i32 16, <2 x i1> %{{.*}})
return _mm_mask_store_epi64(__P, __U, __A);
}
void test_mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_store_epi64
- // CHECK: @llvm.x86.avx512.mask.store.q.256
+ // CHECK: @llvm.masked.store.v4i64(<4 x i64> %{{.*}}, <4 x i64>* %{{.*}}, i32 32, <4 x i1> %{{.*}})
return _mm256_mask_store_epi64(__P, __U, __A);
}
@@ -4343,73 +4343,73 @@ __m256 test_mm256_maskz_loadu_ps(__mmask
void test_mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A) {
// CHECK-LABEL: @test_mm_mask_store_pd
- // CHECK: @llvm.x86.avx512.mask.store.pd.128
+ // CHECK: @llvm.masked.store.v2f64(<2 x double> %{{.*}}, <2 x double>* %{{.*}}, i32 16, <2 x i1> %{{.*}})
return _mm_mask_store_pd(__P, __U, __A);
}
void test_mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_mask_store_pd
- // CHECK: @llvm.x86.avx512.mask.store.pd.256
+ // CHECK: @llvm.masked.store.v4f64(<4 x double> %{{.*}}, <4 x double>* %{{.*}}, i32 32, <4 x i1> %{{.*}})
return _mm256_mask_store_pd(__P, __U, __A);
}
void test_mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A) {
// CHECK-LABEL: @test_mm_mask_store_ps
- // CHECK: @llvm.x86.avx512.mask.store.ps.128
+ // CHECK: @llvm.masked.store.v4f32(<4 x float> %{{.*}}, <4 x float>* %{{.*}}, i32 16, <4 x i1> %{{.*}})
return _mm_mask_store_ps(__P, __U, __A);
}
void test_mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_mask_store_ps
- // CHECK: @llvm.x86.avx512.mask.store.ps.256
+ // CHECK: @llvm.masked.store.v8f32(<8 x float> %{{.*}}, <8 x float>* %{{.*}}, i32 32, <8 x i1> %{{.*}})
return _mm256_mask_store_ps(__P, __U, __A);
}
void test_mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_storeu_epi64
- // CHECK: @llvm.x86.avx512.mask.storeu
+ // CHECK: @llvm.masked.store.v2i64(<2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, i32 1, <2 x i1> %{{.*}})
return _mm_mask_storeu_epi64(__P, __U, __A);
}
void test_mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_storeu_epi64
- // CHECK: @llvm.x86.avx512.mask.storeu
+ // CHECK: @llvm.masked.store.v4i64(<4 x i64> %{{.*}}, <4 x i64>* %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm256_mask_storeu_epi64(__P, __U, __A);
}
void test_mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_storeu_epi32
- // CHECK: @llvm.x86.avx512.mask.storeu
+ // CHECK: @llvm.masked.store.v4i32(<4 x i32> %{{.*}}, <4 x i32>* %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm_mask_storeu_epi32(__P, __U, __A);
}
void test_mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_storeu_epi32
- // CHECK: @llvm.x86.avx512.mask.storeu
+ // CHECK: @llvm.masked.store.v8i32(<8 x i32> %{{.*}}, <8 x i32>* %{{.*}}, i32 1, <8 x i1> %{{.*}})
return _mm256_mask_storeu_epi32(__P, __U, __A);
}
void test_mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A) {
// CHECK-LABEL: @test_mm_mask_storeu_pd
- // CHECK: @llvm.x86.avx512.mask.storeu.pd.128
+ // CHECK: @llvm.masked.store.v2f64(<2 x double> %{{.*}}, <2 x double>* %{{.*}}, i32 1, <2 x i1> %{{.*}})
return _mm_mask_storeu_pd(__P, __U, __A);
}
void test_mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_mask_storeu_pd
- // CHECK: @llvm.x86.avx512.mask.storeu.pd.256
+ // CHECK: @llvm.masked.store.v4f64(<4 x double> %{{.*}}, <4 x double>* %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm256_mask_storeu_pd(__P, __U, __A);
}
void test_mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A) {
// CHECK-LABEL: @test_mm_mask_storeu_ps
- // CHECK: @llvm.x86.avx512.mask.storeu.ps.128
+ // CHECK: @llvm.masked.store.v4f32(<4 x float> %{{.*}}, <4 x float>* %{{.*}}, i32 1, <4 x i1> %{{.*}})
return _mm_mask_storeu_ps(__P, __U, __A);
}
void test_mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_mask_storeu_ps
- // CHECK: @llvm.x86.avx512.mask.storeu.ps.256
+ // CHECK: @llvm.masked.store.v8f32(<8 x float> %{{.*}}, <8 x float>* %{{.*}}, i32 1, <8 x i1> %{{.*}})
return _mm256_mask_storeu_ps(__P, __U, __A);
}
Modified: cfe/trunk/test/CodeGen/avx512vlbw-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512vlbw-builtins.c?rev=271246&r1=271245&r2=271246&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512vlbw-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512vlbw-builtins.c Mon May 30 20:50:10 2016
@@ -2055,25 +2055,25 @@ __m256i test_mm256_maskz_loadu_epi8(__mm
void test_mm_mask_storeu_epi16(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_storeu_epi16
- // CHECK: @llvm.x86.avx512.mask.storeu.w.128
+ // CHECK: @llvm.masked.store.v8i16(<8 x i16> %{{.*}}, <8 x i16>* %{{.*}}, i32 1, <8 x i1> %{{.*}})
return _mm_mask_storeu_epi16(__P, __U, __A);
}
void test_mm256_mask_storeu_epi16(void *__P, __mmask16 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_storeu_epi16
- // CHECK: @llvm.x86.avx512.mask.storeu.w.256
+ // CHECK: @llvm.masked.store.v16i16(<16 x i16> %{{.*}}, <16 x i16>* %{{.*}}, i32 1, <16 x i1> %{{.*}})
return _mm256_mask_storeu_epi16(__P, __U, __A);
}
void test_mm_mask_storeu_epi8(void *__P, __mmask16 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_storeu_epi8
- // CHECK: @llvm.x86.avx512.mask.storeu.b.128
+ // CHECK: @llvm.masked.store.v16i8(<16 x i8> %{{.*}}, <16 x i8>* %{{.*}}, i32 1, <16 x i1> %{{.*}})
return _mm_mask_storeu_epi8(__P, __U, __A);
}
void test_mm256_mask_storeu_epi8(void *__P, __mmask32 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_storeu_epi8
- // CHECK: @llvm.x86.avx512.mask.storeu.b.256
+ // CHECK: @llvm.masked.store.v32i8(<32 x i8> %{{.*}}, <32 x i8>* %{{.*}}, i32 1, <32 x i1> %{{.*}})
return _mm256_mask_storeu_epi8(__P, __U, __A);
}
__mmask16 test_mm_test_epi8_mask(__m128i __A, __m128i __B) {