[llvm] r355767 - [ScalarizeMaskedMemIntrin] Use IRBuilder functions that take uint32_t/uint64_t for getelementptr, extractelement, and insertelement.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 8 18:08:41 PST 2019


Author: ctopper
Date: Fri Mar  8 18:08:41 2019
New Revision: 355767

URL: http://llvm.org/viewvc/llvm-project?rev=355767&view=rev
Log:
[ScalarizeMaskedMemIntrin] Use IRBuilder functions that take uint32_t/uint64_t for getelementptr, extractelement, and insertelement.

This saves needing to call getInt32 ourselves, making the code a little shorter.

The test changes are because insert/extract use getInt64 internally. This shouldn't be a functional issue.

This is cleanup because I plan to write similar code for expandload/compressstore.
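
For illustration only (not part of the patch), here is a minimal sketch of the pattern the change switches to: the IRBuilder overloads of CreateExtractElement/CreateInsertElement that take a raw integer index, and CreateConstInBoundsGEP1_32, instead of building an i32 constant with getInt32 first. The helper function and variable names below are hypothetical; the calls mirror the ones in scalarizeMaskedLoad.

  // Sketch only: shows the IRBuilder overloads this commit uses.
  // Function and variable names are hypothetical.
  #include "llvm/IR/IRBuilder.h"

  using namespace llvm;

  // Old style: build the index constant explicitly.
  //   Value *Gep = Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
  //   VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
  //
  // New style: pass the integer index directly; the insert/extract
  // overloads create an i64 constant internally, which is why the
  // checked IR in the tests now shows i64 indices.
  static Value *loadElement(IRBuilder<> &Builder, Type *EltTy,
                            Value *FirstEltPtr, Value *VResult,
                            unsigned AlignVal, unsigned Idx) {
    Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
    return Builder.CreateInsertElement(VResult, Load, Idx);
  }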

Modified:
    llvm/trunk/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
    llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll
    llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll
    llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll
    llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll

Modified: llvm/trunk/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp?rev=355767&r1=355766&r2=355767&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp (original)
+++ llvm/trunk/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp Fri Mar  8 18:08:41 2019
@@ -151,9 +151,9 @@ static void scalarizeMaskedLoad(CallInst
 
   // Adjust alignment for the scalar instruction.
   AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
-  // Bitcast %addr fron i8* to EltTy*
+  // Bitcast %addr from i8* to EltTy*
   Type *NewPtrType =
-      EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
+      EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
   Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
   unsigned VectorWidth = VecType->getNumElements();
 
@@ -164,11 +164,9 @@ static void scalarizeMaskedLoad(CallInst
     for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
       if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
         continue;
-      Value *Gep =
-          Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
+      Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
       LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
-      VResult =
-          Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
+      VResult = Builder.CreateInsertElement(VResult, Load, Idx);
     }
     CI->replaceAllUsesWith(VResult);
     CI->eraseFromParent();
@@ -183,8 +181,7 @@ static void scalarizeMaskedLoad(CallInst
     //  br i1 %mask_1, label %cond.load, label %else
     //
 
-    Value *Predicate =
-        Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
+    Value *Predicate = Builder.CreateExtractElement(Mask, Idx);
 
     // Create "cond" block
     //
@@ -196,11 +193,9 @@ static void scalarizeMaskedLoad(CallInst
                                                      "cond.load");
     Builder.SetInsertPoint(InsertPt);
 
-    Value *Gep =
-        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
+    Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
     LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
-    Value *NewVResult = Builder.CreateInsertElement(VResult, Load,
-                                                    Builder.getInt32(Idx));
+    Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
 
     // Create "else" block, fill it in the next iteration
     BasicBlock *NewIfBlock =
@@ -277,9 +272,9 @@ static void scalarizeMaskedStore(CallIns
 
   // Adjust alignment for the scalar instruction.
   AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
-  // Bitcast %addr fron i8* to EltTy*
+  // Bitcast %addr from i8* to EltTy*
   Type *NewPtrType =
-      EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
+      EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
   Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
   unsigned VectorWidth = VecType->getNumElements();
 
@@ -287,9 +282,8 @@ static void scalarizeMaskedStore(CallIns
     for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
       if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
         continue;
-      Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
-      Value *Gep =
-          Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
+      Value *OneElt = Builder.CreateExtractElement(Src, Idx);
+      Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
       Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
     }
     CI->eraseFromParent();
@@ -302,8 +296,7 @@ static void scalarizeMaskedStore(CallIns
     //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
     //  br i1 %mask_1, label %cond.store, label %else
     //
-    Value *Predicate =
-        Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
+    Value *Predicate = Builder.CreateExtractElement(Mask, Idx);
 
     // Create "cond" block
     //
@@ -315,9 +308,8 @@ static void scalarizeMaskedStore(CallIns
         IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.store");
     Builder.SetInsertPoint(InsertPt);
 
-    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
-    Value *Gep =
-        Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
+    Value *OneElt = Builder.CreateExtractElement(Src, Idx);
+    Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
     Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
 
     // Create "else" block, fill it in the next iteration
@@ -389,12 +381,11 @@ static void scalarizeMaskedGather(CallIn
     for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
       if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
         continue;
-      Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
-                                                "Ptr" + Twine(Idx));
+      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
       LoadInst *Load =
           Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
-      VResult = Builder.CreateInsertElement(
-          VResult, Load, Builder.getInt32(Idx), "Res" + Twine(Idx));
+      VResult =
+          Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
     }
     CI->replaceAllUsesWith(VResult);
     CI->eraseFromParent();
@@ -408,8 +399,8 @@ static void scalarizeMaskedGather(CallIn
     //  br i1 %Mask1, label %cond.load, label %else
     //
 
-    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx),
-                                                    "Mask" + Twine(Idx));
+    Value *Predicate =
+        Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
 
     // Create "cond" block
     //
@@ -420,13 +411,11 @@ static void scalarizeMaskedGather(CallIn
     BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load");
     Builder.SetInsertPoint(InsertPt);
 
-    Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
-                                              "Ptr" + Twine(Idx));
+    Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
     LoadInst *Load =
         Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
-    Value *NewVResult = Builder.CreateInsertElement(VResult, Load,
-                                                    Builder.getInt32(Idx),
-                                                    "Res" + Twine(Idx));
+    Value *NewVResult =
+        Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
 
     // Create "else" block, fill it in the next iteration
     BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
@@ -501,10 +490,9 @@ static void scalarizeMaskedScatter(CallI
     for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
       if (cast<ConstantVector>(Mask)->getAggregateElement(Idx)->isNullValue())
         continue;
-      Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
-                                                   "Elt" + Twine(Idx));
-      Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
-                                                "Ptr" + Twine(Idx));
+      Value *OneElt =
+          Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
+      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
       Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
     }
     CI->eraseFromParent();
@@ -517,8 +505,8 @@ static void scalarizeMaskedScatter(CallI
     //  %Mask1 = extractelement <16 x i1> %Mask, i32 Idx
     //  br i1 %Mask1, label %cond.store, label %else
     //
-    Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx),
-                                                    "Mask" + Twine(Idx));
+    Value *Predicate =
+        Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
 
     // Create "cond" block
     //
@@ -529,10 +517,8 @@ static void scalarizeMaskedScatter(CallI
     BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
     Builder.SetInsertPoint(InsertPt);
 
-    Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
-                                                 "Elt" + Twine(Idx));
-    Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
-                                              "Ptr" + Twine(Idx));
+    Value *OneElt = Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
+    Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
     Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
 
     // Create "else" block, fill it in the next iteration

Modified: llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll?rev=355767&r1=355766&r2=355767&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll Fri Mar  8 18:08:41 2019
@@ -69,7 +69,7 @@ declare <8 x i32> @llvm.masked.gather.v8
 ; SCALAR-NEXT: br label %else
 ; SCALAR: else:
 ; SCALAR-NEXT:  %res.phi.else = phi
-; SCALAR-NEXT:  %Mask1 = extractelement <16 x i1> %imask, i32 1
+; SCALAR-NEXT:  %Mask1 = extractelement <16 x i1> %imask, i64 1
 ; SCALAR-NEXT:  br i1 %Mask1, label %cond.load1, label %else2
 
 define <16 x float> @test2(float* %base, <16 x i32> %ind, i16 %mask) {
@@ -211,15 +211,15 @@ define <16 x i32> @test4(i32* %base, <16
 
 
 ; SCALAR-LABEL: test5
-; SCALAR:        %Mask0 = extractelement <16 x i1> %imask, i32 0
+; SCALAR:        %Mask0 = extractelement <16 x i1> %imask, i64 0
 ; SCALAR-NEXT:   br i1 %Mask0, label %cond.store, label %else
 ; SCALAR: cond.store:
-; SCALAR-NEXT:  %Elt0 = extractelement <16 x i32> %val, i32 0
-; SCALAR-NEXT:  %Ptr0 = extractelement <16 x i32*> %gep.random, i32 0
+; SCALAR-NEXT:  %Elt0 = extractelement <16 x i32> %val, i64 0
+; SCALAR-NEXT:  %Ptr0 = extractelement <16 x i32*> %gep.random, i64 0
 ; SCALAR-NEXT:  store i32 %Elt0, i32* %Ptr0, align 4
 ; SCALAR-NEXT:  br label %else
 ; SCALAR: else:
-; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i32 1
+; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i64 1
 ; SCALAR-NEXT:  br i1 %Mask1, label %cond.store1, label %else2
 
 define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) {
@@ -277,11 +277,11 @@ declare void @llvm.masked.scatter.v16i32
 
 ; SCALAR-LABEL: test6
 ; SCALAR:        store i32 %Elt0, i32* %Ptr01, align 4
-; SCALAR-NEXT:   %Elt1 = extractelement <8 x i32> %a1, i32 1
-; SCALAR-NEXT:   %Ptr12 = extractelement <8 x i32*> %ptr, i32 1
+; SCALAR-NEXT:   %Elt1 = extractelement <8 x i32> %a1, i64 1
+; SCALAR-NEXT:   %Ptr12 = extractelement <8 x i32*> %ptr, i64 1
 ; SCALAR-NEXT:   store i32 %Elt1, i32* %Ptr12, align 4
-; SCALAR-NEXT:   %Elt2 = extractelement <8 x i32> %a1, i32 2
-; SCALAR-NEXT:   %Ptr23 = extractelement <8 x i32*> %ptr, i32 2
+; SCALAR-NEXT:   %Elt2 = extractelement <8 x i32> %a1, i64 2
+; SCALAR-NEXT:   %Ptr23 = extractelement <8 x i32*> %ptr, i64 2
 ; SCALAR-NEXT:   store i32 %Elt2, i32* %Ptr23, align 4
 
 define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {

Modified: llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll?rev=355767&r1=355766&r2=355767&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll (original)
+++ llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll Fri Mar  8 18:08:41 2019
@@ -3,21 +3,21 @@
 
 define <2 x i64> @scalarize_v2i64(<2 x i64*> %p, <2 x i1> %mask, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64(
-; CHECK-NEXT:    [[MASK0:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i32 0
+; CHECK-NEXT:    [[MASK0:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i64 0
 ; CHECK-NEXT:    br i1 [[MASK0]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.load:
-; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x i64*> [[P:%.*]], i32 0
+; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x i64*> [[P:%.*]], i64 0
 ; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, i64* [[PTR0]], align 8
-; CHECK-NEXT:    [[RES0:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[LOAD0]], i32 0
+; CHECK-NEXT:    [[RES0:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[LOAD0]], i64 0
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
 ; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[RES0]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-NEXT:    [[MASK1:%.*]] = extractelement <2 x i1> [[MASK]], i32 1
+; CHECK-NEXT:    [[MASK1:%.*]] = extractelement <2 x i1> [[MASK]], i64 1
 ; CHECK-NEXT:    br i1 [[MASK1]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.load1:
-; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P]], i32 1
+; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P]], i64 1
 ; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[PTR1]], align 8
-; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[LOAD1]], i32 1
+; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[LOAD1]], i64 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
 ; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[RES1]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
@@ -29,12 +29,12 @@ define <2 x i64> @scalarize_v2i64(<2 x i
 
 define <2 x i64> @scalarize_v2i64_ones_mask(<2 x i64*> %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_ones_mask(
-; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x i64*> [[P:%.*]], i32 0
+; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x i64*> [[P:%.*]], i64 0
 ; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, i64* [[PTR0]], align 8
-; CHECK-NEXT:    [[RES0:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[LOAD0]], i32 0
-; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P]], i32 1
+; CHECK-NEXT:    [[RES0:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[LOAD0]], i64 0
+; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P]], i64 1
 ; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[PTR1]], align 8
-; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[RES0]], i64 [[LOAD1]], i32 1
+; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[RES0]], i64 [[LOAD1]], i64 1
 ; CHECK-NEXT:    ret <2 x i64> [[RES1]]
 ;
   %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> %p, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
@@ -51,9 +51,9 @@ define <2 x i64> @scalarize_v2i64_zero_m
 
 define <2 x i64> @scalarize_v2i64_const_mask(<2 x i64*> %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
-; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P:%.*]], i32 1
+; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P:%.*]], i64 1
 ; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[PTR1]], align 8
-; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[LOAD1]], i32 1
+; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[LOAD1]], i64 1
 ; CHECK-NEXT:    ret <2 x i64> [[RES1]]
 ;
   %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)

Modified: llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll?rev=355767&r1=355766&r2=355767&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll (original)
+++ llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll Fri Mar  8 18:08:41 2019
@@ -4,21 +4,21 @@
 define <2 x i64> @scalarize_v2i64(<2 x i64>* %p, <2 x i1> %mask, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i64 0
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.load:
 ; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP4]], i64 0
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
 ; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[MASK]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[MASK]], i64 1
 ; CHECK-NEXT:    br i1 [[TMP6]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.load1:
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
-; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP8]], i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP8]], i64 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
 ; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP9]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
@@ -51,7 +51,7 @@ define <2 x i64> @scalarize_v2i64_const_
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP3]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP3]], i64 1
 ; CHECK-NEXT:    ret <2 x i64> [[TMP4]]
 ;
   %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
@@ -62,21 +62,21 @@ define <2 x i64> @scalarize_v2i64_const_
 define <2 x i24> @scalarize_v2i24(<2 x i24>* %p, <2 x i1> %mask, <2 x i24> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i24(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i24>* [[P:%.*]] to i24*
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i64 0
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.load:
 ; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = load i24, i24* [[TMP3]], align 1
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP4]], i64 0
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
 ; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i24> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[MASK]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[MASK]], i64 1
 ; CHECK-NEXT:    br i1 [[TMP6]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.load1:
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i24, i24* [[TMP7]], align 1
-; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP8]], i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP8]], i64 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
 ; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i24> [ [[TMP9]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
@@ -90,21 +90,21 @@ define <2 x i24> @scalarize_v2i24(<2 x i
 define <2 x i48> @scalarize_v2i48(<2 x i48>* %p, <2 x i1> %mask, <2 x i48> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i48(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i48>* [[P:%.*]] to i48*
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i64 0
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.load:
 ; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = load i48, i48* [[TMP3]], align 2
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP4]], i64 0
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
 ; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i48> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[MASK]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[MASK]], i64 1
 ; CHECK-NEXT:    br i1 [[TMP6]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.load1:
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i48, i48* [[TMP7]], align 2
-; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP8]], i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP8]], i64 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
 ; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i48> [ [[TMP9]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]

Modified: llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll?rev=355767&r1=355766&r2=355767&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll (original)
+++ llvm/trunk/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll Fri Mar  8 18:08:41 2019
@@ -4,18 +4,18 @@
 define void @scalarize_v2i64(<2 x i64>* %p, <2 x i1> %mask, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[MASK:%.*]], i64 0
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.store:
-; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
 ; CHECK-NEXT:    store i64 [[TMP3]], i64* [[TMP4]], align 8
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i1> [[MASK]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i1> [[MASK]], i64 1
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.store1:
-; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[DATA]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[DATA]], i64 1
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
 ; CHECK-NEXT:    store i64 [[TMP6]], i64* [[TMP7]], align 8
 ; CHECK-NEXT:    br label [[ELSE2]]
@@ -47,7 +47,7 @@ define void @scalarize_v2i64_zero_mask(<
 define void @scalarize_v2i64_const_mask(<2 x i64>* %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 1
 ; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
 ; CHECK-NEXT:    store i64 [[TMP2]], i64* [[TMP3]], align 8
 ; CHECK-NEXT:    ret void
