[llvm] 926681b - [CostModel] Add basic implementation of getGatherScatterOpCost.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 26 04:03:01 PST 2020


Author: Florian Hahn
Date: 2020-11-26T12:02:25Z
New Revision: 926681b6be701176f4f806e28932e8013fe4dec1

URL: https://github.com/llvm/llvm-project/commit/926681b6be701176f4f806e28932e8013fe4dec1
DIFF: https://github.com/llvm/llvm-project/commit/926681b6be701176f4f806e28932e8013fe4dec1.diff

LOG: [CostModel] Add basic implementation of getGatherScatterOpCost.

Add a basic implementation of getGatherScatterOpCost to BasicTTIImpl.

The implementation estimates the cost of scalarizing the loads/stores,
the cost of packing/extracting the individual lanes, and, for variable
masks, the cost of conditionally executing only the enabled lanes.

This more accurately reflects the current cost on targets like AArch64.
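In rough terms, the estimate is the sum of per-lane address extraction and
scalar memory-operation costs, the packing/unpacking (scalarization)
overhead, and, for variable masks, a per-lane cost for extracting the
condition plus a branch and a PHI. The sketch below only restates that
formula for illustration; the helper name and parameters
(estimateGatherScatterCost, ExtractAddrCost, ...) are placeholders, not
actual TTI APIs:

    // Simplified restatement of the estimate added to BasicTTIImpl (not the
    // exact code); all per-operation costs are target-dependent inputs.
    unsigned estimateGatherScatterCost(unsigned NumElts, bool VariableMask,
                                       unsigned ExtractAddrCost,
                                       unsigned ScalarMemOpCost,
                                       unsigned PackingCost,
                                       unsigned ExtractCondCost,
                                       unsigned BranchCost, unsigned PHICost) {
      // One address extraction and one scalar load/store per lane.
      unsigned Cost = NumElts * (ExtractAddrCost + ScalarMemOpCost);
      // Building the result vector (loads) or extracting the lanes (stores).
      Cost += PackingCost;
      // With a variable mask, each lane also needs its condition extracted
      // plus control flow (branch + PHI) to skip disabled lanes.
      if (VariableMask)
        Cost += NumElts * (ExtractCondCost + BranchCost + PHICost);
      return Cost;
    }

With the per-operation costs AArch64 currently reports, this works out to 17
for the 4-element constant-mask cases and 29 for the variable-mask cases in
the updated tests below.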

Reviewed By: dmgreen

Differential Revision: https://reviews.llvm.org/D91984

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/BasicTTIImpl.h
    llvm/test/Analysis/CostModel/AArch64/mem-op-cost-model.ll
    llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index fce025aa75f8..0b6b2655e0d5 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -991,6 +991,51 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return Cost;
   }
 
+  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+                                  const Value *Ptr, bool VariableMask,
+                                  Align Alignment, TTI::TargetCostKind CostKind,
+                                  const Instruction *I = nullptr) {
+    auto *VT = cast<FixedVectorType>(DataTy);
+    // Assume the target does not have support for gather/scatter operations
+    // and provide a rough estimate.
+    //
+    // First, compute the cost of extracting the individual addresses and the
+    // individual memory operations.
+    int LoadCost =
+        VT->getNumElements() *
+        (getVectorInstrCost(
+             Instruction::ExtractElement,
+             FixedVectorType::get(PointerType::get(VT->getElementType(), 0),
+                                  VT->getNumElements()),
+             -1) +
+         getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
+
+    // Next, compute the cost of packing the result in a vector.
+    int PackingCost = getScalarizationOverhead(VT, Opcode != Instruction::Store,
+                                               Opcode == Instruction::Store);
+
+    int ConditionalCost = 0;
+    if (VariableMask) {
+      // Compute the cost of conditionally executing the memory operations with
+      // variable masks. This includes extracting the individual conditions,
+      // branches and PHIs to combine the results.
+      // NOTE: Estimating the cost of conditionally executing the memory
+      // operations accurately is quite difficult and the current solution
+      // provides a very rough estimate only.
+      ConditionalCost =
+          VT->getNumElements() *
+          (getVectorInstrCost(
+               Instruction::ExtractElement,
+               FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
+                                    VT->getNumElements()),
+               -1) +
+           getCFInstrCost(Instruction::Br, CostKind) +
+           getCFInstrCost(Instruction::PHI, CostKind));
+    }
+
+    return LoadCost + PackingCost + ConditionalCost;
+  }
+
   unsigned getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,

diff --git a/llvm/test/Analysis/CostModel/AArch64/mem-op-cost-model.ll b/llvm/test/Analysis/CostModel/AArch64/mem-op-cost-model.ll
index 9e9081fe4aea..78f6b8d1a03d 100644
--- a/llvm/test/Analysis/CostModel/AArch64/mem-op-cost-model.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/mem-op-cost-model.ll
@@ -90,10 +90,10 @@ define <8 x i64> @load_512(<8 x i64>* %ptr) {
 declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32 immarg, <4 x i1>, <4 x i8>)
 define <4 x i8> @gather_load_4xi8_constant_mask(<4 x i8*> %ptrs) {
 ; CHECK:         gather_load_4xi8_constant_mask
-; CHECK-NEON:    Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
-; CHECK-SVE-128: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
-; CHECK-SVE-256: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
-; CHECK-SVE-512: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
+; CHECK-NEON:    Cost Model: Found an estimated cost of 17 for instruction:  %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
+; CHECK-SVE-128: Cost Model: Found an estimated cost of 17 for instruction:  %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
+; CHECK-SVE-256: Cost Model: Found an estimated cost of 17 for instruction:  %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
+; CHECK-SVE-512: Cost Model: Found an estimated cost of 17 for instruction:  %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
 ;
   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   ret <4 x i8> %lv
@@ -101,10 +101,10 @@ define <4 x i8> @gather_load_4xi8_constant_mask(<4 x i8*> %ptrs) {
 
 define <4 x i8> @gather_load_4xi8_variable_mask(<4 x i8*> %ptrs, <4 x i1> %cond) {
 ; CHECK:         gather_load_4xi8_variable_mask
-; CHECK-NEON:    Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
-; CHECK-SVE-128: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
-; CHECK-SVE-256: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
-; CHECK-SVE-512: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
+; CHECK-NEON:    Cost Model: Found an estimated cost of 29 for instruction:  %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
+; CHECK-SVE-128: Cost Model: Found an estimated cost of 29 for instruction:  %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
+; CHECK-SVE-256: Cost Model: Found an estimated cost of 29 for instruction:  %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
+; CHECK-SVE-512: Cost Model: Found an estimated cost of 29 for instruction:  %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8
 ;
   %lv = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> %cond, <4 x i8> undef)
   ret <4 x i8> %lv
@@ -113,10 +113,10 @@ define <4 x i8> @gather_load_4xi8_variable_mask(<4 x i8*> %ptrs, <4 x i1> %cond)
 declare void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, i32 immarg, <4 x i1>)
 define void @scatter_store_4xi8_constant_mask(<4 x i8> %val, <4 x i8*> %ptrs) {
 ; CHECK:         scatter_store_4xi8_constant_mask
-; CHECK-NEON:    Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i8.v4p0i8(
-; CHECK-SVE-128: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i8.v4p0i8(
-; CHECK-SVE-256: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i8.v4p0i8(
-; CHECK-SVE-512: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i8.v4p0i8(
+; CHECK-NEON:    Cost Model: Found an estimated cost of 17 for instruction:  call void @llvm.masked.scatter.v4i8.v4p0i8(
+; CHECK-SVE-128: Cost Model: Found an estimated cost of 17 for instruction:  call void @llvm.masked.scatter.v4i8.v4p0i8(
+; CHECK-SVE-256: Cost Model: Found an estimated cost of 17 for instruction:  call void @llvm.masked.scatter.v4i8.v4p0i8(
+; CHECK-SVE-512: Cost Model: Found an estimated cost of 17 for instruction:  call void @llvm.masked.scatter.v4i8.v4p0i8(
 ;
   call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
@@ -124,10 +124,10 @@ define void @scatter_store_4xi8_constant_mask(<4 x i8> %val, <4 x i8*> %ptrs) {
 
 define void @scatter_store_4xi8_variable_mask(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %cond) {
 ; CHECK:         scatter_store_4xi8_variable_mask
-; CHECK-NEON:    Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i8.v4p0i8(
-; CHECK-SVE-128: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i8.v4p0i8(
-; CHECK-SVE-256: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i8.v4p0i8(
-; CHECK-SVE-512: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i8.v4p0i8(
+; CHECK-NEON:    Cost Model: Found an estimated cost of 29 for instruction:  call void @llvm.masked.scatter.v4i8.v4p0i8(
+; CHECK-SVE-128: Cost Model: Found an estimated cost of 29 for instruction:  call void @llvm.masked.scatter.v4i8.v4p0i8(
+; CHECK-SVE-256: Cost Model: Found an estimated cost of 29 for instruction:  call void @llvm.masked.scatter.v4i8.v4p0i8(
+; CHECK-SVE-512: Cost Model: Found an estimated cost of 29 for instruction:  call void @llvm.masked.scatter.v4i8.v4p0i8(
 ;
   call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, i32 1, <4 x i1> %cond)
   ret void
@@ -136,10 +136,10 @@ define void @scatter_store_4xi8_variable_mask(<4 x i8> %val, <4 x i8*> %ptrs, <4
 declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32 immarg, <4 x i1>, <4 x i32>)
 define <4 x i32> @gather_load_4xi32_constant_mask(<4 x i32*> %ptrs) {
 ; CHECK:         gather_load_4xi32_constant_mask
-; CHECK-NEON:    Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
-; CHECK-SVE-128: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
-; CHECK-SVE-256: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
-; CHECK-SVE-512: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
+; CHECK-NEON:    Cost Model: Found an estimated cost of 17 for instruction:  %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
+; CHECK-SVE-128: Cost Model: Found an estimated cost of 17 for instruction:  %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
+; CHECK-SVE-256: Cost Model: Found an estimated cost of 17 for instruction:  %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
+; CHECK-SVE-512: Cost Model: Found an estimated cost of 17 for instruction:  %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
 ;
   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %lv
@@ -147,10 +147,10 @@ define <4 x i32> @gather_load_4xi32_constant_mask(<4 x i32*> %ptrs) {
 
 define <4 x i32> @gather_load_4xi32_variable_mask(<4 x i32*> %ptrs, <4 x i1> %cond) {
 ; CHECK:         gather_load_4xi32_variable_mask
-; CHECK-NEON:    Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
-; CHECK-SVE-128: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
-; CHECK-SVE-256: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
-; CHECK-SVE-512: Cost Model: Found an estimated cost of 1 for instruction:   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
+; CHECK-NEON:    Cost Model: Found an estimated cost of 29 for instruction:  %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
+; CHECK-SVE-128: Cost Model: Found an estimated cost of 29 for instruction:  %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
+; CHECK-SVE-256: Cost Model: Found an estimated cost of 29 for instruction:  %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
+; CHECK-SVE-512: Cost Model: Found an estimated cost of 29 for instruction:  %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32
 ;
   %lv = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 1, <4 x i1> %cond, <4 x i32> undef)
   ret <4 x i32> %lv
@@ -159,10 +159,10 @@ define <4 x i32> @gather_load_4xi32_variable_mask(<4 x i32*> %ptrs, <4 x i1> %co
 declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32 immarg, <4 x i1>)
 define void @scatter_store_4xi32_constant_mask(<4 x i32> %val, <4 x i32*> %ptrs) {
 ; CHECK:         scatter_store_4xi32_constant_mask
-; CHECK-NEON:    Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i32.v4p0i32(
-; CHECK-SVE-128: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i32.v4p0i32(
-; CHECK-SVE-256: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i32.v4p0i32(
-; CHECK-SVE-512: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i32.v4p0i32(
+; CHECK-NEON:    Cost Model: Found an estimated cost of 17 for instruction:  call void @llvm.masked.scatter.v4i32.v4p0i32(
+; CHECK-SVE-128: Cost Model: Found an estimated cost of 17 for instruction:  call void @llvm.masked.scatter.v4i32.v4p0i32(
+; CHECK-SVE-256: Cost Model: Found an estimated cost of 17 for instruction:  call void @llvm.masked.scatter.v4i32.v4p0i32(
+; CHECK-SVE-512: Cost Model: Found an estimated cost of 17 for instruction:  call void @llvm.masked.scatter.v4i32.v4p0i32(
 ;
   call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
@@ -170,10 +170,10 @@ define void @scatter_store_4xi32_constant_mask(<4 x i32> %val, <4 x i32*> %ptrs)
 
 define void @scatter_store_4xi32_variable_mask(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %cond) {
 ; CHECK:         scatter_store_4xi32_variable_mask
-; CHECK-NEON:    Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i32.v4p0i32(
-; CHECK-SVE-128: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i32.v4p0i32(
-; CHECK-SVE-256: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i32.v4p0i32(
-; CHECK-SVE-512: Cost Model: Found an estimated cost of 1 for instruction:   call void @llvm.masked.scatter.v4i32.v4p0i32(
+; CHECK-NEON:    Cost Model: Found an estimated cost of 29 for instruction:  call void @llvm.masked.scatter.v4i32.v4p0i32(
+; CHECK-SVE-128: Cost Model: Found an estimated cost of 29 for instruction:  call void @llvm.masked.scatter.v4i32.v4p0i32(
+; CHECK-SVE-256: Cost Model: Found an estimated cost of 29 for instruction:  call void @llvm.masked.scatter.v4i32.v4p0i32(
+; CHECK-SVE-512: Cost Model: Found an estimated cost of 29 for instruction:  call void @llvm.masked.scatter.v4i32.v4p0i32(
 ;
   call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, i32 1, <4 x i1> %cond)
   ret void

diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll
index 021f9d5d9419..0c16daa1ef3d 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll
@@ -12,10 +12,7 @@ target triple = "aarch64--linux-gnu"
 ; REMARK-NEXT:    - String: 'Vectorized horizontal reduction with cost '
 ; REMARK-NEXT:    - Cost: '-7'
 ;
-; REMARK-LABEL: Function: gather_load
-; REMARK:       Args:
-; REMARK-NEXT:    - String: 'Stores SLP vectorized with cost
-; REMARK-NEXT:    - Cost: '-2'
+; REMARK-NOT: Function: gather_load
 
 define internal i32 @gather_multiple_use(i32 %a, i32 %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: @gather_multiple_use(
@@ -61,11 +58,25 @@ define internal i32 @gather_multiple_use(i32 %a, i32 %b, i32 %c, i32 %d) {
 define void @gather_load(i16* noalias %ptr) {
 ; CHECK-LABEL: @gather_load(
 ; CHECK-NEXT:    [[ARRAYIDX182:%.*]] = getelementptr inbounds i16, i16* [[PTR:%.*]], i64 1
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> <i8* getelementptr inbounds ([6 x [258 x i8]], [6 x [258 x i8]]* @data, i64 0, i64 1, i64 0), i8* getelementptr inbounds ([6 x [258 x i8]], [6 x [258 x i8]]* @data, i64 0, i64 2, i64 1), i8* getelementptr inbounds ([6 x [258 x i8]], [6 x [258 x i8]]* @data, i64 0, i64 3, i64 2), i8* getelementptr inbounds ([6 x [258 x i8]], [6 x [258 x i8]]* @data, i64 0, i64 4, i64 3)>, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
-; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i16>
-; CHECK-NEXT:    [[TMP3:%.*]] = add nuw nsw <4 x i16> [[TMP2]], <i16 10, i16 20, i16 30, i16 40>
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i16* [[ARRAYIDX182]] to <4 x i16>*
-; CHECK-NEXT:    store <4 x i16> [[TMP3]], <4 x i16>* [[TMP4]], align 2
+; CHECK-NEXT:    [[ARRAYIDX183:%.*]] = getelementptr inbounds i16, i16* [[PTR]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX184:%.*]] = getelementptr inbounds i16, i16* [[PTR]], i64 3
+; CHECK-NEXT:    [[ARRAYIDX185:%.*]] = getelementptr inbounds i16, i16* [[PTR]], i64 4
+; CHECK-NEXT:    [[L0:%.*]] = load i8, i8* getelementptr inbounds ([6 x [258 x i8]], [6 x [258 x i8]]* @data, i64 0, i64 1, i64 0), align 1
+; CHECK-NEXT:    [[CONV150:%.*]] = zext i8 [[L0]] to i16
+; CHECK-NEXT:    [[ADD152:%.*]] = add nuw nsw i16 [[CONV150]], 10
+; CHECK-NEXT:    [[L1:%.*]] = load i8, i8* getelementptr inbounds ([6 x [258 x i8]], [6 x [258 x i8]]* @data, i64 0, i64 2, i64 1), align 1
+; CHECK-NEXT:    [[CONV156:%.*]] = zext i8 [[L1]] to i16
+; CHECK-NEXT:    [[ADD158:%.*]] = add nuw nsw i16 [[CONV156]], 20
+; CHECK-NEXT:    [[L2:%.*]] = load i8, i8* getelementptr inbounds ([6 x [258 x i8]], [6 x [258 x i8]]* @data, i64 0, i64 3, i64 2), align 1
+; CHECK-NEXT:    [[CONV162:%.*]] = zext i8 [[L2]] to i16
+; CHECK-NEXT:    [[ADD164:%.*]] = add nuw nsw i16 [[CONV162]], 30
+; CHECK-NEXT:    [[L3:%.*]] = load i8, i8* getelementptr inbounds ([6 x [258 x i8]], [6 x [258 x i8]]* @data, i64 0, i64 4, i64 3), align 1
+; CHECK-NEXT:    [[CONV168:%.*]] = zext i8 [[L3]] to i16
+; CHECK-NEXT:    [[ADD170:%.*]] = add nuw nsw i16 [[CONV168]], 40
+; CHECK-NEXT:    store i16 [[ADD152]], i16* [[ARRAYIDX182]], align 2
+; CHECK-NEXT:    store i16 [[ADD158]], i16* [[ARRAYIDX183]], align 2
+; CHECK-NEXT:    store i16 [[ADD164]], i16* [[ARRAYIDX184]], align 2
+; CHECK-NEXT:    store i16 [[ADD170]], i16* [[ARRAYIDX185]], align 2
 ; CHECK-NEXT:    ret void
 ;
   %arrayidx182 = getelementptr inbounds i16, i16* %ptr, i64 1


        

