[llvm] [NVPTX] Optimize v16i8 reductions (PR #67322)

Pierre-Andre Saulais via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 25 05:52:47 PDT 2023


https://github.com/pasaulais updated https://github.com/llvm/llvm-project/pull/67322

>From e22188030e7d61900974c4ff54ee26b59823839a Mon Sep 17 00:00:00 2001
From: Pierre-Andre Saulais <pierre-andre at codeplay.com>
Date: Thu, 21 Sep 2023 11:42:19 +0100
Subject: [PATCH 1/2] [NVPTX] Preserve v16i8 vector loads when legalizing

This is done by lowering v16i8 loads into LoadV4 operations with i32
results instead of letting ReplaceLoadVector split them into smaller
loads during legalization. The combine runs at dag-combine1 time, so
that vector operations with i8 elements can be optimised away instead
of being needlessly split during legalization, which involves storing
to the stack and loading it back.
---
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp   |  46 ++++++-
 .../test/CodeGen/NVPTX/LoadStoreVectorizer.ll | 126 ++++++++++++++++++
 2 files changed, 171 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index e9401d4b93c371e..ece3e10977d1e34 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -673,7 +673,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
 
   // We have some custom DAG combine patterns for these nodes
   setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::FADD, ISD::MUL, ISD::SHL,
-                       ISD::SREM, ISD::UREM, ISD::EXTRACT_VECTOR_ELT});
+                       ISD::SREM, ISD::UREM, ISD::EXTRACT_VECTOR_ELT,
+                       ISD::LOAD});
 
   // setcc for f16x2 and bf16x2 needs special handling to prevent
   // legalizer's attempt to scalarize it due to v2i1 not being legal.
@@ -5294,6 +5295,47 @@ static SDValue PerformEXTRACTCombine(SDNode *N,
   return Result;
 }
 
+static SDValue PerformLOADCombine(SDNode *N,
+                                  TargetLowering::DAGCombinerInfo &DCI) {
+  SelectionDAG &DAG = DCI.DAG;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+
+  // Lower a v16i8 load into a LoadV4 operation with i32 results instead of
+  // letting ReplaceLoadVector split it into smaller loads during legalization.
+  // This is done at dag-combine1 time, so that vector operations with i8
+  // elements can be optimised away instead of being needlessly split during
+  // legalization, which involves storing to the stack and loading it back.
+  EVT VT = N->getValueType(0);
+  if (VT != MVT::v16i8)
+    return SDValue();
+
+  SDLoc DL(N);
+
+  // Create a v4i32 vector load operation, effectively <4 x v4i8>.
+  unsigned Opc = NVPTXISD::LoadV4;
+  EVT NewVT = MVT::v4i32;
+  EVT EleVT = NewVT.getVectorElementType();
+  unsigned NumEles = NewVT.getVectorNumElements();
+  EVT RetVTs[] = {EleVT, EleVT, EleVT, EleVT, MVT::Other};
+  SDVTList RetVTList = DAG.getVTList(RetVTs);
+  SmallVector<SDValue, 8> Ops(N->op_begin(), N->op_end());
+  Ops.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
+  SDValue NewLoad = DAG.getMemIntrinsicNode(Opc, DL, RetVTList, Ops, NewVT,
+                                            LD->getMemOperand());
+  SDValue NewChain = NewLoad.getValue(NumEles);
+
+  // Create a vector of the same type returned by the original load.
+  SmallVector<SDValue, 4> Eles;
+  SDValue Vec;
+  for (unsigned i = 0; i < NumEles; i++)
+    Eles.push_back(NewLoad.getValue(i));
+  Vec = DAG.getBuildVector(NewVT, DL, Eles);
+  Vec = DCI.DAG.getBitcast(VT, Vec);
+
+  // Wrap the new vector and chain from the new load.
+  return DCI.DAG.getMergeValues({Vec, NewChain}, DL);
+}
+
 SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
   CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
@@ -5313,6 +5355,8 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
       return PerformREMCombine(N, DCI, OptLevel);
     case ISD::SETCC:
       return PerformSETCCCombine(N, DCI);
+    case ISD::LOAD:
+      return PerformLOADCombine(N, DCI);
     case NVPTXISD::StoreRetval:
     case NVPTXISD::StoreRetvalV2:
     case NVPTXISD::StoreRetvalV4:
diff --git a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
index 4f13b6d9d1a8a9d..c1af2f12424b1f3 100644
--- a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
+++ b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
@@ -52,3 +52,129 @@ define float @ff(ptr %p) {
   %sum = fadd float %sum3, %v4
   ret float %sum
 }
+
+define void @combine_v16i8(ptr noundef align 16 %ptr1, ptr noundef align 16 %ptr2) {
+  ; ENABLED-LABEL: combine_v16i8
+  ; ENABLED: ld.v4.u32
+  ; ENABLED: st.u32
+  %val0 = load i8, ptr %ptr1, align 16
+  %ptr1.1 = getelementptr inbounds i8, ptr %ptr1, i64 1
+  %val1 = load i8, ptr %ptr1.1, align 1
+  %ptr1.2 = getelementptr inbounds i8, ptr %ptr1, i64 2
+  %val2 = load i8, ptr %ptr1.2, align 2
+  %ptr1.3 = getelementptr inbounds i8, ptr %ptr1, i64 3
+  %val3 = load i8, ptr %ptr1.3, align 1
+  %ptr1.4 = getelementptr inbounds i8, ptr %ptr1, i64 4
+  %val4 = load i8, ptr %ptr1.4, align 4
+  %ptr1.5 = getelementptr inbounds i8, ptr %ptr1, i64 5
+  %val5 = load i8, ptr %ptr1.5, align 1
+  %ptr1.6 = getelementptr inbounds i8, ptr %ptr1, i64 6
+  %val6 = load i8, ptr %ptr1.6, align 2
+  %ptr1.7 = getelementptr inbounds i8, ptr %ptr1, i64 7
+  %val7 = load i8, ptr %ptr1.7, align 1
+  %ptr1.8 = getelementptr inbounds i8, ptr %ptr1, i64 8
+  %val8 = load i8, ptr %ptr1.8, align 8
+  %ptr1.9 = getelementptr inbounds i8, ptr %ptr1, i64 9
+  %val9 = load i8, ptr %ptr1.9, align 1
+  %ptr1.10 = getelementptr inbounds i8, ptr %ptr1, i64 10
+  %val10 = load i8, ptr %ptr1.10, align 2
+  %ptr1.11 = getelementptr inbounds i8, ptr %ptr1, i64 11
+  %val11 = load i8, ptr %ptr1.11, align 1
+  %ptr1.12 = getelementptr inbounds i8, ptr %ptr1, i64 12
+  %val12 = load i8, ptr %ptr1.12, align 4
+  %ptr1.13 = getelementptr inbounds i8, ptr %ptr1, i64 13
+  %val13 = load i8, ptr %ptr1.13, align 1
+  %ptr1.14 = getelementptr inbounds i8, ptr %ptr1, i64 14
+  %val14 = load i8, ptr %ptr1.14, align 2
+  %ptr1.15 = getelementptr inbounds i8, ptr %ptr1, i64 15
+  %val15 = load i8, ptr %ptr1.15, align 1
+  %lane0 = zext i8 %val0 to i32
+  %lane1 = zext i8 %val1 to i32
+  %lane2 = zext i8 %val2 to i32
+  %lane3 = zext i8 %val3 to i32
+  %lane4 = zext i8 %val4 to i32
+  %lane5 = zext i8 %val5 to i32
+  %lane6 = zext i8 %val6 to i32
+  %lane7 = zext i8 %val7 to i32
+  %lane8 = zext i8 %val8 to i32
+  %lane9 = zext i8 %val9 to i32
+  %lane10 = zext i8 %val10 to i32
+  %lane11 = zext i8 %val11 to i32
+  %lane12 = zext i8 %val12 to i32
+  %lane13 = zext i8 %val13 to i32
+  %lane14 = zext i8 %val14 to i32
+  %lane15 = zext i8 %val15 to i32
+  %red.1 = add i32 %lane0, %lane1
+  %red.2 = add i32 %red.1, %lane2
+  %red.3 = add i32 %red.2, %lane3
+  %red.4 = add i32 %red.3, %lane4
+  %red.5 = add i32 %red.4, %lane5
+  %red.6 = add i32 %red.5, %lane6
+  %red.7 = add i32 %red.6, %lane7
+  %red.8 = add i32 %red.7, %lane8
+  %red.9 = add i32 %red.8, %lane9
+  %red.10 = add i32 %red.9, %lane10
+  %red.11 = add i32 %red.10, %lane11
+  %red.12 = add i32 %red.11, %lane12
+  %red.13 = add i32 %red.12, %lane13
+  %red.14 = add i32 %red.13, %lane14
+  %red = add i32 %red.14, %lane15
+  store i32 %red, ptr %ptr2, align 4
+  ret void
+}
+
+define void @combine_v8i16(ptr noundef align 16 %ptr1, ptr noundef align 16 %ptr2) {
+  ; ENABLED-LABEL: combine_v8i16
+  ; ENABLED: ld.v4.b32
+  ; ENABLED: st.u32
+  %val0 = load i16, ptr %ptr1, align 16
+  %ptr1.1 = getelementptr inbounds i16, ptr %ptr1, i64 1
+  %val1 = load i16, ptr %ptr1.1, align 2
+  %ptr1.2 = getelementptr inbounds i16, ptr %ptr1, i64 2
+  %val2 = load i16, ptr %ptr1.2, align 4
+  %ptr1.3 = getelementptr inbounds i16, ptr %ptr1, i64 3
+  %val3 = load i16, ptr %ptr1.3, align 2
+  %ptr1.4 = getelementptr inbounds i16, ptr %ptr1, i64 4
+  %val4 = load i16, ptr %ptr1.4, align 4
+  %ptr1.5 = getelementptr inbounds i16, ptr %ptr1, i64 5
+  %val5 = load i16, ptr %ptr1.5, align 2
+  %ptr1.6 = getelementptr inbounds i16, ptr %ptr1, i64 6
+  %val6 = load i16, ptr %ptr1.6, align 4
+  %ptr1.7 = getelementptr inbounds i16, ptr %ptr1, i64 7
+  %val7 = load i16, ptr %ptr1.7, align 2
+  %lane0 = zext i16 %val0 to i32
+  %lane1 = zext i16 %val1 to i32
+  %lane2 = zext i16 %val2 to i32
+  %lane3 = zext i16 %val3 to i32
+  %lane4 = zext i16 %val4 to i32
+  %lane5 = zext i16 %val5 to i32
+  %lane6 = zext i16 %val6 to i32
+  %lane7 = zext i16 %val7 to i32
+  %red.1 = add i32 %lane0, %lane1
+  %red.2 = add i32 %red.1, %lane2
+  %red.3 = add i32 %red.2, %lane3
+  %red.4 = add i32 %red.3, %lane4
+  %red.5 = add i32 %red.4, %lane5
+  %red.6 = add i32 %red.5, %lane6
+  %red = add i32 %red.6, %lane7
+  store i32 %red, ptr %ptr2, align 4
+  ret void
+}
+
+define void @combine_v4i32(ptr noundef align 16 %ptr1, ptr noundef align 16 %ptr2) {
+  ; ENABLED-LABEL: combine_v4i32
+  ; ENABLED: ld.v4.u32
+  ; ENABLED: st.u32
+  %val0 = load i32, ptr %ptr1, align 16
+  %ptr1.1 = getelementptr inbounds i32, ptr %ptr1, i64 1
+  %val1 = load i32, ptr %ptr1.1, align 4
+  %ptr1.2 = getelementptr inbounds i32, ptr %ptr1, i64 2
+  %val2 = load i32, ptr %ptr1.2, align 8
+  %ptr1.3 = getelementptr inbounds i32, ptr %ptr1, i64 3
+  %val3 = load i32, ptr %ptr1.3, align 4
+  %red.1 = add i32 %val0, %val1
+  %red.2 = add i32 %red.1, %val2
+  %red = add i32 %red.2, %val3
+  store i32 %red, ptr %ptr2, align 4
+  ret void
+}

>From 012e4ccb698cda3a7110db98b1b9d526355d53a1 Mon Sep 17 00:00:00 2001
From: Pierre-Andre Saulais <pierre-andre at codeplay.com>
Date: Thu, 21 Sep 2023 11:56:36 +0100
Subject: [PATCH 2/2] [NVPTX] More efficient extraction of i8 vector elements

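In scalar terms, the cheaper extraction is a shift plus a mask applied
to the i32 word holding the v4i8 value. A minimal C++ sketch of that
arithmetic (hypothetical helper, not part of the patch):

    #include <cstdint>

    // Zero-extending lane I of a v4i8 held in an i32 word:
    // (and (srl Word, I*8), 0xff), matching the combine's output and
    // the bfe.u32 / shr.u32 / and.b32 instructions seen in the tests.
    // Little-endian lane order is assumed.
    static uint32_t extractZExtByte(uint32_t Word, unsigned I) {
      return (Word >> (I * 8)) & 0xffu;
    }
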
---
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp |  57 +++++++++-
 llvm/test/CodeGen/NVPTX/v4i8-operations.ll  | 114 ++++++++++++++++++++
 2 files changed, 169 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/NVPTX/v4i8-operations.ll

diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index ece3e10977d1e34..2ae16f0d992f315 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -673,8 +673,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
 
   // We have some custom DAG combine patterns for these nodes
   setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::FADD, ISD::MUL, ISD::SHL,
-                       ISD::SREM, ISD::UREM, ISD::EXTRACT_VECTOR_ELT,
-                       ISD::LOAD});
+                       ISD::SREM, ISD::UREM, ISD::EXTRACT_VECTOR_ELT, ISD::LOAD,
+                       ISD::ZERO_EXTEND});
 
   // setcc for f16x2 and bf16x2 needs special handling to prevent
   // legalizer's attempt to scalarize it due to v2i1 not being legal.
@@ -5336,6 +5336,57 @@ static SDValue PerformLOADCombine(SDNode *N,
   return DCI.DAG.getMergeValues({Vec, NewChain}, DL);
 }
 
+static SDValue PerformZERO_EXTENDCombine(SDNode *N,
+                                         TargetLowering::DAGCombinerInfo &DCI) {
+  // Transforms (i32 (zext (i8 (vector_extract (v*i8 v) i))))
+  // into:      (and (srl (i32 ...) amount) 0xff)
+  //
+  // This has to be done before legalization, since i8 is illegal.
+
+  SelectionDAG &DAG = DCI.DAG;
+
+  SDValue Src = N->getOperand(0);
+  EVT SrcVT = Src.getValueType();
+  EVT DstVT = N->getValueType(0);
+  if (DstVT != MVT::i32 || SrcVT != MVT::i8 ||
+      Src->getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
+    return SDValue();
+  }
+  SDValue Vec = Src.getOperand(0);
+  SDValue Idx = Src.getOperand(1);
+  ConstantSDNode *ConstIdx = dyn_cast<ConstantSDNode>(Idx.getNode());
+  EVT VecVT = Vec.getValueType();
+  if (!ConstIdx || (VecVT.getVectorNumElements() < 4))
+    return SDValue();
+
+  SDLoc DL(N);
+  SDValue Ret = Vec;
+
+  // Extract one i32 word worth of elements from the vector.
+  unsigned EleIdx = ConstIdx->getZExtValue();
+  EVT WordVT = MVT::i32;
+  unsigned NumWords = VecVT.getSizeInBits() / WordVT.getSizeInBits();
+  unsigned NumElePerWord = VecVT.getVectorNumElements() / NumWords;
+  if (NumWords > 1) {
+    EVT WordVecVT = EVT::getVectorVT(*DAG.getContext(), WordVT, NumWords);
+    SDValue WordVec = DAG.getBitcast(WordVecVT, Ret);
+    SDValue WordIdx = DAG.getConstant(EleIdx / NumElePerWord, DL, MVT::i32);
+    Ret = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, WordVT, {WordVec, WordIdx});
+  } else {
+    Ret = DAG.getBitcast(WordVT, Ret);
+  }
+  EleIdx = EleIdx % NumElePerWord;
+
+  // Extract the desired i8 element from that i32 word and zero-extend it.
+  unsigned ShiftAmount = EleIdx * 8;
+  SDValue Shift = DAG.getConstant(ShiftAmount, DL, MVT::i32);
+  Ret = DAG.getNode(ISD::SRL, DL, DstVT, {Ret, Shift});
+
+  SDValue Mask = DAG.getConstant(0xff, DL, MVT::i32);
+  Ret = DAG.getNode(ISD::AND, DL, DstVT, {Ret, Mask});
+  return Ret;
+}
+
 SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
   CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
@@ -5357,6 +5408,8 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
       return PerformSETCCCombine(N, DCI);
     case ISD::LOAD:
       return PerformLOADCombine(N, DCI);
+    case ISD::ZERO_EXTEND:
+      return PerformZERO_EXTENDCombine(N, DCI);
     case NVPTXISD::StoreRetval:
     case NVPTXISD::StoreRetvalV2:
     case NVPTXISD::StoreRetvalV4:
diff --git a/llvm/test/CodeGen/NVPTX/v4i8-operations.ll b/llvm/test/CodeGen/NVPTX/v4i8-operations.ll
new file mode 100644
index 000000000000000..65a36f68ee82d54
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/v4i8-operations.ll
@@ -0,0 +1,114 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
+target triple = "nvptx64-nvidia-cuda"
+
+define void @extract_zext_i8_from_v4i8(i32 %input, ptr noundef align 16 %out) {
+  ; CHECK-LABEL: extract_zext_i8_from_v4i8
+  ; CHECK: ld.param.u32 [[I:%r[0-9]+]], [extract_zext_i8_from_v4i8_param_0];
+  ; CHECK: and.b32 [[E0:%r[0-9]+]], [[I]], 255;
+  ; CHECK: bfe.u32 [[E1:%r[0-9]+]], [[I]], 8, 8;
+  ; CHECK: bfe.u32 [[E2:%r[0-9]+]], [[I]], 16, 8;
+  ; CHECK: shr.u32 [[E3:%r[0-9]+]], [[I]], 24;
+  ; CHECK: st.u32 [{{%rd[0-9]+}}], [[E0]];
+  ; CHECK: st.u32 [{{%rd[0-9]+}}+4], [[E1]];
+  ; CHECK: st.u32 [{{%rd[0-9]+}}+8], [[E2]];
+  ; CHECK: st.u32 [{{%rd[0-9]+}}+12], [[E3]];
+  %vec = bitcast i32 %input to <4 x i8>
+  %el.0 = extractelement <4 x i8> %vec, i32 0
+  %el.1 = extractelement <4 x i8> %vec, i32 1
+  %el.2 = extractelement <4 x i8> %vec, i32 2
+  %el.3 = extractelement <4 x i8> %vec, i32 3
+  %ext.0 = zext i8 %el.0 to i32
+  %ext.1 = zext i8 %el.1 to i32
+  %ext.2 = zext i8 %el.2 to i32
+  %ext.3 = zext i8 %el.3 to i32
+  %addr.0 = getelementptr inbounds i32, ptr %out, i64 0
+  %addr.1 = getelementptr inbounds i32, ptr %out, i64 1
+  %addr.2 = getelementptr inbounds i32, ptr %out, i64 2
+  %addr.3 = getelementptr inbounds i32, ptr %out, i64 3
+  store i32 %ext.0, ptr %addr.0, align 4
+  store i32 %ext.1, ptr %addr.1, align 4
+  store i32 %ext.2, ptr %addr.2, align 4
+  store i32 %ext.3, ptr %addr.3, align 4
+  ret void
+}
+
+define void @preserve_v16i8_v4i32(ptr noundef align 16 %ptr1, ptr noundef align 16 %ptr2, i32 %idx) {
+  ; CHECK-LABEL: preserve_v16i8_v4i32
+  
+  ; CHECK: ld.v4.u32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [{{%rd[0-9]+}}];
+  
+  ; CHECK-DAG: shr.u32 %r{{[0-9]+}}, [[R1]], 24
+  ; CHECK-DAG: shr.u32 %r{{[0-9]+}}, [[R2]], 24
+  ; CHECK-DAG: shr.u32 %r{{[0-9]+}}, [[R3]], 24
+  ; CHECK-DAG: shr.u32 %r{{[0-9]+}}, [[R4]], 24
+  
+  ; CHECK-DAG: and.b32 %r{{[0-9]+}}, [[R1]], 255
+  ; CHECK-DAG: and.b32 %r{{[0-9]+}}, [[R2]], 255
+  ; CHECK-DAG: and.b32 %r{{[0-9]+}}, [[R3]], 255
+  ; CHECK-DAG: and.b32 %r{{[0-9]+}}, [[R4]], 255
+  
+  ; CHECK-DAG: bfe.u32 %r{{[0-9]+}}, [[R1]], 8, 8
+  ; CHECK-DAG: bfe.u32 %r{{[0-9]+}}, [[R2]], 8, 8
+  ; CHECK-DAG: bfe.u32 %r{{[0-9]+}}, [[R3]], 8, 8
+  ; CHECK-DAG: bfe.u32 %r{{[0-9]+}}, [[R4]], 8, 8
+  
+  ; CHECK-DAG: bfe.u32 %r{{[0-9]+}}, [[R1]], 16, 8
+  ; CHECK-DAG: bfe.u32 %r{{[0-9]+}}, [[R2]], 16, 8
+  ; CHECK-DAG: bfe.u32 %r{{[0-9]+}}, [[R3]], 16, 8
+  ; CHECK-DAG: bfe.u32 %r{{[0-9]+}}, [[R4]], 16, 8
+
+  ; CHECK: st.u32
+
+  %vec = load <16 x i8>, ptr %ptr1, align 16
+  %val0 = extractelement <16 x i8> %vec, i32 0
+  %val1 = extractelement <16 x i8> %vec, i32 1
+  %val2 = extractelement <16 x i8> %vec, i32 2
+  %val3 = extractelement <16 x i8> %vec, i32 3
+  %val4 = extractelement <16 x i8> %vec, i32 4
+  %val5 = extractelement <16 x i8> %vec, i32 5
+  %val6 = extractelement <16 x i8> %vec, i32 6
+  %val7 = extractelement <16 x i8> %vec, i32 7
+  %val8 = extractelement <16 x i8> %vec, i32 8
+  %val9 = extractelement <16 x i8> %vec, i32 9
+  %val10 = extractelement <16 x i8> %vec, i32 10
+  %val11 = extractelement <16 x i8> %vec, i32 11
+  %val12 = extractelement <16 x i8> %vec, i32 12
+  %val13 = extractelement <16 x i8> %vec, i32 13
+  %val14 = extractelement <16 x i8> %vec, i32 14
+  %val15 = extractelement <16 x i8> %vec, i32 15
+  %lane0 = zext i8 %val0 to i32
+  %lane1 = zext i8 %val1 to i32
+  %lane2 = zext i8 %val2 to i32
+  %lane3 = zext i8 %val3 to i32
+  %lane4 = zext i8 %val4 to i32
+  %lane5 = zext i8 %val5 to i32
+  %lane6 = zext i8 %val6 to i32
+  %lane7 = zext i8 %val7 to i32
+  %lane8 = zext i8 %val8 to i32
+  %lane9 = zext i8 %val9 to i32
+  %lane10 = zext i8 %val10 to i32
+  %lane11 = zext i8 %val11 to i32
+  %lane12 = zext i8 %val12 to i32
+  %lane13 = zext i8 %val13 to i32
+  %lane14 = zext i8 %val14 to i32
+  %lane15 = zext i8 %val15 to i32
+  %red.1 = add i32 %lane0, %lane1
+  %red.2 = add i32 %red.1, %lane2
+  %red.3 = add i32 %red.2, %lane3
+  %red.4 = add i32 %red.3, %lane4
+  %red.5 = add i32 %red.4, %lane5
+  %red.6 = add i32 %red.5, %lane6
+  %red.7 = add i32 %red.6, %lane7
+  %red.8 = add i32 %red.7, %lane8
+  %red.9 = add i32 %red.8, %lane9
+  %red.10 = add i32 %red.9, %lane10
+  %red.11 = add i32 %red.10, %lane11
+  %red.12 = add i32 %red.11, %lane12
+  %red.13 = add i32 %red.12, %lane13
+  %red.14 = add i32 %red.13, %lane14
+  %red = add i32 %red.14, %lane15
+  store i32 %red, ptr %ptr2, align 4
+  ret void
+}
+
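Taken together, the reduction that the preserve_v16i8_v4i32 test
exercises amounts to the following host-side C++ sketch (hypothetical
helper name, not part of the patch; the backend expresses the same
computation as one ld.v4.u32 plus bfe/shr/and):

    #include <cstdint>
    #include <cstring>

    // Sum all sixteen i8 lanes of a 16-byte vector: load four i32
    // words once, then extract each byte with a shift and a mask.
    static uint32_t sumV16i8(const uint8_t *Ptr) {
      uint32_t Words[4];
      std::memcpy(Words, Ptr, 16);       // ld.v4.u32
      uint32_t Sum = 0;
      for (uint32_t W : Words)
        for (unsigned I = 0; I < 4; ++I)
          Sum += (W >> (I * 8)) & 0xffu; // bfe.u32 / shr.u32 / and.b32
      return Sum;
    }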
