[PATCH] Teach the DAGCombiner how to fold an OR of two shufflevectors into a single shufflevector node

Andrea Di Biagio andrea.dibiagio at gmail.com
Wed Mar 5 08:41:54 PST 2014


Ping.

On Wed, Feb 26, 2014 at 3:07 PM, Andrea Di Biagio
<andrea.dibiagio at gmail.com> wrote:
> Hi,
>
> this patch teaches the DAGCombiner how to fold a binary OR between two
> shufflevector nodes into a single shufflevector node when possible.
>
> The combine rule is:
> - fold (or (shuffle A, V_0, Mask0), (shuffle B, V_0, Mask1)) ->
> (shuffle A, B, Mask2)
>
> (V_0 is a build_vector of AllZeros).
>
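> As a concrete example (this is test1 in the new test file), given:
>
>   %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
>   %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 1>
>   %or = or <2 x i64> %shuf1, %shuf2
>
> element 0 of the OR is %a[0] (the second shuffle contributes a zero there)
> and element 1 is %b[1], so the whole sequence folds to the single node:
>
>   %or = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32><i32 0, i32 3>
>
> which the x86 backend then selects as a blend (movsd).
>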
> This new combine rule makes it easier for the backend to identify blend
> operations. The rule also triggers when the resulting shuffle cannot be
> lowered to a blend instruction.
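>
> For instance, in test13 from the new file the two shuffles
>
>   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 1, i32 1, i32 4, i32 4>
>   %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
>   %or = or <4 x i32> %shuf1, %shuf2
>
> combine to the single shuffle
>
>   %or = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32><i32 1, i32 1, i32 6, i32 7>
>
> which is not a blend (element 0 does not keep its original position), but
> is still selectable as a single shufps.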
>
> The new 'test/CodeGen/X86/combine-or.ll' contains some test cases that may
> benefit from this transformation.
>
> Please let me know if it is OK to submit.
>
> Thanks!
> Andrea Di Biagio
> SN Systems - Sony Computer Entertainment Group
-------------- next part --------------
Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp	(revision 202988)
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp	(working copy)
@@ -3200,6 +3200,42 @@
       return N0;
     if (ISD::isBuildVectorAllOnes(N1.getNode()))
       return N1;
+
+    // fold (or (shuf A, V_0, M0), (shuf B, V_0, M1)) -> (shuf A, B, M2)
+    if (isa<ShuffleVectorSDNode>(N0) &&
+        isa<ShuffleVectorSDNode>(N1) &&
+        N0->getOperand(1) == N1->getOperand(1) &&
+        ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode())) {
+      bool CanFold = true;
+      unsigned NumElts = VT.getVectorNumElements();
+      const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
+      const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
+      SmallVector<int,4> Mask;
+      
+      for (unsigned i = 0; i != NumElts && CanFold; ++i) {
+        int M0 = SV0->getMaskElt(i);
+        int M1 = SV1->getMaskElt(i);
+   
+        // Both shuffle indexes are undef. Propagate Undef.
+        if (M0 < 0 && M1 < 0) {
+          Mask.push_back(M0);
+          continue;
+        }
+
+        if (M0 < 0 || M1 < 0 ||
+            (M0 < (int)NumElts && M1 < (int)NumElts) ||
+            (M0 >= (int)NumElts && M1 >= (int)NumElts)) {
+          CanFold = false;
+          break;
+        }
+        
+        Mask.push_back(M0 < (int)NumElts ? M0 : M1 + NumElts);
+      }
+
+      if (CanFold)
+        return DAG.getVectorShuffle(VT, SDLoc(N), N0->getOperand(0),
+                                    N1->getOperand(0), &Mask[0]);
+    }
   }
 
   // fold (or x, undef) -> -1
Index: test/CodeGen/X86/combine-or.ll
===================================================================
--- test/CodeGen/X86/combine-or.ll	(revision 0)
+++ test/CodeGen/X86/combine-or.ll	(working copy)
@@ -0,0 +1,296 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+
+; Verify that each of the following test cases is folded into a single
+; instruction which performs a blend operation.
+
+define <2 x i64> @test1(<2 x i64> %a, <2 x i64> %b) {
+  %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+  %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 1>
+  %or = or <2 x i64> %shuf1, %shuf2
+  ret <2 x i64> %or
+}
+; CHECK-LABEL: test1
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test2
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test3(<2 x i64> %a, <2 x i64> %b) {
+  %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 1>
+  %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+  %or = or <2 x i64> %shuf1, %shuf2
+  ret <2 x i64> %or
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 1, i32 2, i32 3>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test4
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <4 x i32> @test5(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 1, i32 2, i32 3>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test5
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test6
+; CHECK-NOT: xorps
+; CHECK: shufps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test7(<4 x i32> %a, <4 x i32> %b) {
+  %and1 = and <4 x i32> %a, <i32 -1, i32 -1, i32 0, i32 0>
+  %and2 = and <4 x i32> %b, <i32 0, i32 0, i32 -1, i32 -1>
+  %or = or <4 x i32> %and1, %and2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test7
+; CHECK-NOT: xorps
+; CHECK: shufps
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test8(<2 x i64> %a, <2 x i64> %b) {
+  %and1 = and <2 x i64> %a, <i64 -1, i64 0>
+  %and2 = and <2 x i64> %b, <i64 0, i64 -1>
+  %or = or <2 x i64> %and1, %and2
+  ret <2 x i64> %or
+}
+; CHECK-LABEL: test8
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
+  %and1 = and <4 x i32> %a, <i32 0, i32 0, i32 -1, i32 -1>
+  %and2 = and <4 x i32> %b, <i32 -1, i32 -1, i32 0, i32 0>
+  %or = or <4 x i32> %and1, %and2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test9
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test10(<2 x i64> %a, <2 x i64> %b) {
+  %and1 = and <2 x i64> %a, <i64 0, i64 -1>
+  %and2 = and <2 x i64> %b, <i64 -1, i64 0>
+  %or = or <2 x i64> %and1, %and2
+  ret <2 x i64> %or
+}
+; CHECK-LABEL: test10
+; CHECK-NOT: xorps
+; CHECK: movsd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test11(<4 x i32> %a, <4 x i32> %b) {
+  %and1 = and <4 x i32> %a, <i32 -1, i32 0, i32 0, i32 0>
+  %and2 = and <4 x i32> %b, <i32 0, i32 -1, i32 -1, i32 -1>
+  %or = or <4 x i32> %and1, %and2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test11
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <4 x i32> @test12(<4 x i32> %a, <4 x i32> %b) {
+  %and1 = and <4 x i32> %a, <i32 0, i32 -1, i32 -1, i32 -1>
+  %and2 = and <4 x i32> %b, <i32 -1, i32 0, i32 0, i32 0>
+  %or = or <4 x i32> %and1, %and2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test12
+; CHECK-NOT: xorps
+; CHECK: movss
+; CHECK-NEXT: ret
+
+
+; Verify that the following test cases are folded into single shuffles.
+
+define <4 x i32> @test13(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 1, i32 1, i32 4, i32 4>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: xorps
+; CHECK: shufps
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test14(<2 x i64> %a, <2 x i64> %b) {
+  %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+  %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
+  %or = or <2 x i64> %shuf1, %shuf2
+  ret <2 x i64> %or
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: pslldq
+; CHECK-NOT: por
+; CHECK: punpcklqdq
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test15(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 1>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 2, i32 1, i32 4, i32 4>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test15
+; CHECK-NOT: xorps
+; CHECK: shufps
+; CHECK-NOT: shufps
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+define <2 x i64> @test16(<2 x i64> %a, <2 x i64> %b) {
+  %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
+  %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+  %or = or <2 x i64> %shuf1, %shuf2
+  ret <2 x i64> %or
+}
+; CHECK-LABEL: test16
+; CHECK-NOT: pslldq
+; CHECK-NOT: por
+; CHECK: punpcklqdq
+; CHECK: ret
+
+
+define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 3, i32 2>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test17
+; CHECK-NOT: xorps
+; CHECK-NOT: orps
+; CHECK: movlhps
+; CHECK-NEXT: shufps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 3, i32 3>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test18
+; CHECK-NOT: pslldq
+; CHECK: movlhps
+; CHECK-NEXT: shufps
+; CHECK-NOT: xorps
+; CHECK-NOT: orps
+; CHECK: ret
+
+
+; Verify that the dag-combiner does not fold an OR of two shuffles into a single
+; shuffle instruction when the shuffle indices are not compatible.
+
+define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 2>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test19
+; CHECK: por
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test20(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 4>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test20
+; CHECK: orps
+; CHECK: ret
+
+
+define <4 x i32> @test21(<4 x i32> %a, <4 x i32> %b) {
+  %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 3>
+  %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 2, i32 2>
+  %or = or <4 x i32> %shuf1, %shuf2
+  ret <4 x i32> %or
+}
+; CHECK-LABEL: test21
+; CHECK: por
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test22(<2 x i64> %a, <2 x i64> %b) {
+  %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+  %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
+  %or = or <2 x i64> %shuf1, %shuf2
+  ret <2 x i64> %or
+}
+; CHECK-LABEL: test22
+; CHECK-NOT: xorps
+; CHECK: orps
+; CHECK-NEXT: ret
+
+
+define <2 x i64> @test23(<2 x i64> %a, <2 x i64> %b) {
+  %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
+  %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
+  %or = or <2 x i64> %shuf1, %shuf2
+  ret <2 x i64> %or
+}
+; CHECK-LABEL: test23
+; CHECK: por
+; CHECK-NEXT: ret
+
+

