[llvm] r247516 - [x86] enable machine combiner reassociations for 128-bit vector logical integer insts (2nd try)

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Sat Sep 12 12:47:50 PDT 2015


Author: spatel
Date: Sat Sep 12 14:47:50 2015
New Revision: 247516

URL: http://llvm.org/viewvc/llvm-project?rev=247516&view=rev
Log:
[x86] enable machine combiner reassociations for 128-bit vector logical integer insts (2nd try)

The changes in:
test/CodeGen/X86/machine-cp.ll
are just due to scheduling differences after some logic instructions were reassociated.
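For readers skimming the diff, the transformation being enabled looks like the following (the IR is taken from the new test added below; treat it as an illustration, not part of the original log):

  ; Serial chain: the second 'and' depends on the first, which depends on the 'add'.
  define <4 x i32> @reassociate_and_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
    %t0 = add <4 x i32> %x0, %x1
    %t1 = and <4 x i32> %x2, %t0
    %t2 = and <4 x i32> %x3, %t1
    ret <4 x i32> %t2
  }

After reassociation, codegen evaluates (%x2 & %x3) independently of (%x0 + %x1), which is why the SSE CHECK lines below expect 'pand %xmm3, %xmm2' to be combinable in parallel with the 'paddd' before the final 'pand' merges the two results.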


Added:
    llvm/trunk/test/CodeGen/X86/machine-combiner-int-vec.ll
Modified:
    llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
    llvm/trunk/test/CodeGen/X86/machine-cp.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=247516&r1=247515&r2=247516&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Sat Sep 12 14:47:50 2015
@@ -6408,6 +6408,12 @@ static bool isAssociativeAndCommutative(
   case X86::IMUL16rr:
   case X86::IMUL32rr:
   case X86::IMUL64rr:
+  case X86::PANDrr:
+  case X86::PORrr:
+  case X86::PXORrr:
+  case X86::VPANDrr:
+  case X86::VPORrr:
+  case X86::VPXORrr:
   // Normal min/max instructions are not commutative because of NaN and signed
   // zero semantics, but these are. Thus, there's no need to check for global
   // relaxed math; the instructions themselves have the properties we need.

Added: llvm/trunk/test/CodeGen/X86/machine-combiner-int-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-combiner-int-vec.ll?rev=247516&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-combiner-int-vec.ll (added)
+++ llvm/trunk/test/CodeGen/X86/machine-combiner-int-vec.ll Sat Sep 12 14:47:50 2015
@@ -0,0 +1,68 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse < %s | FileCheck %s --check-prefix=SSE
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx < %s | FileCheck %s --check-prefix=AVX
+
+; Verify that 128-bit vector logical ops are reassociated.
+
+define <4 x i32> @reassociate_and_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; SSE-LABEL: reassociate_and_v4i32:
+; SSE:       # BB#0:
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    pand %xmm3, %xmm2
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_and_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+
+  %t0 = add <4 x i32> %x0, %x1
+  %t1 = and <4 x i32> %x2, %t0
+  %t2 = and <4 x i32> %x3, %t1
+  ret <4 x i32> %t2
+}
+
+define <4 x i32> @reassociate_or_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; SSE-LABEL: reassociate_or_v4i32:
+; SSE:       # BB#0:
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    por %xmm3, %xmm2
+; SSE-NEXT:    por %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_or_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+
+  %t0 = add <4 x i32> %x0, %x1
+  %t1 = or <4 x i32> %x2, %t0
+  %t2 = or <4 x i32> %x3, %t1
+  ret <4 x i32> %t2
+}
+
+define <4 x i32> @reassociate_xor_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; SSE-LABEL: reassociate_xor_v4i32:
+; SSE:       # BB#0:
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    pxor %xmm3, %xmm2
+; SSE-NEXT:    pxor %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_xor_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+
+  %t0 = add <4 x i32> %x0, %x1
+  %t1 = xor <4 x i32> %x2, %t0
+  %t2 = xor <4 x i32> %x3, %t1
+  ret <4 x i32> %t2
+}
+

Modified: llvm/trunk/test/CodeGen/X86/machine-cp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-cp.ll?rev=247516&r1=247515&r2=247516&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-cp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/machine-cp.ll Sat Sep 12 14:47:50 2015
@@ -73,22 +73,18 @@ while.end:
 ; Machine propagation used to delete the first copy as the
 ; first few uses were <undef>.
 ; CHECK-NEXT: movdqa [[SRC]], [[CPY1:%xmm[0-9]+]]
-; CHECK-NEXT: movdqa [[SRC]], [[CPY2:%xmm[0-9]+]]
-; CHECK-NEXT: punpckhbw [[SRC]],
-; Check that CPY1 is not redefined.
-; CHECK-NOT: , [[CPY1]]
-; undef use, we do not care.
-; CHECK: punpcklwd [[CPY1]],
-; Check that CPY1 is not redefined.
-; CHECK-NOT: , [[CPY1]]
+; CHECK: punpcklbw [[CPY1]], [[CPY1]]
+; CHECK-NEXT: punpcklwd [[CPY1]], [[CPY1]]
+; CHECK-NEXT: pslld $31, [[CPY1]]
+; CHECK: movdqa [[SRC]], [[CPY2:%xmm[0-9]+]]
 ; CHECK: punpcklbw [[CPY2]], [[CPY2]]
 ; CHECK-NEXT: punpckhwd [[CPY2]], [[CPY2]]
 ; CHECK-NEXT: pslld $31, [[CPY2]]
+; CHECK: punpckhbw [[SRC]],
 ; Check that CPY1 is not redefined.
 ; CHECK-NOT: , [[CPY1]]
-; CHECK: punpcklbw [[CPY1]], [[CPY1]]
-; CHECK-NEXT: punpcklwd [[CPY1]], [[CPY1]]
-; CHECK-NEXT: pslld $31, [[CPY1]]
+; undef use, we do not care.
+; CHECK: punpcklwd [[CPY1]],
 define <16 x float> @foo(<16 x float> %x) {
 bb:
   %v3 = icmp slt <16 x i32> undef, zeroinitializer
