[llvm] [LegalizeTypes][X86][PowerPC] Use shift by 1 instead of adding a value to itself to double. (PR #86857)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 27 13:52:33 PDT 2024


https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/86857

>From b19897a8a55725dc65d26332ab9af863eb816bd5 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 27 Mar 2024 12:04:15 -0700
Subject: [PATCH] [LegalizeTypes][X86][PowerPC] Use shift by 1 instead of
 adding a value to itself to double.

Using a shift is the correct way to handle undef and works better with
our optimizations that move freeze around.

The X86 code looks like an improvement, but PowerPC might be a regression.

Hoping this improves some code for #86850.
---
 .../CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp   |  3 ++-
 llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll     | 13 +++++++------
 llvm/test/CodeGen/PowerPC/vec_insert_elt.ll         |  8 ++++----
 llvm/test/CodeGen/X86/insertelement-var-index.ll    |  8 ++++----
 4 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index a55364ea2c4e5b..73e4b50e316a90 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -428,7 +428,8 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
     std::swap(Lo, Hi);
 
   SDValue Idx = N->getOperand(2);
-  Idx = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, Idx);
+  Idx = DAG.getNode(ISD::SHL, dl, Idx.getValueType(), Idx,
+                    DAG.getShiftAmountConstant(1, Idx.getValueType(), dl));
   NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, NewVec, Lo, Idx);
   Idx = DAG.getNode(ISD::ADD, dl,
                     Idx.getValueType(), Idx,
diff --git a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
index aae23265710ce0..b05ebade5f78ed 100644
--- a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll
@@ -165,12 +165,12 @@ define <2 x i64> @testDoubleword(<2 x i64> %a, i64 %b, i64 %idx) {
 ;
 ; CHECK-32-LABEL: testDoubleword:
 ; CHECK-32:       # %bb.0: # %entry
-; CHECK-32-NEXT:    add 5, 6, 6
 ; CHECK-32-NEXT:    addi 7, 1, -32
+; CHECK-32-NEXT:    slwi 5, 6, 1
+; CHECK-32-NEXT:    rlwinm 6, 6, 3, 28, 28
 ; CHECK-32-NEXT:    stxv 34, -32(1)
-; CHECK-32-NEXT:    rlwinm 6, 5, 2, 28, 29
 ; CHECK-32-NEXT:    stwx 3, 7, 6
-; CHECK-32-NEXT:    addi 3, 5, 1
+; CHECK-32-NEXT:    ori 3, 5, 1
 ; CHECK-32-NEXT:    addi 5, 1, -16
 ; CHECK-32-NEXT:    lxv 0, -32(1)
 ; CHECK-32-NEXT:    rlwinm 3, 3, 2, 28, 29
@@ -187,10 +187,11 @@ define <2 x i64> @testDoubleword(<2 x i64> %a, i64 %b, i64 %idx) {
 ;
 ; CHECK-32-P10-LABEL: testDoubleword:
 ; CHECK-32-P10:       # %bb.0: # %entry
-; CHECK-32-P10-NEXT:    add 5, 6, 6
-; CHECK-32-P10-NEXT:    slwi 6, 5, 2
+; CHECK-32-P10-NEXT:    slwi 5, 6, 1
+; CHECK-32-P10-NEXT:    slwi 6, 6, 3
 ; CHECK-32-P10-NEXT:    vinswlx 2, 6, 3
-; CHECK-32-P10-NEXT:    addi 3, 5, 1
+; CHECK-32-P10-NEXT:    li 3, 1
+; CHECK-32-P10-NEXT:    rlwimi 3, 5, 0, 0, 30
 ; CHECK-32-P10-NEXT:    slwi 3, 3, 2
 ; CHECK-32-P10-NEXT:    vinswlx 2, 3, 4
 ; CHECK-32-P10-NEXT:    blr
diff --git a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
index b98aed8616509e..e63bf47a85bd1a 100644
--- a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
@@ -241,14 +241,14 @@ define <2 x i64> @testDoubleword(<2 x i64> %a, i64 %b, i64 %idx) {
 ;
 ; AIX-P8-32-LABEL: testDoubleword:
 ; AIX-P8-32:       # %bb.0: # %entry
-; AIX-P8-32-NEXT:    add r6, r6, r6
 ; AIX-P8-32-NEXT:    addi r5, r1, -32
-; AIX-P8-32-NEXT:    rlwinm r7, r6, 2, 28, 29
+; AIX-P8-32-NEXT:    slwi r7, r6, 1
+; AIX-P8-32-NEXT:    rlwinm r6, r6, 3, 28, 28
 ; AIX-P8-32-NEXT:    stxvw4x v2, 0, r5
-; AIX-P8-32-NEXT:    stwx r3, r5, r7
+; AIX-P8-32-NEXT:    stwx r3, r5, r6
 ; AIX-P8-32-NEXT:    addi r3, r1, -16
 ; AIX-P8-32-NEXT:    lxvw4x vs0, 0, r5
-; AIX-P8-32-NEXT:    addi r5, r6, 1
+; AIX-P8-32-NEXT:    ori r5, r7, 1
 ; AIX-P8-32-NEXT:    rlwinm r5, r5, 2, 28, 29
 ; AIX-P8-32-NEXT:    stxvw4x vs0, 0, r3
 ; AIX-P8-32-NEXT:    stwx r4, r3, r5
diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll
index 5420e6b5ce86f3..bd588e54adbae0 100644
--- a/llvm/test/CodeGen/X86/insertelement-var-index.ll
+++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll
@@ -1019,7 +1019,7 @@ define <2 x i64> @arg_i64_v2i64(<2 x i64> %v, i64 %x, i32 %y) nounwind {
 ; X86AVX2-NEXT:    movl %edx, (%esp,%esi,4)
 ; X86AVX2-NEXT:    vmovaps (%esp), %xmm0
 ; X86AVX2-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT:    incl %ecx
+; X86AVX2-NEXT:    orl $1, %ecx
 ; X86AVX2-NEXT:    andl $3, %ecx
 ; X86AVX2-NEXT:    movl %eax, 16(%esp,%ecx,4)
 ; X86AVX2-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0
@@ -1369,7 +1369,7 @@ define <2 x i64> @load_i64_v2i64(<2 x i64> %v, ptr %p, i32 %y) nounwind {
 ; X86AVX2-NEXT:    movl %edx, (%esp,%esi,4)
 ; X86AVX2-NEXT:    vmovaps (%esp), %xmm0
 ; X86AVX2-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT:    incl %eax
+; X86AVX2-NEXT:    orl $1, %eax
 ; X86AVX2-NEXT:    andl $3, %eax
 ; X86AVX2-NEXT:    movl %ecx, 16(%esp,%eax,4)
 ; X86AVX2-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0
@@ -1754,7 +1754,7 @@ define <4 x i64> @arg_i64_v4i64(<4 x i64> %v, i64 %x, i32 %y) nounwind {
 ; X86AVX2-NEXT:    movl %edx, (%esp,%esi,4)
 ; X86AVX2-NEXT:    vmovaps (%esp), %ymm0
 ; X86AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT:    incl %ecx
+; X86AVX2-NEXT:    orl $1, %ecx
 ; X86AVX2-NEXT:    andl $7, %ecx
 ; X86AVX2-NEXT:    movl %eax, 32(%esp,%ecx,4)
 ; X86AVX2-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0
@@ -2137,7 +2137,7 @@ define <4 x i64> @load_i64_v4i64(<4 x i64> %v, ptr %p, i32 %y) nounwind {
 ; X86AVX2-NEXT:    movl %edx, (%esp,%esi,4)
 ; X86AVX2-NEXT:    vmovaps (%esp), %ymm0
 ; X86AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT:    incl %eax
+; X86AVX2-NEXT:    orl $1, %eax
 ; X86AVX2-NEXT:    andl $7, %eax
 ; X86AVX2-NEXT:    movl %ecx, 32(%esp,%eax,4)
 ; X86AVX2-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0



More information about the llvm-commits mailing list