[llvm] [X86] LowerShift - if a vXi8 shift amount is small enough skip additional incremental shift stages (PR #174207)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 2 05:08:58 PST 2026
llvmbot wrote:
@llvm/pr-subscribers-backend-x86
Author: Simon Pilgrim (RKSimon)
We expand vXi8 shifts into shift-by-4/2/1 stages and use the MSBs of the shift amount to select whether each stage is applied.
However, to move the 3-bit shift amount's MSBs into position we first shift up by 5 using PSLLW (no mask needed), and then use a PADDB/PADDW for each additional stage increment (PADDB/PADDW being quicker than PSLLW on many targets).
The problem is that when an earlier stage is completely unused, the PADDBs can't easily be merged back into the PSLLW, resulting in a costly chain of dependent instructions.
This patch uses computeKnownBits to determine an upper bound on the shift amount (via the count of leading zeros), folds those extra bits into the initial PSLLW shift, and skips the empty stages entirely.
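To make the staging scheme concrete, here is a minimal scalar sketch (not part of the patch, and not LLVM code) of how the shift-by-4/2/1 stages consume the 3-bit amount, and how known leading zeros of the amount let the initial shift absorb the skipped stages; `stagedShl` and `minLZ` are made-up names for illustration only:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Scalar model of the staged vXi8 SHL lowering. 'minLZ' is the number of
// known leading-zero bits of the 3-bit amount (0..2); each known-zero MSB
// lets us pre-shift the amount mask further and drop that stage entirely,
// mirroring the single PSLLW by (5 + MinLZ) in the vector code.
static uint8_t stagedShl(uint8_t r, uint8_t amt, unsigned minLZ) {
  assert(amt < 8 && minLZ <= 2);
  // Move the amount's MSBs into the byte's sign-bit position.
  uint8_t a = uint8_t(amt << (5 + minLZ));
  if (minLZ < 2) {                        // stage selected by bit 2 of amt
    if (a & 0x80) r = uint8_t(r << 4);
    a = uint8_t(a << 1);                  // "a += a" (PADDB) in the vector code
  }
  if (minLZ < 1) {                        // stage selected by bit 1 of amt
    if (a & 0x80) r = uint8_t(r << 2);
    a = uint8_t(a << 1);
  }
  if (a & 0x80) r = uint8_t(r << 1);      // final stage, bit 0 of amt
  return r;
}

int main() {
  // General path (no known zero bits) matches a plain byte shift.
  for (uint8_t amt = 0; amt < 8; ++amt)
    assert(stagedShl(0x11, amt, 0) == uint8_t(0x11 << amt));
  // Amount known to be <= 1: only the shift-by-1 stage remains.
  printf("ok: %u\n", unsigned(stagedShl(0x11, 1, 2))); // prints 34 (0x22)
  return 0;
}
```

In the actual lowering the sign-bit test per stage is the PBLENDVB/PCMPGTB select on each byte, so skipping a stage removes both the select and the PADDB that feeds the next one.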
Fixes #162812
---
Patch is 45.36 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/174207.diff
2 Files Affected:
- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+43-37)
- (modified) llvm/test/CodeGen/X86/pr162812.ll (+135-346)
``````````diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 21dd26bec7681..25b557eeb5a68 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -31244,30 +31244,35 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
return DAG.getSelect(dl, SelVT, C, V0, V1);
};
+ KnownBits KnownAmt = DAG.computeKnownBits(Amt).trunc(3);
+ unsigned MinLZ = KnownAmt.countMinLeadingZeros();
+ assert(MinLZ <= 2 && "Illegal shift amount");
+
// Turn 'a' into a mask suitable for VSELECT: a = a << 5;
// We can safely do this using i16 shifts as we're only interested in
- // the 3 lower bits of each byte.
+ // the 3 lower bits of each byte. If the amount is even smaller, we can shift
+ // the mask further and skip additional incremental shifts.
Amt = DAG.getBitcast(ExtVT, Amt);
- Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
+ Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5 + MinLZ, DAG);
Amt = DAG.getBitcast(VT, Amt);
if (Opc == ISD::SHL || Opc == ISD::SRL) {
- // r = VSELECT(r, shift(r, 4), a);
- SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
- R = SignBitSelect(VT, Amt, M, R);
-
- // a += a
- Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
-
- // r = VSELECT(r, shift(r, 2), a);
- M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
- R = SignBitSelect(VT, Amt, M, R);
-
- // a += a
- Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
-
+ if (MinLZ < 2) {
+ // r = VSELECT(r, shift(r, 4), a);
+ SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
+ R = SignBitSelect(VT, Amt, M, R);
+ // a += a
+ Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+ }
+ if (MinLZ < 1) {
+ // r = VSELECT(r, shift(r, 2), a);
+ SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
+ R = SignBitSelect(VT, Amt, M, R);
+ // a += a
+ Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+ }
// return VSELECT(r, shift(r, 1), a);
- M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
+ SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
return R;
}
@@ -31285,26 +31290,27 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
RLo = DAG.getBitcast(ExtVT, RLo);
RHi = DAG.getBitcast(ExtVT, RHi);
- // r = VSELECT(r, shift(r, 4), a);
- SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
- SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
- RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
- RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
-
- // a += a
- ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
- AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
-
- // r = VSELECT(r, shift(r, 2), a);
- MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
- MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
- RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
- RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
-
- // a += a
- ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
- AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
-
+ SDValue MLo, MHi;
+ if (MinLZ < 2) {
+ // r = VSELECT(r, shift(r, 4), a);
+ MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
+ MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
+ RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
+ RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
+ // a += a
+ ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
+ AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
+ }
+ if (MinLZ < 1) {
+ // r = VSELECT(r, shift(r, 2), a);
+ MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
+ MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
+ RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
+ RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
+ // a += a
+ ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
+ AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
+ }
// r = VSELECT(r, shift(r, 1), a);
MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
diff --git a/llvm/test/CodeGen/X86/pr162812.ll b/llvm/test/CodeGen/X86/pr162812.ll
index 1b8c66f649426..6ef3866155969 100644
--- a/llvm/test/CodeGen/X86/pr162812.ll
+++ b/llvm/test/CodeGen/X86/pr162812.ll
@@ -7,50 +7,32 @@
define <32 x i8> @shl1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
; SSE2-LABEL: shl1_v32i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: psrlw $2, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [8224,8224,8224,8224,8224,8224,8224,8224]
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: paddb %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: paddb %xmm0, %xmm0
-; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: psrlw $2, %xmm3
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: paddb %xmm3, %xmm3
-; SSE2-NEXT: paddb %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: shl1_v32i8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
-; SSE42-NEXT: psrlw $2, %xmm2
-; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [8224,8224,8224,8224,8224,8224,8224,8224]
-; SSE42-NEXT: pand %xmm5, %xmm2
-; SSE42-NEXT: paddb %xmm2, %xmm2
-; SSE42-NEXT: paddb %xmm2, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm6
-; SSE42-NEXT: paddb %xmm0, %xmm6
-; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm6, %xmm4
-; SSE42-NEXT: psrlw $2, %xmm3
-; SSE42-NEXT: pand %xmm3, %xmm5
-; SSE42-NEXT: paddb %xmm5, %xmm5
-; SSE42-NEXT: paddb %xmm5, %xmm5
+; SSE42-NEXT: movdqa %xmm0, %xmm5
+; SSE42-NEXT: paddb %xmm0, %xmm5
+; SSE42-NEXT: movaps %xmm2, %xmm0
+; SSE42-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: paddb %xmm1, %xmm2
-; SSE42-NEXT: movdqa %xmm5, %xmm0
+; SSE42-NEXT: movaps %xmm3, %xmm0
; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movdqa %xmm4, %xmm0
; SSE42-NEXT: retq
@@ -58,20 +40,12 @@ define <32 x i8> @shl1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
; AVX2-LABEL: shl1_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: shl1_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512-NEXT: vpsrlw $2, %ymm1, %ymm1
-; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm1
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = lshr <32 x i8> %mask, splat (i8 7)
@@ -82,69 +56,46 @@ define <32 x i8> @shl1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
define <32 x i8> @lshr1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
; SSE2-LABEL: lshr1_v32i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: psrlw $2, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [8224,8224,8224,8224,8224,8224,8224,8224]
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: paddb %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm2
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE2-NEXT: pand %xmm7, %xmm0
-; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: psrlw $2, %xmm3
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: paddb %xmm3, %xmm3
-; SSE2-NEXT: paddb %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: psrlw $1, %xmm1
-; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: lshr1_v32i8:
; SSE42: # %bb.0:
-; SSE42-NEXT: movdqa %xmm2, %xmm4
-; SSE42-NEXT: movdqa %xmm0, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm6
-; SSE42-NEXT: psrlw $1, %xmm6
-; SSE42-NEXT: movdqa {{.*#+}} xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE42-NEXT: pand %xmm7, %xmm6
-; SSE42-NEXT: psrlw $2, %xmm4
-; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [8224,8224,8224,8224,8224,8224,8224,8224]
-; SSE42-NEXT: pand %xmm5, %xmm4
-; SSE42-NEXT: paddb %xmm4, %xmm4
-; SSE42-NEXT: paddb %xmm4, %xmm4
+; SSE42-NEXT: movdqa %xmm0, %xmm4
+; SSE42-NEXT: movdqa %xmm0, %xmm5
+; SSE42-NEXT: psrlw $1, %xmm5
+; SSE42-NEXT: movdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE42-NEXT: pand %xmm6, %xmm5
+; SSE42-NEXT: movaps %xmm2, %xmm0
+; SSE42-NEXT: pblendvb %xmm0, %xmm5, %xmm4
+; SSE42-NEXT: movdqa %xmm1, %xmm2
+; SSE42-NEXT: psrlw $1, %xmm2
+; SSE42-NEXT: pand %xmm6, %xmm2
+; SSE42-NEXT: movaps %xmm3, %xmm0
+; SSE42-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE42-NEXT: movdqa %xmm4, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm6, %xmm2
-; SSE42-NEXT: movdqa %xmm1, %xmm4
-; SSE42-NEXT: psrlw $1, %xmm4
-; SSE42-NEXT: pand %xmm7, %xmm4
-; SSE42-NEXT: psrlw $2, %xmm3
-; SSE42-NEXT: pand %xmm3, %xmm5
-; SSE42-NEXT: paddb %xmm5, %xmm5
-; SSE42-NEXT: paddb %xmm5, %xmm5
-; SSE42-NEXT: movdqa %xmm5, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm4, %xmm1
-; SSE42-NEXT: movdqa %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX2-LABEL: lshr1_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
@@ -152,10 +103,6 @@ define <32 x i8> @lshr1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm2
; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm2, %ymm2
-; AVX512-NEXT: vpsrlw $2, %ymm1, %ymm1
-; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm1
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = lshr <32 x i8> %mask, splat (i8 7)
@@ -167,22 +114,12 @@ define <32 x i8> @ashr1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
; SSE2-LABEL: ashr1_v32i8:
; SSE2: # %bb.0:
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
-; SSE2-NEXT: psrlw $2, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [8224,8224,8224,8224,8224,8224,8224,8224]
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [32896,32896,32896,32896,32896,32896,32896,32896]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm2[8],xmm7[9],xmm2[9],xmm7[10],xmm2[10],xmm7[11],xmm2[11],xmm7[12],xmm2[12],xmm7[13],xmm2[13],xmm7[14],xmm2[14],xmm7[15],xmm2[15]
-; SSE2-NEXT: paddw %xmm7, %xmm7
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm8
; SSE2-NEXT: pcmpgtw %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, %xmm9
-; SSE2-NEXT: pandn %xmm6, %xmm9
-; SSE2-NEXT: psraw $2, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: por %xmm9, %xmm6
-; SSE2-NEXT: paddw %xmm7, %xmm7
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtw %xmm7, %xmm8
; SSE2-NEXT: movdqa %xmm8, %xmm7
; SSE2-NEXT: pandn %xmm6, %xmm7
; SSE2-NEXT: psraw $1, %xmm6
@@ -191,15 +128,6 @@ define <32 x i8> @ashr1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
; SSE2-NEXT: psrlw $8, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: paddw %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm7, %xmm7
-; SSE2-NEXT: pcmpgtw %xmm2, %xmm7
-; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: pandn %xmm0, %xmm8
-; SSE2-NEXT: psraw $2, %xmm0
-; SSE2-NEXT: pand %xmm7, %xmm0
-; SSE2-NEXT: por %xmm8, %xmm0
-; SSE2-NEXT: paddw %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm7, %xmm7
; SSE2-NEXT: pcmpgtw %xmm2, %xmm7
; SSE2-NEXT: movdqa %xmm7, %xmm2
@@ -210,18 +138,8 @@ define <32 x i8> @ashr1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: packuswb %xmm6, %xmm0
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: psrlw $2, %xmm3
; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
-; SSE2-NEXT: paddw %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: pcmpgtw %xmm5, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm7
-; SSE2-NEXT: pandn %xmm2, %xmm7
-; SSE2-NEXT: psraw $2, %xmm2
-; SSE2-NEXT: pand %xmm6, %xmm2
-; SSE2-NEXT: por %xmm7, %xmm2
-; SSE2-NEXT: paddw %xmm5, %xmm5
; SSE2-NEXT: pxor %xmm6, %xmm6
; SSE2-NEXT: pcmpgtw %xmm5, %xmm6
; SSE2-NEXT: movdqa %xmm6, %xmm5
@@ -232,15 +150,6 @@ define <32 x i8> @ashr1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: paddw %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm3, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm1, %xmm6
-; SSE2-NEXT: psraw $2, %xmm1
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: paddw %xmm3, %xmm3
; SSE2-NEXT: pcmpgtw %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm3
; SSE2-NEXT: pandn %xmm1, %xmm3
@@ -254,66 +163,32 @@ define <32 x i8> @ashr1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
; SSE42-LABEL: ashr1_v32i8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
-; SSE42-NEXT: psrlw $2, %xmm2
-; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [8224,8224,8224,8224,8224,8224,8224,8224]
+; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [32896,32896,32896,32896,32896,32896,32896,32896]
; SSE42-NEXT: pand %xmm5, %xmm2
; SSE42-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE42-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
; SSE42-NEXT: movdqa %xmm6, %xmm7
-; SSE42-NEXT: psraw $4, %xmm7
-; SSE42-NEXT: pblendvb %xmm0, %xmm7, %xmm6
-; SSE42-NEXT: movdqa %xmm6, %xmm7
-; SSE42-NEXT: psraw $2, %xmm7
-; SSE42-NEXT: paddw %xmm0, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm7, %xmm6
-; SSE42-NEXT: movdqa %xmm6, %xmm7
; SSE42-NEXT: psraw $1, %xmm7
-; SSE42-NEXT: paddw %xmm0, %xmm0
; SSE42-NEXT: pblendvb %xmm0, %xmm7, %xmm6
+; SSE42-NEXT: psrlw $8, %xmm6
; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE42-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; SSE42-NEXT: movdqa %xmm2, %xmm4
-; SSE42-NEXT: psraw $4, %xmm4
-; SSE42-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE42-NEXT: movdqa %xmm2, %xmm4
-; SSE42-NEXT: psraw $2, %xmm4
-; SSE42-NEXT: paddw %xmm0, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE42-NEXT: movdqa %xmm2, %xmm4
; SSE42-NEXT: psraw $1, %xmm4
-; SSE42-NEXT: paddw %xmm0, %xmm0
; SSE42-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE42-NEXT: psrlw $8, %xmm6
; SSE42-NEXT: psrlw $8, %xmm2
; SSE42-NEXT: packuswb %xmm6, %xmm2
-; SSE42-NEXT: psrlw $2, %xmm3
; SSE42-NEXT: pand %xmm5, %xmm3
; SSE42-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; SSE42-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
; SSE42-NEXT: movdqa %xmm4, %xmm5
-; SSE42-NEXT: psraw $4, %xmm5
-; SSE42-NEXT: pblendvb %xmm0, %xmm5, %xmm4
-; SSE42-NEXT: movdqa %xmm4, %xmm5
-; SSE42-NEXT: psraw $2, %xmm5
-; SSE42-NEXT: paddw %xmm0, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm5, %xmm4
-; SSE42-NEXT: movdqa %xmm4, %xmm5
; SSE42-NEXT: psraw $1, %xmm5
-; SSE42-NEXT: paddw %xmm0, %xmm0
; SSE42-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE42-NEXT: psrlw $8, %xmm4
; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE42-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE42-NEXT: movdqa %xmm1, %xmm3
-; SSE42-NEXT: psraw $4, %xmm3
-; SSE42-NEXT: pblendvb %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movdqa %xmm1, %xmm3
-; SSE42-NEXT: psraw $2, %xmm3
-; SSE42-NEXT: paddw %xmm0, %xmm0
-; SSE42-NEXT: pblendvb %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movdqa %xmm1, %xmm3
; SSE42-NEXT: psraw $1, %xmm3
-; SSE42-NEXT: paddw %xmm0, %xmm0
; SSE42-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE42-NEXT: psrlw $8, %xmm1
; SSE42-NEXT: packuswb %xmm4, %xmm1
@@ -322,28 +197,15 @@ define <32 x i8> @ashr1_v32i8(<32 x i8> %a, <32 x i8> %mask) {
;
; AVX2-LABEL: ashr1_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
-; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX2-NEXT:...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/174207
More information about the llvm-commits mailing list