[llvm] [X86] Fold shift into GF2P8AFFINEQB instruction (PR #180019)

Abhiram Jampani via llvm-commits llvm-commits at lists.llvm.org
Sat Feb 14 06:44:45 PST 2026


https://github.com/Abhiramjampani updated https://github.com/llvm/llvm-project/pull/180019

From 85a4389dde61ddbdd9b5ac9d72720302b53201cf Mon Sep 17 00:00:00 2001
From: Abhiramjampani <lcs2022059 at iiitl.ac.in>
Date: Fri, 6 Feb 2026 01:13:01 +0530
Subject: [PATCH] [X86] Fold shift into GF2P8AFFINEQB instruction

---
 llvm/lib/Target/X86/X86ISelLowering.cpp  |  68 ++++++-
 llvm/test/CodeGen/X86/gfni-shift-fold.ll | 219 +++++++++++++++++++++++
 2 files changed, 283 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/gfni-shift-fold.ll
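
As a concrete illustration of the combine (a sketch mirroring the first test
added below): starting from

  %t = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src,
         <16 x i8> <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
                    i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>, i8 0)
  %r = shl <16 x i8> %t, splat (i8 1)

the shl is folded away and a single gf2p8affineqb is emitted whose constant
matrix is the original shifted down one row (each qword 0x0102040810204080
becomes 0x0001020408102040).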

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1837c8bbedf0e..ad410fb2cef9f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -29240,12 +29240,11 @@ uint64_t getGFNICtrlImm(unsigned Opcode, unsigned Amt = 0) {
   llvm_unreachable("Unsupported GFNI opcode");
 }
 
-// Generate a GFNI gf2p8affine bitmask for vXi8 bitreverse/shift/rotate.
-SDValue getGFNICtrlMask(unsigned Opcode, SelectionDAG &DAG, const SDLoc &DL,
-                        MVT VT, unsigned Amt = 0) {
+// Build a GFNI gf2p8affine bitmask from a raw 64-bit matrix value.
+static SDValue buildGFNIMatrixMask(uint64_t Imm, SelectionDAG &DAG,
+                                   const SDLoc &DL, MVT VT) {
   assert(VT.getVectorElementType() == MVT::i8 &&
          (VT.getSizeInBits() % 64) == 0 && "Illegal GFNI control type");
-  uint64_t Imm = getGFNICtrlImm(Opcode, Amt);
   SmallVector<SDValue> MaskBits;
   for (unsigned I = 0, E = VT.getSizeInBits(); I != E; I += 8) {
     uint64_t Bits = (Imm >> (I % 64)) & 255;
@@ -29254,6 +29254,13 @@ SDValue getGFNICtrlMask(unsigned Opcode, SelectionDAG &DAG, const SDLoc &DL,
   return DAG.getBuildVector(VT, DL, MaskBits);
 }
 
+// Generate a GFNI gf2p8affine bitmask for vXi8 bitreverse/shift/rotate.
+SDValue getGFNICtrlMask(unsigned Opcode, SelectionDAG &DAG, const SDLoc &DL,
+                        MVT VT, unsigned Amt = 0) {
+  uint64_t Imm = getGFNICtrlImm(Opcode, Amt);
+  return buildGFNIMatrixMask(Imm, DAG, DL, VT);
+}
+
 /// Lower a vector CTLZ using native supported vector CTLZ instruction.
 //
 // i8/i16 vector implemented using dword LZCNT vector instruction
@@ -50624,6 +50631,60 @@ static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG,
   unsigned EltSizeInBits = VT.getScalarSizeInBits();
   SDLoc DL(N);
 
+  // Fold: shl(gf2p8affineqb(X, M, imm), amt) -> gf2p8affineqb(X, M', imm')
+  // where M' composes M with the shift matrix, folding the shift into the
+  // affine transformation. Handle both the X86ISD::GF2P8AFFINEQB node and
+  // the intrinsic form.
+  if (Subtarget.hasGFNI() && VT.isVector() && EltSizeInBits == 8) {
+    bool IsGF2P8 = N0.getOpcode() == X86ISD::GF2P8AFFINEQB;
+    bool IsIntrinsic =
+        N0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+        (N0.getConstantOperandVal(0) == Intrinsic::x86_vgf2p8affineqb_128 ||
+         N0.getConstantOperandVal(0) == Intrinsic::x86_vgf2p8affineqb_256 ||
+         N0.getConstantOperandVal(0) == Intrinsic::x86_vgf2p8affineqb_512);
+
+    if (IsGF2P8 || IsIntrinsic) {
+      // For vector shifts, the shift amount is a splat vector
+      APInt SplatVal;
+      if (ISD::isConstantSplatVector(N1.getNode(), SplatVal)) {
+        uint64_t ShiftAmt = SplatVal.getZExtValue();
+        if (ShiftAmt > 0 && ShiftAmt < 8) {
+          // Operand indices differ: X86ISD::GF2P8AFFINEQB uses 0,1,2;
+          // INTRINSIC_WO_CHAIN uses 1,2,3 (operand 0 is the intrinsic ID).
+          unsigned BaseIdx = IsIntrinsic ? 1 : 0;
+          SDValue Input = N0.getOperand(BaseIdx);
+          SDValue MatrixOp = N0.getOperand(BaseIdx + 1);
+          // Fold only if the matrix is a uniform constant. The XOR immediate
+          // is shifted too: (x ^ imm8) << i == (x << i) ^ (imm8 << i).
+          auto *BV = dyn_cast<BuildVectorSDNode>(MatrixOp);
+          if (BV) {
+            SmallVector<APInt> RawBits;
+            BitVector UndefElts;
+            if (BV->getConstantRawBits(/*IsLE=*/true, 64, RawBits,
+                                       UndefElts) &&
+                UndefElts.none() && all_equal(RawBits)) {
+              uint64_t OrigMatrix = RawBits[0].getZExtValue();
+              // Folding the left-shift right-shifts the matrix by ShiftAmt
+              // rows (8 bits each); the vacated rows zero the low result bits.
+              uint64_t NewMatrix = OrigMatrix >> (ShiftAmt * 8);
+
+              // Shift the XOR immediate as well
+              uint64_t OldImm = N0.getConstantOperandVal(BaseIdx + 2);
+              uint64_t NewImm = (OldImm << ShiftAmt) & 0xFF;
+
+              // Build new matrix vector and return new GF2P8AFFINEQB
+              SDValue NewMatrixOp = buildGFNIMatrixMask(
+                  NewMatrix, DAG, DL, MatrixOp.getSimpleValueType());
+              return DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, Input,
+                                 NewMatrixOp,
+                                 DAG.getTargetConstant(NewImm, DL, MVT::i8));
+            }
+          }
+        }
+      }
+    }
+  }
+
   // Exploits AVX2 VSHLV/VSRLV instructions for efficient unsigned vector shifts
   // with out-of-bounds clamping.
   if (N0.getOpcode() == ISD::VSELECT &&
diff --git a/llvm/test/CodeGen/X86/gfni-shift-fold.ll b/llvm/test/CodeGen/X86/gfni-shift-fold.ll
new file mode 100644
index 0000000000000..59b8330048eb1
--- /dev/null
+++ b/llvm/test/CodeGen/X86/gfni-shift-fold.ll
@@ -0,0 +1,219 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+gfni | FileCheck %s --check-prefixes=GFNI
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+gfni,+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+gfni,+avx512bw | FileCheck %s --check-prefixes=AVX512
+
+; Test that shift operations on gf2p8affineqb results are folded
+; into the matrix transformation.
+
+;
+; 128-bit tests
+;
+
+define <16 x i8> @test_shl1_v16i8(<16 x i8> %src) {
+; GFNI-LABEL: test_shl1_v16i8:
+; GFNI:       # %bb.0:
+; GFNI-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; GFNI-NEXT:    retq
+;
+; AVX2-LABEL: test_shl1_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test_shl1_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src,
+       <16 x i8> <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>, i8 0)
+  %2 = shl <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  ret <16 x i8> %2
+}
+
+define <16 x i8> @test_shl2_v16i8(<16 x i8> %src) {
+; GFNI-LABEL: test_shl2_v16i8:
+; GFNI:       # %bb.0:
+; GFNI-NEXT:    gf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; GFNI-NEXT:    retq
+;
+; AVX2-LABEL: test_shl2_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test_shl2_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src,
+       <16 x i8> <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>, i8 0)
+  %2 = shl <16 x i8> %1, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2,
+                          i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
+  ret <16 x i8> %2
+}
+
+define <16 x i8> @test_shl1_nonzero_imm_v16i8(<16 x i8> %src) {
+; GFNI-LABEL: test_shl1_nonzero_imm_v16i8:
+; GFNI:       # %bb.0:
+; GFNI-NEXT:    gf2p8affineqb $2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; GFNI-NEXT:    retq
+;
+; AVX2-LABEL: test_shl1_nonzero_imm_v16i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vgf2p8affineqb $2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test_shl1_nonzero_imm_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vgf2p8affineqb $2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src,
+       <16 x i8> <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>, i8 1)
+  %2 = shl <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  ret <16 x i8> %2
+}
+
+;
+; 256-bit tests (require avx2)
+;
+
+define <32 x i8> @test_shl1_v32i8(<32 x i8> %src) #1 {
+; GFNI-LABEL: test_shl1_v32i8:
+; GFNI:       # %bb.0:
+; GFNI-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; GFNI-NEXT:    retq
+;
+; AVX2-LABEL: test_shl1_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test_shl1_v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %1 = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> %src,
+       <32 x i8> <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>, i8 0)
+  %2 = shl <32 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  ret <32 x i8> %2
+}
+
+define <32 x i8> @test_shl1_nonzero_imm_v32i8(<32 x i8> %src) #1 {
+; GFNI-LABEL: test_shl1_nonzero_imm_v32i8:
+; GFNI:       # %bb.0:
+; GFNI-NEXT:    vgf2p8affineqb $2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; GFNI-NEXT:    retq
+;
+; AVX2-LABEL: test_shl1_nonzero_imm_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vgf2p8affineqb $2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test_shl1_nonzero_imm_v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vgf2p8affineqb $2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %1 = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> %src,
+       <32 x i8> <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>, i8 1)
+  %2 = shl <32 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  ret <32 x i8> %2
+}
+
+;
+; 512-bit tests (require avx512bw)
+;
+
+define <64 x i8> @test_shl1_v64i8(<64 x i8> %src) #0 {
+; GFNI-LABEL: test_shl1_v64i8:
+; GFNI:       # %bb.0:
+; GFNI-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; GFNI-NEXT:    retq
+;
+; AVX2-LABEL: test_shl1_v64i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test_shl1_v64i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %1 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src,
+       <64 x i8> <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>, i8 0)
+  %2 = shl <64 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  ret <64 x i8> %2
+}
+
+define <64 x i8> @test_shl1_nonzero_imm_v64i8(<64 x i8> %src) #0 {
+; GFNI-LABEL: test_shl1_nonzero_imm_v64i8:
+; GFNI:       # %bb.0:
+; GFNI-NEXT:    vgf2p8affineqb $2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; GFNI-NEXT:    retq
+;
+; AVX2-LABEL: test_shl1_nonzero_imm_v64i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vgf2p8affineqb $2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test_shl1_nonzero_imm_v64i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vgf2p8affineqb $2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %1 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src,
+       <64 x i8> <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1,
+                  i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>, i8 1)
+  %2 = shl <64 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
+                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  ret <64 x i8> %2
+}
+
+attributes #0 = { "target-features"="+avx512bw" }
+attributes #1 = { "target-features"="+avx2" }
+
+declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
+declare <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8)
+declare <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8>, <64 x i8>, i8)
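
A minimal standalone sketch for sanity-checking the algebra behind the fold
(illustrative only, not part of the patch): it models one byte lane of
GF2P8AFFINEQB following the Intel SDM definition (result bit i is the parity
of matrix byte [7-i] ANDed with the source byte, XORed with bit i of the
immediate) and exhaustively checks that
shl(affine(x, M, imm), s) == affine(x, M >> (8 * s), (imm << s) & 0xFF).
The AffineByte helper and the harness are made-up names, not LLVM APIs.

#include <cstdint>
#include <cstdio>

// Scalar model of one byte lane of GF2P8AFFINEQB (per the Intel SDM):
// result bit I = parity(Matrix.byte[7 - I] & X) ^ Imm.bit[I].
static uint8_t AffineByte(uint8_t X, uint64_t Matrix, uint8_t Imm) {
  uint8_t Result = 0;
  for (int I = 0; I < 8; ++I) {
    uint8_t Row = (Matrix >> (8 * (7 - I))) & 0xFF;
    Result |= (uint8_t)(__builtin_parity(Row & X) << I);
  }
  return Result ^ Imm;
}

int main() {
  // The identity matrix used by the tests, plus an arbitrary matrix.
  const uint64_t Matrices[] = {0x0102040810204080ULL, 0xDEADBEEFCAFEF00DULL};
  for (uint64_t M : Matrices)
    for (unsigned S = 1; S < 8; ++S)
      for (unsigned Imm = 0; Imm < 256; ++Imm)
        for (unsigned X = 0; X < 256; ++X) {
          uint8_t LHS = (uint8_t)(AffineByte((uint8_t)X, M, (uint8_t)Imm) << S);
          uint8_t RHS = AffineByte((uint8_t)X, M >> (8 * S),
                                   (uint8_t)((Imm << S) & 0xFF));
          if (LHS != RHS) {
            printf("mismatch: M=%016llx S=%u Imm=%u X=%u\n",
                   (unsigned long long)M, S, Imm, X);
            return 1;
          }
        }
  puts("shift folds into the affine matrix for all tested cases");
  return 0;
}

Built with clang++ or g++ (for __builtin_parity), this prints the success
line after checking all 2 x 7 x 256 x 256 combinations, matching the
NewMatrix/NewImm computation in combineShiftLeft above.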


