[llvm] [X86] Fold XOR of two vgf2p8affineqb instructions with same input (PR #179900)

via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 5 06:55:46 PST 2026


https://github.com/bala-bhargav updated https://github.com/llvm/llvm-project/pull/179900

>From 3c09bb14173b889c5e39e13706d89d6717b5b51f Mon Sep 17 00:00:00 2001
From: bhargav <penugondabalabharghav at gmail.com>
Date: Thu, 5 Feb 2026 15:40:04 +0530
Subject: [PATCH 1/2] [X86] Fold XOR of two vgf2p8affineqb with same input

This patch implements an optimization to fold:
  vgf2p8affineqb(x, m1, i1) ^ vgf2p8affineqb(x, m2, i2)
into:
  vgf2p8affineqb(x, m1 ^ m2, i1 ^ i2)

The matrix in vgf2p8affineqb determines which bits of the input are
XORed together. When XORing two affine transformations of the same
input, we can fold them by XORing both their matrices and immediates.

This optimization reduces instruction count and improves performance
for GFNI operations.

Fixes #178785
---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  41 ++++++
 llvm/test/CodeGen/X86/gfni-xor-fold-avx512.ll |  74 ++++++++++
 llvm/test/CodeGen/X86/gfni-xor-fold.ll        | 127 ++++++++++++++++++
 3 files changed, 242 insertions(+)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 53d1427d516c9..d4567e77c4dd8 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56005,6 +56005,43 @@ static SDValue combineXorWithGF2P8AFFINEQB(SDNode *N, const SDLoc &DL,
                      DAG.getTargetConstant(NewImm, DL, MVT::i8));
 }
 
+// Fold: vgf2p8affineqb(x, m1, i1) ^ vgf2p8affineqb(x, m2, i2)
+//   =>  vgf2p8affineqb(x, m1 ^ m2, i1 ^ i2)
+// The matrix in vgf2p8affineqb determines which bits of the input are XORed
+// together. XORing two affine transformations of the same input can be folded
+// by XORing both their matrices and immediates together.
+static SDValue combineXorWithTwoGF2P8AFFINEQB(SDNode *N, const SDLoc &DL,
+                                               SelectionDAG &DAG, EVT VT) {
+  using namespace SDPatternMatch;
+
+  SDValue X0, Y0, X1, Y1;
+  APInt Imm0, Imm1;
+  
+  // Use sd_match for structure matching - m_Xor handles commutation
+  // Match: GF2P8AFFINEQB(x, m1, i1) ^ GF2P8AFFINEQB(x, m2, i2)
+  if (!sd_match(N, m_Xor(m_OneUse(m_TernaryOp(X86ISD::GF2P8AFFINEQB, 
+                                               m_Value(X0), m_Value(Y0), 
+                                               m_ConstInt(Imm0))),
+                          m_OneUse(m_TernaryOp(X86ISD::GF2P8AFFINEQB,
+                                               m_Value(X1), m_Value(Y1),
+                                               m_ConstInt(Imm1))))))
+    return SDValue();
+
+  assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) &&
+         "Unsupported GFNI type");
+
+  // Both must operate on the same input
+  if (X0 != X1)
+    return SDValue();
+
+  uint64_t NewImm = Imm0.getZExtValue() ^ Imm1.getZExtValue();
+
+  SDValue NewMatrix = DAG.getNode(ISD::XOR, DL, VT, Y0, Y1);
+
+  return DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, X0, NewMatrix,
+                     DAG.getTargetConstant(NewImm, DL, MVT::i8));
+}
+
 static SDValue combineXorSubCTLZ(SDNode *N, const SDLoc &DL, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
   assert((N->getOpcode() == ISD::XOR || N->getOpcode() == ISD::SUB) &&
@@ -56110,6 +56147,10 @@ static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
   if (SDValue R = combineXorWithGF2P8AFFINEQB(N, DL, DAG, VT))
     return R;
 
+  if (SDValue R = combineXorWithTwoGF2P8AFFINEQB(N, DL, DAG, VT))
+    return R;
+
+
   // Fold not(iX bitcast(vXi1)) -> (iX bitcast(not(vec))) for legal boolvecs.
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   if (llvm::isAllOnesConstant(N1) && N0.getOpcode() == ISD::BITCAST &&
diff --git a/llvm/test/CodeGen/X86/gfni-xor-fold-avx512.ll b/llvm/test/CodeGen/X86/gfni-xor-fold-avx512.ll
index bf5dd46579813..7c75fbfb6ded6 100644
--- a/llvm/test/CodeGen/X86/gfni-xor-fold-avx512.ll
+++ b/llvm/test/CodeGen/X86/gfni-xor-fold-avx512.ll
@@ -61,3 +61,77 @@ define <64 x i8> @test_affine_xor_no_fold_512_variable(<64 x i8> %src1, <64 x i8
   %xor = xor <64 x i8> %gfni, %var
   ret <64 x i8> %xor
 }
+
+; Test folding XOR of two vgf2p8affineqb with same input - 512-bit
+define <64 x i8> @test_affine_affine_xor_fold_512(<64 x i8> %src, <64 x i8> %m1, <64 x i8> %m2) nounwind {
+;
+; CHECK-LABEL: test_affine_affine_xor_fold_512:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpxorq %zmm2, %zmm1, %zmm1
+; CHECK-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+  %gfni1 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src, <64 x i8> %m1, i8 0)
+  %gfni2 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src, <64 x i8> %m2, i8 0)
+  %xor = xor <64 x i8> %gfni1, %gfni2
+  ret <64 x i8> %xor
+}
+
+; Test with non-zero immediates - 512-bit
+define <64 x i8> @test_affine_affine_xor_fold_512_nonzero(<64 x i8> %src, <64 x i8> %m1, <64 x i8> %m2) nounwind {
+;
+; CHECK-LABEL: test_affine_affine_xor_fold_512_nonzero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpxorq %zmm2, %zmm1, %zmm1
+; CHECK-NEXT:    vgf2p8affineqb $15, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+  %gfni1 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src, <64 x i8> %m1, i8 5)
+  %gfni2 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src, <64 x i8> %m2, i8 10)
+  %xor = xor <64 x i8> %gfni1, %gfni2
+  ret <64 x i8> %xor
+}
+
+; Test commutative XOR - 512-bit
+define <64 x i8> @test_affine_affine_xor_fold_512_commutative(<64 x i8> %src, <64 x i8> %m1, <64 x i8> %m2) nounwind {
+;
+; CHECK-LABEL: test_affine_affine_xor_fold_512_commutative:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpxorq %zmm1, %zmm2, %zmm1
+; CHECK-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+  %gfni1 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src, <64 x i8> %m1, i8 0)
+  %gfni2 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src, <64 x i8> %m2, i8 0)
+  %xor = xor <64 x i8> %gfni2, %gfni1
+  ret <64 x i8> %xor
+}
+
+; Negative test: multi-use should not fold - 512-bit
+define <64 x i8> @test_affine_affine_xor_no_fold_512_multi_use(<64 x i8> %src, <64 x i8> %m1, <64 x i8> %m2, ptr %out) nounwind {
+;
+; CHECK-LABEL: test_affine_affine_xor_no_fold_512_multi_use:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgf2p8affineqb $0, %zmm1, %zmm0, %zmm1
+; CHECK-NEXT:    vgf2p8affineqb $0, %zmm2, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqa64 %zmm1, (%rdi)
+; CHECK-NEXT:    vpxorq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %gfni1 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src, <64 x i8> %m1, i8 0)
+  %gfni2 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src, <64 x i8> %m2, i8 0)
+  store <64 x i8> %gfni1, ptr %out
+  %xor = xor <64 x i8> %gfni1, %gfni2
+  ret <64 x i8> %xor
+}
+
+; Negative test: different inputs should not fold - 512-bit
+define <64 x i8> @test_affine_affine_xor_no_fold_512_different_inputs(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %m1, <64 x i8> %m2) nounwind {
+;
+; CHECK-LABEL: test_affine_affine_xor_no_fold_512_different_inputs:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgf2p8affineqb $0, %zmm2, %zmm0, %zmm0
+; CHECK-NEXT:    vgf2p8affineqb $0, %zmm3, %zmm1, %zmm1
+; CHECK-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    retq
+  %gfni1 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src1, <64 x i8> %m1, i8 0)
+  %gfni2 = call <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8> %src2, <64 x i8> %m2, i8 0)
+  %xor = xor <64 x i8> %gfni1, %gfni2
+  ret <64 x i8> %xor
+}
diff --git a/llvm/test/CodeGen/X86/gfni-xor-fold.ll b/llvm/test/CodeGen/X86/gfni-xor-fold.ll
index 510f4cefa4e57..91b7da8afd64c 100644
--- a/llvm/test/CodeGen/X86/gfni-xor-fold.ll
+++ b/llvm/test/CodeGen/X86/gfni-xor-fold.ll
@@ -142,3 +142,130 @@ define <16 x i8> @test_affine_xor_no_fold_variable(<16 x i8> %src1, <16 x i8> %s
   %xor = xor <16 x i8> %gfni, %var
   ret <16 x i8> %xor
 }
+
+; Test folding XOR of two vgf2p8affineqb with same input - 128-bit
+define <16 x i8> @test_affine_affine_xor_fold_128(<16 x i8> %src, <16 x i8> %m1, <16 x i8> %m2) nounwind {
+;
+; AVX-LABEL: test_affine_affine_xor_fold_128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_affine_affine_xor_fold_128:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpxor %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %gfni1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src, <16 x i8> %m1, i8 0)
+  %gfni2 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src, <16 x i8> %m2, i8 0)
+  %xor = xor <16 x i8> %gfni1, %gfni2
+  ret <16 x i8> %xor
+}
+
+; Test with non-zero immediates - 128-bit
+define <16 x i8> @test_affine_affine_xor_fold_128_nonzero(<16 x i8> %src, <16 x i8> %m1, <16 x i8> %m2) nounwind {
+;
+; AVX-LABEL: test_affine_affine_xor_fold_128_nonzero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vgf2p8affineqb $15, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_affine_affine_xor_fold_128_nonzero:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpxor %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vgf2p8affineqb $15, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %gfni1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src, <16 x i8> %m1, i8 5)
+  %gfni2 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src, <16 x i8> %m2, i8 10)
+  %xor = xor <16 x i8> %gfni1, %gfni2
+  ret <16 x i8> %xor
+}
+
+; Test commutative XOR - 128-bit
+define <16 x i8> @test_affine_affine_xor_fold_128_commutative(<16 x i8> %src, <16 x i8> %m1, <16 x i8> %m2) nounwind {
+;
+; AVX-LABEL: test_affine_affine_xor_fold_128_commutative:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_affine_affine_xor_fold_128_commutative:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpxor %xmm1, %xmm2, %xmm1
+; AVX512-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %gfni1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src, <16 x i8> %m1, i8 0)
+  %gfni2 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src, <16 x i8> %m2, i8 0)
+  %xor = xor <16 x i8> %gfni2, %gfni1
+  ret <16 x i8> %xor
+}
+
+; Negative test: multi-use should not fold - 128-bit
+define <16 x i8> @test_affine_affine_xor_no_fold_multi_use(<16 x i8> %src, <16 x i8> %m1, <16 x i8> %m2, ptr %out) nounwind {
+;
+; AVX-LABEL: test_affine_affine_xor_no_fold_multi_use:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_affine_affine_xor_no_fold_multi_use:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vgf2p8affineqb $0, %xmm1, %xmm0, %xmm1
+; AVX512-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX512-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+  %gfni1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src, <16 x i8> %m1, i8 0)
+  %gfni2 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src, <16 x i8> %m2, i8 0)
+  store <16 x i8> %gfni1, ptr %out
+  %xor = xor <16 x i8> %gfni1, %gfni2
+  ret <16 x i8> %xor
+}
+
+; Negative test: different inputs should not fold - 128-bit
+define <16 x i8> @test_affine_affine_xor_no_fold_different_inputs(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %m1, <16 x i8> %m2) nounwind {
+;
+; AVX-LABEL: test_affine_affine_xor_no_fold_different_inputs:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_affine_affine_xor_no_fold_different_inputs:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+  %gfni1 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src1, <16 x i8> %m1, i8 0)
+  %gfni2 = call <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8> %src2, <16 x i8> %m2, i8 0)
+  %xor = xor <16 x i8> %gfni1, %gfni2
+  ret <16 x i8> %xor
+}
+
+; Test 256-bit vectors
+define <32 x i8> @test_affine_affine_xor_fold_256(<32 x i8> %src, <32 x i8> %m1, <32 x i8> %m2) nounwind {
+;
+; AVX-LABEL: test_affine_affine_xor_fold_256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_affine_affine_xor_fold_256:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpxor %ymm2, %ymm1, %ymm1
+; AVX512-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %gfni1 = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> %src, <32 x i8> %m1, i8 0)
+  %gfni2 = call <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8> %src, <32 x i8> %m2, i8 0)
+  %xor = xor <32 x i8> %gfni1, %gfni2
+  ret <32 x i8> %xor
+}

>From 14b3c827cebbabcbd7b079e1ff85afb20cf846eb Mon Sep 17 00:00:00 2001
From: bhargav <penugondabalabharghav at gmail.com>
Date: Thu, 5 Feb 2026 18:54:50 +0530
Subject: [PATCH 2/2] [X86] Use m_Deferred in combineXorWithTwoGF2P8AFFINEQB
 (NFC)

Addresses a review comment from RKSimon by using the m_Deferred pattern
matcher instead of a manual equality check when matching the same input value.

This change:
- Replaces m_Value(X1) with m_Deferred(X0) in pattern matching
- Removes the X1 variable and manual if (X0 != X1) check
- Removes redundant comment and extra blank line for consistency

The m_Deferred matcher is the idiomatic LLVM way to ensure both
GF2P8AFFINEQB operations use the same input value.
---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d4567e77c4dd8..bfd3b6ddc36c7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56011,29 +56011,23 @@ static SDValue combineXorWithGF2P8AFFINEQB(SDNode *N, const SDLoc &DL,
 // together. XORing two affine transformations of the same input can be folded
 // by XORing both their matrices and immediates together.
 static SDValue combineXorWithTwoGF2P8AFFINEQB(SDNode *N, const SDLoc &DL,
-                                               SelectionDAG &DAG, EVT VT) {
+                                              SelectionDAG &DAG, EVT VT) {
   using namespace SDPatternMatch;
 
-  SDValue X0, Y0, X1, Y1;
+  SDValue X0, Y0, Y1;
   APInt Imm0, Imm1;
-  
   // Use sd_match for structure matching - m_Xor handles commutation
   // Match: GF2P8AFFINEQB(x, m1, i1) ^ GF2P8AFFINEQB(x, m2, i2)
-  if (!sd_match(N, m_Xor(m_OneUse(m_TernaryOp(X86ISD::GF2P8AFFINEQB, 
-                                               m_Value(X0), m_Value(Y0), 
-                                               m_ConstInt(Imm0))),
-                          m_OneUse(m_TernaryOp(X86ISD::GF2P8AFFINEQB,
-                                               m_Value(X1), m_Value(Y1),
-                                               m_ConstInt(Imm1))))))
+  if (!sd_match(
+          N, m_Xor(m_OneUse(m_TernaryOp(X86ISD::GF2P8AFFINEQB, m_Value(X0),
+                                        m_Value(Y0), m_ConstInt(Imm0))),
+                   m_OneUse(m_TernaryOp(X86ISD::GF2P8AFFINEQB, m_Deferred(X0),
+                                        m_Value(Y1), m_ConstInt(Imm1))))))
     return SDValue();
 
   assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) &&
          "Unsupported GFNI type");
 
-  // Both must operate on the same input
-  if (X0 != X1)
-    return SDValue();
-
   uint64_t NewImm = Imm0.getZExtValue() ^ Imm1.getZExtValue();
 
   SDValue NewMatrix = DAG.getNode(ISD::XOR, DL, VT, Y0, Y1);
@@ -56150,7 +56144,6 @@ static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
   if (SDValue R = combineXorWithTwoGF2P8AFFINEQB(N, DL, DAG, VT))
     return R;
 
-
   // Fold not(iX bitcast(vXi1)) -> (iX bitcast(not(vec))) for legal boolvecs.
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   if (llvm::isAllOnesConstant(N1) && N0.getOpcode() == ISD::BITCAST &&



More information about the llvm-commits mailing list