[llvm] e722d96 - [DAG] Avoid a crash when checking size of scalable type in visitANDLike

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 5 14:30:31 PST 2024


Author: Philip Reames
Date: 2024-02-05T14:30:10-08:00
New Revision: e722d9662dd8cdd3be9e434b057593e97a7d4417

URL: https://github.com/llvm/llvm-project/commit/e722d9662dd8cdd3be9e434b057593e97a7d4417
DIFF: https://github.com/llvm/llvm-project/commit/e722d9662dd8cdd3be9e434b057593e97a7d4417.diff

LOG: [DAG] Avoid a crash when checking size of scalable type in visitANDLike

Fixes https://github.com/llvm/llvm-project/issues/80744.  This transform
doesn't handle vectors at all. The fixed-length ones pass the first
check, but would fail the constant operand checks which immediately follow.
This patch takes the simplest approach, and just guards the transform
for scalar integers.

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/RISCV/and-add-lsr.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 3ce45e0e43bf4..7f91de12e10d0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6376,7 +6376,7 @@ SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) {
 
   // TODO: Rewrite this to return a new 'AND' instead of using CombineTo.
   if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
-      VT.getSizeInBits() <= 64 && N0->hasOneUse()) {
+      VT.isScalarInteger() && VT.getSizeInBits() <= 64 && N0->hasOneUse()) {
     if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
       if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
         // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal

diff --git a/llvm/test/CodeGen/RISCV/and-add-lsr.ll b/llvm/test/CodeGen/RISCV/and-add-lsr.ll
index c2c7a4999b2f1..22ac9a2705aea 100644
--- a/llvm/test/CodeGen/RISCV/and-add-lsr.ll
+++ b/llvm/test/CodeGen/RISCV/and-add-lsr.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+v < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+v < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64I
 
 define i32 @and_add_lsr(i32 %x, i32 %y) {
@@ -23,3 +23,57 @@ define i32 @and_add_lsr(i32 %x, i32 %y) {
   %r = and i32 %2, %1
   ret i32 %r
 }
+
+; Make sure we don't crash on fixed length vectors
+define <2 x i32> @and_add_lsr_vec(<2 x i32> %x, <2 x i32> %y) {
+; RV32I-LABEL: and_add_lsr_vec:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a0, 1
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32I-NEXT:    vadd.vx v8, v8, a0
+; RV32I-NEXT:    vsrl.vi v9, v9, 20
+; RV32I-NEXT:    vand.vv v8, v9, v8
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and_add_lsr_vec:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a0, 1
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64I-NEXT:    vadd.vx v8, v8, a0
+; RV64I-NEXT:    vsrl.vi v9, v9, 20
+; RV64I-NEXT:    vand.vv v8, v9, v8
+; RV64I-NEXT:    ret
+  %1 = add <2 x i32> %x, splat (i32 4095)
+  %2 = lshr <2 x i32> %y, splat (i32 20)
+  %r = and <2 x i32> %2, %1
+  ret <2 x i32> %r
+}
+
+; Make sure we don't crash on scalable vectors
+define <vscale x 2 x i32> @and_add_lsr_vec2(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
+; RV32I-LABEL: and_add_lsr_vec2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a0, 1
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV32I-NEXT:    vadd.vx v8, v8, a0
+; RV32I-NEXT:    vsrl.vi v9, v9, 20
+; RV32I-NEXT:    vand.vv v8, v9, v8
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and_add_lsr_vec2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a0, 1
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV64I-NEXT:    vadd.vx v8, v8, a0
+; RV64I-NEXT:    vsrl.vi v9, v9, 20
+; RV64I-NEXT:    vand.vv v8, v9, v8
+; RV64I-NEXT:    ret
+  %1 = add <vscale x 2 x i32> %x, splat (i32 4095)
+  %2 = lshr <vscale x 2 x i32> %y, splat (i32 20)
+  %r = and <vscale x 2 x i32> %2, %1
+  ret <vscale x 2 x i32> %r
+}


        


More information about the llvm-commits mailing list