[llvm] [LLVM][ConstProp] Enable intrinsic simplifications for vector ConstantInt based operands. (PR #159358)

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 17 07:48:43 PDT 2025


https://github.com/paulwalker-arm updated https://github.com/llvm/llvm-project/pull/159358

>From 47a15e353ba0bc5433d55eaf3ce595109e59d4a7 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Wed, 17 Sep 2025 12:17:30 +0000
Subject: [PATCH 1/3] Update tests to show the effect of using vector
 ConstantInts.

---
 .../ConstProp/WebAssembly/any_all_true.ll     |  31 ++--
 .../InstSimplify/ConstProp/bitcount.ll        |  17 ++
 .../InstSimplify/ConstProp/bitreverse.ll      |  51 ++++++
 .../InstSimplify/ConstProp/bswap.ll           |  17 ++
 .../InstSimplify/ConstProp/vecreduce.ll       | 147 +++++++++++++-----
 5 files changed, 217 insertions(+), 46 deletions(-)
 create mode 100644 llvm/test/Transforms/InstSimplify/ConstProp/bitreverse.ll

diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll
index 7b30edbf7792b..96844c522960e 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 
-; RUN: opt -passes=instsimplify -S < %s | FileCheck %s
+; RUN: opt -passes=instsimplify -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-CV
+; RUN: opt -passes=instsimplify -use-constant-int-for-fixed-length-splat -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-CI
 
 ; Test that intrinsics wasm call are constant folded
 
@@ -41,14 +42,26 @@ define void @all_true_splat_not_all_non_zero(ptr %ptr) {
 }
 
 define void @all_true_splat_all_non_zero(ptr %ptr) {
-; CHECK-LABEL: define void @all_true_splat_all_non_zero(
-; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-NEXT:    ret void
+; CHECK-CV-LABEL: define void @all_true_splat_all_non_zero(
+; CHECK-CV-SAME: ptr [[PTR:%.*]]) {
+; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-CV-NEXT:    ret void
+;
+; CHECK-CI-LABEL: define void @all_true_splat_all_non_zero(
+; CHECK-CI-SAME: ptr [[PTR:%.*]]) {
+; CHECK-CI-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-CI-NEXT:    [[B:%.*]] = call i32 @llvm.wasm.alltrue.v8i16(<8 x i16> splat (i16 1))
+; CHECK-CI-NEXT:    store volatile i32 [[B]], ptr [[PTR]], align 4
+; CHECK-CI-NEXT:    [[C:%.*]] = call i32 @llvm.wasm.alltrue.v4i32(<4 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    store volatile i32 [[C]], ptr [[PTR]], align 4
+; CHECK-CI-NEXT:    [[D:%.*]] = call i32 @llvm.wasm.alltrue.v2i64(<2 x i64> splat (i64 2))
+; CHECK-CI-NEXT:    store volatile i32 [[D]], ptr [[PTR]], align 4
+; CHECK-CI-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-CI-NEXT:    ret void
 ;
   %a = call i32 @llvm.wasm.alltrue(<16 x i8> <i8 1, i8 3, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
   store volatile i32 %a, ptr %ptr
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/bitcount.ll b/llvm/test/Transforms/InstSimplify/ConstProp/bitcount.ll
index 68b45a94af4bc..f68b85ed4db26 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/bitcount.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/bitcount.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -use-constant-int-for-scalable-splat -S | FileCheck %s
 
 declare i31 @llvm.ctpop.i31(i31 %val)
 declare i32 @llvm.cttz.i32(i32 %val, i1)
@@ -120,6 +121,22 @@ define <2 x i31> @ctpop_vector() {
   ret <2 x i31> %x
 }
 
+define <2 x i31> @ctpop_vector_splat_v2i31() {
+; CHECK-LABEL: @ctpop_vector_splat_v2i31(
+; CHECK-NEXT:    ret <2 x i31> splat (i31 1)
+;
+  %x = call <2 x i31> @llvm.ctpop.v2i31(<2 x i31> splat(i31 16))
+  ret <2 x i31> %x
+}
+
+define <vscale x 2 x i31> @ctpop_vector_splat_nxv2i31() {
+; CHECK-LABEL: @ctpop_vector_splat_nxv2i31(
+; CHECK-NEXT:    ret <vscale x 2 x i31> splat (i31 1)
+;
+  %x = call <vscale x 2 x i31> @llvm.ctpop.nxv2i31(<vscale x 2 x i31> splat(i31 16))
+  ret <vscale x 2 x i31> %x
+}
+
 define <2 x i31> @ctpop_vector_undef() {
 ; CHECK-LABEL: @ctpop_vector_undef(
 ; CHECK-NEXT:    ret <2 x i31> zeroinitializer
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/bitreverse.ll b/llvm/test/Transforms/InstSimplify/ConstProp/bitreverse.ll
new file mode 100644
index 0000000000000..409141a2c872b
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/bitreverse.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -use-constant-int-for-scalable-splat -S | FileCheck %s
+
+define i16 @W() {
+; CHECK-LABEL: define i16 @W() {
+; CHECK-NEXT:    ret i16 -32768
+;
+  %Z = call i16 @llvm.bitreverse.i16(i16 1)
+  ret i16 %Z
+}
+
+define i32 @X() {
+; CHECK-LABEL: define i32 @X() {
+; CHECK-NEXT:    ret i32 -2147483648
+;
+  %Z = call i32 @llvm.bitreverse.i32(i32 1)
+  ret i32 %Z
+}
+
+define i64 @Y() {
+; CHECK-LABEL: define i64 @Y() {
+; CHECK-NEXT:    ret i64 -9223372036854775808
+;
+  %Z = call i64 @llvm.bitreverse.i64(i64 1)
+  ret i64 %Z
+}
+
+define i80 @Z() {
+; CHECK-LABEL: define i80 @Z() {
+; CHECK-NEXT:    ret i80 23777929115895377691656
+;
+  %Z = call i80 @llvm.bitreverse.i80(i80 76151636403560493650080)
+  ret i80 %Z
+}
+
+define <4 x i32> @bitreverse_splat_v4i32() {
+; CHECK-LABEL: define <4 x i32> @bitreverse_splat_v4i32() {
+; CHECK-NEXT:    ret <4 x i32> splat (i32 -2147483648)
+;
+  %Z = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> splat(i32 1))
+  ret <4 x i32> %Z
+}
+
+define <vscale x 4 x i32> @bitreverse_splat_nxv4i32() {
+; CHECK-LABEL: define <vscale x 4 x i32> @bitreverse_splat_nxv4i32() {
+; CHECK-NEXT:    ret <vscale x 4 x i32> splat (i32 -2147483648)
+;
+  %Z = call <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32> splat(i32 1))
+  ret <vscale x 4 x i32> %Z
+}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/bswap.ll b/llvm/test/Transforms/InstSimplify/ConstProp/bswap.ll
index 42bb73344995b..4db8ced58327a 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/bswap.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/bswap.ll
@@ -2,6 +2,7 @@
 ; bswap should be constant folded when it is passed a constant argument
 
 ; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -use-constant-int-for-scalable-splat -S | FileCheck %s
 
 declare i16 @llvm.bswap.i16(i16)
 
@@ -42,3 +43,19 @@ define i80 @Z() {
   %Z = call i80 @llvm.bswap.i80( i80 76151636403560493650080 )
   ret i80 %Z
 }
+
+define <4 x i32> @bswap_splat_v4i32() {
+; CHECK-LABEL: define <4 x i32> @bswap_splat_v4i32() {
+; CHECK-NEXT:    ret <4 x i32> splat (i32 16777216)
+;
+  %Z = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> splat(i32 1))
+  ret <4 x i32> %Z
+}
+
+define <vscale x 4 x i32> @bswap_splat_nxv4i32() {
+; CHECK-LABEL: define <vscale x 4 x i32> @bswap_splat_nxv4i32() {
+; CHECK-NEXT:    ret <vscale x 4 x i32> splat (i32 16777216)
+;
+  %Z = call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> splat(i32 1))
+  ret <vscale x 4 x i32> %Z
+}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
index e994921f62574..da1432fd342ef 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify -S | FileCheck %s -check-prefixes=CHECK,CHECK-CV
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -S | FileCheck %s -check-prefixes=CHECK,CHECK-CI
 
 declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %a)
 declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %a)
@@ -30,8 +31,12 @@ define i32 @add_0() {
 }
 
 define i32 @add_1() {
-; CHECK-LABEL: @add_1(
-; CHECK-NEXT:    ret i32 8
+; CHECK-CV-LABEL: @add_1(
+; CHECK-CV-NEXT:    ret i32 8
+;
+; CHECK-CI-LABEL: @add_1(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -46,8 +51,12 @@ define i32 @add_inc() {
 }
 
 define i32 @add_1v() {
-; CHECK-LABEL: @add_1v(
-; CHECK-NEXT:    ret i32 10
+; CHECK-CV-LABEL: @add_1v(
+; CHECK-CV-NEXT:    ret i32 10
+;
+; CHECK-CI-LABEL: @add_1v(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> splat (i32 10))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -105,8 +114,12 @@ define i32 @mul_0() {
 }
 
 define i32 @mul_1() {
-; CHECK-LABEL: @mul_1(
-; CHECK-NEXT:    ret i32 1
+; CHECK-CV-LABEL: @mul_1(
+; CHECK-CV-NEXT:    ret i32 1
+;
+; CHECK-CI-LABEL: @mul_1(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -121,8 +134,12 @@ define i32 @mul_inc() {
 }
 
 define i32 @mul_1v() {
-; CHECK-LABEL: @mul_1v(
-; CHECK-NEXT:    ret i32 10
+; CHECK-CV-LABEL: @mul_1v(
+; CHECK-CV-NEXT:    ret i32 10
+;
+; CHECK-CI-LABEL: @mul_1v(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.mul.v1i32(<1 x i32> splat (i32 10))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.mul.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -171,8 +188,12 @@ define i32 @and_0() {
 }
 
 define i32 @and_1() {
-; CHECK-LABEL: @and_1(
-; CHECK-NEXT:    ret i32 1
+; CHECK-CV-LABEL: @and_1(
+; CHECK-CV-NEXT:    ret i32 1
+;
+; CHECK-CI-LABEL: @and_1(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -187,8 +208,12 @@ define i32 @and_inc() {
 }
 
 define i32 @and_1v() {
-; CHECK-LABEL: @and_1v(
-; CHECK-NEXT:    ret i32 10
+; CHECK-CV-LABEL: @and_1v(
+; CHECK-CV-NEXT:    ret i32 10
+;
+; CHECK-CI-LABEL: @and_1v(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.and.v1i32(<1 x i32> splat (i32 10))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.and.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -237,8 +262,12 @@ define i32 @or_0() {
 }
 
 define i32 @or_1() {
-; CHECK-LABEL: @or_1(
-; CHECK-NEXT:    ret i32 1
+; CHECK-CV-LABEL: @or_1(
+; CHECK-CV-NEXT:    ret i32 1
+;
+; CHECK-CI-LABEL: @or_1(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -253,8 +282,12 @@ define i32 @or_inc() {
 }
 
 define i32 @or_1v() {
-; CHECK-LABEL: @or_1v(
-; CHECK-NEXT:    ret i32 10
+; CHECK-CV-LABEL: @or_1v(
+; CHECK-CV-NEXT:    ret i32 10
+;
+; CHECK-CI-LABEL: @or_1v(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.or.v1i32(<1 x i32> splat (i32 10))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.or.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -303,8 +336,12 @@ define i32 @xor_0() {
 }
 
 define i32 @xor_1() {
-; CHECK-LABEL: @xor_1(
-; CHECK-NEXT:    ret i32 0
+; CHECK-CV-LABEL: @xor_1(
+; CHECK-CV-NEXT:    ret i32 0
+;
+; CHECK-CI-LABEL: @xor_1(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -319,8 +356,12 @@ define i32 @xor_inc() {
 }
 
 define i32 @xor_1v() {
-; CHECK-LABEL: @xor_1v(
-; CHECK-NEXT:    ret i32 10
+; CHECK-CV-LABEL: @xor_1v(
+; CHECK-CV-NEXT:    ret i32 10
+;
+; CHECK-CI-LABEL: @xor_1v(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.xor.v1i32(<1 x i32> splat (i32 10))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.xor.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -369,8 +410,12 @@ define i32 @smin_0() {
 }
 
 define i32 @smin_1() {
-; CHECK-LABEL: @smin_1(
-; CHECK-NEXT:    ret i32 1
+; CHECK-CV-LABEL: @smin_1(
+; CHECK-CV-NEXT:    ret i32 1
+;
+; CHECK-CI-LABEL: @smin_1(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -385,8 +430,12 @@ define i32 @smin_inc() {
 }
 
 define i32 @smin_1v() {
-; CHECK-LABEL: @smin_1v(
-; CHECK-NEXT:    ret i32 10
+; CHECK-CV-LABEL: @smin_1v(
+; CHECK-CV-NEXT:    ret i32 10
+;
+; CHECK-CI-LABEL: @smin_1v(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> splat (i32 10))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -435,8 +484,12 @@ define i32 @smax_0() {
 }
 
 define i32 @smax_1() {
-; CHECK-LABEL: @smax_1(
-; CHECK-NEXT:    ret i32 1
+; CHECK-CV-LABEL: @smax_1(
+; CHECK-CV-NEXT:    ret i32 1
+;
+; CHECK-CI-LABEL: @smax_1(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -451,8 +504,12 @@ define i32 @smax_inc() {
 }
 
 define i32 @smax_1v() {
-; CHECK-LABEL: @smax_1v(
-; CHECK-NEXT:    ret i32 10
+; CHECK-CV-LABEL: @smax_1v(
+; CHECK-CV-NEXT:    ret i32 10
+;
+; CHECK-CI-LABEL: @smax_1v(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> splat (i32 10))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -501,8 +558,12 @@ define i32 @umin_0() {
 }
 
 define i32 @umin_1() {
-; CHECK-LABEL: @umin_1(
-; CHECK-NEXT:    ret i32 1
+; CHECK-CV-LABEL: @umin_1(
+; CHECK-CV-NEXT:    ret i32 1
+;
+; CHECK-CI-LABEL: @umin_1(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -517,8 +578,12 @@ define i32 @umin_inc() {
 }
 
 define i32 @umin_1v() {
-; CHECK-LABEL: @umin_1v(
-; CHECK-NEXT:    ret i32 10
+; CHECK-CV-LABEL: @umin_1v(
+; CHECK-CV-NEXT:    ret i32 10
+;
+; CHECK-CI-LABEL: @umin_1v(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> splat (i32 10))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -567,8 +632,12 @@ define i32 @umax_0() {
 }
 
 define i32 @umax_1() {
-; CHECK-LABEL: @umax_1(
-; CHECK-NEXT:    ret i32 1
+; CHECK-CV-LABEL: @umax_1(
+; CHECK-CV-NEXT:    ret i32 1
+;
+; CHECK-CI-LABEL: @umax_1(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> splat (i32 1))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -583,8 +652,12 @@ define i32 @umax_inc() {
 }
 
 define i32 @umax_1v() {
-; CHECK-LABEL: @umax_1v(
-; CHECK-NEXT:    ret i32 10
+; CHECK-CV-LABEL: @umax_1v(
+; CHECK-CV-NEXT:    ret i32 10
+;
+; CHECK-CI-LABEL: @umax_1v(
+; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> splat (i32 10))
+; CHECK-CI-NEXT:    ret i32 [[X]]
 ;
   %x = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> <i32 10>)
   ret i32 %x

>From 262b074d326130c9d0a88bab7f3a8eb029a27f50 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Wed, 17 Sep 2025 12:19:00 +0000
Subject: [PATCH 2/3] [LLVM][ConstProp] Enable intrinsic simplifications for
 vector ConstantInt based operands.

Simplification of vector.reduce intrinsics is prevented by an
early bailout for ConstantInt-based operands. This PR removes
the bailout and updates the tests to show matching output when
-use-constant-int-for-*-splat is used.

No new simplifications are added, which is why I only add scalable
vector tests for things like bswap and not the vector reductions.
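
As an illustrative sketch (mirroring the existing add_1 test in
vecreduce.ll; the function name below is hypothetical), this is the kind
of input that now constant folds when the splat operand is represented
as a vector ConstantInt:

  ; RUN: opt -passes=instsimplify -use-constant-int-for-fixed-length-splat -S < %s
  define i32 @add_splat_example() {
    ; The operand is a splat ConstantInt rather than a ConstantDataVector,
    ; so the old bailout in constantFoldVectorReduce left the call intact.
    ; With the bailout removed this folds to: ret i32 8
    %x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> splat (i32 1))
    ret i32 %x
  }
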
---
 llvm/lib/Analysis/ConstantFolding.cpp         |  10 +-
 .../ConstProp/WebAssembly/any_all_true.ll     |  32 ++--
 .../InstSimplify/ConstProp/vecreduce.ll       | 148 +++++-------------
 3 files changed, 51 insertions(+), 139 deletions(-)

diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index a3b2e62a1b8ba..e50120d345ebe 100755
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -2165,7 +2165,7 @@ Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
     return PoisonValue::get(VT->getElementType());
 
   // TODO: Handle undef.
-  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
+  if (!isa<ConstantVector, ConstantDataVector, ConstantInt>(Op))
     return nullptr;
 
   auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
@@ -3040,9 +3040,6 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
       Val = Val | Val << 1;
       return ConstantInt::get(Ty, Val);
     }
-
-    default:
-      return nullptr;
     }
   }
 
@@ -3063,9 +3060,8 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
   }
 
   // Support ConstantVector in case we have an Undef in the top.
-  if (isa<ConstantVector>(Operands[0]) ||
-      isa<ConstantDataVector>(Operands[0]) ||
-      isa<ConstantAggregateZero>(Operands[0])) {
+  if (isa<ConstantVector, ConstantDataVector, ConstantAggregateZero,
+          ConstantInt>(Operands[0])) {
     auto *Op = cast<Constant>(Operands[0]);
     switch (IntrinsicID) {
     default: break;
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll
index 96844c522960e..71dad41b971b5 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/any_all_true.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 
-; RUN: opt -passes=instsimplify -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-CV
-; RUN: opt -passes=instsimplify -use-constant-int-for-fixed-length-splat -S < %s | FileCheck %s -check-prefixes=CHECK,CHECK-CI
+; RUN: opt -passes=instsimplify -S < %s | FileCheck %s
+; RUN: opt -passes=instsimplify -use-constant-int-for-fixed-length-splat -S < %s | FileCheck %s
 
 ; Test that intrinsics wasm call are constant folded
 
@@ -42,26 +42,14 @@ define void @all_true_splat_not_all_non_zero(ptr %ptr) {
 }
 
 define void @all_true_splat_all_non_zero(ptr %ptr) {
-; CHECK-CV-LABEL: define void @all_true_splat_all_non_zero(
-; CHECK-CV-SAME: ptr [[PTR:%.*]]) {
-; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-CV-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-CV-NEXT:    ret void
-;
-; CHECK-CI-LABEL: define void @all_true_splat_all_non_zero(
-; CHECK-CI-SAME: ptr [[PTR:%.*]]) {
-; CHECK-CI-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-CI-NEXT:    [[B:%.*]] = call i32 @llvm.wasm.alltrue.v8i16(<8 x i16> splat (i16 1))
-; CHECK-CI-NEXT:    store volatile i32 [[B]], ptr [[PTR]], align 4
-; CHECK-CI-NEXT:    [[C:%.*]] = call i32 @llvm.wasm.alltrue.v4i32(<4 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    store volatile i32 [[C]], ptr [[PTR]], align 4
-; CHECK-CI-NEXT:    [[D:%.*]] = call i32 @llvm.wasm.alltrue.v2i64(<2 x i64> splat (i64 2))
-; CHECK-CI-NEXT:    store volatile i32 [[D]], ptr [[PTR]], align 4
-; CHECK-CI-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
-; CHECK-CI-NEXT:    ret void
+; CHECK-LABEL: define void @all_true_splat_all_non_zero(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[PTR]], align 4
+; CHECK-NEXT:    ret void
 ;
   %a = call i32 @llvm.wasm.alltrue(<16 x i8> <i8 1, i8 3, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
   store volatile i32 %a, ptr %ptr
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
index da1432fd342ef..9f9e3f9ffc070 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vecreduce.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=instsimplify -S | FileCheck %s -check-prefixes=CHECK,CHECK-CV
-; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -S | FileCheck %s -check-prefixes=CHECK,CHECK-CI
+; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify -use-constant-int-for-fixed-length-splat -S | FileCheck %s
 
 declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %a)
 declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %a)
@@ -31,12 +31,8 @@ define i32 @add_0() {
 }
 
 define i32 @add_1() {
-; CHECK-CV-LABEL: @add_1(
-; CHECK-CV-NEXT:    ret i32 8
-;
-; CHECK-CI-LABEL: @add_1(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @add_1(
+; CHECK-NEXT:    ret i32 8
 ;
   %x = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -51,12 +47,8 @@ define i32 @add_inc() {
 }
 
 define i32 @add_1v() {
-; CHECK-CV-LABEL: @add_1v(
-; CHECK-CV-NEXT:    ret i32 10
-;
-; CHECK-CI-LABEL: @add_1v(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> splat (i32 10))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @add_1v(
+; CHECK-NEXT:    ret i32 10
 ;
   %x = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -114,12 +106,8 @@ define i32 @mul_0() {
 }
 
 define i32 @mul_1() {
-; CHECK-CV-LABEL: @mul_1(
-; CHECK-CV-NEXT:    ret i32 1
-;
-; CHECK-CI-LABEL: @mul_1(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @mul_1(
+; CHECK-NEXT:    ret i32 1
 ;
   %x = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -134,12 +122,8 @@ define i32 @mul_inc() {
 }
 
 define i32 @mul_1v() {
-; CHECK-CV-LABEL: @mul_1v(
-; CHECK-CV-NEXT:    ret i32 10
-;
-; CHECK-CI-LABEL: @mul_1v(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.mul.v1i32(<1 x i32> splat (i32 10))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @mul_1v(
+; CHECK-NEXT:    ret i32 10
 ;
   %x = call i32 @llvm.vector.reduce.mul.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -188,12 +172,8 @@ define i32 @and_0() {
 }
 
 define i32 @and_1() {
-; CHECK-CV-LABEL: @and_1(
-; CHECK-CV-NEXT:    ret i32 1
-;
-; CHECK-CI-LABEL: @and_1(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @and_1(
+; CHECK-NEXT:    ret i32 1
 ;
   %x = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -208,12 +188,8 @@ define i32 @and_inc() {
 }
 
 define i32 @and_1v() {
-; CHECK-CV-LABEL: @and_1v(
-; CHECK-CV-NEXT:    ret i32 10
-;
-; CHECK-CI-LABEL: @and_1v(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.and.v1i32(<1 x i32> splat (i32 10))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @and_1v(
+; CHECK-NEXT:    ret i32 10
 ;
   %x = call i32 @llvm.vector.reduce.and.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -262,12 +238,8 @@ define i32 @or_0() {
 }
 
 define i32 @or_1() {
-; CHECK-CV-LABEL: @or_1(
-; CHECK-CV-NEXT:    ret i32 1
-;
-; CHECK-CI-LABEL: @or_1(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @or_1(
+; CHECK-NEXT:    ret i32 1
 ;
   %x = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -282,12 +254,8 @@ define i32 @or_inc() {
 }
 
 define i32 @or_1v() {
-; CHECK-CV-LABEL: @or_1v(
-; CHECK-CV-NEXT:    ret i32 10
-;
-; CHECK-CI-LABEL: @or_1v(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.or.v1i32(<1 x i32> splat (i32 10))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @or_1v(
+; CHECK-NEXT:    ret i32 10
 ;
   %x = call i32 @llvm.vector.reduce.or.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -336,12 +304,8 @@ define i32 @xor_0() {
 }
 
 define i32 @xor_1() {
-; CHECK-CV-LABEL: @xor_1(
-; CHECK-CV-NEXT:    ret i32 0
-;
-; CHECK-CI-LABEL: @xor_1(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @xor_1(
+; CHECK-NEXT:    ret i32 0
 ;
   %x = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -356,12 +320,8 @@ define i32 @xor_inc() {
 }
 
 define i32 @xor_1v() {
-; CHECK-CV-LABEL: @xor_1v(
-; CHECK-CV-NEXT:    ret i32 10
-;
-; CHECK-CI-LABEL: @xor_1v(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.xor.v1i32(<1 x i32> splat (i32 10))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @xor_1v(
+; CHECK-NEXT:    ret i32 10
 ;
   %x = call i32 @llvm.vector.reduce.xor.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -410,12 +370,8 @@ define i32 @smin_0() {
 }
 
 define i32 @smin_1() {
-; CHECK-CV-LABEL: @smin_1(
-; CHECK-CV-NEXT:    ret i32 1
-;
-; CHECK-CI-LABEL: @smin_1(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @smin_1(
+; CHECK-NEXT:    ret i32 1
 ;
   %x = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -430,12 +386,8 @@ define i32 @smin_inc() {
 }
 
 define i32 @smin_1v() {
-; CHECK-CV-LABEL: @smin_1v(
-; CHECK-CV-NEXT:    ret i32 10
-;
-; CHECK-CI-LABEL: @smin_1v(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> splat (i32 10))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @smin_1v(
+; CHECK-NEXT:    ret i32 10
 ;
   %x = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -484,12 +436,8 @@ define i32 @smax_0() {
 }
 
 define i32 @smax_1() {
-; CHECK-CV-LABEL: @smax_1(
-; CHECK-CV-NEXT:    ret i32 1
-;
-; CHECK-CI-LABEL: @smax_1(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @smax_1(
+; CHECK-NEXT:    ret i32 1
 ;
   %x = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -504,12 +452,8 @@ define i32 @smax_inc() {
 }
 
 define i32 @smax_1v() {
-; CHECK-CV-LABEL: @smax_1v(
-; CHECK-CV-NEXT:    ret i32 10
-;
-; CHECK-CI-LABEL: @smax_1v(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> splat (i32 10))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @smax_1v(
+; CHECK-NEXT:    ret i32 10
 ;
   %x = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -558,12 +502,8 @@ define i32 @umin_0() {
 }
 
 define i32 @umin_1() {
-; CHECK-CV-LABEL: @umin_1(
-; CHECK-CV-NEXT:    ret i32 1
-;
-; CHECK-CI-LABEL: @umin_1(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @umin_1(
+; CHECK-NEXT:    ret i32 1
 ;
   %x = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -578,12 +518,8 @@ define i32 @umin_inc() {
 }
 
 define i32 @umin_1v() {
-; CHECK-CV-LABEL: @umin_1v(
-; CHECK-CV-NEXT:    ret i32 10
-;
-; CHECK-CI-LABEL: @umin_1v(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> splat (i32 10))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @umin_1v(
+; CHECK-NEXT:    ret i32 10
 ;
   %x = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> <i32 10>)
   ret i32 %x
@@ -632,12 +568,8 @@ define i32 @umax_0() {
 }
 
 define i32 @umax_1() {
-; CHECK-CV-LABEL: @umax_1(
-; CHECK-CV-NEXT:    ret i32 1
-;
-; CHECK-CI-LABEL: @umax_1(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> splat (i32 1))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @umax_1(
+; CHECK-NEXT:    ret i32 1
 ;
   %x = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
   ret i32 %x
@@ -652,12 +584,8 @@ define i32 @umax_inc() {
 }
 
 define i32 @umax_1v() {
-; CHECK-CV-LABEL: @umax_1v(
-; CHECK-CV-NEXT:    ret i32 10
-;
-; CHECK-CI-LABEL: @umax_1v(
-; CHECK-CI-NEXT:    [[X:%.*]] = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> splat (i32 10))
-; CHECK-CI-NEXT:    ret i32 [[X]]
+; CHECK-LABEL: @umax_1v(
+; CHECK-NEXT:    ret i32 10
 ;
   %x = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> <i32 10>)
   ret i32 %x

>From 5b3bd5e288dccd8bd7edfc26fc106b7a4c7e4c70 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Wed, 17 Sep 2025 14:47:41 +0000
Subject: [PATCH 3/3] Reduce number of isa<Constant...> tests.

---
 llvm/lib/Analysis/ConstantFolding.cpp | 40 ++++++++++++---------------
 1 file changed, 17 insertions(+), 23 deletions(-)

diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index e50120d345ebe..749f9aecf885b 100755
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -2165,10 +2165,7 @@ Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
     return PoisonValue::get(VT->getElementType());
 
   // TODO: Handle undef.
-  if (!isa<ConstantVector, ConstantDataVector, ConstantInt>(Op))
-    return nullptr;
-
-  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
+  auto *EltC = dyn_cast_or_null<ConstantInt>(Op->getAggregateElement(0U));
   if (!EltC)
     return nullptr;
 
@@ -3043,28 +3040,22 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
     }
   }
 
-  switch (IntrinsicID) {
-  default: break;
-  case Intrinsic::vector_reduce_add:
-  case Intrinsic::vector_reduce_mul:
-  case Intrinsic::vector_reduce_and:
-  case Intrinsic::vector_reduce_or:
-  case Intrinsic::vector_reduce_xor:
-  case Intrinsic::vector_reduce_smin:
-  case Intrinsic::vector_reduce_smax:
-  case Intrinsic::vector_reduce_umin:
-  case Intrinsic::vector_reduce_umax:
-    if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
-      return C;
-    break;
-  }
-
-  // Support ConstantVector in case we have an Undef in the top.
-  if (isa<ConstantVector, ConstantDataVector, ConstantAggregateZero,
-          ConstantInt>(Operands[0])) {
+  if (Operands[0]->getType()->isVectorTy()) {
     auto *Op = cast<Constant>(Operands[0]);
     switch (IntrinsicID) {
     default: break;
+    case Intrinsic::vector_reduce_add:
+    case Intrinsic::vector_reduce_mul:
+    case Intrinsic::vector_reduce_and:
+    case Intrinsic::vector_reduce_or:
+    case Intrinsic::vector_reduce_xor:
+    case Intrinsic::vector_reduce_smin:
+    case Intrinsic::vector_reduce_smax:
+    case Intrinsic::vector_reduce_umin:
+    case Intrinsic::vector_reduce_umax:
+      if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
+        return C;
+      break;
     case Intrinsic::x86_sse_cvtss2si:
     case Intrinsic::x86_sse_cvtss2si64:
     case Intrinsic::x86_sse2_cvtsd2si:
@@ -3092,6 +3083,9 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
 
     case Intrinsic::wasm_alltrue:
       // Check each element individually
+      if (!isa<ConstantVector, ConstantDataVector, ConstantAggregateZero,
+               ConstantInt>(Op))
+        break;
       unsigned E = cast<FixedVectorType>(Op->getType())->getNumElements();
       for (unsigned I = 0; I != E; ++I)
         if (Constant *Elt = Op->getAggregateElement(I))


