[llvm] [DAG] Fold nested add(add(reduce(a), b), add(reduce(c), d)) (PR #115150)

David Green via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 7 02:23:51 PST 2024


https://github.com/davemgreen updated https://github.com/llvm/llvm-project/pull/115150

From 4717ff467bd5c3811e5bff0bef0014ba700bcb0f Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Wed, 6 Nov 2024 21:47:51 +0000
Subject: [PATCH 1/2] [GlobalISel][AArch64] Generate ptrtoint/inttoptr as
 opposed to bitcast in unmerge.

When combining unmerges we could end up with bitcasts between ptr and i64
types. Make sure they are created as ptrtoint/inttoptr instead.
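
As a rough illustration of the opcode choice (a standalone sketch with
assumed names, not the actual LegalizationArtifactCombiner interface), the
new selection logic amounts to:

  // Hypothetical helper mirroring the patch: when the unmerge destination
  // and the merge source types differ, prefer pointer<->integer
  // conversions over a raw bitcast.
  enum Opcode { G_BITCAST, G_INTTOPTR, G_PTRTOINT };

  Opcode pickConvertOp(bool DestIsPointer, bool MergeSrcIsPointer) {
    if (DestIsPointer)
      return G_INTTOPTR;   // building a pointer from an integer value
    if (MergeSrcIsPointer)
      return G_PTRTOINT;   // turning a pointer into an integer value
    return G_BITCAST;      // non-pointer reinterpretation, as before
  }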
---
 .../GlobalISel/LegalizationArtifactCombiner.h |  10 +-
 llvm/test/CodeGen/AArch64/getelementptr.ll    | 454 ++++++++++++++++++
 2 files changed, 462 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/getelementptr.ll

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 471a7f70dd546c..9dea4c1b412dbb 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -1218,8 +1218,14 @@ class LegalizationArtifactCombiner {
     } else {
       LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());
 
-      if (!ConvertOp && DestTy != MergeSrcTy)
-        ConvertOp = TargetOpcode::G_BITCAST;
+      if (!ConvertOp && DestTy != MergeSrcTy) {
+        if (DestTy.isPointer())
+          ConvertOp = TargetOpcode::G_INTTOPTR;
+        else if (MergeSrcTy.isPointer())
+          ConvertOp = TargetOpcode::G_PTRTOINT;
+        else
+          ConvertOp = TargetOpcode::G_BITCAST;
+      }
 
       if (ConvertOp) {
         Builder.setInstr(MI);
diff --git a/llvm/test/CodeGen/AArch64/getelementptr.ll b/llvm/test/CodeGen/AArch64/getelementptr.ll
new file mode 100644
index 00000000000000..002f032726275d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/getelementptr.ll
@@ -0,0 +1,454 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+define ptr @s(ptr %p, i32 %q) {
+; CHECK-LABEL: s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x0, x0, w1, sxtw #2
+; CHECK-NEXT:    ret
+  %d = getelementptr i32, ptr %p, i32 %q
+  ret ptr %d
+}
+
+define <2 x ptr> @v2(<2 x ptr> %p, i32 %q) {
+; CHECK-SD-LABEL: v2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    dup v1.2s, w0
+; CHECK-SD-NEXT:    sshll v1.2d, v1.2s, #2
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    dup v1.2s, w0
+; CHECK-GI-NEXT:    adrp x8, .LCPI1_0
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI1_0]
+; CHECK-GI-NEXT:    fmov x9, d2
+; CHECK-GI-NEXT:    mov x11, v2.d[1]
+; CHECK-GI-NEXT:    sshll v1.2d, v1.2s, #0
+; CHECK-GI-NEXT:    fmov x8, d1
+; CHECK-GI-NEXT:    mov x10, v1.d[1]
+; CHECK-GI-NEXT:    mul x8, x8, x9
+; CHECK-GI-NEXT:    mul x9, x10, x11
+; CHECK-GI-NEXT:    mov v1.d[0], x8
+; CHECK-GI-NEXT:    mov v1.d[1], x9
+; CHECK-GI-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, <2 x ptr> %p, i32 %q
+  ret <2 x ptr> %d
+}
+
+define <3 x ptr> @v3(<3 x ptr> %p, i32 %q) {
+; CHECK-SD-LABEL: v3:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    dup v3.2s, w0
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT:    mov w8, #2 // =0x2
+; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    sshll v1.2d, v3.2s, #2
+; CHECK-SD-NEXT:    fmov s3, w0
+; CHECK-SD-NEXT:    sshll v3.2d, v3.2s, #0
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    fmov d1, x8
+; CHECK-SD-NEXT:    ushl d3, d3, d1
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT:    add d2, d2, d3
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v3:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT:    sxtw x9, w0
+; CHECK-GI-NEXT:    adrp x8, .LCPI2_0
+; CHECK-GI-NEXT:    ldr q4, [x8, :lo12:.LCPI2_0]
+; CHECK-GI-NEXT:    dup v3.2d, x9
+; CHECK-GI-NEXT:    fmov x9, d4
+; CHECK-GI-NEXT:    mov x11, v4.d[1]
+; CHECK-GI-NEXT:    fmov x8, d3
+; CHECK-GI-NEXT:    mov x10, v3.d[1]
+; CHECK-GI-NEXT:    mul x8, x8, x9
+; CHECK-GI-NEXT:    mul x9, x10, x11
+; CHECK-GI-NEXT:    fmov x10, d0
+; CHECK-GI-NEXT:    mov v0.d[0], x8
+; CHECK-GI-NEXT:    mov v3.d[0], x10
+; CHECK-GI-NEXT:    fmov x8, d1
+; CHECK-GI-NEXT:    mov v0.d[1], x9
+; CHECK-GI-NEXT:    mov v3.d[1], x8
+; CHECK-GI-NEXT:    mov w8, #4 // =0x4
+; CHECK-GI-NEXT:    fmov x9, d2
+; CHECK-GI-NEXT:    smaddl x8, w0, w8, x9
+; CHECK-GI-NEXT:    add v0.2d, v3.2d, v0.2d
+; CHECK-GI-NEXT:    fmov d2, x8
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, <3 x ptr> %p, i32 %q
+  ret <3 x ptr> %d
+}
+
+;define <4 x ptr> @v4(<4 x ptr> %p, i32 %q) {
+;  %d = getelementptr i32, <4 x ptr> %p, i32 %q
+;  ret <4 x ptr> %d
+;}
+
+define <2 x ptr> @v2b(ptr %p, <2 x i32> %q) {
+; CHECK-SD-LABEL: v2b:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    sshll v0.2d, v0.2s, #2
+; CHECK-SD-NEXT:    dup v1.2d, x0
+; CHECK-SD-NEXT:    add v0.2d, v1.2d, v0.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v2b:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI3_0
+; CHECK-GI-NEXT:    sshll v0.2d, v0.2s, #0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    mov x10, v0.d[1]
+; CHECK-GI-NEXT:    mov x11, v1.d[1]
+; CHECK-GI-NEXT:    dup v1.2d, x0
+; CHECK-GI-NEXT:    mul x8, x8, x9
+; CHECK-GI-NEXT:    mul x9, x10, x11
+; CHECK-GI-NEXT:    mov v0.d[0], x8
+; CHECK-GI-NEXT:    mov v0.d[1], x9
+; CHECK-GI-NEXT:    add v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, ptr %p, <2 x i32> %q
+  ret <2 x ptr> %d
+}
+
+define <3 x ptr> @v3b(ptr %p, <3 x i32> %q) {
+; CHECK-SD-LABEL: v3b:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    dup v1.2d, x0
+; CHECK-SD-NEXT:    sshll v2.2d, v0.2s, #2
+; CHECK-SD-NEXT:    mov w8, #2 // =0x2
+; CHECK-SD-NEXT:    sshll2 v3.2d, v0.4s, #0
+; CHECK-SD-NEXT:    add v0.2d, v1.2d, v2.2d
+; CHECK-SD-NEXT:    fmov d1, x8
+; CHECK-SD-NEXT:    ushl d2, d3, d1
+; CHECK-SD-NEXT:    fmov d3, x0
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT:    add d2, d3, d2
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v3b:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    smov x9, v0.s[0]
+; CHECK-GI-NEXT:    smov x10, v0.s[1]
+; CHECK-GI-NEXT:    adrp x8, .LCPI4_0
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI4_0]
+; CHECK-GI-NEXT:    mov x11, v2.d[1]
+; CHECK-GI-NEXT:    mov v1.d[0], x9
+; CHECK-GI-NEXT:    fmov x9, d2
+; CHECK-GI-NEXT:    dup v2.2d, x0
+; CHECK-GI-NEXT:    mov v1.d[1], x10
+; CHECK-GI-NEXT:    fmov x8, d1
+; CHECK-GI-NEXT:    mov x10, v1.d[1]
+; CHECK-GI-NEXT:    mul x8, x8, x9
+; CHECK-GI-NEXT:    mul x9, x10, x11
+; CHECK-GI-NEXT:    mov v1.d[0], x8
+; CHECK-GI-NEXT:    mov w8, v0.s[2]
+; CHECK-GI-NEXT:    mov v1.d[1], x9
+; CHECK-GI-NEXT:    mov w9, #4 // =0x4
+; CHECK-GI-NEXT:    smaddl x8, w8, w9, x0
+; CHECK-GI-NEXT:    add v1.2d, v2.2d, v1.2d
+; CHECK-GI-NEXT:    fmov d2, x8
+; CHECK-GI-NEXT:    mov d0, v1.d[1]
+; CHECK-GI-NEXT:    fmov x10, d0
+; CHECK-GI-NEXT:    fmov d0, d1
+; CHECK-GI-NEXT:    fmov d1, x10
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, ptr %p, <3 x i32> %q
+  ret <3 x ptr> %d
+}
+
+;define <4 x ptr> @v4b(ptr %p, <4 x i32> %q) {
+;  %d = getelementptr i32, ptr %p, <4 x i32> %q
+;  ret <4 x ptr> %d
+;}
+
+
+define ptr @s_10(ptr %p, i32 %q) {
+; CHECK-LABEL: s_10:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x0, x0, #40
+; CHECK-NEXT:    ret
+  %d = getelementptr i32, ptr %p, i32 10
+  ret ptr %d
+}
+
+define <2 x ptr> @v2_10(<2 x ptr> %p, i32 %q) {
+; CHECK-SD-LABEL: v2_10:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #40 // =0x28
+; CHECK-SD-NEXT:    dup v1.2d, x8
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v2_10:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI6_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI6_0]
+; CHECK-GI-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, <2 x ptr> %p, i32 10
+  ret <2 x ptr> %d
+}
+
+define <3 x ptr> @v3_10(<3 x ptr> %p, i32 %q) {
+; CHECK-SD-LABEL: v3_10:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT:    mov w8, #40 // =0x28
+; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    dup v3.2d, x8
+; CHECK-SD-NEXT:    add d2, d2, d3
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v3.2d
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v3_10:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    mov v0.d[0], x8
+; CHECK-GI-NEXT:    fmov x8, d1
+; CHECK-GI-NEXT:    mov v0.d[1], x8
+; CHECK-GI-NEXT:    adrp x8, .LCPI7_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI7_0]
+; CHECK-GI-NEXT:    fmov x8, d2
+; CHECK-GI-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    add x8, x8, #40
+; CHECK-GI-NEXT:    fmov d2, x8
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, <3 x ptr> %p, i32 10
+  ret <3 x ptr> %d
+}
+
+;define <4 x ptr> @v4_10(<4 x ptr> %p, i32 %q) {
+;  %d = getelementptr i32, <4 x ptr> %p, i32 10
+;  ret <4 x ptr> %d
+;}
+
+define <2 x ptr> @v2b_10(ptr %p, <2 x i32> %q) {
+; CHECK-SD-LABEL: v2b_10:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #40 // =0x28
+; CHECK-SD-NEXT:    dup v0.2d, x0
+; CHECK-SD-NEXT:    dup v1.2d, x8
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v2b_10:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v0.2s, #10
+; CHECK-GI-NEXT:    adrp x8, .LCPI8_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI8_0]
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    mov x11, v1.d[1]
+; CHECK-GI-NEXT:    dup v1.2d, x0
+; CHECK-GI-NEXT:    sshll v0.2d, v0.2s, #0
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    mov x10, v0.d[1]
+; CHECK-GI-NEXT:    mul x8, x8, x9
+; CHECK-GI-NEXT:    mul x9, x10, x11
+; CHECK-GI-NEXT:    mov v0.d[0], x8
+; CHECK-GI-NEXT:    mov v0.d[1], x9
+; CHECK-GI-NEXT:    add v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, ptr %p, <2 x i32> <i32 10, i32 10>
+  ret <2 x ptr> %d
+}
+
+define <3 x ptr> @v3b_10(ptr %p, <3 x i32> %q) {
+; CHECK-SD-LABEL: v3b_10:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #40 // =0x28
+; CHECK-SD-NEXT:    dup v0.2d, x0
+; CHECK-SD-NEXT:    fmov d3, x0
+; CHECK-SD-NEXT:    dup v2.2d, x8
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v2.2d
+; CHECK-SD-NEXT:    add d2, d3, d2
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v3b_10:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI9_1
+; CHECK-GI-NEXT:    adrp x9, .LCPI9_0
+; CHECK-GI-NEXT:    ldr q0, [x8, :lo12:.LCPI9_1]
+; CHECK-GI-NEXT:    ldr q1, [x9, :lo12:.LCPI9_0]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    mov x10, v0.d[1]
+; CHECK-GI-NEXT:    mov x11, v1.d[1]
+; CHECK-GI-NEXT:    dup v1.2d, x0
+; CHECK-GI-NEXT:    mul x8, x8, x9
+; CHECK-GI-NEXT:    mul x9, x10, x11
+; CHECK-GI-NEXT:    mov v0.d[0], x8
+; CHECK-GI-NEXT:    add x8, x0, #40
+; CHECK-GI-NEXT:    fmov d2, x8
+; CHECK-GI-NEXT:    mov v0.d[1], x9
+; CHECK-GI-NEXT:    add v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, ptr %p, <3 x i32> <i32 10, i32 10, i32 10>
+  ret <3 x ptr> %d
+}
+
+;define <4 x ptr> @v4b_10(ptr %p, <4 x i32> %q) {
+;  %d = getelementptr i32, ptr %p, <4 x i32> <i32 10, i32 10, i32 10, i32 10>
+;  ret <4 x ptr> %d
+;}
+
+
+define ptr @s_m10(ptr %p, i32 %q) {
+; CHECK-LABEL: s_m10:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x0, x0, #40
+; CHECK-NEXT:    ret
+  %d = getelementptr i32, ptr %p, i32 -10
+  ret ptr %d
+}
+
+define <2 x ptr> @v2_m10(<2 x ptr> %p, i32 %q) {
+; CHECK-SD-LABEL: v2_m10:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov x8, #-40 // =0xffffffffffffffd8
+; CHECK-SD-NEXT:    dup v1.2d, x8
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v2_m10:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI11_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI11_0]
+; CHECK-GI-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, <2 x ptr> %p, i32 -10
+  ret <2 x ptr> %d
+}
+
+define <3 x ptr> @v3_m10(<3 x ptr> %p, i32 %q) {
+; CHECK-SD-LABEL: v3_m10:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT:    mov x8, #-40 // =0xffffffffffffffd8
+; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    dup v3.2d, x8
+; CHECK-SD-NEXT:    add d2, d2, d3
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v3.2d
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v3_m10:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    mov v0.d[0], x8
+; CHECK-GI-NEXT:    fmov x8, d1
+; CHECK-GI-NEXT:    mov v0.d[1], x8
+; CHECK-GI-NEXT:    adrp x8, .LCPI12_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI12_0]
+; CHECK-GI-NEXT:    fmov x8, d2
+; CHECK-GI-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    sub x8, x8, #40
+; CHECK-GI-NEXT:    fmov d2, x8
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, <3 x ptr> %p, i32 -10
+  ret <3 x ptr> %d
+}
+
+;define <4 x ptr> @v4_m10(<4 x ptr> %p, i32 %q) {
+;  %d = getelementptr i32, <4 x ptr> %p, i32 -10
+;  ret <4 x ptr> %d
+;}
+
+define <2 x ptr> @v2b_m10(ptr %p, <2 x i32> %q) {
+; CHECK-SD-LABEL: v2b_m10:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov x8, #-40 // =0xffffffffffffffd8
+; CHECK-SD-NEXT:    dup v1.2d, x0
+; CHECK-SD-NEXT:    dup v0.2d, x8
+; CHECK-SD-NEXT:    add v0.2d, v1.2d, v0.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v2b_m10:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mvni v0.2s, #9
+; CHECK-GI-NEXT:    adrp x8, .LCPI13_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI13_0]
+; CHECK-GI-NEXT:    sshll v0.2d, v0.2s, #0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    mov x11, v1.d[1]
+; CHECK-GI-NEXT:    dup v1.2d, x0
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    mov x10, v0.d[1]
+; CHECK-GI-NEXT:    mul x8, x8, x9
+; CHECK-GI-NEXT:    mul x9, x10, x11
+; CHECK-GI-NEXT:    mov v0.d[0], x8
+; CHECK-GI-NEXT:    mov v0.d[1], x9
+; CHECK-GI-NEXT:    add v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, ptr %p, <2 x i32> <i32 -10, i32 -10>
+  ret <2 x ptr> %d
+}
+
+define <3 x ptr> @v3b_m10(ptr %p, <3 x i32> %q) {
+; CHECK-SD-LABEL: v3b_m10:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov x8, #-40 // =0xffffffffffffffd8
+; CHECK-SD-NEXT:    dup v0.2d, x0
+; CHECK-SD-NEXT:    fmov d3, x0
+; CHECK-SD-NEXT:    dup v2.2d, x8
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v2.2d
+; CHECK-SD-NEXT:    add d2, d3, d2
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v3b_m10:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI14_1
+; CHECK-GI-NEXT:    adrp x9, .LCPI14_0
+; CHECK-GI-NEXT:    ldr q0, [x8, :lo12:.LCPI14_1]
+; CHECK-GI-NEXT:    ldr q1, [x9, :lo12:.LCPI14_0]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    mov x10, v0.d[1]
+; CHECK-GI-NEXT:    mov x11, v1.d[1]
+; CHECK-GI-NEXT:    dup v1.2d, x0
+; CHECK-GI-NEXT:    mul x8, x8, x9
+; CHECK-GI-NEXT:    mul x9, x10, x11
+; CHECK-GI-NEXT:    mov v0.d[0], x8
+; CHECK-GI-NEXT:    sub x8, x0, #40
+; CHECK-GI-NEXT:    fmov d2, x8
+; CHECK-GI-NEXT:    mov v0.d[1], x9
+; CHECK-GI-NEXT:    add v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    ret
+  %d = getelementptr i32, ptr %p, <3 x i32> <i32 -10, i32 -10, i32 -10>
+  ret <3 x ptr> %d
+}
+
+;define <4 x ptr> @v4b_m10(ptr %p, <4 x i32> %q) {
+;  %d = getelementptr i32, ptr %p, <4 x i32> <i32 -10, i32 -10, i32 -10, i32 -10>
+;  ret <4 x ptr> %d
+;}

From 6229a8aff68d07e6a54f904716b2ac880da5ea4b Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Thu, 7 Nov 2024 10:23:33 +0000
Subject: [PATCH 2/2] [DAG] Fold nested add(add(reduce(a), b), add(reduce(c),
 d))

This patch reassociates add(add(vecreduce(a), b), add(vecreduce(c), d)) into
add(vecreduce(add(a, c)), add(b, d)), to combine the reductions into a single
node. This comes up after unrolling vectorized loops.
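
For a concrete check of the equivalence, here is a minimal scalar model
(plain integer adds standing in for the DAG nodes; reduce() stands in for
vecreduce.add over a 4-element vector):

  #include <cassert>
  #include <cstdint>
  #include <numeric>

  static int32_t reduce(const int32_t (&v)[4]) {
    return std::accumulate(v, v + 4, int32_t{0});
  }

  int main() {
    int32_t a[4] = {1, 2, 3, 4}, c[4] = {5, 6, 7, 8};
    int32_t b = 10, d = 20;

    // Original: add(add(reduce(a), b), add(reduce(c), d)).
    int32_t before = (reduce(a) + b) + (reduce(c) + d);

    // Folded: add(reduce(add(a, c)), add(b, d)) -- one reduction node.
    int32_t ac[4];
    for (int i = 0; i < 4; ++i)
      ac[i] = a[i] + c[i];
    int32_t after = reduce(ac) + (b + d);

    // Holds by associativity/commutativity of integer addition.
    assert(before == after);
    return 0;
  }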

There is another small change to move the reassociateReduction call inside
visitFADD out of the AllowNewConst block, as no new constants will be created
and it should be OK to perform the combine later, after legalization.
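
The resulting gate (parameter names assumed here for illustration; the diff
below shows the real condition) is just the fast-math flag check without the
AllowNewConst guard:

  // Sketch: the vecreduce(fadd) reassociation needs only reassociation
  // plus no-signed-zeros, either from the global options or from the
  // per-node fast-math flags.
  bool canReassociateFAdd(bool UnsafeFPMath, bool NoSignedZerosFPMath,
                          bool AllowReassoc, bool NoSignedZeros) {
    return (UnsafeFPMath && NoSignedZerosFPMath) ||
           (AllowReassoc && NoSignedZeros);
  }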
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  27 ++-
 llvm/test/CodeGen/AArch64/double_reduct.ll    | 149 ++++++---------
 llvm/test/CodeGen/Thumb2/mve-doublereduct.ll  | 176 +++++++-----------
 3 files changed, 144 insertions(+), 208 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index dcd5ca3b936e72..c88a1abf090bbb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1329,6 +1329,28 @@ SDValue DAGCombiner::reassociateReduction(unsigned RedOpc, unsigned Opc,
                        DAG.getNode(Opc, DL, N0.getOperand(0).getValueType(),
                                    N0.getOperand(0), N1.getOperand(0)));
   }
+
+  // Reassociate op(op(vecreduce(a), b), op(vecreduce(c), d)) into
+  // op(vecreduce(op(a, c)), op(b, d)), to combine the reductions into a
+  // single node.
+  SDValue A, B, C, D;
+  if (sd_match(N0,
+               m_OneUse(m_c_BinOp(Opc, m_OneUse(m_UnaryOp(RedOpc, m_Value(A))),
+                                  m_Value(B)))) &&
+      sd_match(N1,
+               m_OneUse(m_c_BinOp(Opc, m_OneUse(m_UnaryOp(RedOpc, m_Value(C))),
+                                  m_Value(D)))) &&
+      !sd_match(B, m_UnaryOp(RedOpc, m_Value())) &&
+      !sd_match(D, m_UnaryOp(RedOpc, m_Value())) &&
+      A.getValueType() == C.getValueType() &&
+      hasOperation(Opc, A.getValueType()) &&
+      TLI.shouldReassociateReduction(RedOpc, VT)) {
+    SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
+    SDValue Op = DAG.getNode(Opc, DL, A.getValueType(), A, C);
+    SDValue Red = DAG.getNode(RedOpc, DL, VT, Op);
+    SDValue Op2 = DAG.getNode(Opc, DL, VT, B, D);
+    return DAG.getNode(Opc, DL, VT, Red, Op2);
+  }
   return SDValue();
 }
 
@@ -17098,12 +17120,15 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
                            DAG.getConstantFP(4.0, DL, VT));
       }
     }
+  } // enable-unsafe-fp-math && AllowNewConst
 
+  if (((Options.UnsafeFPMath && Options.NoSignedZerosFPMath) ||
+       (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros()))) {
     // Fold fadd(vecreduce(x), vecreduce(y)) -> vecreduce(fadd(x, y))
     if (SDValue SD = reassociateReduction(ISD::VECREDUCE_FADD, ISD::FADD, DL,
                                           VT, N0, N1, Flags))
       return SD;
-  } // enable-unsafe-fp-math
+  }
 
   // FADD -> FMA combines:
   if (SDValue Fused = visitFADDForFMACombine<EmptyMatchContext>(N)) {
diff --git a/llvm/test/CodeGen/AArch64/double_reduct.ll b/llvm/test/CodeGen/AArch64/double_reduct.ll
index 0c356b1d98287f..b04e34a54af475 100644
--- a/llvm/test/CodeGen/AArch64/double_reduct.ll
+++ b/llvm/test/CodeGen/AArch64/double_reduct.ll
@@ -288,13 +288,11 @@ define i32 @smax_i32(<8 x i32> %a, <4 x i32> %b) {
 define float @nested_fadd_f32(<4 x float> %a, <4 x float> %b, float %c, float %d) {
 ; CHECK-LABEL: nested_fadd_f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    faddp v1.4s, v1.4s, v1.4s
+; CHECK-NEXT:    fadd v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    fadd s2, s2, s3
 ; CHECK-NEXT:    faddp v0.4s, v0.4s, v0.4s
-; CHECK-NEXT:    faddp s1, v1.2s
 ; CHECK-NEXT:    faddp s0, v0.2s
-; CHECK-NEXT:    fadd s1, s1, s3
 ; CHECK-NEXT:    fadd s0, s0, s2
-; CHECK-NEXT:    fadd s0, s0, s1
 ; CHECK-NEXT:    ret
   %r1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a)
   %a1 = fadd fast float %r1, %c
@@ -332,15 +330,12 @@ define float @nested_fadd_f32_slow(<4 x float> %a, <4 x float> %b, float %c, flo
 define float @nested_mul_f32(<4 x float> %a, <4 x float> %b, float %c, float %d) {
 ; CHECK-LABEL: nested_mul_f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v4.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v5.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    fmul v1.2s, v1.2s, v4.2s
-; CHECK-NEXT:    fmul v0.2s, v0.2s, v5.2s
-; CHECK-NEXT:    fmul s1, s1, v1.s[1]
+; CHECK-NEXT:    fmul v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    fmul s2, s2, s3
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    fmul v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    fmul s0, s0, v0.s[1]
-; CHECK-NEXT:    fmul s1, s1, s3
 ; CHECK-NEXT:    fmul s0, s0, s2
-; CHECK-NEXT:    fmul s0, s0, s1
 ; CHECK-NEXT:    ret
   %r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
   %a1 = fmul fast float %r1, %c
@@ -353,12 +348,10 @@ define float @nested_mul_f32(<4 x float> %a, <4 x float> %b, float %c, float %d)
 define i32 @nested_add_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_add_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addv s1, v1.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    add w8, w0, w1
 ; CHECK-NEXT:    addv s0, v0.4s
-; CHECK-NEXT:    fmov w8, s1
 ; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    add w9, w9, w0
-; CHECK-NEXT:    add w8, w8, w1
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
@@ -372,12 +365,10 @@ define i32 @nested_add_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_add_c1_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_add_c1_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addv s1, v1.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    add w8, w0, w1
 ; CHECK-NEXT:    addv s0, v0.4s
-; CHECK-NEXT:    fmov w8, s1
 ; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    add w9, w0, w9
-; CHECK-NEXT:    add w8, w8, w1
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
@@ -391,12 +382,10 @@ define i32 @nested_add_c1_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_add_c2_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_add_c2_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addv s1, v1.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    add w8, w0, w1
 ; CHECK-NEXT:    addv s0, v0.4s
-; CHECK-NEXT:    fmov w8, s1
 ; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    add w9, w9, w0
-; CHECK-NEXT:    add w8, w1, w8
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
@@ -429,19 +418,14 @@ define i32 @nested_add_manyreduct_i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c,
 define i32 @nested_mul_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_mul_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    mul v0.2s, v0.2s, v3.2s
-; CHECK-NEXT:    mul v1.2s, v1.2s, v2.2s
-; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    mul v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    mul w8, w0, w1
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    mul v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    mov w9, v0.s[1]
 ; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    mov w9, v1.s[1]
-; CHECK-NEXT:    mul w8, w10, w8
-; CHECK-NEXT:    fmov w10, s1
 ; CHECK-NEXT:    mul w9, w10, w9
-; CHECK-NEXT:    mul w8, w8, w0
-; CHECK-NEXT:    mul w9, w9, w1
-; CHECK-NEXT:    mul w0, w8, w9
+; CHECK-NEXT:    mul w0, w9, w8
 ; CHECK-NEXT:    ret
   %r1 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %a)
   %a1 = mul i32 %r1, %c
@@ -454,19 +438,14 @@ define i32 @nested_mul_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_and_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_and_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT:    and v0.8b, v0.8b, v3.8b
-; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    and w8, w0, w1
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    fmov x9, d0
 ; CHECK-NEXT:    lsr x10, x9, #32
-; CHECK-NEXT:    lsr x11, x8, #32
-; CHECK-NEXT:    and w9, w9, w0
-; CHECK-NEXT:    and w8, w8, w1
-; CHECK-NEXT:    and w9, w9, w10
-; CHECK-NEXT:    and w8, w8, w11
-; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    and w0, w8, w10
 ; CHECK-NEXT:    ret
   %r1 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a)
   %a1 = and i32 %r1, %c
@@ -479,19 +458,14 @@ define i32 @nested_and_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_or_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_or_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    orr v1.8b, v1.8b, v2.8b
-; CHECK-NEXT:    orr v0.8b, v0.8b, v3.8b
-; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    orr w8, w0, w1
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    fmov x9, d0
 ; CHECK-NEXT:    lsr x10, x9, #32
-; CHECK-NEXT:    lsr x11, x8, #32
-; CHECK-NEXT:    orr w9, w9, w0
-; CHECK-NEXT:    orr w8, w8, w1
-; CHECK-NEXT:    orr w9, w9, w10
-; CHECK-NEXT:    orr w8, w8, w11
-; CHECK-NEXT:    orr w0, w9, w8
+; CHECK-NEXT:    orr w8, w9, w8
+; CHECK-NEXT:    orr w0, w8, w10
 ; CHECK-NEXT:    ret
   %r1 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %a)
   %a1 = or i32 %r1, %c
@@ -504,19 +478,14 @@ define i32 @nested_or_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_xor_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_xor_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    eor v1.8b, v1.8b, v2.8b
-; CHECK-NEXT:    eor v0.8b, v0.8b, v3.8b
-; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    eor w8, w0, w1
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    fmov x9, d0
 ; CHECK-NEXT:    lsr x10, x9, #32
-; CHECK-NEXT:    lsr x11, x8, #32
-; CHECK-NEXT:    eor w9, w9, w0
-; CHECK-NEXT:    eor w8, w8, w1
-; CHECK-NEXT:    eor w9, w9, w10
-; CHECK-NEXT:    eor w8, w8, w11
-; CHECK-NEXT:    eor w0, w9, w8
+; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    eor w0, w8, w10
 ; CHECK-NEXT:    ret
   %r1 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a)
   %a1 = xor i32 %r1, %c
@@ -529,14 +498,11 @@ define i32 @nested_xor_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_smin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_smin_i32:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    smin v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w8, w0, w1, lt
 ; CHECK-NEXT:    sminv s0, v0.4s
-; CHECK-NEXT:    sminv s1, v1.4s
 ; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    cmp w9, w0
-; CHECK-NEXT:    csel w9, w9, w0, lt
-; CHECK-NEXT:    cmp w8, w1
-; CHECK-NEXT:    csel w8, w8, w1, lt
 ; CHECK-NEXT:    cmp w9, w8
 ; CHECK-NEXT:    csel w0, w9, w8, lt
 ; CHECK-NEXT:    ret
@@ -551,14 +517,11 @@ define i32 @nested_smin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_smax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_smax_i32:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    smax v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w8, w0, w1, gt
 ; CHECK-NEXT:    smaxv s0, v0.4s
-; CHECK-NEXT:    smaxv s1, v1.4s
 ; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    cmp w9, w0
-; CHECK-NEXT:    csel w9, w9, w0, gt
-; CHECK-NEXT:    cmp w8, w1
-; CHECK-NEXT:    csel w8, w8, w1, gt
 ; CHECK-NEXT:    cmp w9, w8
 ; CHECK-NEXT:    csel w0, w9, w8, gt
 ; CHECK-NEXT:    ret
@@ -573,14 +536,11 @@ define i32 @nested_smax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_umin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_umin_i32:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    umin v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w8, w0, w1, lo
 ; CHECK-NEXT:    uminv s0, v0.4s
-; CHECK-NEXT:    uminv s1, v1.4s
 ; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    cmp w9, w0
-; CHECK-NEXT:    csel w9, w9, w0, lo
-; CHECK-NEXT:    cmp w8, w1
-; CHECK-NEXT:    csel w8, w8, w1, lo
 ; CHECK-NEXT:    cmp w9, w8
 ; CHECK-NEXT:    csel w0, w9, w8, lo
 ; CHECK-NEXT:    ret
@@ -595,14 +555,11 @@ define i32 @nested_umin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_umax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_umax_i32:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    umax v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w8, w0, w1, hi
 ; CHECK-NEXT:    umaxv s0, v0.4s
-; CHECK-NEXT:    umaxv s1, v1.4s
 ; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    cmp w9, w0
-; CHECK-NEXT:    csel w9, w9, w0, hi
-; CHECK-NEXT:    cmp w8, w1
-; CHECK-NEXT:    csel w8, w8, w1, hi
 ; CHECK-NEXT:    cmp w9, w8
 ; CHECK-NEXT:    csel w0, w9, w8, hi
 ; CHECK-NEXT:    ret
@@ -617,11 +574,10 @@ define i32 @nested_umax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define float @nested_fmin_float(<4 x float> %a, <4 x float> %b, float %c, float %d) {
 ; CHECK-LABEL: nested_fmin_float:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fminnmv s1, v1.4s
+; CHECK-NEXT:    fminnm v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    fminnm s2, s2, s3
 ; CHECK-NEXT:    fminnmv s0, v0.4s
-; CHECK-NEXT:    fminnm s1, s1, s3
 ; CHECK-NEXT:    fminnm s0, s0, s2
-; CHECK-NEXT:    fminnm s0, s0, s1
 ; CHECK-NEXT:    ret
   %r1 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a)
   %a1 = call float @llvm.minnum.f32(float %r1, float %c)
@@ -634,11 +590,10 @@ define float @nested_fmin_float(<4 x float> %a, <4 x float> %b, float %c, float
 define float @nested_fmax_float(<4 x float> %a, <4 x float> %b, float %c, float %d) {
 ; CHECK-LABEL: nested_fmax_float:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmaxnmv s1, v1.4s
+; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    fmaxnm s2, s2, s3
 ; CHECK-NEXT:    fmaxnmv s0, v0.4s
-; CHECK-NEXT:    fmaxnm s1, s1, s3
 ; CHECK-NEXT:    fmaxnm s0, s0, s2
-; CHECK-NEXT:    fmaxnm s0, s0, s1
 ; CHECK-NEXT:    ret
   %r1 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a)
   %a1 = call float @llvm.maxnum.f32(float %r1, float %c)
diff --git a/llvm/test/CodeGen/Thumb2/mve-doublereduct.ll b/llvm/test/CodeGen/Thumb2/mve-doublereduct.ll
index 67723e8aa41ad7..1136246f6b14dd 100644
--- a/llvm/test/CodeGen/Thumb2/mve-doublereduct.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-doublereduct.ll
@@ -244,15 +244,12 @@ define i32 @smax_i32(<8 x i32> %a, <4 x i32> %b) {
 define float @nested_add_f32(<4 x float> %a, <4 x float> %b, float %c, float %d) {
 ; CHECK-LABEL: nested_add_f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vadd.f32 s6, s6, s7
-; CHECK-NEXT:    vadd.f32 s4, s4, s5
+; CHECK-NEXT:    vadd.f32 q0, q0, q1
+; CHECK-NEXT:    vadd.f32 s4, s8, s9
 ; CHECK-NEXT:    vadd.f32 s2, s2, s3
 ; CHECK-NEXT:    vadd.f32 s0, s0, s1
-; CHECK-NEXT:    vadd.f32 s4, s4, s6
-; CHECK-NEXT:    vadd.f32 s0, s0, s2
-; CHECK-NEXT:    vadd.f32 s2, s4, s9
-; CHECK-NEXT:    vadd.f32 s0, s0, s8
 ; CHECK-NEXT:    vadd.f32 s0, s0, s2
+; CHECK-NEXT:    vadd.f32 s0, s0, s4
 ; CHECK-NEXT:    bx lr
   %r1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a)
   %a1 = fadd fast float %r1, %c
@@ -265,15 +262,12 @@ define float @nested_add_f32(<4 x float> %a, <4 x float> %b, float %c, float %d)
 define float @nested_mul_f32(<4 x float> %a, <4 x float> %b, float %c, float %d) {
 ; CHECK-LABEL: nested_mul_f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmul.f32 s6, s6, s7
-; CHECK-NEXT:    vmul.f32 s4, s4, s5
+; CHECK-NEXT:    vmul.f32 q0, q0, q1
+; CHECK-NEXT:    vmul.f32 s4, s8, s9
 ; CHECK-NEXT:    vmul.f32 s2, s2, s3
 ; CHECK-NEXT:    vmul.f32 s0, s0, s1
-; CHECK-NEXT:    vmul.f32 s4, s4, s6
-; CHECK-NEXT:    vmul.f32 s0, s0, s2
-; CHECK-NEXT:    vmul.f32 s2, s4, s9
-; CHECK-NEXT:    vmul.f32 s0, s0, s8
 ; CHECK-NEXT:    vmul.f32 s0, s0, s2
+; CHECK-NEXT:    vmul.f32 s0, s0, s4
 ; CHECK-NEXT:    bx lr
   %r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
   %a1 = fmul fast float %r1, %c
@@ -301,22 +295,17 @@ define i32 @nested_add_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_mul_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_mul_i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    vmov r8, r3, d2
-; CHECK-NEXT:    vmov r4, r5, d1
-; CHECK-NEXT:    vmov r6, r7, d0
-; CHECK-NEXT:    vmov r12, lr, d3
-; CHECK-NEXT:    mul r3, r8, r3
-; CHECK-NEXT:    muls r5, r4, r5
-; CHECK-NEXT:    mul r2, r12, lr
-; CHECK-NEXT:    muls r7, r6, r7
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov r12, lr, d1
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    mul r12, r12, lr
 ; CHECK-NEXT:    muls r2, r3, r2
-; CHECK-NEXT:    mul r3, r7, r5
-; CHECK-NEXT:    muls r1, r2, r1
-; CHECK-NEXT:    muls r0, r3, r0
+; CHECK-NEXT:    mul r1, r2, r12
 ; CHECK-NEXT:    muls r0, r1, r0
-; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-NEXT:    pop {r7, pc}
   %r1 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %a)
   %a1 = mul i32 %r1, %c
   %r2 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %b)
@@ -328,22 +317,17 @@ define i32 @nested_mul_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_and_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_and_i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    vmov r2, r3, d2
-; CHECK-NEXT:    vmov r12, lr, d3
-; CHECK-NEXT:    vmov r8, r5, d1
-; CHECK-NEXT:    vmov r6, r7, d0
-; CHECK-NEXT:    ands r2, r3
-; CHECK-NEXT:    and.w r4, r12, lr
-; CHECK-NEXT:    ands r2, r4
-; CHECK-NEXT:    ands r1, r2
-; CHECK-NEXT:    and.w r2, r8, r5
-; CHECK-NEXT:    and.w r3, r6, r7
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov r12, lr, d1
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    and.w r12, r12, lr
 ; CHECK-NEXT:    ands r2, r3
+; CHECK-NEXT:    and.w r2, r2, r12
 ; CHECK-NEXT:    ands r0, r2
-; CHECK-NEXT:    ands r0, r1
-; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-NEXT:    pop {r7, pc}
   %r1 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a)
   %a1 = and i32 %r1, %c
   %r2 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %b)
@@ -355,22 +339,17 @@ define i32 @nested_and_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_or_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_or_i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    vmov r2, r3, d2
-; CHECK-NEXT:    vmov r12, lr, d3
-; CHECK-NEXT:    vmov r8, r5, d1
-; CHECK-NEXT:    vmov r6, r7, d0
-; CHECK-NEXT:    orrs r2, r3
-; CHECK-NEXT:    orr.w r4, r12, lr
-; CHECK-NEXT:    orrs r2, r4
-; CHECK-NEXT:    orrs r1, r2
-; CHECK-NEXT:    orr.w r2, r8, r5
-; CHECK-NEXT:    orr.w r3, r6, r7
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r12, lr, d1
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    orr.w r12, r12, lr
 ; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    orr.w r2, r2, r12
 ; CHECK-NEXT:    orrs r0, r2
-; CHECK-NEXT:    orrs r0, r1
-; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-NEXT:    pop {r7, pc}
   %r1 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %a)
   %a1 = or i32 %r1, %c
   %r2 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %b)
@@ -382,22 +361,17 @@ define i32 @nested_or_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_xor_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_xor_i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    vmov r2, r3, d2
-; CHECK-NEXT:    vmov r12, lr, d3
-; CHECK-NEXT:    vmov r8, r5, d1
-; CHECK-NEXT:    vmov r6, r7, d0
-; CHECK-NEXT:    eors r2, r3
-; CHECK-NEXT:    eor.w r4, r12, lr
-; CHECK-NEXT:    eors r2, r4
-; CHECK-NEXT:    eors r1, r2
-; CHECK-NEXT:    eor.w r2, r8, r5
-; CHECK-NEXT:    eor.w r3, r6, r7
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    veor q0, q0, q1
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov r12, lr, d1
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    eor.w r12, r12, lr
 ; CHECK-NEXT:    eors r2, r3
+; CHECK-NEXT:    eor.w r2, r2, r12
 ; CHECK-NEXT:    eors r0, r2
-; CHECK-NEXT:    eors r0, r1
-; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-NEXT:    pop {r7, pc}
   %r1 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a)
   %a1 = xor i32 %r1, %c
   %r2 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %b)
@@ -409,16 +383,13 @@ define i32 @nested_xor_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_smin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_smin_i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mvn r3, #-2147483648
-; CHECK-NEXT:    mvn r2, #-2147483648
-; CHECK-NEXT:    vminv.s32 r3, q0
-; CHECK-NEXT:    vminv.s32 r2, q1
-; CHECK-NEXT:    cmp r3, r0
-; CHECK-NEXT:    csel r0, r3, r0, lt
-; CHECK-NEXT:    cmp r2, r1
-; CHECK-NEXT:    csel r1, r2, r1, lt
 ; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    mvn r2, #-2147483648
+; CHECK-NEXT:    vmin.s32 q0, q0, q1
 ; CHECK-NEXT:    csel r0, r0, r1, lt
+; CHECK-NEXT:    vminv.s32 r2, q0
+; CHECK-NEXT:    cmp r2, r0
+; CHECK-NEXT:    csel r0, r2, r0, lt
 ; CHECK-NEXT:    bx lr
   %r1 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %a)
   %a1 = call i32 @llvm.smin.i32(i32 %r1, i32 %c)
@@ -431,16 +402,13 @@ define i32 @nested_smin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_smax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_smax_i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov.w r3, #-2147483648
-; CHECK-NEXT:    mov.w r2, #-2147483648
-; CHECK-NEXT:    vmaxv.s32 r3, q0
-; CHECK-NEXT:    vmaxv.s32 r2, q1
-; CHECK-NEXT:    cmp r3, r0
-; CHECK-NEXT:    csel r0, r3, r0, gt
-; CHECK-NEXT:    cmp r2, r1
-; CHECK-NEXT:    csel r1, r2, r1, gt
 ; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    mov.w r2, #-2147483648
+; CHECK-NEXT:    vmax.s32 q0, q0, q1
 ; CHECK-NEXT:    csel r0, r0, r1, gt
+; CHECK-NEXT:    vmaxv.s32 r2, q0
+; CHECK-NEXT:    cmp r2, r0
+; CHECK-NEXT:    csel r0, r2, r0, gt
 ; CHECK-NEXT:    bx lr
   %r1 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %a)
   %a1 = call i32 @llvm.smax.i32(i32 %r1, i32 %c)
@@ -453,16 +421,13 @@ define i32 @nested_smax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_umin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_umin_i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov.w r3, #-1
-; CHECK-NEXT:    mov.w r2, #-1
-; CHECK-NEXT:    vminv.u32 r3, q0
-; CHECK-NEXT:    vminv.u32 r2, q1
-; CHECK-NEXT:    cmp r3, r0
-; CHECK-NEXT:    csel r0, r3, r0, lo
-; CHECK-NEXT:    cmp r2, r1
-; CHECK-NEXT:    csel r1, r2, r1, lo
 ; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    vmin.u32 q0, q0, q1
 ; CHECK-NEXT:    csel r0, r0, r1, lo
+; CHECK-NEXT:    vminv.u32 r2, q0
+; CHECK-NEXT:    cmp r2, r0
+; CHECK-NEXT:    csel r0, r2, r0, lo
 ; CHECK-NEXT:    bx lr
   %r1 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %a)
   %a1 = call i32 @llvm.umin.i32(i32 %r1, i32 %c)
@@ -475,16 +440,13 @@ define i32 @nested_umin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define i32 @nested_umax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: nested_umax_i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    movs r3, #0
-; CHECK-NEXT:    movs r2, #0
-; CHECK-NEXT:    vmaxv.u32 r3, q0
-; CHECK-NEXT:    vmaxv.u32 r2, q1
-; CHECK-NEXT:    cmp r3, r0
-; CHECK-NEXT:    csel r0, r3, r0, hi
-; CHECK-NEXT:    cmp r2, r1
-; CHECK-NEXT:    csel r1, r2, r1, hi
 ; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    vmax.u32 q0, q0, q1
 ; CHECK-NEXT:    csel r0, r0, r1, hi
+; CHECK-NEXT:    vmaxv.u32 r2, q0
+; CHECK-NEXT:    cmp r2, r0
+; CHECK-NEXT:    csel r0, r2, r0, hi
 ; CHECK-NEXT:    bx lr
   %r1 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %a)
   %a1 = call i32 @llvm.umax.i32(i32 %r1, i32 %c)
@@ -497,14 +459,11 @@ define i32 @nested_umax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
 define float @nested_fmin_float(<4 x float> %a, <4 x float> %b, float %c, float %d) {
 ; CHECK-LABEL: nested_fmin_float:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminnm.f32 q0, q0, q1
 ; CHECK-NEXT:    vminnm.f32 s2, s2, s3
 ; CHECK-NEXT:    vminnm.f32 s0, s0, s1
 ; CHECK-NEXT:    vminnm.f32 s0, s0, s2
-; CHECK-NEXT:    vminnm.f32 s2, s6, s7
-; CHECK-NEXT:    vminnm.f32 s4, s4, s5
-; CHECK-NEXT:    vminnm.f32 s0, s0, s8
-; CHECK-NEXT:    vminnm.f32 s2, s4, s2
-; CHECK-NEXT:    vminnm.f32 s2, s2, s9
+; CHECK-NEXT:    vminnm.f32 s2, s8, s9
 ; CHECK-NEXT:    vminnm.f32 s0, s0, s2
 ; CHECK-NEXT:    bx lr
   %r1 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a)
@@ -518,14 +477,11 @@ define float @nested_fmin_float(<4 x float> %a, <4 x float> %b, float %c, float
 define float @nested_fmax_float(<4 x float> %a, <4 x float> %b, float %c, float %d) {
 ; CHECK-LABEL: nested_fmax_float:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxnm.f32 q0, q0, q1
 ; CHECK-NEXT:    vmaxnm.f32 s2, s2, s3
 ; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
 ; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
-; CHECK-NEXT:    vmaxnm.f32 s2, s6, s7
-; CHECK-NEXT:    vmaxnm.f32 s4, s4, s5
-; CHECK-NEXT:    vmaxnm.f32 s0, s0, s8
-; CHECK-NEXT:    vmaxnm.f32 s2, s4, s2
-; CHECK-NEXT:    vmaxnm.f32 s2, s2, s9
+; CHECK-NEXT:    vmaxnm.f32 s2, s8, s9
 ; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
 ; CHECK-NEXT:    bx lr
   %r1 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a)
