[llvm] 8f104b8 - Revert "[AArch64] Add GPR rr instructions to isAssociativeAndCommutative"

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 22 11:03:26 PST 2022


Author: Vitaly Buka
Date: 2022-11-22T11:03:13-08:00
New Revision: 8f104b806a2837f36463277f7d9162f53b595ebd

URL: https://github.com/llvm/llvm-project/commit/8f104b806a2837f36463277f7d9162f53b595ebd
DIFF: https://github.com/llvm/llvm-project/commit/8f104b806a2837f36463277f7d9162f53b595ebd.diff

LOG: Revert "[AArch64] Add GPR rr instructions to isAssociativeAndCommutative"

Breaks msan on aarch64.

This reverts commit 5f7f484ee54ebbf702ee4c5fe9852502dc237121.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
    llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
    llvm/test/CodeGen/AArch64/arm64-rev.ll
    llvm/test/CodeGen/AArch64/cmp-chains.ll
    llvm/test/CodeGen/AArch64/reduce-and.ll
    llvm/test/CodeGen/AArch64/reduce-or.ll
    llvm/test/CodeGen/AArch64/reduce-xor.ll
    llvm/test/CodeGen/AArch64/swift-return.ll
    llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index ba3d7550eea5..1ee076921a1a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -4965,19 +4965,6 @@ bool AArch64InstrInfo::isAssociativeAndCommutative(
     return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath ||
            (Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
             Inst.getFlag(MachineInstr::MIFlag::FmNsz));
-  case AArch64::ADDXrr:
-  case AArch64::ANDXrr:
-  case AArch64::ORRXrr:
-  case AArch64::EORXrr:
-  case AArch64::EONXrr:
-  case AArch64::ADDWrr:
-  case AArch64::ANDWrr:
-  case AArch64::ORRWrr:
-  case AArch64::EORWrr:
-  case AArch64::EONWrr:
-  case AArch64::ANDSXrr:
-  case AArch64::ANDSWrr:
-    return true;
   default:
     return false;
   }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
index 058738c1ba25..347b83f3c982 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
@@ -710,8 +710,8 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O1-NEXT:    ldrb w10, [x0, w1, sxtw]
 ; CHECK-NOLSE-O1-NEXT:    ldurb w11, [x0, #-256]
 ; CHECK-NOLSE-O1-NEXT:    ldrb w8, [x8]
+; CHECK-NOLSE-O1-NEXT:    add w9, w9, w10
 ; CHECK-NOLSE-O1-NEXT:    add w9, w9, w11
-; CHECK-NOLSE-O1-NEXT:    add w9, w10, w9
 ; CHECK-NOLSE-O1-NEXT:    add w0, w9, w8
 ; CHECK-NOLSE-O1-NEXT:    ret
 ;
@@ -733,9 +733,9 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
 ; CHECK-LSE-O1:       ; %bb.0:
 ; CHECK-LSE-O1-NEXT:    ldrb w8, [x0, #4095]
 ; CHECK-LSE-O1-NEXT:    ldrb w9, [x0, w1, sxtw]
-; CHECK-LSE-O1-NEXT:    ldurb w10, [x0, #-256]
-; CHECK-LSE-O1-NEXT:    add w8, w8, w10
-; CHECK-LSE-O1-NEXT:    add w8, w9, w8
+; CHECK-LSE-O1-NEXT:    add w8, w8, w9
+; CHECK-LSE-O1-NEXT:    ldurb w9, [x0, #-256]
+; CHECK-LSE-O1-NEXT:    add w8, w8, w9
 ; CHECK-LSE-O1-NEXT:    add x9, x0, #291, lsl #12 ; =1191936
 ; CHECK-LSE-O1-NEXT:    ldrb w9, [x9]
 ; CHECK-LSE-O1-NEXT:    add w0, w8, w9
@@ -780,8 +780,8 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O1-NEXT:    ldrh w10, [x0, w1, sxtw #1]
 ; CHECK-NOLSE-O1-NEXT:    ldurh w11, [x0, #-256]
 ; CHECK-NOLSE-O1-NEXT:    ldrh w8, [x8]
+; CHECK-NOLSE-O1-NEXT:    add w9, w9, w10
 ; CHECK-NOLSE-O1-NEXT:    add w9, w9, w11
-; CHECK-NOLSE-O1-NEXT:    add w9, w10, w9
 ; CHECK-NOLSE-O1-NEXT:    add w0, w9, w8
 ; CHECK-NOLSE-O1-NEXT:    ret
 ;
@@ -803,9 +803,9 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
 ; CHECK-LSE-O1:       ; %bb.0:
 ; CHECK-LSE-O1-NEXT:    ldrh w8, [x0, #8190]
 ; CHECK-LSE-O1-NEXT:    ldrh w9, [x0, w1, sxtw #1]
-; CHECK-LSE-O1-NEXT:    ldurh w10, [x0, #-256]
-; CHECK-LSE-O1-NEXT:    add w8, w8, w10
-; CHECK-LSE-O1-NEXT:    add w8, w9, w8
+; CHECK-LSE-O1-NEXT:    add w8, w8, w9
+; CHECK-LSE-O1-NEXT:    ldurh w9, [x0, #-256]
+; CHECK-LSE-O1-NEXT:    add w8, w8, w9
 ; CHECK-LSE-O1-NEXT:    add x9, x0, #291, lsl #12 ; =1191936
 ; CHECK-LSE-O1-NEXT:    ldrh w9, [x9]
 ; CHECK-LSE-O1-NEXT:    add w0, w8, w9
@@ -850,8 +850,8 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O1-NEXT:    ldr w10, [x0, w1, sxtw #2]
 ; CHECK-NOLSE-O1-NEXT:    ldur w11, [x0, #-256]
 ; CHECK-NOLSE-O1-NEXT:    ldr w8, [x8]
+; CHECK-NOLSE-O1-NEXT:    add w9, w9, w10
 ; CHECK-NOLSE-O1-NEXT:    add w9, w9, w11
-; CHECK-NOLSE-O1-NEXT:    add w9, w10, w9
 ; CHECK-NOLSE-O1-NEXT:    add w0, w9, w8
 ; CHECK-NOLSE-O1-NEXT:    ret
 ;
@@ -871,9 +871,9 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
 ; CHECK-LSE-O1:       ; %bb.0:
 ; CHECK-LSE-O1-NEXT:    ldr w8, [x0, #16380]
 ; CHECK-LSE-O1-NEXT:    ldr w9, [x0, w1, sxtw #2]
-; CHECK-LSE-O1-NEXT:    ldur w10, [x0, #-256]
-; CHECK-LSE-O1-NEXT:    add w8, w8, w10
-; CHECK-LSE-O1-NEXT:    add w8, w9, w8
+; CHECK-LSE-O1-NEXT:    add w8, w8, w9
+; CHECK-LSE-O1-NEXT:    ldur w9, [x0, #-256]
+; CHECK-LSE-O1-NEXT:    add w8, w8, w9
 ; CHECK-LSE-O1-NEXT:    add x9, x0, #291, lsl #12 ; =1191936
 ; CHECK-LSE-O1-NEXT:    ldr w9, [x9]
 ; CHECK-LSE-O1-NEXT:    add w0, w8, w9
@@ -916,8 +916,8 @@ define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O1-NEXT:    ldr x10, [x0, w1, sxtw #3]
 ; CHECK-NOLSE-O1-NEXT:    ldur x11, [x0, #-256]
 ; CHECK-NOLSE-O1-NEXT:    ldr x8, [x8]
+; CHECK-NOLSE-O1-NEXT:    add x9, x9, x10
 ; CHECK-NOLSE-O1-NEXT:    add x9, x9, x11
-; CHECK-NOLSE-O1-NEXT:    add x9, x10, x9
 ; CHECK-NOLSE-O1-NEXT:    add x0, x9, x8
 ; CHECK-NOLSE-O1-NEXT:    ret
 ;
@@ -937,9 +937,9 @@ define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
 ; CHECK-LSE-O1:       ; %bb.0:
 ; CHECK-LSE-O1-NEXT:    ldr x8, [x0, #32760]
 ; CHECK-LSE-O1-NEXT:    ldr x9, [x0, w1, sxtw #3]
-; CHECK-LSE-O1-NEXT:    ldur x10, [x0, #-256]
-; CHECK-LSE-O1-NEXT:    add x8, x8, x10
-; CHECK-LSE-O1-NEXT:    add x8, x9, x8
+; CHECK-LSE-O1-NEXT:    add x8, x8, x9
+; CHECK-LSE-O1-NEXT:    ldur x9, [x0, #-256]
+; CHECK-LSE-O1-NEXT:    add x8, x8, x9
 ; CHECK-LSE-O1-NEXT:    add x9, x0, #291, lsl #12 ; =1191936
 ; CHECK-LSE-O1-NEXT:    ldr x9, [x9]
 ; CHECK-LSE-O1-NEXT:    add x0, x8, x9

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
index 9cf6af2e6edd..578be5bf21fa 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
@@ -389,8 +389,8 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
   ; CHECK-NEXT:   renamable $w10 = LDRBBroW renamable $x0, killed renamable $w1, 1, 0, pcsections !0 :: (load unordered (s8) from %ir.ptr_regoff)
   ; CHECK-NEXT:   renamable $w11 = LDURBBi killed renamable $x0, -256, pcsections !0 :: (load monotonic (s8) from %ir.ptr_unscaled)
   ; CHECK-NEXT:   renamable $w8 = LDRBBui killed renamable $x8, 0, pcsections !0 :: (load unordered (s8) from %ir.ptr_random)
+  ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w9, killed renamable $w10, 0, pcsections !0
   ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w9, killed renamable $w11, 0, pcsections !0
-  ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w10, killed renamable $w9, 0, pcsections !0
   ; CHECK-NEXT:   $w0 = ADDWrs killed renamable $w9, killed renamable $w8, 0, pcsections !0
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
   %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
@@ -421,8 +421,8 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
   ; CHECK-NEXT:   renamable $w10 = LDRHHroW renamable $x0, killed renamable $w1, 1, 1, pcsections !0 :: (load unordered (s16) from %ir.ptr_regoff)
   ; CHECK-NEXT:   renamable $w11 = LDURHHi killed renamable $x0, -256, pcsections !0 :: (load monotonic (s16) from %ir.ptr_unscaled)
   ; CHECK-NEXT:   renamable $w8 = LDRHHui killed renamable $x8, 0, pcsections !0 :: (load unordered (s16) from %ir.ptr_random)
+  ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w9, killed renamable $w10, 0, pcsections !0
   ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w9, killed renamable $w11, 0, pcsections !0
-  ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w10, killed renamable $w9, 0, pcsections !0
   ; CHECK-NEXT:   $w0 = ADDWrs killed renamable $w9, killed renamable $w8, 0, pcsections !0
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
   %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
@@ -453,8 +453,8 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
   ; CHECK-NEXT:   renamable $w10 = LDRWroW renamable $x0, killed renamable $w1, 1, 1, pcsections !0 :: (load unordered (s32) from %ir.ptr_regoff)
   ; CHECK-NEXT:   renamable $w11 = LDURWi killed renamable $x0, -256, pcsections !0 :: (load monotonic (s32) from %ir.ptr_unscaled)
   ; CHECK-NEXT:   renamable $w8 = LDRWui killed renamable $x8, 0, pcsections !0 :: (load unordered (s32) from %ir.ptr_random)
+  ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w9, killed renamable $w10, 0, pcsections !0
   ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w9, killed renamable $w11, 0, pcsections !0
-  ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w10, killed renamable $w9, 0, pcsections !0
   ; CHECK-NEXT:   $w0 = ADDWrs killed renamable $w9, killed renamable $w8, 0, pcsections !0
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
   %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
@@ -485,8 +485,8 @@ define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) {
   ; CHECK-NEXT:   renamable $x10 = LDRXroW renamable $x0, killed renamable $w1, 1, 1, pcsections !0 :: (load unordered (s64) from %ir.ptr_regoff)
   ; CHECK-NEXT:   renamable $x11 = LDURXi killed renamable $x0, -256, pcsections !0 :: (load monotonic (s64) from %ir.ptr_unscaled)
   ; CHECK-NEXT:   renamable $x8 = LDRXui killed renamable $x8, 0, pcsections !0 :: (load unordered (s64) from %ir.ptr_random)
+  ; CHECK-NEXT:   $x9 = ADDXrs killed renamable $x9, killed renamable $x10, 0, pcsections !0
   ; CHECK-NEXT:   $x9 = ADDXrs killed renamable $x9, killed renamable $x11, 0, pcsections !0
-  ; CHECK-NEXT:   $x9 = ADDXrs killed renamable $x10, killed renamable $x9, 0, pcsections !0
   ; CHECK-NEXT:   $x0 = ADDXrs killed renamable $x9, killed renamable $x8, 0, pcsections !0
   ; CHECK-NEXT:   RET undef $lr, implicit $x0
   %ptr_unsigned = getelementptr i64, i64* %p, i32 4095

diff --git a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
index 106074ea9add..107cc4f15cca 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
@@ -90,7 +90,7 @@ entry:
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
   %l1.0.l1.0. = load volatile i32, i32* %l1, align 4
-  %add1 = or i32 %add, %l1.0.l1.0.
+  %add1 = add nsw i32 %add, %l1.0.l1.0.
   %call = tail call i32 @g()
   %add2 = add nsw i32 %add1, %call
   ret i32 %add2
@@ -172,7 +172,7 @@ entry:
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
   %l1.0.l1.0. = load volatile i32, i32* %l1, align 128
-  %add1 = or i32 %add, %l1.0.l1.0.
+  %add1 = add nsw i32 %add, %l1.0.l1.0.
   %call = tail call i32 @g()
   %add2 = add nsw i32 %add1, %call
   ret i32 %add2
@@ -276,7 +276,7 @@ entry:
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
   %l1.0.l1.0. = load volatile i32, i32* %l1, align 4
-  %add1 = or i32 %add, %l1.0.l1.0.
+  %add1 = add nsw i32 %add, %l1.0.l1.0.
   %call = tail call i32 @g()
   %add2 = add nsw i32 %add1, %call
   %1 = load volatile i32, i32* %vla, align 4, !tbaa !1
@@ -376,7 +376,7 @@ entry:
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
   %l1.0.l1.0. = load volatile i32, i32* %l1, align 128
-  %add1 = or i32 %add, %l1.0.l1.0.
+  %add1 = add nsw i32 %add, %l1.0.l1.0.
   %call = tail call i32 @g()
   %add2 = add nsw i32 %add1, %call
   %1 = load volatile i32, i32* %vla, align 4, !tbaa !1

diff --git a/llvm/test/CodeGen/AArch64/arm64-rev.ll b/llvm/test/CodeGen/AArch64/arm64-rev.ll
index 6d7909070f6d..563c426b68a0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-rev.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-rev.ll
@@ -183,11 +183,11 @@ define i32 @test_rev16_w(i32 %X) nounwind {
 ; GISEL-NEXT:    lsl w9, w0, #8
 ; GISEL-NEXT:    and w10, w8, #0xff0000
 ; GISEL-NEXT:    and w11, w9, #0xff000000
-; GISEL-NEXT:    and w8, w8, #0xff
 ; GISEL-NEXT:    and w9, w9, #0xff00
 ; GISEL-NEXT:    orr w10, w11, w10
-; GISEL-NEXT:    orr w8, w9, w8
-; GISEL-NEXT:    orr w0, w10, w8
+; GISEL-NEXT:    and w8, w8, #0xff
+; GISEL-NEXT:    orr w9, w10, w9
+; GISEL-NEXT:    orr w0, w9, w8
 ; GISEL-NEXT:    ret
 entry:
   %tmp1 = lshr i32 %X, 8
@@ -729,16 +729,16 @@ define i64 @test_rev16_x_hwbyteswaps_complex1(i64 %a) nounwind {
 ; GISEL-NEXT:    lsl x9, x0, #8
 ; GISEL-NEXT:    and x10, x8, #0xff000000000000
 ; GISEL-NEXT:    and x11, x9, #0xff00000000000000
-; GISEL-NEXT:    and x12, x8, #0xff00000000
-; GISEL-NEXT:    and x13, x9, #0xff0000000000
 ; GISEL-NEXT:    orr x10, x10, x11
-; GISEL-NEXT:    orr x11, x12, x13
-; GISEL-NEXT:    and x12, x8, #0xff0000
-; GISEL-NEXT:    and x13, x9, #0xff000000
-; GISEL-NEXT:    orr x12, x12, x13
-; GISEL-NEXT:    and x8, x8, #0xff
+; GISEL-NEXT:    and x11, x8, #0xff00000000
+; GISEL-NEXT:    orr x10, x10, x11
+; GISEL-NEXT:    and x11, x9, #0xff0000000000
+; GISEL-NEXT:    orr x10, x10, x11
+; GISEL-NEXT:    and x11, x8, #0xff0000
+; GISEL-NEXT:    orr x10, x10, x11
+; GISEL-NEXT:    and x11, x9, #0xff000000
 ; GISEL-NEXT:    orr x10, x10, x11
-; GISEL-NEXT:    orr x8, x12, x8
+; GISEL-NEXT:    and x8, x8, #0xff
 ; GISEL-NEXT:    orr x8, x10, x8
 ; GISEL-NEXT:    and x9, x9, #0xff00
 ; GISEL-NEXT:    orr x0, x8, x9
@@ -782,21 +782,21 @@ define i64 @test_rev16_x_hwbyteswaps_complex2(i64 %a) nounwind {
 ; GISEL-LABEL: test_rev16_x_hwbyteswaps_complex2:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    lsr x8, x0, #8
-; GISEL-NEXT:    lsl x9, x0, #8
-; GISEL-NEXT:    and x10, x8, #0xff000000000000
+; GISEL-NEXT:    lsl x10, x0, #8
+; GISEL-NEXT:    and x9, x8, #0xff000000000000
 ; GISEL-NEXT:    and x11, x8, #0xff00000000
-; GISEL-NEXT:    and x12, x8, #0xff0000
+; GISEL-NEXT:    orr x9, x9, x11
+; GISEL-NEXT:    and x11, x8, #0xff0000
+; GISEL-NEXT:    orr x9, x9, x11
 ; GISEL-NEXT:    and x8, x8, #0xff
-; GISEL-NEXT:    orr x10, x10, x11
-; GISEL-NEXT:    orr x8, x12, x8
-; GISEL-NEXT:    and x11, x9, #0xff00000000000000
-; GISEL-NEXT:    and x12, x9, #0xff0000000000
-; GISEL-NEXT:    orr x11, x11, x12
-; GISEL-NEXT:    and x12, x9, #0xff000000
-; GISEL-NEXT:    orr x8, x10, x8
-; GISEL-NEXT:    orr x10, x11, x12
-; GISEL-NEXT:    orr x8, x8, x10
-; GISEL-NEXT:    and x9, x9, #0xff00
+; GISEL-NEXT:    orr x8, x9, x8
+; GISEL-NEXT:    and x9, x10, #0xff00000000000000
+; GISEL-NEXT:    orr x8, x8, x9
+; GISEL-NEXT:    and x9, x10, #0xff0000000000
+; GISEL-NEXT:    orr x8, x8, x9
+; GISEL-NEXT:    and x9, x10, #0xff000000
+; GISEL-NEXT:    orr x8, x8, x9
+; GISEL-NEXT:    and x9, x10, #0xff00
 ; GISEL-NEXT:    orr x0, x8, x9
 ; GISEL-NEXT:    ret
 entry:
@@ -847,17 +847,17 @@ define i64 @test_rev16_x_hwbyteswaps_complex3(i64 %a) nounwind {
 ; GISEL-NEXT:    lsl x9, x0, #8
 ; GISEL-NEXT:    and x10, x8, #0xff000000000000
 ; GISEL-NEXT:    and x11, x9, #0xff00000000000000
-; GISEL-NEXT:    and x12, x8, #0xff00000000
-; GISEL-NEXT:    and x13, x9, #0xff0000000000
 ; GISEL-NEXT:    orr x10, x11, x10
-; GISEL-NEXT:    orr x11, x12, x13
-; GISEL-NEXT:    and x12, x8, #0xff0000
-; GISEL-NEXT:    and x13, x9, #0xff000000
-; GISEL-NEXT:    orr x12, x12, x13
+; GISEL-NEXT:    and x11, x8, #0xff00000000
+; GISEL-NEXT:    orr x10, x11, x10
+; GISEL-NEXT:    and x11, x9, #0xff0000000000
+; GISEL-NEXT:    orr x10, x11, x10
+; GISEL-NEXT:    and x11, x8, #0xff0000
+; GISEL-NEXT:    orr x10, x11, x10
+; GISEL-NEXT:    and x11, x9, #0xff000000
+; GISEL-NEXT:    orr x10, x11, x10
 ; GISEL-NEXT:    and x8, x8, #0xff
-; GISEL-NEXT:    orr x10, x10, x11
-; GISEL-NEXT:    orr x8, x12, x8
-; GISEL-NEXT:    orr x8, x10, x8
+; GISEL-NEXT:    orr x8, x8, x10
 ; GISEL-NEXT:    and x9, x9, #0xff00
 ; GISEL-NEXT:    orr x0, x9, x8
 ; GISEL-NEXT:    ret
@@ -918,24 +918,24 @@ define i64 @test_or_and_combine2(i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: test_or_and_combine2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsr x8, x0, #8
-; CHECK-NEXT:    lsl x9, x0, #8
-; CHECK-NEXT:    and x10, x8, #0xff000000000000
-; CHECK-NEXT:    and x11, x9, #0xff00000000
+; CHECK-NEXT:    lsl x10, x0, #8
+; CHECK-NEXT:    and x9, x8, #0xff000000000000
 ; CHECK-NEXT:    and x8, x8, #0xff0000
-; CHECK-NEXT:    orr x9, x10, x9
-; CHECK-NEXT:    orr x8, x11, x8
+; CHECK-NEXT:    orr x9, x9, x10
+; CHECK-NEXT:    and x10, x10, #0xff00000000
+; CHECK-NEXT:    orr x9, x9, x10
 ; CHECK-NEXT:    orr x0, x9, x8
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_or_and_combine2:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    lsr x8, x0, #8
-; GISEL-NEXT:    lsl x9, x0, #8
-; GISEL-NEXT:    and x10, x8, #0xff000000000000
-; GISEL-NEXT:    and x11, x9, #0xff00000000
+; GISEL-NEXT:    lsl x10, x0, #8
+; GISEL-NEXT:    and x9, x8, #0xff000000000000
 ; GISEL-NEXT:    and x8, x8, #0xff0000
-; GISEL-NEXT:    orr x9, x10, x9
-; GISEL-NEXT:    orr x8, x11, x8
+; GISEL-NEXT:    orr x9, x9, x10
+; GISEL-NEXT:    and x10, x10, #0xff00000000
+; GISEL-NEXT:    orr x9, x9, x10
 ; GISEL-NEXT:    orr x0, x9, x8
 ; GISEL-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index 2de06975d8b3..e31ddc39c9fe 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -76,11 +76,11 @@ define i32 @cmp_and4(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32
 ; GISEL-NEXT:    cmp w0, w1
 ; GISEL-NEXT:    cset w9, lo
 ; GISEL-NEXT:    cmp w4, w5
-; GISEL-NEXT:    cset w10, ne
+; GISEL-NEXT:    and w8, w8, w9
+; GISEL-NEXT:    cset w9, ne
 ; GISEL-NEXT:    cmp w6, w7
-; GISEL-NEXT:    cset w11, eq
 ; GISEL-NEXT:    and w8, w8, w9
-; GISEL-NEXT:    and w9, w10, w11
+; GISEL-NEXT:    cset w9, eq
 ; GISEL-NEXT:    and w0, w8, w9
 ; GISEL-NEXT:    ret
   %9 = icmp ugt i32 %2, %3
@@ -166,11 +166,11 @@ define i32 @cmp_or4(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32
 ; GISEL-NEXT:    cmp w2, w3
 ; GISEL-NEXT:    cset w9, hi
 ; GISEL-NEXT:    cmp w4, w5
-; GISEL-NEXT:    cset w10, ne
+; GISEL-NEXT:    orr w8, w8, w9
+; GISEL-NEXT:    cset w9, ne
 ; GISEL-NEXT:    cmp w6, w7
-; GISEL-NEXT:    cset w11, eq
 ; GISEL-NEXT:    orr w8, w8, w9
-; GISEL-NEXT:    orr w9, w10, w11
+; GISEL-NEXT:    cset w9, eq
 ; GISEL-NEXT:    orr w0, w8, w9
 ; GISEL-NEXT:    ret
   %9 = icmp ult i32 %0, %1

diff --git a/llvm/test/CodeGen/AArch64/reduce-and.ll b/llvm/test/CodeGen/AArch64/reduce-and.ll
index 5adecbe5a9f2..05b1d2946685 100644
--- a/llvm/test/CodeGen/AArch64/reduce-and.ll
+++ b/llvm/test/CodeGen/AArch64/reduce-and.ll
@@ -264,13 +264,13 @@ define i8 @test_redand_v4i8(<4 x i8> %a) {
 ; CHECK-LABEL: test_redand_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.h[3]
-; CHECK-NEXT:    umov w9, v0.h[2]
-; CHECK-NEXT:    umov w10, v0.h[1]
-; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    and w8, w9, w8
-; CHECK-NEXT:    and w10, w11, w10
-; CHECK-NEXT:    and w0, w10, w8
+; CHECK-NEXT:    and w8, w8, w10
+; CHECK-NEXT:    and w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redand_v4i8:
@@ -295,21 +295,21 @@ define i8 @test_redand_v8i8(<8 x i8> %a) {
 ; CHECK-LABEL: test_redand_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.b[5]
-; CHECK-NEXT:    umov w9, v0.b[4]
-; CHECK-NEXT:    umov w10, v0.b[1]
-; CHECK-NEXT:    umov w11, v0.b[0]
-; CHECK-NEXT:    umov w12, v0.b[3]
-; CHECK-NEXT:    umov w13, v0.b[2]
-; CHECK-NEXT:    umov w14, v0.b[6]
-; CHECK-NEXT:    umov w15, v0.b[7]
-; CHECK-NEXT:    and w8, w9, w8
-; CHECK-NEXT:    and w10, w11, w10
-; CHECK-NEXT:    and w11, w13, w12
-; CHECK-NEXT:    and w9, w10, w11
-; CHECK-NEXT:    and w8, w8, w14
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    umov w10, v0.b[2]
+; CHECK-NEXT:    umov w11, v0.b[3]
+; CHECK-NEXT:    umov w12, v0.b[4]
+; CHECK-NEXT:    umov w13, v0.b[5]
 ; CHECK-NEXT:    and w8, w9, w8
-; CHECK-NEXT:    and w0, w8, w15
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    and w8, w8, w10
+; CHECK-NEXT:    umov w10, v0.b[7]
+; CHECK-NEXT:    and w8, w8, w11
+; CHECK-NEXT:    and w8, w8, w12
+; CHECK-NEXT:    and w8, w8, w13
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    and w0, w8, w10
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redand_v8i8:
@@ -352,16 +352,16 @@ define i8 @test_redand_v16i8(<16 x i8> %a) {
 ; CHECK-NEXT:    umov w10, v0.b[2]
 ; CHECK-NEXT:    umov w11, v0.b[3]
 ; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    umov w14, v0.b[6]
 ; CHECK-NEXT:    and w8, w9, w8
-; CHECK-NEXT:    umov w9, v0.b[7]
-; CHECK-NEXT:    and w10, w10, w11
-; CHECK-NEXT:    and w11, w12, w13
+; CHECK-NEXT:    umov w9, v0.b[5]
 ; CHECK-NEXT:    and w8, w8, w10
-; CHECK-NEXT:    and w10, w11, w14
+; CHECK-NEXT:    umov w10, v0.b[6]
+; CHECK-NEXT:    and w8, w8, w11
+; CHECK-NEXT:    umov w11, v0.b[7]
+; CHECK-NEXT:    and w8, w8, w12
+; CHECK-NEXT:    and w8, w8, w9
 ; CHECK-NEXT:    and w8, w8, w10
-; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    and w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redand_v16i8:
@@ -406,16 +406,16 @@ define i8 @test_redand_v32i8(<32 x i8> %a) {
 ; CHECK-NEXT:    umov w10, v0.b[2]
 ; CHECK-NEXT:    umov w11, v0.b[3]
 ; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    umov w14, v0.b[6]
 ; CHECK-NEXT:    and w8, w9, w8
-; CHECK-NEXT:    umov w9, v0.b[7]
-; CHECK-NEXT:    and w10, w10, w11
-; CHECK-NEXT:    and w11, w12, w13
+; CHECK-NEXT:    umov w9, v0.b[5]
 ; CHECK-NEXT:    and w8, w8, w10
-; CHECK-NEXT:    and w10, w11, w14
+; CHECK-NEXT:    umov w10, v0.b[6]
+; CHECK-NEXT:    and w8, w8, w11
+; CHECK-NEXT:    umov w11, v0.b[7]
+; CHECK-NEXT:    and w8, w8, w12
+; CHECK-NEXT:    and w8, w8, w9
 ; CHECK-NEXT:    and w8, w8, w10
-; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    and w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redand_v32i8:
@@ -454,13 +454,13 @@ define i16 @test_redand_v4i16(<4 x i16> %a) {
 ; CHECK-LABEL: test_redand_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.h[3]
-; CHECK-NEXT:    umov w9, v0.h[2]
-; CHECK-NEXT:    umov w10, v0.h[1]
-; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    and w8, w9, w8
-; CHECK-NEXT:    and w10, w11, w10
-; CHECK-NEXT:    and w0, w10, w8
+; CHECK-NEXT:    and w8, w8, w10
+; CHECK-NEXT:    and w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redand_v4i16:
@@ -491,8 +491,8 @@ define i16 @test_redand_v8i16(<8 x i16> %a) {
 ; CHECK-NEXT:    umov w10, v0.h[2]
 ; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    and w8, w9, w8
-; CHECK-NEXT:    and w9, w10, w11
-; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    and w8, w8, w10
+; CHECK-NEXT:    and w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redand_v8i16:
@@ -525,8 +525,8 @@ define i16 @test_redand_v16i16(<16 x i16> %a) {
 ; CHECK-NEXT:    umov w10, v0.h[2]
 ; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    and w8, w9, w8
-; CHECK-NEXT:    and w9, w10, w11
-; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    and w8, w8, w10
+; CHECK-NEXT:    and w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redand_v16i16:

diff --git a/llvm/test/CodeGen/AArch64/reduce-or.ll b/llvm/test/CodeGen/AArch64/reduce-or.ll
index dee681d5226b..c5419a6630b5 100644
--- a/llvm/test/CodeGen/AArch64/reduce-or.ll
+++ b/llvm/test/CodeGen/AArch64/reduce-or.ll
@@ -263,13 +263,13 @@ define i8 @test_redor_v4i8(<4 x i8> %a) {
 ; CHECK-LABEL: test_redor_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.h[3]
-; CHECK-NEXT:    umov w9, v0.h[2]
-; CHECK-NEXT:    umov w10, v0.h[1]
-; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    orr w8, w9, w8
-; CHECK-NEXT:    orr w10, w11, w10
-; CHECK-NEXT:    orr w0, w10, w8
+; CHECK-NEXT:    orr w8, w8, w10
+; CHECK-NEXT:    orr w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redor_v4i8:
@@ -294,21 +294,21 @@ define i8 @test_redor_v8i8(<8 x i8> %a) {
 ; CHECK-LABEL: test_redor_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.b[5]
-; CHECK-NEXT:    umov w9, v0.b[4]
-; CHECK-NEXT:    umov w10, v0.b[1]
-; CHECK-NEXT:    umov w11, v0.b[0]
-; CHECK-NEXT:    umov w12, v0.b[3]
-; CHECK-NEXT:    umov w13, v0.b[2]
-; CHECK-NEXT:    umov w14, v0.b[6]
-; CHECK-NEXT:    umov w15, v0.b[7]
-; CHECK-NEXT:    orr w8, w9, w8
-; CHECK-NEXT:    orr w10, w11, w10
-; CHECK-NEXT:    orr w11, w13, w12
-; CHECK-NEXT:    orr w9, w10, w11
-; CHECK-NEXT:    orr w8, w8, w14
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    umov w10, v0.b[2]
+; CHECK-NEXT:    umov w11, v0.b[3]
+; CHECK-NEXT:    umov w12, v0.b[4]
+; CHECK-NEXT:    umov w13, v0.b[5]
 ; CHECK-NEXT:    orr w8, w9, w8
-; CHECK-NEXT:    orr w0, w8, w15
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    orr w8, w8, w10
+; CHECK-NEXT:    umov w10, v0.b[7]
+; CHECK-NEXT:    orr w8, w8, w11
+; CHECK-NEXT:    orr w8, w8, w12
+; CHECK-NEXT:    orr w8, w8, w13
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    orr w0, w8, w10
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redor_v8i8:
@@ -351,16 +351,16 @@ define i8 @test_redor_v16i8(<16 x i8> %a) {
 ; CHECK-NEXT:    umov w10, v0.b[2]
 ; CHECK-NEXT:    umov w11, v0.b[3]
 ; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    umov w14, v0.b[6]
 ; CHECK-NEXT:    orr w8, w9, w8
-; CHECK-NEXT:    umov w9, v0.b[7]
-; CHECK-NEXT:    orr w10, w10, w11
-; CHECK-NEXT:    orr w11, w12, w13
+; CHECK-NEXT:    umov w9, v0.b[5]
 ; CHECK-NEXT:    orr w8, w8, w10
-; CHECK-NEXT:    orr w10, w11, w14
+; CHECK-NEXT:    umov w10, v0.b[6]
+; CHECK-NEXT:    orr w8, w8, w11
+; CHECK-NEXT:    umov w11, v0.b[7]
+; CHECK-NEXT:    orr w8, w8, w12
+; CHECK-NEXT:    orr w8, w8, w9
 ; CHECK-NEXT:    orr w8, w8, w10
-; CHECK-NEXT:    orr w0, w8, w9
+; CHECK-NEXT:    orr w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redor_v16i8:
@@ -405,16 +405,16 @@ define i8 @test_redor_v32i8(<32 x i8> %a) {
 ; CHECK-NEXT:    umov w10, v0.b[2]
 ; CHECK-NEXT:    umov w11, v0.b[3]
 ; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    umov w14, v0.b[6]
 ; CHECK-NEXT:    orr w8, w9, w8
-; CHECK-NEXT:    umov w9, v0.b[7]
-; CHECK-NEXT:    orr w10, w10, w11
-; CHECK-NEXT:    orr w11, w12, w13
+; CHECK-NEXT:    umov w9, v0.b[5]
 ; CHECK-NEXT:    orr w8, w8, w10
-; CHECK-NEXT:    orr w10, w11, w14
+; CHECK-NEXT:    umov w10, v0.b[6]
+; CHECK-NEXT:    orr w8, w8, w11
+; CHECK-NEXT:    umov w11, v0.b[7]
+; CHECK-NEXT:    orr w8, w8, w12
+; CHECK-NEXT:    orr w8, w8, w9
 ; CHECK-NEXT:    orr w8, w8, w10
-; CHECK-NEXT:    orr w0, w8, w9
+; CHECK-NEXT:    orr w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redor_v32i8:
@@ -453,13 +453,13 @@ define i16 @test_redor_v4i16(<4 x i16> %a) {
 ; CHECK-LABEL: test_redor_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.h[3]
-; CHECK-NEXT:    umov w9, v0.h[2]
-; CHECK-NEXT:    umov w10, v0.h[1]
-; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    orr w8, w9, w8
-; CHECK-NEXT:    orr w10, w11, w10
-; CHECK-NEXT:    orr w0, w10, w8
+; CHECK-NEXT:    orr w8, w8, w10
+; CHECK-NEXT:    orr w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redor_v4i16:
@@ -490,8 +490,8 @@ define i16 @test_redor_v8i16(<8 x i16> %a) {
 ; CHECK-NEXT:    umov w10, v0.h[2]
 ; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    orr w8, w9, w8
-; CHECK-NEXT:    orr w9, w10, w11
-; CHECK-NEXT:    orr w0, w8, w9
+; CHECK-NEXT:    orr w8, w8, w10
+; CHECK-NEXT:    orr w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redor_v8i16:
@@ -524,8 +524,8 @@ define i16 @test_redor_v16i16(<16 x i16> %a) {
 ; CHECK-NEXT:    umov w10, v0.h[2]
 ; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    orr w8, w9, w8
-; CHECK-NEXT:    orr w9, w10, w11
-; CHECK-NEXT:    orr w0, w8, w9
+; CHECK-NEXT:    orr w8, w8, w10
+; CHECK-NEXT:    orr w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redor_v16i16:

diff  --git a/llvm/test/CodeGen/AArch64/reduce-xor.ll b/llvm/test/CodeGen/AArch64/reduce-xor.ll
index 7bed71dffb93..4372dee4ab2f 100644
--- a/llvm/test/CodeGen/AArch64/reduce-xor.ll
+++ b/llvm/test/CodeGen/AArch64/reduce-xor.ll
@@ -262,13 +262,13 @@ define i8 @test_redxor_v4i8(<4 x i8> %a) {
 ; CHECK-LABEL: test_redxor_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.h[3]
-; CHECK-NEXT:    umov w9, v0.h[2]
-; CHECK-NEXT:    umov w10, v0.h[1]
-; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    eor w10, w11, w10
-; CHECK-NEXT:    eor w0, w10, w8
+; CHECK-NEXT:    eor w8, w8, w10
+; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redxor_v4i8:
@@ -293,21 +293,21 @@ define i8 @test_redxor_v8i8(<8 x i8> %a) {
 ; CHECK-LABEL: test_redxor_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.b[5]
-; CHECK-NEXT:    umov w9, v0.b[4]
-; CHECK-NEXT:    umov w10, v0.b[1]
-; CHECK-NEXT:    umov w11, v0.b[0]
-; CHECK-NEXT:    umov w12, v0.b[3]
-; CHECK-NEXT:    umov w13, v0.b[2]
-; CHECK-NEXT:    umov w14, v0.b[6]
-; CHECK-NEXT:    umov w15, v0.b[7]
-; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    eor w10, w11, w10
-; CHECK-NEXT:    eor w11, w13, w12
-; CHECK-NEXT:    eor w9, w10, w11
-; CHECK-NEXT:    eor w8, w8, w14
+; CHECK-NEXT:    umov w8, v0.b[1]
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    umov w10, v0.b[2]
+; CHECK-NEXT:    umov w11, v0.b[3]
+; CHECK-NEXT:    umov w12, v0.b[4]
+; CHECK-NEXT:    umov w13, v0.b[5]
 ; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    eor w0, w8, w15
+; CHECK-NEXT:    umov w9, v0.b[6]
+; CHECK-NEXT:    eor w8, w8, w10
+; CHECK-NEXT:    umov w10, v0.b[7]
+; CHECK-NEXT:    eor w8, w8, w11
+; CHECK-NEXT:    eor w8, w8, w12
+; CHECK-NEXT:    eor w8, w8, w13
+; CHECK-NEXT:    eor w8, w8, w9
+; CHECK-NEXT:    eor w0, w8, w10
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redxor_v8i8:
@@ -350,16 +350,16 @@ define i8 @test_redxor_v16i8(<16 x i8> %a) {
 ; CHECK-NEXT:    umov w10, v0.b[2]
 ; CHECK-NEXT:    umov w11, v0.b[3]
 ; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    umov w14, v0.b[6]
 ; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    umov w9, v0.b[7]
-; CHECK-NEXT:    eor w10, w10, w11
-; CHECK-NEXT:    eor w11, w12, w13
+; CHECK-NEXT:    umov w9, v0.b[5]
 ; CHECK-NEXT:    eor w8, w8, w10
-; CHECK-NEXT:    eor w10, w11, w14
+; CHECK-NEXT:    umov w10, v0.b[6]
+; CHECK-NEXT:    eor w8, w8, w11
+; CHECK-NEXT:    umov w11, v0.b[7]
+; CHECK-NEXT:    eor w8, w8, w12
+; CHECK-NEXT:    eor w8, w8, w9
 ; CHECK-NEXT:    eor w8, w8, w10
-; CHECK-NEXT:    eor w0, w8, w9
+; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redxor_v16i8:
@@ -404,16 +404,16 @@ define i8 @test_redxor_v32i8(<32 x i8> %a) {
 ; CHECK-NEXT:    umov w10, v0.b[2]
 ; CHECK-NEXT:    umov w11, v0.b[3]
 ; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    umov w14, v0.b[6]
 ; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    umov w9, v0.b[7]
-; CHECK-NEXT:    eor w10, w10, w11
-; CHECK-NEXT:    eor w11, w12, w13
+; CHECK-NEXT:    umov w9, v0.b[5]
 ; CHECK-NEXT:    eor w8, w8, w10
-; CHECK-NEXT:    eor w10, w11, w14
+; CHECK-NEXT:    umov w10, v0.b[6]
+; CHECK-NEXT:    eor w8, w8, w11
+; CHECK-NEXT:    umov w11, v0.b[7]
+; CHECK-NEXT:    eor w8, w8, w12
+; CHECK-NEXT:    eor w8, w8, w9
 ; CHECK-NEXT:    eor w8, w8, w10
-; CHECK-NEXT:    eor w0, w8, w9
+; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redxor_v32i8:
@@ -452,13 +452,13 @@ define i16 @test_redxor_v4i16(<4 x i16> %a) {
 ; CHECK-LABEL: test_redxor_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.h[3]
-; CHECK-NEXT:    umov w9, v0.h[2]
-; CHECK-NEXT:    umov w10, v0.h[1]
-; CHECK-NEXT:    umov w11, v0.h[0]
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    umov w9, v0.h[0]
+; CHECK-NEXT:    umov w10, v0.h[2]
+; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    eor w10, w11, w10
-; CHECK-NEXT:    eor w0, w10, w8
+; CHECK-NEXT:    eor w8, w8, w10
+; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redxor_v4i16:
@@ -489,8 +489,8 @@ define i16 @test_redxor_v8i16(<8 x i16> %a) {
 ; CHECK-NEXT:    umov w10, v0.h[2]
 ; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    eor w9, w10, w11
-; CHECK-NEXT:    eor w0, w8, w9
+; CHECK-NEXT:    eor w8, w8, w10
+; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redxor_v8i16:
@@ -523,8 +523,8 @@ define i16 @test_redxor_v16i16(<16 x i16> %a) {
 ; CHECK-NEXT:    umov w10, v0.h[2]
 ; CHECK-NEXT:    umov w11, v0.h[3]
 ; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    eor w9, w10, w11
-; CHECK-NEXT:    eor w0, w8, w9
+; CHECK-NEXT:    eor w8, w8, w10
+; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redxor_v16i16:

diff  --git a/llvm/test/CodeGen/AArch64/swift-return.ll b/llvm/test/CodeGen/AArch64/swift-return.ll
index 6c675b3e60c7..bcfa3ef3e896 100644
--- a/llvm/test/CodeGen/AArch64/swift-return.ll
+++ b/llvm/test/CodeGen/AArch64/swift-return.ll
@@ -28,8 +28,8 @@ declare swiftcc { i16, i8 } @gen(i32)
 ; CHECK-LABEL: test2
 ; CHECK:  bl      _gen2
 ; CHECK:  add     [[TMP:x.*]], x0, x1
-; CHECK:  add     [[TMP2:x.*]], x2, x3
-; CHECK:  add     [[TMP]], [[TMP]], [[TMP2]]
+; CHECK:  add     [[TMP]], [[TMP]], x2
+; CHECK:  add     [[TMP]], [[TMP]], x3
 ; CHECK:  add     x0, [[TMP]], x4
 ; CHECK-O0-LABEL: test2
 ; CHECK-O0:  bl      _gen2
@@ -75,8 +75,8 @@ define swiftcc { i64, i64, i64, i64, i64 } @gen2(i64 %key) {
 ; CHECK-LABEL: test3
 ; CHECK: bl      _gen3
 ; CHECK: add             [[TMP:w.*]], w0, w1
-; CHECK: add             [[TMP2:w.*]], w2, w3
-; CHECK: add             w0, [[TMP]], [[TMP2]]
+; CHECK: add             [[TMP]], [[TMP]], w2
+; CHECK: add             w0, [[TMP]], w3
 ; CHECK-O0-LABEL: test3
 ; CHECK-O0: bl      _gen3
 ; CHECK-O0: add             [[TMP:w.*]], w0, w1
@@ -159,8 +159,8 @@ declare swiftcc { double, double, double, double } @gen5()
 ; CHECK-DAG:   fadd    d0, d0, d2
 ; CHECK-DAG:   fadd    d0, d0, d3
 ; CHECK-DAG:   add     [[TMP:w.*]], w0, w1
-; CHECK-DAG:   add     [[TMP2:w.*]], w2, w3
-; CHECK-DAG:   add     w0, [[TMP]], [[TMP2]]
+; CHECK-DAG:   add     [[TMP]], [[TMP]], w2
+; CHECK-DAG:   add     w0, [[TMP]], w3
 ; CHECK-O0-LABEL: test6
 ; CHECK-O0:   bl      _gen6
 ; CHECK-O0-DAG:   fadd    d0, d0, d1

diff  --git a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
index 95a524b80810..005b4e3afe80 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
@@ -97,28 +97,28 @@ define i8 @test_v9i8(<9 x i8> %a) nounwind {
 ; CHECK-LABEL: test_v9i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #-1
-; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    umov w12, v0.b[4]
 ; CHECK-NEXT:    mov v1.16b, v0.16b
-; CHECK-NEXT:    umov w10, v0.b[6]
-; CHECK-NEXT:    umov w15, v0.b[7]
 ; CHECK-NEXT:    mov v1.b[9], w8
 ; CHECK-NEXT:    mov v1.b[10], w8
 ; CHECK-NEXT:    mov v1.b[11], w8
 ; CHECK-NEXT:    mov v1.b[13], w8
-; CHECK-NEXT:    umov w8, v0.b[4]
 ; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    and v1.8b, v0.8b, v1.8b
+; CHECK-NEXT:    umov w8, v1.b[1]
+; CHECK-NEXT:    umov w9, v1.b[0]
+; CHECK-NEXT:    umov w10, v1.b[2]
+; CHECK-NEXT:    umov w11, v1.b[3]
+; CHECK-NEXT:    and w8, w9, w8
+; CHECK-NEXT:    umov w9, v0.b[5]
+; CHECK-NEXT:    and w8, w8, w10
+; CHECK-NEXT:    umov w10, v0.b[6]
+; CHECK-NEXT:    and w8, w8, w11
+; CHECK-NEXT:    umov w11, v0.b[7]
+; CHECK-NEXT:    and w8, w8, w12
 ; CHECK-NEXT:    and w8, w8, w9
 ; CHECK-NEXT:    and w8, w8, w10
-; CHECK-NEXT:    and w8, w8, w15
-; CHECK-NEXT:    and v1.8b, v0.8b, v1.8b
-; CHECK-NEXT:    umov w11, v1.b[1]
-; CHECK-NEXT:    umov w12, v1.b[0]
-; CHECK-NEXT:    umov w13, v1.b[2]
-; CHECK-NEXT:    umov w14, v1.b[3]
-; CHECK-NEXT:    and w9, w12, w11
-; CHECK-NEXT:    and w11, w13, w14
-; CHECK-NEXT:    and w9, w9, w11
-; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    and w0, w8, w11
 ; CHECK-NEXT:    ret
   %b = call i8 @llvm.vector.reduce.and.v9i8(<9 x i8> %a)
   ret i8 %b


        


More information about the llvm-commits mailing list