[llvm] f1e9ece - [AArch64][GlobalISel] Legalize G_VECREDUCE_XOR. Treated same as other bitwise reductions.

Amara Emerson via llvm-commits llvm-commits at lists.llvm.org
Sun Oct 10 17:01:28 PDT 2021


Author: Amara Emerson
Date: 2021-10-10T17:01:21-07:00
New Revision: f1e9ecea442a2f839e5ac85f840b720db1ee7914

URL: https://github.com/llvm/llvm-project/commit/f1e9ecea442a2f839e5ac85f840b720db1ee7914
DIFF: https://github.com/llvm/llvm-project/commit/f1e9ecea442a2f839e5ac85f840b720db1ee7914.diff

LOG: [AArch64][GlobalISel] Legalize G_VECREDUCE_XOR. Treated same as other bitwise reductions.

Added: 
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-xor.mir

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
    llvm/test/CodeGen/AArch64/reduce-xor.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index a0848efab7201..1524aa5eb0ec6 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -721,7 +721,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .clampMaxNumElements(1, s32, 4)
       .lower();
 
-  getActionDefinitionsBuilder({G_VECREDUCE_OR, G_VECREDUCE_AND})
+  getActionDefinitionsBuilder(
+      {G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
       // Try to break down into smaller vectors as long as they're at least 64
       // bits. This lets us use vector operations for some parts of the
       // reduction.

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-xor.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-xor.mir
new file mode 100644
index 0000000000000..8d91352d3a61b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-xor.mir
@@ -0,0 +1,653 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -march=aarch64 -run-pass=legalizer -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name:            test_redxor_v1i1
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$w0' }
+body:             |
+  bb.1:
+    liveins: $w0
+
+    ; CHECK-LABEL: name: test_redxor_v1i1
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(s32) = COPY $w0
+    %0:_(s1) = G_TRUNC %1(s32)
+    %2:_(s1) = G_VECREDUCE_XOR %0(s1)
+    %4:_(s32) = G_ZEXT %2(s1)
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v2i1
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$d0' }
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_redxor_v2i1
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(<2 x s32>) = COPY $d0
+    %0:_(<2 x s1>) = G_TRUNC %1(<2 x s32>)
+    %2:_(s1) = G_VECREDUCE_XOR %0(<2 x s1>)
+    %4:_(s32) = G_ZEXT %2(s1)
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v4i1
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$d0' }
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_redxor_v4i1
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[XOR2]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(<4 x s16>) = COPY $d0
+    %0:_(<4 x s1>) = G_TRUNC %1(<4 x s16>)
+    %2:_(s1) = G_VECREDUCE_XOR %0(<4 x s1>)
+    %4:_(s32) = G_ZEXT %2(s1)
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v8i1
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$d0' }
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_redxor_v8i1
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<8 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
+    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
+    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[XOR4]], [[XOR5]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[XOR6]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(<8 x s8>) = COPY $d0
+    %0:_(<8 x s1>) = G_TRUNC %1(<8 x s8>)
+    %2:_(s1) = G_VECREDUCE_XOR %0(<8 x s1>)
+    %4:_(s32) = G_ZEXT %2(s1)
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v16i1
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0' }
+frameInfo:
+  maxAlignment:    1
+machineFunctionInfo: {}
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: test_redxor_v16i1
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<16 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
+    ; CHECK-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
+    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT8]], [[ANYEXT9]]
+    ; CHECK-NEXT: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s8)
+    ; CHECK-NEXT: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s8)
+    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT10]], [[ANYEXT11]]
+    ; CHECK-NEXT: [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s8)
+    ; CHECK-NEXT: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s8)
+    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT12]], [[ANYEXT13]]
+    ; CHECK-NEXT: [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[UV14]](s8)
+    ; CHECK-NEXT: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[UV15]](s8)
+    ; CHECK-NEXT: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT14]], [[ANYEXT15]]
+    ; CHECK-NEXT: [[XOR8:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
+    ; CHECK-NEXT: [[XOR9:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
+    ; CHECK-NEXT: [[XOR10:%[0-9]+]]:_(s32) = G_XOR [[XOR4]], [[XOR5]]
+    ; CHECK-NEXT: [[XOR11:%[0-9]+]]:_(s32) = G_XOR [[XOR6]], [[XOR7]]
+    ; CHECK-NEXT: [[XOR12:%[0-9]+]]:_(s32) = G_XOR [[XOR8]], [[XOR9]]
+    ; CHECK-NEXT: [[XOR13:%[0-9]+]]:_(s32) = G_XOR [[XOR10]], [[XOR11]]
+    ; CHECK-NEXT: [[XOR14:%[0-9]+]]:_(s32) = G_XOR [[XOR12]], [[XOR13]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[XOR14]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(<16 x s8>) = COPY $q0
+    %0:_(<16 x s1>) = G_TRUNC %1(<16 x s8>)
+    %2:_(s1) = G_VECREDUCE_XOR %0(<16 x s1>)
+    %4:_(s32) = G_ZEXT %2(s1)
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v1i8
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$d0' }
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_redxor_v1i8
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s64) = G_BITCAST [[COPY]](<8 x s8>)
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[BITCAST]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(<8 x s8>) = COPY $d0
+    %11:_(s64) = G_BITCAST %1(<8 x s8>)
+    %0:_(s8) = G_TRUNC %11(s64)
+    %9:_(s8) = G_VECREDUCE_XOR %0(s8)
+    %10:_(s32) = G_ANYEXT %9(s8)
+    $w0 = COPY %10(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v3i8
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$w0' }
+  - { reg: '$w1' }
+  - { reg: '$w2' }
+body:             |
+  bb.1:
+    liveins: $w0, $w1, $w2
+
+    ; CHECK-LABEL: name: test_redxor_v3i8
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[COPY2]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR1]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(s32) = COPY $w0
+    %2:_(s32) = COPY $w1
+    %3:_(s32) = COPY $w2
+    %4:_(<3 x s32>) = G_BUILD_VECTOR %1(s32), %2(s32), %3(s32)
+    %0:_(<3 x s8>) = G_TRUNC %4(<3 x s32>)
+    %5:_(s8) = G_VECREDUCE_XOR %0(<3 x s8>)
+    %6:_(s32) = G_ANYEXT %5(s8)
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v4i8
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$d0' }
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_redxor_v4i8
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR2]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(<4 x s16>) = COPY $d0
+    %0:_(<4 x s8>) = G_TRUNC %1(<4 x s16>)
+    %2:_(s8) = G_VECREDUCE_XOR %0(<4 x s8>)
+    %3:_(s32) = G_ANYEXT %2(s8)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v8i8
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$d0' }
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_redxor_v8i8
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<8 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
+    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
+    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[XOR4]], [[XOR5]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR6]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(<8 x s8>) = COPY $d0
+    %1:_(s8) = G_VECREDUCE_XOR %0(<8 x s8>)
+    %2:_(s32) = G_ANYEXT %1(s8)
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v16i8
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0' }
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: test_redxor_v16i8
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<8 x s8>), [[UV1:%[0-9]+]]:_(<8 x s8>) = G_UNMERGE_VALUES [[COPY]](<16 x s8>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<8 x s8>) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[XOR]](<8 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
+    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[XOR1]], [[XOR2]]
+    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[XOR3]], [[XOR4]]
+    ; CHECK-NEXT: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[XOR5]], [[XOR6]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR7]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(<16 x s8>) = COPY $q0
+    %1:_(s8) = G_VECREDUCE_XOR %0(<16 x s8>)
+    %2:_(s32) = G_ANYEXT %1(s8)
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v32i8
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0' }
+  - { reg: '$q1' }
+body:             |
+  bb.1:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: test_redxor_v32i8
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<16 x s8>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<8 x s8>), [[UV1:%[0-9]+]]:_(<8 x s8>) = G_UNMERGE_VALUES [[XOR]](<16 x s8>)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<8 x s8>) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[XOR1]](<8 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
+    ; CHECK-NEXT: [[XOR5:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[XOR6:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
+    ; CHECK-NEXT: [[XOR7:%[0-9]+]]:_(s32) = G_XOR [[XOR4]], [[XOR5]]
+    ; CHECK-NEXT: [[XOR8:%[0-9]+]]:_(s32) = G_XOR [[XOR6]], [[XOR7]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR8]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(<16 x s8>) = COPY $q0
+    %2:_(<16 x s8>) = COPY $q1
+    %0:_(<32 x s8>) = G_CONCAT_VECTORS %1(<16 x s8>), %2(<16 x s8>)
+    %3:_(s8) = G_VECREDUCE_XOR %0(<32 x s8>)
+    %4:_(s32) = G_ANYEXT %3(s8)
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v4i16
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$d0' }
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_redxor_v4i16
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[XOR]], [[XOR1]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR2]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(<4 x s16>) = COPY $d0
+    %1:_(s16) = G_VECREDUCE_XOR %0(<4 x s16>)
+    %2:_(s32) = G_ANYEXT %1(s16)
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v8i16
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0' }
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: test_redxor_v8i16
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[XOR]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[XOR1]], [[XOR2]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR3]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(<8 x s16>) = COPY $q0
+    %1:_(s16) = G_VECREDUCE_XOR %0(<8 x s16>)
+    %2:_(s32) = G_ANYEXT %1(s16)
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v16i16
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0' }
+  - { reg: '$q1' }
+body:             |
+  bb.1:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: test_redxor_v16i16
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<8 x s16>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[XOR]](<8 x s16>)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<4 x s16>) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[XOR1]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[XOR2]], [[XOR3]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR4]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(<8 x s16>) = COPY $q0
+    %2:_(<8 x s16>) = COPY $q1
+    %0:_(<16 x s16>) = G_CONCAT_VECTORS %1(<8 x s16>), %2(<8 x s16>)
+    %3:_(s16) = G_VECREDUCE_XOR %0(<16 x s16>)
+    %4:_(s32) = G_ANYEXT %3(s16)
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v2i32
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$d0' }
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: test_redxor_v2i32
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[XOR]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(<2 x s32>) = COPY $d0
+    %1:_(s32) = G_VECREDUCE_XOR %0(<2 x s32>)
+    $w0 = COPY %1(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v4i32
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0' }
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: test_redxor_v4i32
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](<2 x s32>)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[XOR1]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(s32) = G_VECREDUCE_XOR %0(<4 x s32>)
+    $w0 = COPY %1(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v8i32
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0' }
+  - { reg: '$q1' }
+body:             |
+  bb.1:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: test_redxor_v8i32
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[XOR]](<4 x s32>)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s32>) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](<2 x s32>)
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[XOR2]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY2]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %1:_(<4 x s32>) = COPY $q0
+    %2:_(<4 x s32>) = COPY $q1
+    %0:_(<8 x s32>) = G_CONCAT_VECTORS %1(<4 x s32>), %2(<4 x s32>)
+    %3:_(s32) = G_VECREDUCE_XOR %0(<8 x s32>)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_redxor_v2i64
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0' }
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: test_redxor_v2i64
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[XOR]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY1]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(s64) = G_VECREDUCE_XOR %0(<2 x s64>)
+    $x0 = COPY %1(s64)
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            test_redxor_v4i64
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0' }
+  - { reg: '$q1' }
+body:             |
+  bb.1:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: test_redxor_v4i64
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[XOR]](<2 x s64>)
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[XOR1]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY2]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %1:_(<2 x s64>) = COPY $q0
+    %2:_(<2 x s64>) = COPY $q1
+    %0:_(<4 x s64>) = G_CONCAT_VECTORS %1(<2 x s64>), %2(<2 x s64>)
+    %3:_(s64) = G_VECREDUCE_XOR %0(<4 x s64>)
+    $x0 = COPY %3(s64)
+    RET_ReallyLR implicit $x0
+
+...

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index 72cb253d5f968..f43c458a8dd8d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -684,8 +684,9 @@
 # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected 
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_XOR (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_SMAX (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
 # DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined

diff --git a/llvm/test/CodeGen/AArch64/reduce-xor.ll b/llvm/test/CodeGen/AArch64/reduce-xor.ll
index 14d1ac94c031d..5b07b58152541 100644
--- a/llvm/test/CodeGen/AArch64/reduce-xor.ll
+++ b/llvm/test/CodeGen/AArch64/reduce-xor.ll
@@ -1,11 +1,17 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=GISEL
 
 define i1 @test_redxor_v1i1(<1 x i1> %a) {
 ; CHECK-LABEL: test_redxor_v1i1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w0, w0, #0x1
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v1i1:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    and w0, w0, #0x1
+; GISEL-NEXT:    ret
   %or_result = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %a)
   ret i1 %or_result
 }
@@ -19,6 +25,16 @@ define i1 @test_redxor_v2i1(<2 x i1> %a) {
 ; CHECK-NEXT:    eor w8, w9, w8
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v2i1:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; GISEL-NEXT:    mov s1, v0.s[1]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    ret
   %or_result = call i1 @llvm.vector.reduce.xor.v2i1(<2 x i1> %a)
   ret i1 %or_result
 }
@@ -36,6 +52,22 @@ define i1 @test_redxor_v4i1(<4 x i1> %a) {
 ; CHECK-NEXT:    eor w8, w8, w11
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v4i1:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; GISEL-NEXT:    mov h1, v0.h[1]
+; GISEL-NEXT:    mov h2, v0.h[2]
+; GISEL-NEXT:    mov h3, v0.h[3]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    eor w9, w10, w11
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    ret
   %or_result = call i1 @llvm.vector.reduce.xor.v4i1(<4 x i1> %a)
   ret i1 %or_result
 }
@@ -61,6 +93,34 @@ define i1 @test_redxor_v8i1(<8 x i1> %a) {
 ; CHECK-NEXT:    eor w8, w8, w10
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v8i1:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; GISEL-NEXT:    mov b1, v0.b[1]
+; GISEL-NEXT:    mov b2, v0.b[2]
+; GISEL-NEXT:    mov b3, v0.b[3]
+; GISEL-NEXT:    mov b4, v0.b[4]
+; GISEL-NEXT:    mov b5, v0.b[5]
+; GISEL-NEXT:    mov b6, v0.b[6]
+; GISEL-NEXT:    mov b7, v0.b[7]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    fmov w12, s4
+; GISEL-NEXT:    fmov w13, s5
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    fmov w9, s6
+; GISEL-NEXT:    eor w10, w10, w11
+; GISEL-NEXT:    fmov w11, s7
+; GISEL-NEXT:    eor w12, w12, w13
+; GISEL-NEXT:    eor w8, w8, w10
+; GISEL-NEXT:    eor w9, w9, w11
+; GISEL-NEXT:    eor w9, w12, w9
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    ret
   %or_result = call i1 @llvm.vector.reduce.xor.v8i1(<8 x i1> %a)
   ret i1 %or_result
 }
@@ -87,6 +147,57 @@ define i1 @test_redxor_v16i1(<16 x i1> %a) {
 ; CHECK-NEXT:    eor w8, w8, w11
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v16i1:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    mov b1, v0.b[1]
+; GISEL-NEXT:    mov b2, v0.b[2]
+; GISEL-NEXT:    mov b3, v0.b[3]
+; GISEL-NEXT:    mov b4, v0.b[4]
+; GISEL-NEXT:    mov b5, v0.b[5]
+; GISEL-NEXT:    mov b6, v0.b[6]
+; GISEL-NEXT:    mov b7, v0.b[7]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    mov b16, v0.b[8]
+; GISEL-NEXT:    mov b17, v0.b[9]
+; GISEL-NEXT:    mov b18, v0.b[10]
+; GISEL-NEXT:    mov b19, v0.b[11]
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    eor w9, w10, w11
+; GISEL-NEXT:    fmov w10, s4
+; GISEL-NEXT:    fmov w11, s5
+; GISEL-NEXT:    fmov w12, s6
+; GISEL-NEXT:    fmov w13, s7
+; GISEL-NEXT:    mov b20, v0.b[12]
+; GISEL-NEXT:    mov b21, v0.b[13]
+; GISEL-NEXT:    mov b22, v0.b[14]
+; GISEL-NEXT:    mov b23, v0.b[15]
+; GISEL-NEXT:    eor w10, w10, w11
+; GISEL-NEXT:    eor w11, w12, w13
+; GISEL-NEXT:    fmov w12, s16
+; GISEL-NEXT:    fmov w13, s17
+; GISEL-NEXT:    fmov w14, s18
+; GISEL-NEXT:    fmov w15, s19
+; GISEL-NEXT:    fmov w16, s22
+; GISEL-NEXT:    fmov w17, s23
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    eor w12, w12, w13
+; GISEL-NEXT:    eor w9, w10, w11
+; GISEL-NEXT:    eor w13, w14, w15
+; GISEL-NEXT:    fmov w14, s20
+; GISEL-NEXT:    fmov w15, s21
+; GISEL-NEXT:    eor w10, w12, w13
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    eor w14, w14, w15
+; GISEL-NEXT:    eor w15, w16, w17
+; GISEL-NEXT:    eor w11, w14, w15
+; GISEL-NEXT:    eor w9, w10, w11
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    ret
   %or_result = call i1 @llvm.vector.reduce.xor.v16i1(<16 x i1> %a)
   ret i1 %or_result
 }
@@ -97,6 +208,12 @@ define i8 @test_redxor_v1i8(<1 x i8> %a) {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    umov w0, v0.b[0]
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v1i8:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    fmov x0, d0
+; GISEL-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; GISEL-NEXT:    ret
   %xor_result = call i8 @llvm.vector.reduce.xor.v1i8(<1 x i8> %a)
   ret i8 %xor_result
 }
@@ -107,6 +224,12 @@ define i8 @test_redxor_v3i8(<3 x i8> %a) {
 ; CHECK-NEXT:    eor w8, w0, w1
 ; CHECK-NEXT:    eor w0, w8, w2
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v3i8:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    eor w8, w0, w1
+; GISEL-NEXT:    eor w0, w8, w2
+; GISEL-NEXT:    ret
   %xor_result = call i8 @llvm.vector.reduce.xor.v3i8(<3 x i8> %a)
   ret i8 %xor_result
 }
@@ -123,6 +246,21 @@ define i8 @test_redxor_v4i8(<4 x i8> %a) {
 ; CHECK-NEXT:    eor w8, w8, w10
 ; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v4i8:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; GISEL-NEXT:    mov h1, v0.h[1]
+; GISEL-NEXT:    mov h2, v0.h[2]
+; GISEL-NEXT:    mov h3, v0.h[3]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    eor w9, w10, w11
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %a)
   ret i8 %xor_result
 }
@@ -147,6 +285,33 @@ define i8 @test_redxor_v8i8(<8 x i8> %a) {
 ; CHECK-NEXT:    eor w8, w8, w9
 ; CHECK-NEXT:    eor w0, w8, w10
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v8i8:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; GISEL-NEXT:    mov b1, v0.b[1]
+; GISEL-NEXT:    mov b2, v0.b[2]
+; GISEL-NEXT:    mov b3, v0.b[3]
+; GISEL-NEXT:    mov b4, v0.b[4]
+; GISEL-NEXT:    mov b5, v0.b[5]
+; GISEL-NEXT:    mov b6, v0.b[6]
+; GISEL-NEXT:    mov b7, v0.b[7]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    fmov w12, s4
+; GISEL-NEXT:    fmov w13, s5
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    fmov w9, s6
+; GISEL-NEXT:    eor w10, w10, w11
+; GISEL-NEXT:    fmov w11, s7
+; GISEL-NEXT:    eor w12, w12, w13
+; GISEL-NEXT:    eor w8, w8, w10
+; GISEL-NEXT:    eor w9, w9, w11
+; GISEL-NEXT:    eor w9, w12, w9
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %a)
   ret i8 %xor_result
 }
@@ -172,6 +337,34 @@ define i8 @test_redxor_v16i8(<16 x i8> %a) {
 ; CHECK-NEXT:    eor w8, w8, w10
 ; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v16i8:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    mov d1, v0.d[1]
+; GISEL-NEXT:    eor v0.8b, v0.8b, v1.8b
+; GISEL-NEXT:    mov b1, v0.b[1]
+; GISEL-NEXT:    mov b2, v0.b[2]
+; GISEL-NEXT:    mov b3, v0.b[3]
+; GISEL-NEXT:    mov b4, v0.b[4]
+; GISEL-NEXT:    mov b5, v0.b[5]
+; GISEL-NEXT:    mov b6, v0.b[6]
+; GISEL-NEXT:    mov b7, v0.b[7]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    fmov w12, s4
+; GISEL-NEXT:    fmov w13, s5
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    fmov w9, s6
+; GISEL-NEXT:    eor w10, w10, w11
+; GISEL-NEXT:    fmov w11, s7
+; GISEL-NEXT:    eor w12, w12, w13
+; GISEL-NEXT:    eor w8, w8, w10
+; GISEL-NEXT:    eor w9, w9, w11
+; GISEL-NEXT:    eor w9, w12, w9
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %a)
   ret i8 %xor_result
 }
@@ -198,6 +391,35 @@ define i8 @test_redxor_v32i8(<32 x i8> %a) {
 ; CHECK-NEXT:    eor w8, w8, w10
 ; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v32i8:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    eor v0.16b, v0.16b, v1.16b
+; GISEL-NEXT:    mov d1, v0.d[1]
+; GISEL-NEXT:    eor v0.8b, v0.8b, v1.8b
+; GISEL-NEXT:    mov b1, v0.b[1]
+; GISEL-NEXT:    mov b2, v0.b[2]
+; GISEL-NEXT:    mov b3, v0.b[3]
+; GISEL-NEXT:    mov b4, v0.b[4]
+; GISEL-NEXT:    mov b5, v0.b[5]
+; GISEL-NEXT:    mov b6, v0.b[6]
+; GISEL-NEXT:    mov b7, v0.b[7]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    fmov w12, s4
+; GISEL-NEXT:    fmov w13, s5
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    fmov w9, s6
+; GISEL-NEXT:    eor w10, w10, w11
+; GISEL-NEXT:    fmov w11, s7
+; GISEL-NEXT:    eor w12, w12, w13
+; GISEL-NEXT:    eor w8, w8, w10
+; GISEL-NEXT:    eor w9, w9, w11
+; GISEL-NEXT:    eor w9, w12, w9
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %a)
   ret i8 %xor_result
 }
@@ -214,6 +436,21 @@ define i16 @test_redxor_v4i16(<4 x i16> %a) {
 ; CHECK-NEXT:    eor w8, w8, w10
 ; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v4i16:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; GISEL-NEXT:    mov h1, v0.h[1]
+; GISEL-NEXT:    mov h2, v0.h[2]
+; GISEL-NEXT:    mov h3, v0.h[3]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    eor w9, w10, w11
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %a)
   ret i16 %xor_result
 }
@@ -231,6 +468,22 @@ define i16 @test_redxor_v8i16(<8 x i16> %a) {
 ; CHECK-NEXT:    eor w8, w8, w10
 ; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v8i16:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    mov d1, v0.d[1]
+; GISEL-NEXT:    eor v0.8b, v0.8b, v1.8b
+; GISEL-NEXT:    mov h1, v0.h[1]
+; GISEL-NEXT:    mov h2, v0.h[2]
+; GISEL-NEXT:    mov h3, v0.h[3]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    eor w9, w10, w11
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %a)
   ret i16 %xor_result
 }
@@ -249,6 +502,23 @@ define i16 @test_redxor_v16i16(<16 x i16> %a) {
 ; CHECK-NEXT:    eor w8, w8, w10
 ; CHECK-NEXT:    eor w0, w8, w11
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v16i16:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    eor v0.16b, v0.16b, v1.16b
+; GISEL-NEXT:    mov d1, v0.d[1]
+; GISEL-NEXT:    eor v0.8b, v0.8b, v1.8b
+; GISEL-NEXT:    mov h1, v0.h[1]
+; GISEL-NEXT:    mov h2, v0.h[2]
+; GISEL-NEXT:    mov h3, v0.h[3]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    fmov w10, s2
+; GISEL-NEXT:    fmov w11, s3
+; GISEL-NEXT:    eor w8, w8, w9
+; GISEL-NEXT:    eor w9, w10, w11
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %a)
   ret i16 %xor_result
 }
@@ -261,6 +531,15 @@ define i32 @test_redxor_v2i32(<2 x i32> %a) {
 ; CHECK-NEXT:    fmov w9, s0
 ; CHECK-NEXT:    eor w0, w9, w8
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v2i32:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; GISEL-NEXT:    mov s1, v0.s[1]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %a)
   ret i32 %xor_result
 }
@@ -274,6 +553,16 @@ define i32 @test_redxor_v4i32(<4 x i32> %a) {
 ; CHECK-NEXT:    fmov w9, s0
 ; CHECK-NEXT:    eor w0, w9, w8
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v4i32:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    mov d1, v0.d[1]
+; GISEL-NEXT:    eor v0.8b, v0.8b, v1.8b
+; GISEL-NEXT:    mov s1, v0.s[1]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a)
   ret i32 %xor_result
 }
@@ -288,6 +577,17 @@ define i32 @test_redxor_v8i32(<8 x i32> %a) {
 ; CHECK-NEXT:    fmov w9, s0
 ; CHECK-NEXT:    eor w0, w9, w8
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v8i32:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    eor v0.16b, v0.16b, v1.16b
+; GISEL-NEXT:    mov d1, v0.d[1]
+; GISEL-NEXT:    eor v0.8b, v0.8b, v1.8b
+; GISEL-NEXT:    mov s1, v0.s[1]
+; GISEL-NEXT:    fmov w8, s0
+; GISEL-NEXT:    fmov w9, s1
+; GISEL-NEXT:    eor w0, w8, w9
+; GISEL-NEXT:    ret
   %xor_result = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %a)
   ret i32 %xor_result
 }
@@ -299,6 +599,14 @@ define i64 @test_redxor_v2i64(<2 x i64> %a) {
 ; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v2i64:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    mov d1, v0.d[1]
+; GISEL-NEXT:    fmov x8, d0
+; GISEL-NEXT:    fmov x9, d1
+; GISEL-NEXT:    eor x0, x8, x9
+; GISEL-NEXT:    ret
   %xor_result = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %a)
   ret i64 %xor_result
 }
@@ -311,6 +619,15 @@ define i64 @test_redxor_v4i64(<4 x i64> %a) {
 ; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: test_redxor_v4i64:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    eor v0.16b, v0.16b, v1.16b
+; GISEL-NEXT:    mov d1, v0.d[1]
+; GISEL-NEXT:    fmov x8, d0
+; GISEL-NEXT:    fmov x9, d1
+; GISEL-NEXT:    eor x0, x8, x9
+; GISEL-NEXT:    ret
   %xor_result = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %a)
   ret i64 %xor_result
 }


        


More information about the llvm-commits mailing list