[llvm] 5888a19 - [AArch64][GlobalISel] Lower vector types for min/max

Irina Dobrescu via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jul 7 07:34:21 PDT 2021


Author: Irina Dobrescu
Date: 2021-07-07T15:34:03+01:00
New Revision: 5888a194c1043b20bb2e4039ca28ef2a1e63e796

URL: https://github.com/llvm/llvm-project/commit/5888a194c1043b20bb2e4039ca28ef2a1e63e796
DIFF: https://github.com/llvm/llvm-project/commit/5888a194c1043b20bb2e4039ca28ef2a1e63e796.diff

LOG: [AArch64][GlobalISel] Lower vector types for min/max

Differential Revision: https://reviews.llvm.org/D105433
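
For context: with the scalar-only predicate replaced by an unconditional .lower(),
min/max operations on vector types outside the legalFor list, for example <2 x s64>,
which has no single NEON min/max instruction, are now expanded by the generic lowering
into a compare-and-mask select sequence (G_ICMP followed by G_SHL/G_ASHR/G_AND/G_XOR/G_OR,
as the new v2s64 tests below show), instead of only scalar types being lowered. As a
rough, illustrative example that is not part of this commit, IR along these lines would
reach that path once the generic @llvm.smin intrinsic is translated to G_SMIN:

    ; Illustrative input only: a signed min on <2 x i64>, for which AArch64 has no
    ; direct vector min instruction, so the resulting G_SMIN must be lowered.
    define <2 x i64> @smin_v2i64(<2 x i64> %a, <2 x i64> %b) {
      %r = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %a, <2 x i64> %b)
      ret <2 x i64> %r
    }
    declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)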

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 69be7fb94778b..cc55137e25a27 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -169,7 +169,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
 
   getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
       .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
-      .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); });
+      .lower();
 
   getActionDefinitionsBuilder(
       {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
index 5510b6d243929..11f676f495291 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
@@ -127,4 +127,510 @@ body: |
     RET_ReallyLR implicit $q0
 
 ...
+---
+name:            v2s64_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v2s64_smin
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), %vec(<2 x s64>), %vec1
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND %vec1, [[XOR]]
+    ; CHECK: %smin:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: $q0 = COPY %smin(<2 x s64>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
+    %smin:_(<2 x s64>) = G_SMIN %vec, %vec1
+    $q0 = COPY %smin
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v8s8_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v8s8_umin
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %umin:_(<8 x s8>) = G_UMIN %vec, %vec1
+    ; CHECK: $x0 = COPY %umin(<8 x s8>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<8 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s8>) = G_IMPLICIT_DEF
+    %umin:_(<8 x s8>) = G_UMIN %vec, %vec1
+    $x0 = COPY %umin
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v16s8_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v16s8_umin
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %umin:_(<16 x s8>) = G_UMIN %vec, %vec1
+    ; CHECK: $q0 = COPY %umin(<16 x s8>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    %umin:_(<16 x s8>) = G_UMIN %vec, %vec1
+    $q0 = COPY %umin
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v4s16_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v4s16_umin
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %umin:_(<4 x s16>) = G_UMIN %vec, %vec1
+    ; CHECK: $x0 = COPY %umin(<4 x s16>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<4 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s16>) = G_IMPLICIT_DEF
+    %umin:_(<4 x s16>) = G_UMIN %vec, %vec1
+    $x0 = COPY %umin
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v8s16_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v8s16_umin
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %umin:_(<8 x s16>) = G_UMIN %vec, %vec1
+    ; CHECK: $q0 = COPY %umin(<8 x s16>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    %umin:_(<8 x s16>) = G_UMIN %vec, %vec1
+    $q0 = COPY %umin
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v2s32_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v2s32_umin
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %umin:_(<2 x s32>) = G_UMIN %vec, %vec1
+    ; CHECK: $x0 = COPY %umin(<2 x s32>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    %umin:_(<2 x s32>) = G_UMIN %vec, %vec1
+    $x0 = COPY %umin
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v4s32_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v4s32_umin
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %umin:_(<4 x s32>) = G_UMIN %vec, %vec1
+    ; CHECK: $q0 = COPY %umin(<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<4 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s32>) = G_IMPLICIT_DEF
+    %umin:_(<4 x s32>) = G_UMIN %vec, %vec1
+    $q0 = COPY %umin
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v2s64_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v2s64_umin
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), %vec(<2 x s64>), %vec1
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND %vec1, [[XOR]]
+    ; CHECK: %umin:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: $q0 = COPY %umin(<2 x s64>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
+    %umin:_(<2 x s64>) = G_UMIN %vec, %vec1
+    $q0 = COPY %umin
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v8s8_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v8s8_smax
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %smax:_(<8 x s8>) = G_SMAX %vec, %vec1
+    ; CHECK: $x0 = COPY %smax(<8 x s8>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<8 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s8>) = G_IMPLICIT_DEF
+    %smax:_(<8 x s8>) = G_SMAX %vec, %vec1
+    $x0 = COPY %smax
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v16s8_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v16s8_smax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %smax:_(<16 x s8>) = G_SMAX %vec, %vec1
+    ; CHECK: $q0 = COPY %smax(<16 x s8>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    %smax:_(<16 x s8>) = G_SMAX %vec, %vec1
+    $q0 = COPY %smax
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v4s16_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v4s16_smax
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %smax:_(<4 x s16>) = G_SMAX %vec, %vec1
+    ; CHECK: $x0 = COPY %smax(<4 x s16>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<4 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s16>) = G_IMPLICIT_DEF
+    %smax:_(<4 x s16>) = G_SMAX %vec, %vec1
+    $x0 = COPY %smax
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v8s16_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v8s16_smax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %smax:_(<8 x s16>) = G_SMAX %vec, %vec1
+    ; CHECK: $q0 = COPY %smax(<8 x s16>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    %smax:_(<8 x s16>) = G_SMAX %vec, %vec1
+    $q0 = COPY %smax
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v2s32_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v2s32_smax
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %smax:_(<2 x s32>) = G_SMAX %vec, %vec1
+    ; CHECK: $x0 = COPY %smax(<2 x s32>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    %smax:_(<2 x s32>) = G_SMAX %vec, %vec1
+    $x0 = COPY %smax
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v4s32_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v4s32_smax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %smax:_(<4 x s32>) = G_SMAX %vec, %vec1
+    ; CHECK: $q0 = COPY %smax(<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<4 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s32>) = G_IMPLICIT_DEF
+    %smax:_(<4 x s32>) = G_SMAX %vec, %vec1
+    $q0 = COPY %smax
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v2s64_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v2s64_smax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), %vec(<2 x s64>), %vec1
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND %vec1, [[XOR]]
+    ; CHECK: %smax:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: $q0 = COPY %smax(<2 x s64>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
+    %smax:_(<2 x s64>) = G_SMAX %vec, %vec1
+    $q0 = COPY %smax
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v8s8_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v8s8_umax
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %umax:_(<8 x s8>) = G_UMAX %vec, %vec1
+    ; CHECK: $x0 = COPY %umax(<8 x s8>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<8 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s8>) = G_IMPLICIT_DEF
+    %umax:_(<8 x s8>) = G_UMAX %vec, %vec1
+    $x0 = COPY %umax
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v16s8_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v16s8_umax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %umax:_(<16 x s8>) = G_UMAX %vec, %vec1
+    ; CHECK: $q0 = COPY %umax(<16 x s8>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    %umax:_(<16 x s8>) = G_UMAX %vec, %vec1
+    $q0 = COPY %umax
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v4s16_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v4s16_umax
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %umax:_(<4 x s16>) = G_UMAX %vec, %vec1
+    ; CHECK: $x0 = COPY %umax(<4 x s16>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<4 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s16>) = G_IMPLICIT_DEF
+    %umax:_(<4 x s16>) = G_UMAX %vec, %vec1
+    $x0 = COPY %umax
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v8s16_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v8s16_umax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %umax:_(<8 x s16>) = G_UMAX %vec, %vec1
+    ; CHECK: $q0 = COPY %umax(<8 x s16>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    %umax:_(<8 x s16>) = G_UMAX %vec, %vec1
+    $q0 = COPY %umax
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v2s32_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v2s32_umax
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %umax:_(<2 x s32>) = G_UMAX %vec, %vec1
+    ; CHECK: $x0 = COPY %umax(<2 x s32>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    %umax:_(<2 x s32>) = G_UMAX %vec, %vec1
+    $x0 = COPY %umax
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v4s32_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v4s32_umax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %umax:_(<4 x s32>) = G_UMAX %vec, %vec1
+    ; CHECK: $q0 = COPY %umax(<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<4 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s32>) = G_IMPLICIT_DEF
+    %umax:_(<4 x s32>) = G_UMAX %vec, %vec1
+    $q0 = COPY %umax
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v2s64_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v2s64_umax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), %vec(<2 x s64>), %vec1
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND %vec1, [[XOR]]
+    ; CHECK: %umax:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: $q0 = COPY %umax(<2 x s64>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<2 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
+    %umax:_(<2 x s64>) = G_UMAX %vec, %vec1
+    $q0 = COPY %umax
+    RET_ReallyLR implicit $q0
+
+...
 


        

