[llvm] 04dbd44 - [AArch64][test] Merge arm64-$i.ll Linux tests into $i.ll

Fangrui Song via llvm-commits <llvm-commits@lists.llvm.org>
Fri Jan 3 09:19:01 PST 2020


Author: Fangrui Song
Date: 2020-01-03T09:18:55-08:00
New Revision: 04dbd449c290438ea3389a806ed3cff057b4b821

URL: https://github.com/llvm/llvm-project/commit/04dbd449c290438ea3389a806ed3cff057b4b821
DIFF: https://github.com/llvm/llvm-project/commit/04dbd449c290438ea3389a806ed3cff057b4b821.diff

LOG: [AArch64][test] Merge arm64-$i.ll Linux tests into $i.ll

Reviewed By: dmgreen

Differential Revision: https://reviews.llvm.org/D72061

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/basic-pic.ll
    llvm/test/CodeGen/AArch64/code-model-large-abs.ll
    llvm/test/CodeGen/AArch64/illegal-float-ops.ll

Removed: 
    llvm/test/CodeGen/AArch64/arm64-basic-pic.ll
    llvm/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
    llvm/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll
    llvm/test/CodeGen/AArch64/arm64-extern-weak.ll
    llvm/test/CodeGen/AArch64/arm64-illegal-float-ops.ll
    llvm/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll


################################################################################
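For context on the deletions below: each removed arm64-*.ll file duplicated,
for the Linux triple, coverage that now lives solely in the unprefixed test of
the same name. The surviving shape is an ordinary llc + FileCheck test; here is
a condensed sketch, distilled from basic-pic.ll below rather than copied
verbatim:

    ; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic %s -o - | FileCheck %s

    @var = global i32 0

    define i32 @get_globalvar() {
    ; CHECK-LABEL: get_globalvar:
    ; Small-model PIC loads a preemptible global's address from the GOT:
    ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
    ; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], :got_lo12:var]
    ; CHECK: ldr w0, [x[[GOTLOC]]]
      %val = load i32, i32* @var
      ret i32 %val
    }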
diff --git a/llvm/test/CodeGen/AArch64/arm64-basic-pic.ll b/llvm/test/CodeGen/AArch64/arm64-basic-pic.ll
deleted file mode 100644
index e11274e45ff1..000000000000
--- a/llvm/test/CodeGen/AArch64/arm64-basic-pic.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -relocation-model=pic %s -o - | FileCheck %s
-
-@var = global i32 0
-
-define i32 @get_globalvar() {
-; CHECK-LABEL: get_globalvar:
-
-  %val = load i32, i32* @var
-; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
-; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], :got_lo12:var]
-; CHECK: ldr w0, [x[[GOTLOC]]]
-
-  ret i32 %val
-}
-
-define i32* @get_globalvaraddr() {
-; CHECK-LABEL: get_globalvaraddr:
-
-  %val = load i32, i32* @var
-; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
-; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:var]
-
-  ret i32* @var
-}
-
-@hiddenvar = hidden global i32 0
-
-define i32 @get_hiddenvar() {
-; CHECK-LABEL: get_hiddenvar:
-
-  %val = load i32, i32* @hiddenvar
-; CHECK: adrp x[[HI:[0-9]+]], hiddenvar
-; CHECK: ldr w0, [x[[HI]], :lo12:hiddenvar]
-
-  ret i32 %val
-}
-
-define i32* @get_hiddenvaraddr() {
-; CHECK-LABEL: get_hiddenvaraddr:
-
-  %val = load i32, i32* @hiddenvar
-; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
-; CHECK: add x0, [[HI]], :lo12:hiddenvar
-
-  ret i32* @hiddenvar
-}
-
-define void()* @get_func() {
-; CHECK-LABEL: get_func:
-
-  ret void()* bitcast(void()*()* @get_func to void()*)
-; CHECK: adrp x[[GOTHI:[0-9]+]], :got:get_func
-; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:get_func]
-}
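The hidden-visibility cases in the file above are the deliberate contrast to
the GOT loads: a hidden global cannot be preempted by another module, so even
PIC code may address it directly with an adrp/add pair instead of a GOT
indirection. A self-contained sketch of just that case:

    ; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic %s -o - | FileCheck %s

    @hiddenvar = hidden global i32 0

    define i32* @get_hiddenvaraddr() {
    ; CHECK-LABEL: get_hiddenvaraddr:
    ; No :got: specifier; the address is formed pc-relative in two instructions.
    ; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
    ; CHECK: add x0, [[HI]], :lo12:hiddenvar
      ret i32* @hiddenvar
    }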

diff --git a/llvm/test/CodeGen/AArch64/arm64-code-model-large-abs.ll b/llvm/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
deleted file mode 100644
index 171941748c8f..000000000000
--- a/llvm/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; RUN: llc -mtriple=arm64-none-linux-gnu -code-model=large < %s | FileCheck %s
-
-@var8 = global i8 0
-@var16 = global i16 0
-@var32 = global i32 0
-@var64 = global i64 0
-
-define i8* @global_addr() {
-; CHECK-LABEL: global_addr:
-  ret i8* @var8
-  ; The movz/movk calculation should end up returned directly in x0.
-; CHECK: movz x0, #:abs_g0_nc:var8
-; CHECK: movk x0, #:abs_g1_nc:var8
-; CHECK: movk x0, #:abs_g2_nc:var8
-; CHECK: movk x0, #:abs_g3:var8
-; CHECK-NEXT: ret
-}
-
-define i8 @global_i8() {
-; CHECK-LABEL: global_i8:
-  %val = load i8, i8* @var8
-  ret i8 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var8
-; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var8
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8
-; CHECK: movk x[[ADDR_REG]], #:abs_g3:var8
-; CHECK: ldrb w0, [x[[ADDR_REG]]]
-}
-
-define i16 @global_i16() {
-; CHECK-LABEL: global_i16:
-  %val = load i16, i16* @var16
-  ret i16 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var16
-; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var16
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16
-; CHECK: movk x[[ADDR_REG]], #:abs_g3:var16
-; CHECK: ldrh w0, [x[[ADDR_REG]]]
-}
-
-define i32 @global_i32() {
-; CHECK-LABEL: global_i32:
-  %val = load i32, i32* @var32
-  ret i32 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var32
-; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var32
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32
-; CHECK: movk x[[ADDR_REG]], #:abs_g3:var32
-; CHECK: ldr w0, [x[[ADDR_REG]]]
-}
-
-define i64 @global_i64() {
-; CHECK-LABEL: global_i64:
-  %val = load i64, i64* @var64
-  ret i64 %val
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var64
-; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var64
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64
-; CHECK: movk x[[ADDR_REG]], #:abs_g3:var64
-; CHECK: ldr x0, [x[[ADDR_REG]]]
-}
-
-define <2 x i64> @constpool() {
-; CHECK-LABEL: constpool:
-  ret <2 x i64> <i64 123456789, i64 987654321100>
-
-; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:[[CPADDR:.LCPI[0-9]+_[0-9]+]]
-; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:[[CPADDR]]
-; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:[[CPADDR]]
-; CHECK: movk x[[ADDR_REG]], #:abs_g3:[[CPADDR]]
-; CHECK: ldr q0, [x[[ADDR_REG]]]
-}
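The movz/movk sequences checked above assemble a 64-bit absolute address
sixteen bits at a time: the abs_g0 through abs_g3 specifiers select bits
[15:0] up to [63:48] of the symbol's address, and the _nc ("no check") suffix
on the lower three suppresses the overflow check that would otherwise reject a
partial chunk. Condensed to a single self-contained test (the global @g and
function name are illustrative):

    ; RUN: llc -mtriple=arm64-none-linux-gnu -code-model=large < %s | FileCheck %s

    @g = global i64 0

    define i64* @addr_of_g() {
    ; CHECK-LABEL: addr_of_g:
    ; CHECK: movz x0, #:abs_g0_nc:g
    ; CHECK: movk x0, #:abs_g1_nc:g
    ; CHECK: movk x0, #:abs_g2_nc:g
    ; CHECK: movk x0, #:abs_g3:g
      ret i64* @g
    }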

diff --git a/llvm/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll b/llvm/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll
deleted file mode 100644
index 938bc62808f5..000000000000
--- a/llvm/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=-neon < %s
-
-; The DAG combiner decided to use a vector load/store for this struct copy
-; previously. This probably shouldn't happen without NEON, but the most
-; important thing is that it compiles.
-
-define void @store_combine() nounwind {
-  %src = alloca { double, double }, align 8
-  %dst = alloca { double, double }, align 8
-
-  %src.realp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 0
-  %src.real = load double, double* %src.realp
-  %src.imagp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 1
-  %src.imag = load double, double* %src.imagp
-
-  %dst.realp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 0
-  %dst.imagp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 1
-  store double %src.real, double* %dst.realp
-  store double %src.imag, double* %dst.imagp
-  ret void
-}
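Note that this file carries no FileCheck lines at all: the RUN line's only
assertion is that llc exits cleanly, guarding against what the comment
describes, presumably a selection failure if the DAG combiner forms a vector
load/store while NEON is disabled. The compile-only idiom in miniature
(function and names hypothetical):

    ; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=-neon < %s
    ; No pipe into FileCheck: the test passes as long as codegen completes.
    define void @copy(double* %p, double* %q) {
      %v = load double, double* %p
      store double %v, double* %q
      ret void
    }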

diff --git a/llvm/test/CodeGen/AArch64/arm64-extern-weak.ll b/llvm/test/CodeGen/AArch64/arm64-extern-weak.ll
deleted file mode 100644
index c98bda0d01a0..000000000000
--- a/llvm/test/CodeGen/AArch64/arm64-extern-weak.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -o - < %s | FileCheck %s
-; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=static -o - < %s | FileCheck %s
-; RUN: llc -mtriple=arm64-none-linux-gnu -code-model=large -o - < %s | FileCheck --check-prefix=CHECK-LARGE %s
-
-declare extern_weak i32 @var()
-
-define i32()* @foo() {
-; The usual ADRP/ADD pair can't be used for a weak reference because it must
-; evaluate to 0 if the symbol is undefined. We use a GOT entry for PIC
-; otherwise a litpool entry.
-  ret i32()* @var
-
-; CHECK: adrp x[[VAR:[0-9]+]], :got:var
-; CHECK: ldr x0, [x[[VAR]], :got_lo12:var]
-
-  ; In the large model, the usual relocations are absolute and can
-  ; materialise 0.
-; CHECK-LARGE: movz x0, #:abs_g0_nc:var
-; CHECK-LARGE: movk x0, #:abs_g1_nc:var
-; CHECK-LARGE: movk x0, #:abs_g2_nc:var
-; CHECK-LARGE: movk x0, #:abs_g3:var
-}
-
-
-@arr_var = extern_weak global [10 x i32]
-
-define i32* @bar() {
-  %addr = getelementptr [10 x i32], [10 x i32]* @arr_var, i32 0, i32 5
-; CHECK: adrp x[[ARR_VAR_HI:[0-9]+]], :got:arr_var
-; CHECK: ldr [[ARR_VAR:x[0-9]+]], [x[[ARR_VAR_HI]], :got_lo12:arr_var]
-; CHECK: add x0, [[ARR_VAR]], #20
-  ret i32* %addr
-
-  ; In the large model, the usual relocations are absolute and can
-  ; materialise 0.
-; CHECK-LARGE: movz [[ARR_VAR:x[0-9]+]], #:abs_g0_nc:arr_var
-; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g1_nc:arr_var
-; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g2_nc:arr_var
-; CHECK-LARGE: movk [[ARR_VAR]], #:abs_g3:arr_var
-}
-
-@defined_weak_var = internal unnamed_addr global i32 0
-
-define i32* @wibble() {
-  ret i32* @defined_weak_var
-; CHECK: adrp [[BASE:x[0-9]+]], defined_weak_var
-; CHECK: add x0, [[BASE]], :lo12:defined_weak_var
-
-; CHECK-LARGE: movz x0, #:abs_g0_nc:defined_weak_var
-; CHECK-LARGE: movk x0, #:abs_g1_nc:defined_weak_var
-; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
-; CHECK-LARGE: movk x0, #:abs_g3:defined_weak_var
-}
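The point this file pins down: an extern_weak symbol may legitimately resolve
to address 0, which an adrp/add pair can never produce, so the small code
model must load such addresses from the GOT, while the large model's absolute
movz/movk chain can encode 0 directly. A sketch of the same check against a
hypothetical symbol (the function below is illustrative, not from this
commit):

    ; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -o - < %s | FileCheck %s

    declare extern_weak void @maybe()

    define i1 @have_maybe() {
    ; CHECK: adrp x[[HI:[0-9]+]], :got:maybe
    ; CHECK: ldr {{x[0-9]+}}, [x[[HI]], :got_lo12:maybe]
      %ok = icmp ne void ()* @maybe, null
      ret i1 %ok
    }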

diff --git a/llvm/test/CodeGen/AArch64/arm64-illegal-float-ops.ll b/llvm/test/CodeGen/AArch64/arm64-illegal-float-ops.ll
deleted file mode 100644
index 9a35fe54d32e..000000000000
--- a/llvm/test/CodeGen/AArch64/arm64-illegal-float-ops.ll
+++ /dev/null
@@ -1,295 +0,0 @@
-; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
-
-@varfloat = global float 0.0
-@vardouble = global double 0.0
-@varfp128 = global fp128 zeroinitializer
-
-declare float @llvm.cos.f32(float)
-declare double @llvm.cos.f64(double)
-declare fp128 @llvm.cos.f128(fp128)
-
-define void @test_cos(float %float, double %double, fp128 %fp128) {
-; CHECK-LABEL: test_cos:
-
-   %cosfloat = call float @llvm.cos.f32(float %float)
-   store float %cosfloat, float* @varfloat
-; CHECK: bl cosf
-
-   %cosdouble = call double @llvm.cos.f64(double %double)
-   store double %cosdouble, double* @vardouble
-; CHECK: bl cos
-
-   %cosfp128 = call fp128 @llvm.cos.f128(fp128 %fp128)
-   store fp128 %cosfp128, fp128* @varfp128
-; CHECK: bl cosl
-
-  ret void
-}
-
-declare float @llvm.exp.f32(float)
-declare double @llvm.exp.f64(double)
-declare fp128 @llvm.exp.f128(fp128)
-
-define void @test_exp(float %float, double %double, fp128 %fp128) {
-; CHECK-LABEL: test_exp:
-
-   %expfloat = call float @llvm.exp.f32(float %float)
-   store float %expfloat, float* @varfloat
-; CHECK: bl expf
-
-   %expdouble = call double @llvm.exp.f64(double %double)
-   store double %expdouble, double* @vardouble
-; CHECK: bl exp
-
-   %expfp128 = call fp128 @llvm.exp.f128(fp128 %fp128)
-   store fp128 %expfp128, fp128* @varfp128
-; CHECK: bl expl
-
-  ret void
-}
-
-declare float @llvm.exp2.f32(float)
-declare double @llvm.exp2.f64(double)
-declare fp128 @llvm.exp2.f128(fp128)
-
-define void @test_exp2(float %float, double %double, fp128 %fp128) {
-; CHECK-LABEL: test_exp2:
-
-   %exp2float = call float @llvm.exp2.f32(float %float)
-   store float %exp2float, float* @varfloat
-; CHECK: bl exp2f
-
-   %exp2double = call double @llvm.exp2.f64(double %double)
-   store double %exp2double, double* @vardouble
-; CHECK: bl exp2
-
-   %exp2fp128 = call fp128 @llvm.exp2.f128(fp128 %fp128)
-   store fp128 %exp2fp128, fp128* @varfp128
-; CHECK: bl exp2l
-  ret void
-
-}
-
-declare float @llvm.log.f32(float)
-declare double @llvm.log.f64(double)
-declare fp128 @llvm.log.f128(fp128)
-
-define void @test_log(float %float, double %double, fp128 %fp128) {
-; CHECK-LABEL: test_log:
-
-   %logfloat = call float @llvm.log.f32(float %float)
-   store float %logfloat, float* @varfloat
-; CHECK: bl logf
-
-   %logdouble = call double @llvm.log.f64(double %double)
-   store double %logdouble, double* @vardouble
-; CHECK: bl log
-
-   %logfp128 = call fp128 @llvm.log.f128(fp128 %fp128)
-   store fp128 %logfp128, fp128* @varfp128
-; CHECK: bl logl
-
-  ret void
-}
-
-declare float @llvm.log2.f32(float)
-declare double @llvm.log2.f64(double)
-declare fp128 @llvm.log2.f128(fp128)
-
-define void @test_log2(float %float, double %double, fp128 %fp128) {
-; CHECK-LABEL: test_log2:
-
-   %log2float = call float @llvm.log2.f32(float %float)
-   store float %log2float, float* @varfloat
-; CHECK: bl log2f
-
-   %log2double = call double @llvm.log2.f64(double %double)
-   store double %log2double, double* @vardouble
-; CHECK: bl log2
-
-   %log2fp128 = call fp128 @llvm.log2.f128(fp128 %fp128)
-   store fp128 %log2fp128, fp128* @varfp128
-; CHECK: bl log2l
-  ret void
-
-}
-
-declare float @llvm.log10.f32(float)
-declare double @llvm.log10.f64(double)
-declare fp128 @llvm.log10.f128(fp128)
-
-define void @test_log10(float %float, double %double, fp128 %fp128) {
-; CHECK-LABEL: test_log10:
-
-   %log10float = call float @llvm.log10.f32(float %float)
-   store float %log10float, float* @varfloat
-; CHECK: bl log10f
-
-   %log10double = call double @llvm.log10.f64(double %double)
-   store double %log10double, double* @vardouble
-; CHECK: bl log10
-
-   %log10fp128 = call fp128 @llvm.log10.f128(fp128 %fp128)
-   store fp128 %log10fp128, fp128* @varfp128
-; CHECK: bl log10l
-
-  ret void
-}
-
-declare float @llvm.sin.f32(float)
-declare double @llvm.sin.f64(double)
-declare fp128 @llvm.sin.f128(fp128)
-
-define void @test_sin(float %float, double %double, fp128 %fp128) {
-; CHECK-LABEL: test_sin:
-
-   %sinfloat = call float @llvm.sin.f32(float %float)
-   store float %sinfloat, float* @varfloat
-; CHECK: bl sinf
-
-   %sindouble = call double @llvm.sin.f64(double %double)
-   store double %sindouble, double* @vardouble
-; CHECK: bl sin
-
-   %sinfp128 = call fp128 @llvm.sin.f128(fp128 %fp128)
-   store fp128 %sinfp128, fp128* @varfp128
-; CHECK: bl sinl
-  ret void
-
-}
-
-declare float @llvm.pow.f32(float, float)
-declare double @llvm.pow.f64(double, double)
-declare fp128 @llvm.pow.f128(fp128, fp128)
-
-define void @test_pow(float %float, double %double, fp128 %fp128) {
-; CHECK-LABEL: test_pow:
-
-   %powfloat = call float @llvm.pow.f32(float %float, float %float)
-   store float %powfloat, float* @varfloat
-; CHECK: bl powf
-
-   %powdouble = call double @llvm.pow.f64(double %double, double %double)
-   store double %powdouble, double* @vardouble
-; CHECK: bl pow
-
-   %powfp128 = call fp128 @llvm.pow.f128(fp128 %fp128, fp128 %fp128)
-   store fp128 %powfp128, fp128* @varfp128
-; CHECK: bl powl
-
-  ret void
-}
-
-declare float @llvm.powi.f32(float, i32)
-declare double @llvm.powi.f64(double, i32)
-declare fp128 @llvm.powi.f128(fp128, i32)
-
-define void @test_powi(float %float, double %double, i32 %exponent, fp128 %fp128) {
-; CHECK-LABEL: test_powi:
-
-   %powifloat = call float @llvm.powi.f32(float %float, i32 %exponent)
-   store float %powifloat, float* @varfloat
-; CHECK: bl __powisf2
-
-   %powidouble = call double @llvm.powi.f64(double %double, i32 %exponent)
-   store double %powidouble, double* @vardouble
-; CHECK: bl __powidf2
-
-   %powifp128 = call fp128 @llvm.powi.f128(fp128 %fp128, i32 %exponent)
-   store fp128 %powifp128, fp128* @varfp128
-; CHECK: bl __powitf2
-  ret void
-
-}
-
-define void @test_frem(float %float, double %double, fp128 %fp128) {
-; CHECK-LABEL: test_frem:
-
-  %fremfloat = frem float %float, %float
-  store float %fremfloat, float* @varfloat
-; CHECK: bl fmodf
-
-  %fremdouble = frem double %double, %double
-  store double %fremdouble, double* @vardouble
-; CHECK: bl fmod
-
-  %fremfp128 = frem fp128 %fp128, %fp128
-  store fp128 %fremfp128, fp128* @varfp128
-; CHECK: bl fmodl
-
-  ret void
-}
-
-declare fp128 @llvm.fma.f128(fp128, fp128, fp128)
-
-define void @test_fma(fp128 %fp128) {
-; CHECK-LABEL: test_fma:
-
-  %fmafp128 = call fp128 @llvm.fma.f128(fp128 %fp128, fp128 %fp128, fp128 %fp128)
-  store fp128 %fmafp128, fp128* @varfp128
-; CHECK: bl fmal
-
-  ret void
-}
-
-declare fp128 @llvm.fmuladd.f128(fp128, fp128, fp128)
-
-define void @test_fmuladd(fp128 %fp128) {
-; CHECK-LABEL: test_fmuladd:
-
-  %fmuladdfp128 = call fp128 @llvm.fmuladd.f128(fp128 %fp128, fp128 %fp128, fp128 %fp128)
-  store fp128 %fmuladdfp128, fp128* @varfp128
-; CHECK-NOT: bl fmal
-; CHECK: bl __multf3
-; CHECK: bl __addtf3
-
-  ret void
-}
-
-define i32 @test_fptosi32(fp128 %a) {
-; CHECK-LABEL: test_fptosi32:
-; CHECK: bl __fixtfsi
-  %conv.i = fptosi fp128 %a to i32
-  %b = add nsw i32 %conv.i, 48
-  ret i32 %b
-}
-
-define i64 @test_fptosi64(fp128 %a) {
-; CHECK-LABEL: test_fptosi64:
-; CHECK: bl __fixtfdi
-  %conv.i = fptosi fp128 %a to i64
-  %b = add nsw i64 %conv.i, 48
-  ret i64 %b
-}
-
-define i128 @test_fptosi128(fp128 %a) {
-; CHECK-LABEL: test_fptosi128:
-; CHECK: bl __fixtfti
-  %conv.i = fptosi fp128 %a to i128
-  %b = add nsw i128 %conv.i, 48
-  ret i128 %b
-}
-
-define i32 @test_fptoui32(fp128 %a) {
-; CHECK-LABEL: test_fptoui32:
-; CHECK: bl __fixunstfsi
-  %conv.i = fptoui fp128 %a to i32
-  %b = add nsw i32 %conv.i, 48
-  ret i32 %b
-}
-
-define i64 @test_fptoui64(fp128 %a) {
-; CHECK-LABEL: test_fptoui64:
-; CHECK: bl __fixunstfdi
-  %conv.i = fptoui fp128 %a to i64
-  %b = add nsw i64 %conv.i, 48
-  ret i64 %b
-}
-
-define i128 @test_fptoui128(fp128 %a) {
-; CHECK-LABEL: test_fptoui128:
-; CHECK: bl __fixunstfti
-  %conv.i = fptoui fp128 %a to i128
-  %b = add nsw i128 %conv.i, 48
-  ret i128 %b
-}
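Everything in this file exercises the same legalization rule: AArch64 has no
fp128 arithmetic instructions and no transcendental instructions at any width,
so these operations become libcalls, with the usual C suffix convention
(cosf/cos/cosl and so on) selecting the variant. One self-contained instance,
condensed from test_frem above:

    ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s

    @varfp128 = global fp128 zeroinitializer

    define void @test_frem_f128(fp128 %x) {
    ; CHECK-LABEL: test_frem_f128:
    ; CHECK: bl fmodl
      %r = frem fp128 %x, %x
      store fp128 %r, fp128* @varfp128
      ret void
    }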

diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll
deleted file mode 100644
index 8af3807941e1..000000000000
--- a/llvm/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll
+++ /dev/null
@@ -1,1194 +0,0 @@
-; RUN: llc -mtriple=arm64-none-linux-gnu < %s | FileCheck %s
-
-define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp eq <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp eq <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-	%tmp3 = icmp eq <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-	%tmp3 = icmp eq <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-	%tmp3 = icmp eq <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-	%tmp3 = icmp eq <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-	%tmp3 = icmp eq <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ne <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ne <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ne <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ne <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ne <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ne <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ne <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp sgt <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp sgt <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-	%tmp3 = icmp sgt <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-	%tmp3 = icmp sgt <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-	%tmp3 = icmp sgt <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-	%tmp3 = icmp sgt <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-	%tmp3 = icmp sgt <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b
-	%tmp3 = icmp slt <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b
-	%tmp3 = icmp slt <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h
-	%tmp3 = icmp slt <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h
-	%tmp3 = icmp slt <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
-	%tmp3 = icmp slt <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
-	%tmp3 = icmp slt <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LT implemented as GT, so check reversed operands.
-;CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
-	%tmp3 = icmp slt <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp sge <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp sge <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-	%tmp3 = icmp sge <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-	%tmp3 = icmp sge <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-	%tmp3 = icmp sge <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-	%tmp3 = icmp sge <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-	%tmp3 = icmp sge <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b
-	%tmp3 = icmp sle <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b
-	%tmp3 = icmp sle <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h
-	%tmp3 = icmp sle <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h
-	%tmp3 = icmp sle <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s
-	%tmp3 = icmp sle <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s
-	%tmp3 = icmp sle <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LE implemented as GE, so check reversed operands.
-;CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d
-	%tmp3 = icmp sle <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ugt <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ugt <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-	%tmp3 = icmp ugt <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-	%tmp3 = icmp ugt <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-	%tmp3 = icmp ugt <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-	%tmp3 = icmp ugt <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-	%tmp3 = icmp ugt <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b
-	%tmp3 = icmp ult <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
-	%tmp3 = icmp ult <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
-	%tmp3 = icmp ult <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
-	%tmp3 = icmp ult <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
-	%tmp3 = icmp ult <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
-	%tmp3 = icmp ult <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
-	%tmp3 = icmp ult <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp uge <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp uge <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-	%tmp3 = icmp uge <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-	%tmp3 = icmp uge <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-	%tmp3 = icmp uge <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-	%tmp3 = icmp uge <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-	%tmp3 = icmp uge <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
-	%tmp3 = icmp ule <8 x i8> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
-	%tmp3 = icmp ule <16 x i8> %A, %B;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
-	%tmp3 = icmp ule <4 x i16> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
-	%tmp3 = icmp ule <8 x i16> %A, %B;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
-	%tmp3 = icmp ule <2 x i32> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
-	%tmp3 = icmp ule <4 x i32> %A, %B;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
-	%tmp3 = icmp ule <2 x i64> %A, %B;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-
-define <8 x i8> @cmeqz8xi8(<8 x i8> %A) {
-;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
-	%tmp3 = icmp eq <8 x i8> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmeqz16xi8(<16 x i8> %A) {
-;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
-	%tmp3 = icmp eq <16 x i8> %A, zeroinitializer;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmeqz4xi16(<4 x i16> %A) {
-;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
-	%tmp3 = icmp eq <4 x i16> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmeqz8xi16(<8 x i16> %A) {
-;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
-	%tmp3 = icmp eq <8 x i16> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmeqz2xi32(<2 x i32> %A) {
-;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
-	%tmp3 = icmp eq <2 x i32> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmeqz4xi32(<4 x i32> %A) {
-;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
-	%tmp3 = icmp eq <4 x i32> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
-;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
-	%tmp3 = icmp eq <2 x i64> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-
-define <8 x i8> @cmgez8xi8(<8 x i8> %A) {
-;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
-	%tmp3 = icmp sge <8 x i8> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmgez16xi8(<16 x i8> %A) {
-;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
-	%tmp3 = icmp sge <16 x i8> %A, zeroinitializer;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmgez4xi16(<4 x i16> %A) {
-;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
-	%tmp3 = icmp sge <4 x i16> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmgez8xi16(<8 x i16> %A) {
-;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
-	%tmp3 = icmp sge <8 x i16> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmgez2xi32(<2 x i32> %A) {
-;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
-	%tmp3 = icmp sge <2 x i32> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmgez4xi32(<4 x i32> %A) {
-;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
-	%tmp3 = icmp sge <4 x i32> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
-;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
-	%tmp3 = icmp sge <2 x i64> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-
-define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
-;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
-	%tmp3 = icmp sgt <8 x i8> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmgtz16xi8(<16 x i8> %A) {
-;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
-	%tmp3 = icmp sgt <16 x i8> %A, zeroinitializer;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmgtz4xi16(<4 x i16> %A) {
-;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
-	%tmp3 = icmp sgt <4 x i16> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmgtz8xi16(<8 x i16> %A) {
-;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
-	%tmp3 = icmp sgt <8 x i16> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmgtz2xi32(<2 x i32> %A) {
-;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
-	%tmp3 = icmp sgt <2 x i32> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmgtz4xi32(<4 x i32> %A) {
-;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
-	%tmp3 = icmp sgt <4 x i32> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmgtz2xi64(<2 x i64> %A) {
-;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
-	%tmp3 = icmp sgt <2 x i64> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmlez8xi8(<8 x i8> %A) {
-;CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
-	%tmp3 = icmp sle <8 x i8> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmlez16xi8(<16 x i8> %A) {
-;CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
-	%tmp3 = icmp sle <16 x i8> %A, zeroinitializer;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmlez4xi16(<4 x i16> %A) {
-;CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
-	%tmp3 = icmp sle <4 x i16> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmlez8xi16(<8 x i16> %A) {
-;CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
-	%tmp3 = icmp sle <8 x i16> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmlez2xi32(<2 x i32> %A) {
-;CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
-	%tmp3 = icmp sle <2 x i32> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmlez4xi32(<4 x i32> %A) {
-;CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
-	%tmp3 = icmp sle <4 x i32> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmlez2xi64(<2 x i64> %A) {
-;CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
-	%tmp3 = icmp sle <2 x i64> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmltz8xi8(<8 x i8> %A) {
-;CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
-	%tmp3 = icmp slt <8 x i8> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmltz16xi8(<16 x i8> %A) {
-;CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
-	%tmp3 = icmp slt <16 x i8> %A, zeroinitializer;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmltz4xi16(<4 x i16> %A) {
-;CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
-	%tmp3 = icmp slt <4 x i16> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmltz8xi16(<8 x i16> %A) {
-;CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
-	%tmp3 = icmp slt <8 x i16> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmltz2xi32(<2 x i32> %A) {
-;CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
-	%tmp3 = icmp slt <2 x i32> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmltz4xi32(<4 x i32> %A) {
-;CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
-	%tmp3 = icmp slt <4 x i32> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
-;CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
-	%tmp3 = icmp slt <2 x i64> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
-;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
-;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
-;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
-;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
-;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
-;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
-;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
-;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
-;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
-;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
-;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
-;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
-;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
-;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].8b, #2
-;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
-	%tmp3 = icmp uge <8 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].16b, #2
-;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
-	%tmp3 = icmp uge <16 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].4h, #2
-;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
-	%tmp3 = icmp uge <4 x i16> %A, <i16 2, i16 2, i16 2, i16 2>
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].8h, #2
-;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
-	%tmp3 = icmp uge <8 x i16> %A, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2s, #2
-;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
-	%tmp3 = icmp uge <2 x i32> %A, <i32 2, i32 2>
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].4s, #2
-;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
-	%tmp3 = icmp uge <4 x i32> %A, <i32 2, i32 2, i32 2, i32 2>
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
-;CHECK: mov w[[TWO:[0-9]+]], #2
-;CHECK-NEXT: dup v[[ZERO:[0-9]+]].2d, x[[TWO]]
-;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
-	%tmp3 = icmp uge <2 x i64> %A, <i64 2, i64 2>
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-
-define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].8b, #1
-;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
-	%tmp3 = icmp ugt <8 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].16b, #1
-;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
-	%tmp3 = icmp ugt <16 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].4h, #1
-;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
-	%tmp3 = icmp ugt <4 x i16> %A, <i16 1, i16 1, i16 1, i16 1>
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].8h, #1
-;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
-	%tmp3 = icmp ugt <8 x i16> %A, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2s, #1
-;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
-	%tmp3 = icmp ugt <2 x i32> %A, <i32 1, i32 1>
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].4s, #1
-;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
-	%tmp3 = icmp ugt <4 x i32> %A, <i32 1, i32 1, i32 1, i32 1>
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
-;CHECK: mov w[[ONE:[0-9]+]], #1
-;CHECK-NEXT: dup v[[ZERO:[0-9]+]].2d, x[[ONE]]
-;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
-	%tmp3 = icmp ugt <2 x i64> %A, <i64 1, i64 1>
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmlsz8xi8(<8 x i8> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v[[ZERO]].8b, v0.8b
-	%tmp3 = icmp ule <8 x i8> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmlsz16xi8(<16 x i8> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v[[ZERO]].16b, v0.16b
-	%tmp3 = icmp ule <16 x i8> %A, zeroinitializer;
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmlsz4xi16(<4 x i16> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v[[ZERO]].4h, v0.4h
-	%tmp3 = icmp ule <4 x i16> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmlsz8xi16(<8 x i16> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v[[ZERO]].8h, v0.8h
-	%tmp3 = icmp ule <8 x i16> %A, zeroinitializer;
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmlsz2xi32(<2 x i32> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v[[ZERO]].2s, v0.2s
-	%tmp3 = icmp ule <2 x i32> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmlsz4xi32(<4 x i32> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v[[ZERO]].4s, v0.4s
-	%tmp3 = icmp ule <4 x i32> %A, zeroinitializer;
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmlsz2xi64(<2 x i64> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LS implemented as HS, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
-;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v[[ZERO]].2d, v0.2d
-	%tmp3 = icmp ule <2 x i64> %A, zeroinitializer;
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @cmloz8xi8(<8 x i8> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].8b, #2
-;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v[[ZERO]].8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ult <8 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
-	ret <8 x i8> %tmp4
-}
-
-define <16 x i8> @cmloz16xi8(<16 x i8> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].16b, #2
-;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v[[ZERO]].16b, v0.16b
-	%tmp3 = icmp ult <16 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
-   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
-	ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @cmloz4xi16(<4 x i16> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].4h, #2
-;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v[[ZERO]].4h, v0.4h
-	%tmp3 = icmp ult <4 x i16> %A, <i16 2, i16 2, i16 2, i16 2>
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
-	ret <4 x i16> %tmp4
-}
-
-define <8 x i16> @cmloz8xi16(<8 x i16> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].8h, #2
-;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v[[ZERO]].8h, v0.8h
-	%tmp3 = icmp ult <8 x i16> %A, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
-   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
-	ret <8 x i16> %tmp4
-}
-
-define <2 x i32> @cmloz2xi32(<2 x i32> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2s, #2
-;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v[[ZERO]].2s, v0.2s
-	%tmp3 = icmp ult <2 x i32> %A, <i32 2, i32 2>
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
-	ret <2 x i32> %tmp4
-}
-
-define <4 x i32> @cmloz4xi32(<4 x i32> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].4s, #2
-;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v[[ZERO]].4s, v0.4s
-	%tmp3 = icmp ult <4 x i32> %A, <i32 2, i32 2, i32 2, i32 2>
-   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
-	ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
-; Using registers other than v0, v1 are possible, but would be odd.
-; LO implemented as HI, so check reversed operands.
-;CHECK: mov w[[TWO:[0-9]+]], #2
-;CHECK-NEXT: dup v[[ZERO:[0-9]+]].2d, x[[TWO]]
-;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v[[ZERO]].2d, v0.2d
-	%tmp3 = icmp ult <2 x i64> %A, <i64 2, i64 2>
-   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
-	ret <2 x i64> %tmp4
-}
-
-define <1 x i64> @cmeqz_v1i64(<1 x i64> %A) {
-; CHECK-LABEL: cmeqz_v1i64:
-; CHECK: cmeq d0, d0, #0
-  %tst = icmp eq <1 x i64> %A, <i64 0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
-
-define <1 x i64> @cmgez_v1i64(<1 x i64> %A) {
-; CHECK-LABEL: cmgez_v1i64:
-; CHECK: cmge d0, d0, #0
-  %tst = icmp sge <1 x i64> %A, <i64 0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
-
-define <1 x i64> @cmgtz_v1i64(<1 x i64> %A) {
-; CHECK-LABEL: cmgtz_v1i64:
-; CHECK: cmgt d0, d0, #0
-  %tst = icmp sgt <1 x i64> %A, <i64 0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
-
-define <1 x i64> @cmlez_v1i64(<1 x i64> %A) {
-; CHECK-LABEL: cmlez_v1i64:
-; CHECK: cmle d0, d0, #0
-  %tst = icmp sle <1 x i64> %A, <i64 0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
-
-define <1 x i64> @cmltz_v1i64(<1 x i64> %A) {
-; CHECK-LABEL: cmltz_v1i64:
-; CHECK: cmlt d0, d0, #0
-  %tst = icmp slt <1 x i64> %A, <i64 0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
-
-define <1 x i64> @fcmeqz_v1f64(<1 x double> %A) {
-; CHECK-LABEL: fcmeqz_v1f64:
-; CHECK: fcmeq d0, d0, #0
-  %tst = fcmp oeq <1 x double> %A, <double 0.0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
-
-define <1 x i64> @fcmgez_v1f64(<1 x double> %A) {
-; CHECK-LABEL: fcmgez_v1f64:
-; CHECK: fcmge d0, d0, #0
-  %tst = fcmp oge <1 x double> %A, <double 0.0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
-
-define <1 x i64> @fcmgtz_v1f64(<1 x double> %A) {
-; CHECK-LABEL: fcmgtz_v1f64:
-; CHECK: fcmgt d0, d0, #0
-  %tst = fcmp ogt <1 x double> %A, <double 0.0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
-
-define <1 x i64> @fcmlez_v1f64(<1 x double> %A) {
-; CHECK-LABEL: fcmlez_v1f64:
-; CHECK: fcmle d0, d0, #0
-  %tst = fcmp ole <1 x double> %A, <double 0.0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
-
-define <1 x i64> @fcmltz_v1f64(<1 x double> %A) {
-; CHECK-LABEL: fcmltz_v1f64:
-; CHECK: fcmlt d0, d0, #0
-  %tst = fcmp olt <1 x double> %A, <double 0.0>
-  %mask = sext <1 x i1> %tst to <1 x i64>
-  ret <1 x i64> %mask
-}
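The pattern repeated throughout this file is an icmp followed by a sext, which
together select to a single NEON compare producing all-ones or all-zeros
lanes. The "reversed operands" comments reflect that the lt/le/lo/ls
comparisons have no register-register encodings of their own; they are emitted
as gt/ge/hi/hs with the operands swapped (only compare-against-zero forms such
as cmlt ..., #0 exist directly, as the *z functions check). Condensed from
cmlt4xi32 above:

    ; RUN: llc -mtriple=arm64-none-linux-gnu < %s | FileCheck %s

    define <4 x i32> @lt4xi32(<4 x i32> %a, <4 x i32> %b) {
    ; CHECK-LABEL: lt4xi32:
    ; CHECK: cmgt v0.4s, v1.4s, v0.4s
      %c = icmp slt <4 x i32> %a, %b
      %m = sext <4 x i1> %c to <4 x i32>
      ret <4 x i32> %m
    }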

diff --git a/llvm/test/CodeGen/AArch64/basic-pic.ll b/llvm/test/CodeGen/AArch64/basic-pic.ll
index 8765a6d1267c..e5faf52186d8 100644
--- a/llvm/test/CodeGen/AArch64/basic-pic.ll
+++ b/llvm/test/CodeGen/AArch64/basic-pic.ll
@@ -7,7 +7,7 @@ define i32 @get_globalvar() {
 
   %val = load i32, i32* @var
 ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
-; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], {{#?}}:got_lo12:var]
+; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], :got_lo12:var]
 ; CHECK: ldr w0, [x[[GOTLOC]]]
 
   ret i32 %val
@@ -18,7 +18,7 @@ define i32* @get_globalvaraddr() {
 
   %val = load i32, i32* @var
 ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
-; CHECK: ldr x0, [x[[GOTHI]], {{#?}}:got_lo12:var]
+; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:var]
 
   ret i32* @var
 }
@@ -30,7 +30,7 @@ define i32 @get_hiddenvar() {
 
   %val = load i32, i32* @hiddenvar
 ; CHECK: adrp x[[HI:[0-9]+]], hiddenvar
-; CHECK: ldr w0, [x[[HI]], {{#?}}:lo12:hiddenvar]
+; CHECK: ldr w0, [x[[HI]], :lo12:hiddenvar]
 
   ret i32 %val
 }
@@ -40,7 +40,7 @@ define i32* @get_hiddenvaraddr() {
 
   %val = load i32, i32* @hiddenvar
 ; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
-; CHECK: add x0, [[HI]], {{#?}}:lo12:hiddenvar
+; CHECK: add x0, [[HI]], :lo12:hiddenvar
 
   ret i32* @hiddenvar
 }
@@ -50,5 +50,5 @@ define void()* @get_func() {
 
   ret void()* bitcast(void()*()* @get_func to void()*)
 ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:get_func
-; CHECK: ldr x0, [x[[GOTHI]], {{#?}}:got_lo12:get_func]
+; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:get_func]
 }
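The only changes to the surviving basic-pic.ll are these tightened CHECK
lines. {{#?}} is FileCheck regex syntax for an optional literal '#', which the
old test tolerated in front of each relocation specifier; the merged test pins
down the exact spelling instead, presumably because only one assembly syntax
is in play for the remaining RUN lines:

    ; Before: matches both ":got_lo12:var" and "#:got_lo12:var"
    ; CHECK: ldr x0, [x[[GOTHI]], {{#?}}:got_lo12:var]
    ; After: matches only the unprefixed form
    ; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:var]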

diff --git a/llvm/test/CodeGen/AArch64/code-model-large-abs.ll b/llvm/test/CodeGen/AArch64/code-model-large-abs.ll
index 82169acc3e56..e7c2db1ac486 100644
--- a/llvm/test/CodeGen/AArch64/code-model-large-abs.ll
+++ b/llvm/test/CodeGen/AArch64/code-model-large-abs.ll
@@ -59,3 +59,14 @@ define i64 @global_i64() {
 ; CHECK: movk x[[ADDR_REG]], #:abs_g3:var64
 ; CHECK: ldr x0, [x[[ADDR_REG]]]
 }
+
+define <2 x i64> @constpool() {
+; CHECK-LABEL: constpool:
+  ret <2 x i64> <i64 123456789, i64 987654321100>
+
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:[[CPADDR:.LCPI[0-9]+_[0-9]+]]
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:[[CPADDR]]
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:[[CPADDR]]
+; CHECK: movk x[[ADDR_REG]], #:abs_g3:[[CPADDR]]
+; CHECK: ldr q0, [x[[ADDR_REG]]]
+}
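The constpool function re-added here comes from the deleted
arm64-code-model-large-abs.ll above. What it pins down is that under
-code-model=large even a local constant-pool label (.LCPI*) is materialised
with the four-instruction absolute sequence, since the large model does not
assume code and constants sit within adrp's +/-4 GiB pc-relative range. For
contrast, a sketch of the default small-model lowering of the same constant
(the expected output here is an assumption, not taken from this commit):

    ; RUN: llc -mtriple=arm64-none-linux-gnu < %s | FileCheck %s

    define <2 x i64> @constpool_small() {
    ; CHECK: adrp x[[HI:[0-9]+]], .LCPI0_0
    ; CHECK: ldr q0, [x[[HI]], :lo12:.LCPI0_0]
      ret <2 x i64> <i64 123456789, i64 987654321100>
    }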

diff --git a/llvm/test/CodeGen/AArch64/illegal-float-ops.ll b/llvm/test/CodeGen/AArch64/illegal-float-ops.ll
index 8bee4437f6b9..ffa813e09c73 100644
--- a/llvm/test/CodeGen/AArch64/illegal-float-ops.ll
+++ b/llvm/test/CodeGen/AArch64/illegal-float-ops.ll
@@ -247,6 +247,54 @@ define void @test_fmuladd(fp128 %fp128) {
   ret void
 }
 
+define i32 @test_fptosi32(fp128 %a) {
+; CHECK-LABEL: test_fptosi32:
+; CHECK: bl __fixtfsi
+  %conv.i = fptosi fp128 %a to i32
+  %b = add nsw i32 %conv.i, 48
+  ret i32 %b
+}
+
+define i64 @test_fptosi64(fp128 %a) {
+; CHECK-LABEL: test_fptosi64:
+; CHECK: bl __fixtfdi
+  %conv.i = fptosi fp128 %a to i64
+  %b = add nsw i64 %conv.i, 48
+  ret i64 %b
+}
+
+define i128 @test_fptosi128(fp128 %a) {
+; CHECK-LABEL: test_fptosi128:
+; CHECK: bl __fixtfti
+  %conv.i = fptosi fp128 %a to i128
+  %b = add nsw i128 %conv.i, 48
+  ret i128 %b
+}
+
+define i32 @test_fptoui32(fp128 %a) {
+; CHECK-LABEL: test_fptoui32:
+; CHECK: bl __fixunstfsi
+  %conv.i = fptoui fp128 %a to i32
+  %b = add nsw i32 %conv.i, 48
+  ret i32 %b
+}
+
+define i64 @test_fptoui64(fp128 %a) {
+; CHECK-LABEL: test_fptoui64:
+; CHECK: bl __fixunstfdi
+  %conv.i = fptoui fp128 %a to i64
+  %b = add nsw i64 %conv.i, 48
+  ret i64 %b
+}
+
+define i128 @test_fptoui128(fp128 %a) {
+; CHECK-LABEL: test_fptoui128:
+; CHECK: bl __fixunstfti
+  %conv.i = fptoui fp128 %a to i128
+  %b = add nsw i128 %conv.i, 48
+  ret i128 %b
+}
+
 define void @test_exp_finite(double %double) #0 {
   %expdouble = call double @llvm.exp.f64(double %double)
   store double %expdouble, double* @vardouble
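The conversion tests moved above all land on compiler-rt helpers whose names
decompose mechanically, which makes the expected symbol for any fp128
conversion easy to predict:

    ; __fix<src><dst>     signed float-to-int,  e.g. __fixtfdi:    fp128 -> i64
    ; __fixuns<src><dst>  unsigned variant,     e.g. __fixunstfti: fp128 -> i128
    ; where tf denotes fp128 and si/di/ti denote i32/i64/i128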


        

