[clang] 5ae949a - [Clang][ARM] Reenable arm_acle.c test.

David Green via cfe-commits cfe-commits at lists.llvm.org
Fri Mar 12 11:21:33 PST 2021


Author: David Green
Date: 2021-03-12T19:21:21Z
New Revision: 5ae949a9276542b46f41374fbe7aee01e480d9d6

URL: https://github.com/llvm/llvm-project/commit/5ae949a9276542b46f41374fbe7aee01e480d9d6
DIFF: https://github.com/llvm/llvm-project/commit/5ae949a9276542b46f41374fbe7aee01e480d9d6.diff

LOG: [Clang][ARM] Reenable arm_acle.c test.

This test was apparently disabled in 6fcd4e080f09c9765d6, without any
sign of how it was going to be reenabled. This patch rewrites the test
to use update_cc_test_checks, with midend optimizations other than
mem2reg disabled.

Added: 
    

Modified: 
    clang/test/CodeGen/arm_acle.c

Removed: 
    


################################################################################
diff  --git a/clang/test/CodeGen/arm_acle.c b/clang/test/CodeGen/arm_acle.c
index 9f0ad22bda4f..7e85c767c301 100644
--- a/clang/test/CodeGen/arm_acle.c
+++ b/clang/test/CodeGen/arm_acle.c
@@ -1,125 +1,229 @@
-// RUN: %clang_cc1 -ffreestanding -triple armv8-eabi -target-cpu cortex-a57 -O2  -fno-experimental-new-pass-manager -S -emit-llvm -o - %s | FileCheck %s -check-prefix=ARM -check-prefix=AArch32 -check-prefix=ARM-LEGACY -check-prefix=AArch32-LEGACY
-// RUN: %clang_cc1 -ffreestanding -triple armv8-eabi -target-cpu cortex-a57 -O2  -fexperimental-new-pass-manager -S -emit-llvm -o - %s | FileCheck %s -check-prefix=ARM -check-prefix=AArch32 -check-prefix=ARM-NEWPM -check-prefix=AArch32-NEWPM
-// RUN: %clang_cc1 -ffreestanding -triple aarch64-eabi -target-cpu cortex-a57 -target-feature +neon -target-feature +crc -target-feature +crypto -O2 -fno-experimental-new-pass-manager -S -emit-llvm -o - %s | FileCheck %s -check-prefix=ARM -check-prefix=AArch64 -check-prefix=ARM-LEGACY -check-prefix=AArch64-LEGACY
-// RUN: %clang_cc1 -ffreestanding -triple aarch64-eabi -target-cpu cortex-a57 -target-feature +neon -target-feature +crc -target-feature +crypto -O2 -fexperimental-new-pass-manager -S -emit-llvm -o - %s | FileCheck %s -check-prefix=ARM -check-prefix=AArch64 -check-prefix=ARM-NEWPM -check-prefix=AArch64-NEWPM
-// RUN: %clang_cc1 -ffreestanding -triple aarch64-eabi -target-cpu cortex-a57 -target-feature +v8.3a -O2 -fexperimental-new-pass-manager -S -emit-llvm -o - %s | FileCheck %s -check-prefix=AArch64-v8_3
-// RUN: %clang_cc1 -ffreestanding -triple aarch64-eabi -target-cpu cortex-a57 -target-feature +v8.4a -O2 -fexperimental-new-pass-manager -S -emit-llvm -o - %s | FileCheck %s -check-prefix=AArch64-v8_3
-// RUN: %clang_cc1 -ffreestanding -triple aarch64-eabi -target-cpu cortex-a57 -target-feature +v8.5a -O2 -fexperimental-new-pass-manager -S -emit-llvm -o - %s | FileCheck %s -check-prefix=AArch64-v8_3
-
-// REQUIRES: rewrite
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -ffreestanding -triple armv8-eabi -target-cpu cortex-a57 -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s -check-prefixes=ARM,AArch32
+// RUN: %clang_cc1 -ffreestanding -triple aarch64-eabi -target-cpu cortex-a57 -target-feature +neon -target-feature +crc -target-feature +crypto -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s -check-prefixes=ARM,AArch64
+// RUN: %clang_cc1 -ffreestanding -triple aarch64-eabi -target-cpu cortex-a57 -target-feature +v8.3a -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s -check-prefixes=ARM,AArch64,AArch6483
+// RUN: %clang_cc1 -ffreestanding -triple aarch64-eabi -target-cpu cortex-a57 -target-feature +v8.5a -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s -check-prefixes=ARM,AArch64,AArch6483
 
 #include <arm_acle.h>
 
 /* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
 /* 8.3 Memory Barriers */
-// ARM-LABEL: test_dmb
-// AArch32: call void @llvm.arm.dmb(i32 1)
-// AArch64: call void @llvm.aarch64.dmb(i32 1)
+
+// AArch32-LABEL: @test_dmb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.dmb(i32 1)
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_dmb(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.aarch64.dmb(i32 1)
+// AArch64-NEXT:    ret void
+//
 void test_dmb(void) {
   __dmb(1);
 }
 
-// ARM-LABEL: test_dsb
-// AArch32: call void @llvm.arm.dsb(i32 2)
-// AArch64: call void @llvm.aarch64.dsb(i32 2)
+// AArch32-LABEL: @test_dsb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.dsb(i32 2)
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_dsb(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.aarch64.dsb(i32 2)
+// AArch64-NEXT:    ret void
+//
 void test_dsb(void) {
   __dsb(2);
 }
 
-// ARM-LABEL: test_isb
-// AArch32: call void @llvm.arm.isb(i32 3)
-// AArch64: call void @llvm.aarch64.isb(i32 3)
+// AArch32-LABEL: @test_isb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.isb(i32 3)
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_isb(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.aarch64.isb(i32 3)
+// AArch64-NEXT:    ret void
+//
 void test_isb(void) {
   __isb(3);
 }
 
 /* 8.4 Hints */
-// ARM-LABEL: test_yield
-// AArch32: call void @llvm.arm.hint(i32 1)
-// AArch64: call void @llvm.aarch64.hint(i32 1)
+// AArch32-LABEL: @test_yield(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.hint(i32 1) [[ATTR1:#.*]]
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_yield(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.aarch64.hint(i32 1) [[ATTR3:#.*]]
+// AArch64-NEXT:    ret void
+//
 void test_yield(void) {
   __yield();
 }
 
-// ARM-LABEL: test_wfe
-// AArch32: call void @llvm.arm.hint(i32 2)
-// AArch64: call void @llvm.aarch64.hint(i32 2)
+// AArch32-LABEL: @test_wfe(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.hint(i32 2) [[ATTR1]]
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_wfe(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.aarch64.hint(i32 2) [[ATTR3]]
+// AArch64-NEXT:    ret void
+//
 void test_wfe(void) {
   __wfe();
 }
 
-// ARM-LABEL: test_wfi
-// AArch32: call void @llvm.arm.hint(i32 3)
-// AArch64: call void @llvm.aarch64.hint(i32 3)
+// AArch32-LABEL: @test_wfi(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.hint(i32 3) [[ATTR1]]
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_wfi(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.aarch64.hint(i32 3) [[ATTR3]]
+// AArch64-NEXT:    ret void
+//
 void test_wfi(void) {
   __wfi();
 }
 
-// ARM-LABEL: test_sev
-// AArch32: call void @llvm.arm.hint(i32 4)
-// AArch64: call void @llvm.aarch64.hint(i32 4)
+// AArch32-LABEL: @test_sev(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.hint(i32 4) [[ATTR1]]
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_sev(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.aarch64.hint(i32 4) [[ATTR3]]
+// AArch64-NEXT:    ret void
+//
 void test_sev(void) {
   __sev();
 }
 
-// ARM-LABEL: test_sevl
-// AArch32: call void @llvm.arm.hint(i32 5)
-// AArch64: call void @llvm.aarch64.hint(i32 5)
+// AArch32-LABEL: @test_sevl(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.hint(i32 5) [[ATTR1]]
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_sevl(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.aarch64.hint(i32 5) [[ATTR3]]
+// AArch64-NEXT:    ret void
+//
 void test_sevl(void) {
   __sevl();
 }
 
 #if __ARM_32BIT_STATE
-// AArch32-LABEL: test_dbg
-// AArch32: call void @llvm.arm.dbg(i32 0)
+// AArch32-LABEL: @test_dbg(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.dbg(i32 0)
+// AArch32-NEXT:    ret void
+//
 void test_dbg(void) {
   __dbg(0);
 }
 #endif
 
 /* 8.5 Swap */
-// ARM-LABEL: test_swp
-// AArch32: call i32 @llvm.arm.ldrex
-// AArch32: call i32 @llvm.arm.strex
-// AArch64: call i64 @llvm.aarch64.ldxr
-// AArch64: call i32 @llvm.aarch64.stxr
+// AArch32-LABEL: @test_swp(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[P:%.*]] to i32*
+// AArch32-NEXT:    br label [[DO_BODY_I:%.*]]
+// AArch32:       do.body.i:
+// AArch32-NEXT:    [[LDREX_I:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* [[TMP0]]) [[ATTR1]]
+// AArch32-NEXT:    [[STREX_I:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[X:%.*]], i32* [[TMP0]]) [[ATTR1]]
+// AArch32-NEXT:    [[TOBOOL_I:%.*]] = icmp ne i32 [[STREX_I]], 0
+// AArch32-NEXT:    br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], [[LOOP3:!llvm.loop !.*]]
+// AArch32:       __swp.exit:
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_swp(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[P:%.*]] to i32*
+// AArch64-NEXT:    br label [[DO_BODY_I:%.*]]
+// AArch64:       do.body.i:
+// AArch64-NEXT:    [[LDXR_I:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT:    [[TMP1:%.*]] = trunc i64 [[LDXR_I]] to i32
+// AArch64-NEXT:    [[TMP2:%.*]] = zext i32 [[X:%.*]] to i64
+// AArch64-NEXT:    [[STXR_I:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP2]], i32* [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT:    [[TOBOOL_I:%.*]] = icmp ne i32 [[STXR_I]], 0
+// AArch64-NEXT:    br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], [[LOOP6:!llvm.loop !.*]]
+// AArch64:       __swp.exit:
+// AArch64-NEXT:    ret void
+//
 void test_swp(uint32_t x, volatile void *p) {
   __swp(x, p);
 }
 
 /* 8.6 Memory prefetch intrinsics */
 /* 8.6.1 Data prefetch */
-// ARM-LABEL: test_pld
-// ARM: call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 3, i32 1)
+// ARM-LABEL: @test_pld(
+// ARM-NEXT:  entry:
+// ARM-NEXT:    call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 3, i32 1)
+// ARM-NEXT:    ret void
+//
 void test_pld() {
   __pld(0);
 }
 
-// ARM-LABEL: test_pldx
-// AArch32: call void @llvm.prefetch.p0i8(i8* null, i32 1, i32 3, i32 1)
-// AArch64: call void @llvm.prefetch.p0i8(i8* null, i32 1, i32 1, i32 1)
+// AArch32-LABEL: @test_pldx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.prefetch.p0i8(i8* null, i32 1, i32 3, i32 1)
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_pldx(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.prefetch.p0i8(i8* null, i32 1, i32 1, i32 1)
+// AArch64-NEXT:    ret void
+//
 void test_pldx() {
   __pldx(1, 2, 0, 0);
 }
 
 /* 8.6.2 Instruction prefetch */
-// ARM-LABEL: test_pli
-// ARM: call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 3, i32 0)
+// ARM-LABEL: @test_pli(
+// ARM-NEXT:  entry:
+// ARM-NEXT:    call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 3, i32 0)
+// ARM-NEXT:    ret void
+//
 void test_pli() {
   __pli(0);
 }
 
-// ARM-LABEL: test_plix
-// AArch32: call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 3, i32 0)
-// AArch64: call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 1, i32 0)
+// AArch32-LABEL: @test_plix(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 3, i32 0)
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_plix(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 1, i32 0)
+// AArch64-NEXT:    ret void
+//
 void test_plix() {
   __plix(2, 0, 0);
 }
 
 /* 8.7 NOP */
-// ARM-LABEL: test_nop
-// AArch32: call void @llvm.arm.hint(i32 0)
-// AArch64: call void @llvm.aarch64.hint(i32 0)
+// AArch32-LABEL: @test_nop(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.arm.hint(i32 0) [[ATTR1]]
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_nop(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.aarch64.hint(i32 0) [[ATTR3]]
+// AArch64-NEXT:    ret void
+//
 void test_nop(void) {
   __nop();
 }
@@ -127,174 +231,465 @@ void test_nop(void) {
 /* 9 DATA-PROCESSING INTRINSICS */
 
 /* 9.2 Miscellaneous data-processing intrinsics */
-// ARM-LABEL: test_ror
-// ARM-LEGACY: lshr
-// ARM-LEGACY: sub
-// ARM-LEGACY: shl
-// ARM-LEGACY: or
-// ARM-NEWPM: call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
+// ARM-LABEL: @test_ror(
+// ARM-NEXT:  entry:
+// ARM-NEXT:    [[REM_I:%.*]] = urem i32 [[Y:%.*]], 32
+// ARM-NEXT:    [[CMP_I:%.*]] = icmp eq i32 [[REM_I]], 0
+// ARM-NEXT:    br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[IF_END_I:%.*]]
+// ARM:       if.then.i:
+// ARM-NEXT:    br label [[__ROR_EXIT:%.*]]
+// ARM:       if.end.i:
+// ARM-NEXT:    [[SHR_I:%.*]] = lshr i32 [[X:%.*]], [[REM_I]]
+// ARM-NEXT:    [[SUB_I:%.*]] = sub i32 32, [[REM_I]]
+// ARM-NEXT:    [[SHL_I:%.*]] = shl i32 [[X]], [[SUB_I]]
+// ARM-NEXT:    [[OR_I:%.*]] = or i32 [[SHR_I]], [[SHL_I]]
+// ARM-NEXT:    br label [[__ROR_EXIT]]
+// ARM:       __ror.exit:
+// ARM-NEXT:    [[RETVAL_I_0:%.*]] = phi i32 [ [[X]], [[IF_THEN_I]] ], [ [[OR_I]], [[IF_END_I]] ]
+// ARM-NEXT:    ret i32 [[RETVAL_I_0]]
+//
 uint32_t test_ror(uint32_t x, uint32_t y) {
   return __ror(x, y);
 }
 
-// ARM-LABEL: test_rorl
-// ARM-LEGACY: lshr
-// ARM-LEGACY: sub
-// ARM-LEGACY: shl
-// ARM-LEGACY: or
-// AArch32-NEWPM: call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
+// AArch32-LABEL: @test_rorl(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[REM_I_I:%.*]] = urem i32 [[Y:%.*]], 32
+// AArch32-NEXT:    [[CMP_I_I:%.*]] = icmp eq i32 [[REM_I_I]], 0
+// AArch32-NEXT:    br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[IF_END_I_I:%.*]]
+// AArch32:       if.then.i.i:
+// AArch32-NEXT:    br label [[__RORL_EXIT:%.*]]
+// AArch32:       if.end.i.i:
+// AArch32-NEXT:    [[SHR_I_I:%.*]] = lshr i32 [[X:%.*]], [[REM_I_I]]
+// AArch32-NEXT:    [[SUB_I_I:%.*]] = sub i32 32, [[REM_I_I]]
+// AArch32-NEXT:    [[SHL_I_I:%.*]] = shl i32 [[X]], [[SUB_I_I]]
+// AArch32-NEXT:    [[OR_I_I:%.*]] = or i32 [[SHR_I_I]], [[SHL_I_I]]
+// AArch32-NEXT:    br label [[__RORL_EXIT]]
+// AArch32:       __rorl.exit:
+// AArch32-NEXT:    [[RETVAL_I_I_0:%.*]] = phi i32 [ [[X]], [[IF_THEN_I_I]] ], [ [[OR_I_I]], [[IF_END_I_I]] ]
+// AArch32-NEXT:    ret i32 [[RETVAL_I_I_0]]
+//
+// AArch64-LABEL: @test_rorl(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[REM_I_I:%.*]] = urem i32 [[Y:%.*]], 64
+// AArch64-NEXT:    [[CMP_I_I:%.*]] = icmp eq i32 [[REM_I_I]], 0
+// AArch64-NEXT:    br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[IF_END_I_I:%.*]]
+// AArch64:       if.then.i.i:
+// AArch64-NEXT:    br label [[__RORL_EXIT:%.*]]
+// AArch64:       if.end.i.i:
+// AArch64-NEXT:    [[SH_PROM_I_I:%.*]] = zext i32 [[REM_I_I]] to i64
+// AArch64-NEXT:    [[SHR_I_I:%.*]] = lshr i64 [[X:%.*]], [[SH_PROM_I_I]]
+// AArch64-NEXT:    [[SUB_I_I:%.*]] = sub i32 64, [[REM_I_I]]
+// AArch64-NEXT:    [[SH_PROM1_I_I:%.*]] = zext i32 [[SUB_I_I]] to i64
+// AArch64-NEXT:    [[SHL_I_I:%.*]] = shl i64 [[X]], [[SH_PROM1_I_I]]
+// AArch64-NEXT:    [[OR_I_I:%.*]] = or i64 [[SHR_I_I]], [[SHL_I_I]]
+// AArch64-NEXT:    br label [[__RORL_EXIT]]
+// AArch64:       __rorl.exit:
+// AArch64-NEXT:    [[RETVAL_I_I_0:%.*]] = phi i64 [ [[X]], [[IF_THEN_I_I]] ], [ [[OR_I_I]], [[IF_END_I_I]] ]
+// AArch64-NEXT:    ret i64 [[RETVAL_I_I_0]]
+//
 unsigned long test_rorl(unsigned long x, uint32_t y) {
   return __rorl(x, y);
 }
 
-// ARM-LABEL: test_rorll
-// ARM: lshr
-// ARM: sub
-// ARM: shl
-// ARM: or
+// ARM-LABEL: @test_rorll(
+// ARM-NEXT:  entry:
+// ARM-NEXT:    [[REM_I:%.*]] = urem i32 [[Y:%.*]], 64
+// ARM-NEXT:    [[CMP_I:%.*]] = icmp eq i32 [[REM_I]], 0
+// ARM-NEXT:    br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[IF_END_I:%.*]]
+// ARM:       if.then.i:
+// ARM-NEXT:    br label [[__RORLL_EXIT:%.*]]
+// ARM:       if.end.i:
+// ARM-NEXT:    [[SH_PROM_I:%.*]] = zext i32 [[REM_I]] to i64
+// ARM-NEXT:    [[SHR_I:%.*]] = lshr i64 [[X:%.*]], [[SH_PROM_I]]
+// ARM-NEXT:    [[SUB_I:%.*]] = sub i32 64, [[REM_I]]
+// ARM-NEXT:    [[SH_PROM1_I:%.*]] = zext i32 [[SUB_I]] to i64
+// ARM-NEXT:    [[SHL_I:%.*]] = shl i64 [[X]], [[SH_PROM1_I]]
+// ARM-NEXT:    [[OR_I:%.*]] = or i64 [[SHR_I]], [[SHL_I]]
+// ARM-NEXT:    br label [[__RORLL_EXIT]]
+// ARM:       __rorll.exit:
+// ARM-NEXT:    [[RETVAL_I_0:%.*]] = phi i64 [ [[X]], [[IF_THEN_I]] ], [ [[OR_I]], [[IF_END_I]] ]
+// ARM-NEXT:    ret i64 [[RETVAL_I_0]]
+//
 uint64_t test_rorll(uint64_t x, uint32_t y) {
   return __rorll(x, y);
 }
 
-// ARM-LABEL: test_clz
-// ARM: call i32 @llvm.ctlz.i32(i32 %t, i1 false)
+// AArch32-LABEL: @test_clz(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[T:%.*]], i1 false) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
+// AArch64-LABEL: @test_clz(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[T:%.*]], i1 false) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP0]]
+//
 uint32_t test_clz(uint32_t t) {
   return __clz(t);
 }
 
-// ARM-LABEL: test_clzl
-// AArch32: call i32 @llvm.ctlz.i32(i32 %t, i1 false)
-// AArch64: call i64 @llvm.ctlz.i64(i64 %t, i1 false)
+// AArch32-LABEL: @test_clzl(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[T:%.*]], i1 false) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
+// AArch64-LABEL: @test_clzl(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[T:%.*]], i1 false) [[ATTR3]]
+// AArch64-NEXT:    [[CAST_I:%.*]] = trunc i64 [[TMP0]] to i32
+// AArch64-NEXT:    [[CONV_I:%.*]] = sext i32 [[CAST_I]] to i64
+// AArch64-NEXT:    ret i64 [[CONV_I]]
+//
 long test_clzl(long t) {
   return __clzl(t);
 }
 
-// ARM-LABEL: test_clzll
-// ARM: call i64 @llvm.ctlz.i64(i64 %t, i1 false)
+// AArch32-LABEL: @test_clzll(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[T:%.*]], i1 false) [[ATTR1]]
+// AArch32-NEXT:    [[CAST_I:%.*]] = trunc i64 [[TMP0]] to i32
+// AArch32-NEXT:    [[CONV_I:%.*]] = sext i32 [[CAST_I]] to i64
+// AArch32-NEXT:    ret i64 [[CONV_I]]
+//
+// AArch64-LABEL: @test_clzll(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[T:%.*]], i1 false) [[ATTR3]]
+// AArch64-NEXT:    [[CAST_I:%.*]] = trunc i64 [[TMP0]] to i32
+// AArch64-NEXT:    [[CONV_I:%.*]] = sext i32 [[CAST_I]] to i64
+// AArch64-NEXT:    ret i64 [[CONV_I]]
+//
 uint64_t test_clzll(uint64_t t) {
   return __clzll(t);
 }
 
-// ARM-LABEL: test_cls
-// ARM: call i32 @llvm.arm.cls(i32 %t)
+// AArch32-LABEL: @test_cls(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[CLS_I:%.*]] = call i32 @llvm.arm.cls(i32 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[CLS_I]]
+//
+// AArch64-LABEL: @test_cls(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[CLS_I:%.*]] = call i32 @llvm.aarch64.cls(i32 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[CLS_I]]
+//
 unsigned test_cls(uint32_t t) {
   return __cls(t);
 }
 
-// ARM-LABEL: test_clsl
-// AArch32: call i32 @llvm.arm.cls(i32 %t)
-// AArch64: call i32 @llvm.arm.cls64(i64 %t)
+// AArch32-LABEL: @test_clsl(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[CLS_I:%.*]] = call i32 @llvm.arm.cls(i32 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[CLS_I]]
+//
+// AArch64-LABEL: @test_clsl(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[CLS_I:%.*]] = call i32 @llvm.aarch64.cls64(i64 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[CLS_I]]
+//
 unsigned test_clsl(unsigned long t) {
   return __clsl(t);
 }
-// ARM-LABEL: test_clsll
-// ARM: call i32 @llvm.arm.cls64(i64 %t)
+
+// AArch32-LABEL: @test_clsll(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[CLS_I:%.*]] = call i32 @llvm.arm.cls64(i64 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[CLS_I]]
+//
+// AArch64-LABEL: @test_clsll(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[CLS_I:%.*]] = call i32 @llvm.aarch64.cls64(i64 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[CLS_I]]
+//
 unsigned test_clsll(uint64_t t) {
   return __clsll(t);
 }
 
-// ARM-LABEL: test_rev
-// ARM: call i32 @llvm.bswap.i32(i32 %t)
+// AArch32-LABEL: @test_rev(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
+// AArch64-LABEL: @test_rev(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP0]]
+//
 uint32_t test_rev(uint32_t t) {
   return __rev(t);
 }
 
-// ARM-LABEL: test_revl
-// AArch32: call i32 @llvm.bswap.i32(i32 %t)
-// AArch64: call i64 @llvm.bswap.i64(i64 %t)
+// AArch32-LABEL: @test_revl(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
+// AArch64-LABEL: @test_revl(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.bswap.i64(i64 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i64 [[TMP0]]
+//
 long test_revl(long t) {
   return __revl(t);
 }
 
-// ARM-LABEL: test_revll
-// ARM: call i64 @llvm.bswap.i64(i64 %t)
+// AArch32-LABEL: @test_revll(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i64 @llvm.bswap.i64(i64 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i64 [[TMP0]]
+//
+// AArch64-LABEL: @test_revll(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.bswap.i64(i64 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i64 [[TMP0]]
+//
 uint64_t test_revll(uint64_t t) {
   return __revll(t);
 }
 
-// ARM-LABEL: test_rev16
-// ARM: llvm.bswap
-// ARM-LEGACY: lshr {{.*}}, 16
-// ARM-LEGACY: shl {{.*}}, 16
-// ARM-LEGACY: or
-// ARM-NEWPM: call i32 @llvm.fshl.i32(i32 %0, i32 %0, i32 16)
+// AArch32-LABEL: @test_rev16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    [[REM_I_I:%.*]] = urem i32 16, 32
+// AArch32-NEXT:    [[CMP_I_I:%.*]] = icmp eq i32 [[REM_I_I]], 0
+// AArch32-NEXT:    br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[IF_END_I_I:%.*]]
+// AArch32:       if.then.i.i:
+// AArch32-NEXT:    br label [[__REV16_EXIT:%.*]]
+// AArch32:       if.end.i.i:
+// AArch32-NEXT:    [[SHR_I_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I]]
+// AArch32-NEXT:    [[SUB_I_I:%.*]] = sub i32 32, [[REM_I_I]]
+// AArch32-NEXT:    [[SHL_I_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I]]
+// AArch32-NEXT:    [[OR_I_I:%.*]] = or i32 [[SHR_I_I]], [[SHL_I_I]]
+// AArch32-NEXT:    br label [[__REV16_EXIT]]
+// AArch32:       __rev16.exit:
+// AArch32-NEXT:    [[RETVAL_I_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I]] ], [ [[OR_I_I]], [[IF_END_I_I]] ]
+// AArch32-NEXT:    ret i32 [[RETVAL_I_I_0]]
+//
+// AArch64-LABEL: @test_rev16(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    [[REM_I_I:%.*]] = urem i32 16, 32
+// AArch64-NEXT:    [[CMP_I_I:%.*]] = icmp eq i32 [[REM_I_I]], 0
+// AArch64-NEXT:    br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[IF_END_I_I:%.*]]
+// AArch64:       if.then.i.i:
+// AArch64-NEXT:    br label [[__REV16_EXIT:%.*]]
+// AArch64:       if.end.i.i:
+// AArch64-NEXT:    [[SHR_I_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I]]
+// AArch64-NEXT:    [[SUB_I_I:%.*]] = sub i32 32, [[REM_I_I]]
+// AArch64-NEXT:    [[SHL_I_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I]]
+// AArch64-NEXT:    [[OR_I_I:%.*]] = or i32 [[SHR_I_I]], [[SHL_I_I]]
+// AArch64-NEXT:    br label [[__REV16_EXIT]]
+// AArch64:       __rev16.exit:
+// AArch64-NEXT:    [[RETVAL_I_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I]] ], [ [[OR_I_I]], [[IF_END_I_I]] ]
+// AArch64-NEXT:    ret i32 [[RETVAL_I_I_0]]
+//
 uint32_t test_rev16(uint32_t t) {
   return __rev16(t);
 }
 
-// ARM-LABEL: test_rev16l
-// AArch32: llvm.bswap
-// AArch32-LEGACY: lshr {{.*}}, 16
-// AArch32-LEGACY: shl {{.*}}, 16
-// AArch32-LEGACY: or
-// AArch32-NEWPM: call i32 @llvm.fshl.i32(i32 %0, i32 %0, i32 16)
-// AArch64: [[T1:%.*]] = lshr i64 [[IN:%.*]], 32
-// AArch64: [[T2:%.*]] = trunc i64 [[T1]] to i32
-// AArch64: [[T3:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[T2]])
-// AArch64-LEGACY: [[T4:%.*]] = lshr i32 [[T3]], 16
-// AArch64-LEGACY: [[T5:%.*]] = shl i32 [[T3]], 16
-// AArch64-LEGACY: [[T6:%.*]] = or i32 [[T5]], [[T4]]
-// AArch64-NEWPM: [[T6:%.*]] = tail call i32 @llvm.fshl.i32(i32 [[T3]], i32 [[T3]], i32 16)
-// AArch64: [[T7:%.*]] = zext i32 [[T6]] to i64
-// AArch64: [[T8:%.*]] = shl nuw i64 [[T7]], 32
-// AArch64: [[T9:%.*]] = trunc i64 [[IN]] to i32
-// AArch64: [[T10:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[T9]])
-// AArch64-LEGACY: [[T11:%.*]] = lshr i32 [[T10]], 16
-// AArch64-LEGACY: [[T12:%.*]] = shl i32 [[T10]], 16
-// AArch64-LEGACY: [[T13:%.*]] = or i32 [[T12]], [[T11]]
-// AArch64-NEWPM: [[T13:%.*]] = tail call i32 @llvm.fshl.i32(i32 [[T10]], i32 [[T10]], i32 16)
-// AArch64: [[T14:%.*]] = zext i32 [[T13]] to i64
-// AArch64: [[T15:%.*]] = or i64 [[T8]], [[T14]]
+// AArch32-LABEL: @test_rev16l(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    [[REM_I_I_I:%.*]] = urem i32 16, 32
+// AArch32-NEXT:    [[CMP_I_I_I:%.*]] = icmp eq i32 [[REM_I_I_I]], 0
+// AArch32-NEXT:    br i1 [[CMP_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[IF_END_I_I_I:%.*]]
+// AArch32:       if.then.i.i.i:
+// AArch32-NEXT:    br label [[__REV16L_EXIT:%.*]]
+// AArch32:       if.end.i.i.i:
+// AArch32-NEXT:    [[SHR_I_I_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I_I]]
+// AArch32-NEXT:    [[SUB_I_I_I:%.*]] = sub i32 32, [[REM_I_I_I]]
+// AArch32-NEXT:    [[SHL_I_I_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I_I]]
+// AArch32-NEXT:    [[OR_I_I_I:%.*]] = or i32 [[SHR_I_I_I]], [[SHL_I_I_I]]
+// AArch32-NEXT:    br label [[__REV16L_EXIT]]
+// AArch32:       __rev16l.exit:
+// AArch32-NEXT:    [[RETVAL_I_I_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I_I]] ], [ [[OR_I_I_I]], [[IF_END_I_I_I]] ]
+// AArch32-NEXT:    ret i32 [[RETVAL_I_I_I_0]]
+//
+// AArch64-LABEL: @test_rev16l(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[SHR_I_I:%.*]] = lshr i64 [[T:%.*]], 32
+// AArch64-NEXT:    [[CONV_I_I:%.*]] = trunc i64 [[SHR_I_I]] to i32
+// AArch64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV_I_I]]) [[ATTR3]]
+// AArch64-NEXT:    [[REM_I_I_I_I:%.*]] = urem i32 16, 32
+// AArch64-NEXT:    [[CMP_I_I_I_I:%.*]] = icmp eq i32 [[REM_I_I_I_I]], 0
+// AArch64-NEXT:    br i1 [[CMP_I_I_I_I]], label [[IF_THEN_I_I_I_I:%.*]], label [[IF_END_I_I_I_I:%.*]]
+// AArch64:       if.then.i.i.i.i:
+// AArch64-NEXT:    br label [[__REV16_EXIT_I_I:%.*]]
+// AArch64:       if.end.i.i.i.i:
+// AArch64-NEXT:    [[SHR_I_I_I_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I_I_I]]
+// AArch64-NEXT:    [[SUB_I_I_I_I:%.*]] = sub i32 32, [[REM_I_I_I_I]]
+// AArch64-NEXT:    [[SHL_I_I_I_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I_I_I]]
+// AArch64-NEXT:    [[OR_I_I_I_I:%.*]] = or i32 [[SHR_I_I_I_I]], [[SHL_I_I_I_I]]
+// AArch64-NEXT:    br label [[__REV16_EXIT_I_I]]
+// AArch64:       __rev16.exit.i.i:
+// AArch64-NEXT:    [[RETVAL_I_I_I_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I_I_I]] ], [ [[OR_I_I_I_I]], [[IF_END_I_I_I_I]] ]
+// AArch64-NEXT:    [[CONV1_I_I:%.*]] = zext i32 [[RETVAL_I_I_I_I_0]] to i64
+// AArch64-NEXT:    [[SHL_I_I:%.*]] = shl i64 [[CONV1_I_I]], 32
+// AArch64-NEXT:    [[CONV2_I_I:%.*]] = trunc i64 [[T]] to i32
+// AArch64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV2_I_I]]) [[ATTR3]]
+// AArch64-NEXT:    [[REM_I_I10_I_I:%.*]] = urem i32 16, 32
+// AArch64-NEXT:    [[CMP_I_I11_I_I:%.*]] = icmp eq i32 [[REM_I_I10_I_I]], 0
+// AArch64-NEXT:    br i1 [[CMP_I_I11_I_I]], label [[IF_THEN_I_I12_I_I:%.*]], label [[IF_END_I_I17_I_I:%.*]]
+// AArch64:       if.then.i.i12.i.i:
+// AArch64-NEXT:    br label [[__REV16L_EXIT:%.*]]
+// AArch64:       if.end.i.i17.i.i:
+// AArch64-NEXT:    [[SHR_I_I13_I_I:%.*]] = lshr i32 [[TMP1]], [[REM_I_I10_I_I]]
+// AArch64-NEXT:    [[SUB_I_I14_I_I:%.*]] = sub i32 32, [[REM_I_I10_I_I]]
+// AArch64-NEXT:    [[SHL_I_I15_I_I:%.*]] = shl i32 [[TMP1]], [[SUB_I_I14_I_I]]
+// AArch64-NEXT:    [[OR_I_I16_I_I:%.*]] = or i32 [[SHR_I_I13_I_I]], [[SHL_I_I15_I_I]]
+// AArch64-NEXT:    br label [[__REV16L_EXIT]]
+// AArch64:       __rev16l.exit:
+// AArch64-NEXT:    [[RETVAL_I_I5_I_I_0:%.*]] = phi i32 [ [[TMP1]], [[IF_THEN_I_I12_I_I]] ], [ [[OR_I_I16_I_I]], [[IF_END_I_I17_I_I]] ]
+// AArch64-NEXT:    [[CONV4_I_I:%.*]] = zext i32 [[RETVAL_I_I5_I_I_0]] to i64
+// AArch64-NEXT:    [[OR_I_I:%.*]] = or i64 [[SHL_I_I]], [[CONV4_I_I]]
+// AArch64-NEXT:    ret i64 [[OR_I_I]]
+//
 long test_rev16l(long t) {
   return __rev16l(t);
 }
 
-// ARM-LABEL: test_rev16ll
-// ARM: [[T1:%.*]] = lshr i64 [[IN:%.*]], 32
-// ARM: [[T2:%.*]] = trunc i64 [[T1]] to i32
-// ARM: [[T3:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[T2]])
-// ARM-LEGACY: [[T4:%.*]] = lshr i32 [[T3]], 16
-// ARM-LEGACY: [[T5:%.*]] = shl i32 [[T3]], 16
-// ARM-LEGACY: [[T6:%.*]] = or i32 [[T5]], [[T4]]
-// ARM-NEWPM: [[T6:%.*]] = tail call i32 @llvm.fshl.i32(i32 [[T3]], i32 [[T3]], i32 16)
-// ARM: [[T7:%.*]] = zext i32 [[T6]] to i64
-// ARM: [[T8:%.*]] = shl nuw i64 [[T7]], 32
-// ARM: [[T9:%.*]] = trunc i64 [[IN]] to i32
-// ARM: [[T10:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[T9]])
-// ARM-LEGACY: [[T11:%.*]] = lshr i32 [[T10]], 16
-// ARM-LEGACY: [[T12:%.*]] = shl i32 [[T10]], 16
-// ARM-LEGACY: [[T13:%.*]] = or i32 [[T12]], [[T11]]
-// ARM-NEWPM: [[T13:%.*]] = tail call i32 @llvm.fshl.i32(i32 [[T10]], i32 [[T10]], i32 16)
-// ARM: [[T14:%.*]] = zext i32 [[T13]] to i64
-// ARM: [[T15:%.*]] = or i64 [[T8]], [[T14]]
+// AArch32-LABEL: @test_rev16ll(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[SHR_I:%.*]] = lshr i64 [[T:%.*]], 32
+// AArch32-NEXT:    [[CONV_I:%.*]] = trunc i64 [[SHR_I]] to i32
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV_I]]) [[ATTR1]]
+// AArch32-NEXT:    [[REM_I_I_I:%.*]] = urem i32 16, 32
+// AArch32-NEXT:    [[CMP_I_I_I:%.*]] = icmp eq i32 [[REM_I_I_I]], 0
+// AArch32-NEXT:    br i1 [[CMP_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[IF_END_I_I_I:%.*]]
+// AArch32:       if.then.i.i.i:
+// AArch32-NEXT:    br label [[__REV16_EXIT_I:%.*]]
+// AArch32:       if.end.i.i.i:
+// AArch32-NEXT:    [[SHR_I_I_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I_I]]
+// AArch32-NEXT:    [[SUB_I_I_I:%.*]] = sub i32 32, [[REM_I_I_I]]
+// AArch32-NEXT:    [[SHL_I_I_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I_I]]
+// AArch32-NEXT:    [[OR_I_I_I:%.*]] = or i32 [[SHR_I_I_I]], [[SHL_I_I_I]]
+// AArch32-NEXT:    br label [[__REV16_EXIT_I]]
+// AArch32:       __rev16.exit.i:
+// AArch32-NEXT:    [[RETVAL_I_I_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I_I]] ], [ [[OR_I_I_I]], [[IF_END_I_I_I]] ]
+// AArch32-NEXT:    [[CONV1_I:%.*]] = zext i32 [[RETVAL_I_I_I_0]] to i64
+// AArch32-NEXT:    [[SHL_I:%.*]] = shl i64 [[CONV1_I]], 32
+// AArch32-NEXT:    [[CONV2_I:%.*]] = trunc i64 [[T]] to i32
+// AArch32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV2_I]]) [[ATTR1]]
+// AArch32-NEXT:    [[REM_I_I10_I:%.*]] = urem i32 16, 32
+// AArch32-NEXT:    [[CMP_I_I11_I:%.*]] = icmp eq i32 [[REM_I_I10_I]], 0
+// AArch32-NEXT:    br i1 [[CMP_I_I11_I]], label [[IF_THEN_I_I12_I:%.*]], label [[IF_END_I_I17_I:%.*]]
+// AArch32:       if.then.i.i12.i:
+// AArch32-NEXT:    br label [[__REV16LL_EXIT:%.*]]
+// AArch32:       if.end.i.i17.i:
+// AArch32-NEXT:    [[SHR_I_I13_I:%.*]] = lshr i32 [[TMP1]], [[REM_I_I10_I]]
+// AArch32-NEXT:    [[SUB_I_I14_I:%.*]] = sub i32 32, [[REM_I_I10_I]]
+// AArch32-NEXT:    [[SHL_I_I15_I:%.*]] = shl i32 [[TMP1]], [[SUB_I_I14_I]]
+// AArch32-NEXT:    [[OR_I_I16_I:%.*]] = or i32 [[SHR_I_I13_I]], [[SHL_I_I15_I]]
+// AArch32-NEXT:    br label [[__REV16LL_EXIT]]
+// AArch32:       __rev16ll.exit:
+// AArch32-NEXT:    [[RETVAL_I_I5_I_0:%.*]] = phi i32 [ [[TMP1]], [[IF_THEN_I_I12_I]] ], [ [[OR_I_I16_I]], [[IF_END_I_I17_I]] ]
+// AArch32-NEXT:    [[CONV4_I:%.*]] = zext i32 [[RETVAL_I_I5_I_0]] to i64
+// AArch32-NEXT:    [[OR_I:%.*]] = or i64 [[SHL_I]], [[CONV4_I]]
+// AArch32-NEXT:    ret i64 [[OR_I]]
+//
+// AArch64-LABEL: @test_rev16ll(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[SHR_I:%.*]] = lshr i64 [[T:%.*]], 32
+// AArch64-NEXT:    [[CONV_I:%.*]] = trunc i64 [[SHR_I]] to i32
+// AArch64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV_I]]) [[ATTR3]]
+// AArch64-NEXT:    [[REM_I_I_I:%.*]] = urem i32 16, 32
+// AArch64-NEXT:    [[CMP_I_I_I:%.*]] = icmp eq i32 [[REM_I_I_I]], 0
+// AArch64-NEXT:    br i1 [[CMP_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[IF_END_I_I_I:%.*]]
+// AArch64:       if.then.i.i.i:
+// AArch64-NEXT:    br label [[__REV16_EXIT_I:%.*]]
+// AArch64:       if.end.i.i.i:
+// AArch64-NEXT:    [[SHR_I_I_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I_I]]
+// AArch64-NEXT:    [[SUB_I_I_I:%.*]] = sub i32 32, [[REM_I_I_I]]
+// AArch64-NEXT:    [[SHL_I_I_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I_I]]
+// AArch64-NEXT:    [[OR_I_I_I:%.*]] = or i32 [[SHR_I_I_I]], [[SHL_I_I_I]]
+// AArch64-NEXT:    br label [[__REV16_EXIT_I]]
+// AArch64:       __rev16.exit.i:
+// AArch64-NEXT:    [[RETVAL_I_I_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I_I]] ], [ [[OR_I_I_I]], [[IF_END_I_I_I]] ]
+// AArch64-NEXT:    [[CONV1_I:%.*]] = zext i32 [[RETVAL_I_I_I_0]] to i64
+// AArch64-NEXT:    [[SHL_I:%.*]] = shl i64 [[CONV1_I]], 32
+// AArch64-NEXT:    [[CONV2_I:%.*]] = trunc i64 [[T]] to i32
+// AArch64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV2_I]]) [[ATTR3]]
+// AArch64-NEXT:    [[REM_I_I10_I:%.*]] = urem i32 16, 32
+// AArch64-NEXT:    [[CMP_I_I11_I:%.*]] = icmp eq i32 [[REM_I_I10_I]], 0
+// AArch64-NEXT:    br i1 [[CMP_I_I11_I]], label [[IF_THEN_I_I12_I:%.*]], label [[IF_END_I_I17_I:%.*]]
+// AArch64:       if.then.i.i12.i:
+// AArch64-NEXT:    br label [[__REV16LL_EXIT:%.*]]
+// AArch64:       if.end.i.i17.i:
+// AArch64-NEXT:    [[SHR_I_I13_I:%.*]] = lshr i32 [[TMP1]], [[REM_I_I10_I]]
+// AArch64-NEXT:    [[SUB_I_I14_I:%.*]] = sub i32 32, [[REM_I_I10_I]]
+// AArch64-NEXT:    [[SHL_I_I15_I:%.*]] = shl i32 [[TMP1]], [[SUB_I_I14_I]]
+// AArch64-NEXT:    [[OR_I_I16_I:%.*]] = or i32 [[SHR_I_I13_I]], [[SHL_I_I15_I]]
+// AArch64-NEXT:    br label [[__REV16LL_EXIT]]
+// AArch64:       __rev16ll.exit:
+// AArch64-NEXT:    [[RETVAL_I_I5_I_0:%.*]] = phi i32 [ [[TMP1]], [[IF_THEN_I_I12_I]] ], [ [[OR_I_I16_I]], [[IF_END_I_I17_I]] ]
+// AArch64-NEXT:    [[CONV4_I:%.*]] = zext i32 [[RETVAL_I_I5_I_0]] to i64
+// AArch64-NEXT:    [[OR_I:%.*]] = or i64 [[SHL_I]], [[CONV4_I]]
+// AArch64-NEXT:    ret i64 [[OR_I]]
+//
 uint64_t test_rev16ll(uint64_t t) {
   return __rev16ll(t);
 }
 
-// ARM-LABEL: test_revsh
-// ARM: call i16 @llvm.bswap.i16(i16 %t)
+// AArch32-LABEL: @test_revsh(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i16 @llvm.bswap.i16(i16 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i16 [[TMP0]]
+//
+// AArch64-LABEL: @test_revsh(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.bswap.i16(i16 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i16 [[TMP0]]
+//
 int16_t test_revsh(int16_t t) {
   return __revsh(t);
 }
 
-// ARM-LABEL: test_rbit
-// AArch32: call i32 @llvm.bitreverse.i32
-// AArch64: call i32 @llvm.bitreverse.i32
+// AArch32-LABEL: @test_rbit(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[RBIT_I:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[RBIT_I]]
+//
+// AArch64-LABEL: @test_rbit(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[RBIT_I:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[RBIT_I]]
+//
 uint32_t test_rbit(uint32_t t) {
   return __rbit(t);
 }
 
-// ARM-LABEL: test_rbitl
-// AArch32: call i32 @llvm.bitreverse.i32
-// AArch64: call i64 @llvm.bitreverse.i64
+// AArch32-LABEL: @test_rbitl(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[RBIT_I_I:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[T:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[RBIT_I_I]]
+//
+// AArch64-LABEL: @test_rbitl(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[RBIT_I_I:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i64 [[RBIT_I_I]]
+//
 long test_rbitl(long t) {
   return __rbitl(t);
 }
 
-// ARM-LABEL: test_rbitll
-// AArch32: call i32 @llvm.bitreverse.i32
-// AArch32: call i32 @llvm.bitreverse.i32
-// AArch64: call i64 @llvm.bitreverse.i64
+// AArch32-LABEL: @test_rbitll(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[CONV_I:%.*]] = trunc i64 [[T:%.*]] to i32
+// AArch32-NEXT:    [[RBIT_I:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[CONV_I]]) [[ATTR1]]
+// AArch32-NEXT:    [[CONV1_I:%.*]] = zext i32 [[RBIT_I]] to i64
+// AArch32-NEXT:    [[SHL_I:%.*]] = shl i64 [[CONV1_I]], 32
+// AArch32-NEXT:    [[SHR_I:%.*]] = lshr i64 [[T]], 32
+// AArch32-NEXT:    [[CONV2_I:%.*]] = trunc i64 [[SHR_I]] to i32
+// AArch32-NEXT:    [[RBIT3_I:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[CONV2_I]]) [[ATTR1]]
+// AArch32-NEXT:    [[CONV4_I:%.*]] = zext i32 [[RBIT3_I]] to i64
+// AArch32-NEXT:    [[OR_I:%.*]] = or i64 [[SHL_I]], [[CONV4_I]]
+// AArch32-NEXT:    ret i64 [[OR_I]]
+//
+// AArch64-LABEL: @test_rbitll(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[RBIT_I:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[T:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i64 [[RBIT_I]]
+//
 uint64_t test_rbitll(uint64_t t) {
   return __rbitll(t);
 }
@@ -302,14 +697,20 @@ uint64_t test_rbitll(uint64_t t) {
 /* 9.4 Saturating intrinsics */
 #ifdef __ARM_FEATURE_SAT
 /* 9.4.1 Width-specified saturation intrinsics */
-// AArch32-LABEL: test_ssat
-// AArch32: call i32 @llvm.arm.ssat(i32 %t, i32 1)
+// AArch32-LABEL: @test_ssat(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.ssat(i32 [[T:%.*]], i32 1)
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_ssat(int32_t t) {
   return __ssat(t, 1);
 }
 
-// AArch32-LABEL: test_usat
-// AArch32: call i32 @llvm.arm.usat(i32 %t, i32 2)
+// AArch32-LABEL: @test_usat(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.usat(i32 [[T:%.*]], i32 2)
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint32_t test_usat(int32_t t) {
   return __usat(t, 2);
 }
@@ -317,23 +718,31 @@ uint32_t test_usat(int32_t t) {
 
 /* 9.4.2 Saturating addition and subtraction intrinsics */
 #ifdef __ARM_FEATURE_DSP
-// AArch32-LABEL: test_qadd
-// AArch32: call i32 @llvm.arm.qadd(i32 %a, i32 %b)
+// AArch32-LABEL: @test_qadd(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.qadd(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_qadd(int32_t a, int32_t b) {
   return __qadd(a, b);
 }
 
-// AArch32-LABEL: test_qsub
-// AArch32: call i32 @llvm.arm.qsub(i32 %a, i32 %b)
+// AArch32-LABEL: @test_qsub(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.qsub(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_qsub(int32_t a, int32_t b) {
   return __qsub(a, b);
 }
 
 extern int32_t f();
-// AArch32-LABEL: test_qdbl
-// AArch32: [[VAR:%[a-z0-9]+]] = {{.*}} call {{.*}} @f
-// AArch32-NOT: call {{.*}} @f
-// AArch32: call i32 @llvm.arm.qadd(i32 [[VAR]], i32 [[VAR]])
+// AArch32-LABEL: @test_qdbl(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[CALL:%.*]] = call i32 bitcast (i32 (...)* @f to i32 ()*)() [[ATTR7:#.*]]
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.qadd(i32 [[CALL]], i32 [[CALL]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_qdbl() {
   return __qdbl(f());
 }
@@ -343,33 +752,56 @@ int32_t test_qdbl() {
  * 9.3 16-bit multiplications
  */
 #if __ARM_FEATURE_DSP
-// AArch32-LABEL: test_smulbb
-// AArch32: call i32 @llvm.arm.smulbb
+// AArch32-LABEL: @test_smulbb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smulbb(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smulbb(int32_t a, int32_t b) {
   return __smulbb(a, b);
 }
-// AArch32-LABEL: test_smulbt
-// AArch32: call i32 @llvm.arm.smulbt
+
+// AArch32-LABEL: @test_smulbt(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smulbt(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smulbt(int32_t a, int32_t b) {
   return __smulbt(a, b);
 }
-// AArch32-LABEL: test_smultb
-// AArch32: call i32 @llvm.arm.smultb
+
+// AArch32-LABEL: @test_smultb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smultb(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smultb(int32_t a, int32_t b) {
   return __smultb(a, b);
 }
-// AArch32-LABEL: test_smultt
-// AArch32: call i32 @llvm.arm.smultt
+
+// AArch32-LABEL: @test_smultt(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smultt(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smultt(int32_t a, int32_t b) {
   return __smultt(a, b);
 }
-// AArch32-LABEL: test_smulwb
-// AArch32: call i32 @llvm.arm.smulwb
+
+// AArch32-LABEL: @test_smulwb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smulwb(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smulwb(int32_t a, int32_t b) {
   return __smulwb(a, b);
 }
-// AArch32-LABEL: test_smulwt
-// AArch32: call i32 @llvm.arm.smulwt
+
+// AArch32-LABEL: @test_smulwt(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smulwt(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smulwt(int32_t a, int32_t b) {
   return __smulwt(a, b);
 }
@@ -377,33 +809,56 @@ int32_t test_smulwt(int32_t a, int32_t b) {
 
 /* 9.4.3 Accumultating multiplications */
 #if __ARM_FEATURE_DSP
-// AArch32-LABEL: test_smlabb
-// AArch32: call i32 @llvm.arm.smlabb(i32 %a, i32 %b, i32 %c)
+// AArch32-LABEL: @test_smlabb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smlabb(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smlabb(int32_t a, int32_t b, int32_t c) {
   return __smlabb(a, b, c);
 }
-// AArch32-LABEL: test_smlabt
-// AArch32: call i32 @llvm.arm.smlabt(i32 %a, i32 %b, i32 %c)
+
+// AArch32-LABEL: @test_smlabt(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smlabt(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smlabt(int32_t a, int32_t b, int32_t c) {
   return __smlabt(a, b, c);
 }
-// AArch32-LABEL: test_smlatb
-// AArch32: call i32 @llvm.arm.smlatb(i32 %a, i32 %b, i32 %c)
+
+// AArch32-LABEL: @test_smlatb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smlatb(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smlatb(int32_t a, int32_t b, int32_t c) {
   return __smlatb(a, b, c);
 }
-// AArch32-LABEL: test_smlatt
-// AArch32: call i32 @llvm.arm.smlatt(i32 %a, i32 %b, i32 %c)
+
+// AArch32-LABEL: @test_smlatt(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smlatt(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smlatt(int32_t a, int32_t b, int32_t c) {
   return __smlatt(a, b, c);
 }
-// AArch32-LABEL: test_smlawb
-// AArch32: call i32 @llvm.arm.smlawb(i32 %a, i32 %b, i32 %c)
+
+// AArch32-LABEL: @test_smlawb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smlawb(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smlawb(int32_t a, int32_t b, int32_t c) {
   return __smlawb(a, b, c);
 }
-// AArch32-LABEL: test_smlawt
-// AArch32: call i32 @llvm.arm.smlawt(i32 %a, i32 %b, i32 %c)
+
+// AArch32-LABEL: @test_smlawt(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smlawt(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smlawt(int32_t a, int32_t b, int32_t c) {
   return __smlawt(a, b, c);
 }
@@ -411,13 +866,20 @@ int32_t test_smlawt(int32_t a, int32_t b, int32_t c) {
 
 /* 9.5.4 Parallel 16-bit saturation */
 #if __ARM_FEATURE_SIMD32
-// AArch32-LABEL: test_ssat16
-// AArch32: call i32 @llvm.arm.ssat16
+// AArch32-LABEL: @test_ssat16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.ssat16(i32 [[A:%.*]], i32 15)
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_ssat16(int16x2_t a) {
   return __ssat16(a, 15);
 }
-// AArch32-LABEL: test_usat16
-// AArch32: call i32 @llvm.arm.usat16
+
+// AArch32-LABEL: @test_usat16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.usat16(i32 [[A:%.*]], i32 15)
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_usat16(int16x2_t a) {
   return __usat16(a, 15);
 }
@@ -425,23 +887,38 @@ uint16x2_t test_usat16(int16x2_t a) {
 
 /* 9.5.5 Packing and unpacking */
 #if __ARM_FEATURE_SIMD32
-// AArch32-LABEL: test_sxtab16
-// AArch32: call i32 @llvm.arm.sxtab16
+// AArch32-LABEL: @test_sxtab16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.sxtab16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_sxtab16(int16x2_t a, int8x4_t b) {
   return __sxtab16(a, b);
 }
-// AArch32-LABEL: test_sxtb16
-// AArch32: call i32 @llvm.arm.sxtb16
+
+// AArch32-LABEL: @test_sxtb16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.sxtb16(i32 [[A:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_sxtb16(int8x4_t a) {
   return __sxtb16(a);
 }
-// AArch32-LABEL: test_uxtab16
-// AArch32: call i32 @llvm.arm.uxtab16
+
+// AArch32-LABEL: @test_uxtab16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uxtab16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_uxtab16(int16x2_t a, int8x4_t b) {
   return __uxtab16(a, b);
 }
-// AArch32-LABEL: test_uxtb16
-// AArch32: call i32 @llvm.arm.uxtb16
+
+// AArch32-LABEL: @test_uxtb16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uxtb16(i32 [[A:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_uxtb16(int8x4_t a) {
   return __uxtb16(a);
 }
@@ -449,8 +926,11 @@ int16x2_t test_uxtb16(int8x4_t a) {
 
 /* 9.5.6 Parallel selection */
 #if __ARM_FEATURE_SIMD32
-// AArch32-LABEL: test_sel
-// AArch32: call i32 @llvm.arm.sel
+// AArch32-LABEL: @test_sel(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.sel(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint8x4_t test_sel(uint8x4_t a, uint8x4_t b) {
   return __sel(a, b);
 }
@@ -458,63 +938,110 @@ uint8x4_t test_sel(uint8x4_t a, uint8x4_t b) {
 
 /* 9.5.7 Parallel 8-bit addition and subtraction */
 #if __ARM_FEATURE_SIMD32
-// AArch32-LABEL: test_qadd8
-// AArch32: call i32 @llvm.arm.qadd8
+// AArch32-LABEL: @test_qadd8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.qadd8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_qadd8(int8x4_t a, int8x4_t b) {
   return __qadd8(a, b);
 }
-// AArch32-LABEL: test_qsub8
-// AArch32: call i32 @llvm.arm.qsub8
+
+// AArch32-LABEL: @test_qsub8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.qsub8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int8x4_t test_qsub8(int8x4_t a, int8x4_t b) {
   return __qsub8(a, b);
 }
-// AArch32-LABEL: test_sadd8
-// AArch32: call i32 @llvm.arm.sadd8
+
+// AArch32-LABEL: @test_sadd8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.sadd8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int8x4_t test_sadd8(int8x4_t a, int8x4_t b) {
   return __sadd8(a, b);
 }
-// AArch32-LABEL: test_shadd8
-// AArch32: call i32 @llvm.arm.shadd8
+
+// AArch32-LABEL: @test_shadd8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.shadd8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int8x4_t test_shadd8(int8x4_t a, int8x4_t b) {
   return __shadd8(a, b);
 }
-// AArch32-LABEL: test_shsub8
-// AArch32: call i32 @llvm.arm.shsub8
+
+// AArch32-LABEL: @test_shsub8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.shsub8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int8x4_t test_shsub8(int8x4_t a, int8x4_t b) {
   return __shsub8(a, b);
 }
-// AArch32-LABEL: test_ssub8
-// AArch32: call i32 @llvm.arm.ssub8
+
+// AArch32-LABEL: @test_ssub8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.ssub8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int8x4_t test_ssub8(int8x4_t a, int8x4_t b) {
   return __ssub8(a, b);
 }
-// AArch32-LABEL: test_uadd8
-// AArch32: call i32 @llvm.arm.uadd8
+
+// AArch32-LABEL: @test_uadd8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uadd8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint8x4_t test_uadd8(uint8x4_t a, uint8x4_t b) {
   return __uadd8(a, b);
 }
-// AArch32-LABEL: test_uhadd8
-// AArch32: call i32 @llvm.arm.uhadd8
+
+// AArch32-LABEL: @test_uhadd8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uhadd8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint8x4_t test_uhadd8(uint8x4_t a, uint8x4_t b) {
   return __uhadd8(a, b);
 }
-// AArch32-LABEL: test_uhsub8
-// AArch32: call i32 @llvm.arm.uhsub8
+
+// AArch32-LABEL: @test_uhsub8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uhsub8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint8x4_t test_uhsub8(uint8x4_t a, uint8x4_t b) {
   return __uhsub8(a, b);
 }
-// AArch32-LABEL: test_uqadd8
-// AArch32: call i32 @llvm.arm.uqadd8
+
+// AArch32-LABEL: @test_uqadd8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uqadd8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint8x4_t test_uqadd8(uint8x4_t a, uint8x4_t b) {
   return __uqadd8(a, b);
 }
-// AArch32-LABEL: test_uqsub8
-// AArch32: call i32 @llvm.arm.uqsub8
+
+// AArch32-LABEL: @test_uqsub8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uqsub8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint8x4_t test_uqsub8(uint8x4_t a, uint8x4_t b) {
   return __uqsub8(a, b);
 }
-// AArch32-LABEL: test_usub8
-// AArch32: call i32 @llvm.arm.usub8
+
+// AArch32-LABEL: @test_usub8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.usub8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint8x4_t test_usub8(uint8x4_t a, uint8x4_t b) {
   return __usub8(a, b);
 }
@@ -522,13 +1049,23 @@ uint8x4_t test_usub8(uint8x4_t a, uint8x4_t b) {
 
 /* 9.5.8 Sum of 8-bit absolute 
diff erences */
 #if __ARM_FEATURE_SIMD32
-// AArch32-LABEL: test_usad8
-// AArch32: call i32 @llvm.arm.usad8
+// AArch32-LABEL: @test_usad8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.usad8(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint32_t test_usad8(uint8x4_t a, uint8x4_t b) {
   return __usad8(a, b);
 }
-// AArch32-LABEL: test_usada8
-// AArch32: call i32 @llvm.arm.usada8
+
+// AArch32-LABEL: @test_usada8(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[CONV:%.*]] = zext i8 [[A:%.*]] to i32
+// AArch32-NEXT:    [[CONV1:%.*]] = zext i8 [[B:%.*]] to i32
+// AArch32-NEXT:    [[CONV2:%.*]] = zext i8 [[C:%.*]] to i32
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.usada8(i32 [[CONV]], i32 [[CONV1]], i32 [[CONV2]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint32_t test_usada8(uint8_t a, uint8_t b, uint8_t c) {
   return __usada8(a, b, c);
 }
@@ -536,123 +1073,218 @@ uint32_t test_usada8(uint8_t a, uint8_t b, uint8_t c) {
 
 /* 9.5.9 Parallel 16-bit addition and subtraction */
 #if __ARM_FEATURE_SIMD32
-// AArch32-LABEL: test_qadd16
-// AArch32: call i32 @llvm.arm.qadd16
+// AArch32-LABEL: @test_qadd16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.qadd16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_qadd16(int16x2_t a, int16x2_t b) {
   return __qadd16(a, b);
 }
-// AArch32-LABEL: test_qasx
-// AArch32: call i32 @llvm.arm.qasx
+
+// AArch32-LABEL: @test_qasx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.qasx(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_qasx(int16x2_t a, int16x2_t b) {
   return __qasx(a, b);
 }
-// AArch32-LABEL: test_qsax
-// AArch32: call i32 @llvm.arm.qsax
+
+// AArch32-LABEL: @test_qsax(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.qsax(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_qsax(int16x2_t a, int16x2_t b) {
   return __qsax(a, b);
 }
-// AArch32-LABEL: test_qsub16
-// AArch32: call i32 @llvm.arm.qsub16
+
+// AArch32-LABEL: @test_qsub16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.qsub16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_qsub16(int16x2_t a, int16x2_t b) {
   return __qsub16(a, b);
 }
-// AArch32-LABEL: test_sadd16
-// AArch32: call i32 @llvm.arm.sadd16
+
+// AArch32-LABEL: @test_sadd16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.sadd16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_sadd16(int16x2_t a, int16x2_t b) {
   return __sadd16(a, b);
 }
-// AArch32-LABEL: test_sasx
-// AArch32: call i32 @llvm.arm.sasx
+
+// AArch32-LABEL: @test_sasx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.sasx(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_sasx(int16x2_t a, int16x2_t b) {
   return __sasx(a, b);
 }
-// AArch32-LABEL: test_shadd16
-// AArch32: call i32 @llvm.arm.shadd16
+
+// AArch32-LABEL: @test_shadd16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.shadd16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_shadd16(int16x2_t a, int16x2_t b) {
   return __shadd16(a, b);
 }
-// AArch32-LABEL: test_shasx
-// AArch32: call i32 @llvm.arm.shasx
+
+// AArch32-LABEL: @test_shasx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.shasx(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_shasx(int16x2_t a, int16x2_t b) {
   return __shasx(a, b);
 }
-// AArch32-LABEL: test_shsax
-// AArch32: call i32 @llvm.arm.shsax
+
+// AArch32-LABEL: @test_shsax(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.shsax(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_shsax(int16x2_t a, int16x2_t b) {
   return __shsax(a, b);
 }
-// AArch32-LABEL: test_shsub16
-// AArch32: call i32 @llvm.arm.shsub16
+
+// AArch32-LABEL: @test_shsub16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.shsub16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_shsub16(int16x2_t a, int16x2_t b) {
   return __shsub16(a, b);
 }
-// AArch32-LABEL: test_ssax
-// AArch32: call i32 @llvm.arm.ssax
+
+// AArch32-LABEL: @test_ssax(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.ssax(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_ssax(int16x2_t a, int16x2_t b) {
   return __ssax(a, b);
 }
-// AArch32-LABEL: test_ssub16
-// AArch32: call i32 @llvm.arm.ssub16
+
+// AArch32-LABEL: @test_ssub16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.ssub16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int16x2_t test_ssub16(int16x2_t a, int16x2_t b) {
   return __ssub16(a, b);
 }
-// AArch32-LABEL: test_uadd16
-// AArch32: call i32 @llvm.arm.uadd16
+
+// AArch32-LABEL: @test_uadd16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uadd16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uadd16(uint16x2_t a, uint16x2_t b) {
   return __uadd16(a, b);
 }
-// AArch32-LABEL: test_uasx
-// AArch32: call i32 @llvm.arm.uasx
+
+// AArch32-LABEL: @test_uasx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uasx(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uasx(uint16x2_t a, uint16x2_t b) {
   return __uasx(a, b);
 }
-// AArch32-LABEL: test_uhadd16
-// AArch32: call i32 @llvm.arm.uhadd16
+
+// AArch32-LABEL: @test_uhadd16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uhadd16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uhadd16(uint16x2_t a, uint16x2_t b) {
   return __uhadd16(a, b);
 }
-// AArch32-LABEL: test_uhasx
-// AArch32: call i32 @llvm.arm.uhasx
+
+// AArch32-LABEL: @test_uhasx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uhasx(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uhasx(uint16x2_t a, uint16x2_t b) {
   return __uhasx(a, b);
 }
-// AArch32-LABEL: test_uhsax
-// AArch32: call i32 @llvm.arm.uhsax
+
+// AArch32-LABEL: @test_uhsax(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uhsax(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uhsax(uint16x2_t a, uint16x2_t b) {
   return __uhsax(a, b);
 }
-// AArch32-LABEL: test_uhsub16
-// AArch32: call i32 @llvm.arm.uhsub16
+
+// AArch32-LABEL: @test_uhsub16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uhsub16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uhsub16(uint16x2_t a, uint16x2_t b) {
   return __uhsub16(a, b);
 }
-// AArch32-LABEL: test_uqadd16
-// AArch32: call i32 @llvm.arm.uqadd16
+
+// AArch32-LABEL: @test_uqadd16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uqadd16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uqadd16(uint16x2_t a, uint16x2_t b) {
   return __uqadd16(a, b);
 }
-// AArch32-LABEL: test_uqasx
-// AArch32: call i32 @llvm.arm.uqasx
+
+// AArch32-LABEL: @test_uqasx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uqasx(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uqasx(uint16x2_t a, uint16x2_t b) {
   return __uqasx(a, b);
 }
-// AArch32-LABEL: test_uqsax
-// AArch32: call i32 @llvm.arm.uqsax
+
+// AArch32-LABEL: @test_uqsax(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uqsax(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uqsax(uint16x2_t a, uint16x2_t b) {
   return __uqsax(a, b);
 }
-// AArch32-LABEL: test_uqsub16
-// AArch32: call i32 @llvm.arm.uqsub16
+
+// AArch32-LABEL: @test_uqsub16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.uqsub16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_uqsub16(uint16x2_t a, uint16x2_t b) {
   return __uqsub16(a, b);
 }
-// AArch32-LABEL: test_usax
-// AArch32: call i32 @llvm.arm.usax
+
+// AArch32-LABEL: @test_usax(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.usax(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_usax(uint16x2_t a, uint16x2_t b) {
   return __usax(a, b);
 }
-// AArch32-LABEL: test_usub16
-// AArch32: call i32 @llvm.arm.usub16
+
+// AArch32-LABEL: @test_usub16(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.usub16(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 uint16x2_t test_usub16(uint16x2_t a, uint16x2_t b) {
   return __usub16(a, b);
 }
@@ -660,131 +1292,256 @@ uint16x2_t test_usub16(uint16x2_t a, uint16x2_t b) {
 
 /* 9.5.10 Parallel 16-bit multiplications */
 #if __ARM_FEATURE_SIMD32
-// AArch32-LABEL: test_smlad
-// AArch32: call i32 @llvm.arm.smlad
+// AArch32-LABEL: @test_smlad(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smlad(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smlad(int16x2_t a, int16x2_t b, int32_t c) {
   return __smlad(a, b, c);
 }
-// AArch32-LABEL: test_smladx
-// AArch32: call i32 @llvm.arm.smladx
+
+// AArch32-LABEL: @test_smladx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smladx(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smladx(int16x2_t a, int16x2_t b, int32_t c) {
   return __smladx(a, b, c);
 }
-// AArch32-LABEL: test_smlald
-// AArch32: call i64 @llvm.arm.smlald
+
+// AArch32-LABEL: @test_smlald(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i64 @llvm.arm.smlald(i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i64 [[TMP0]]
+//
 int64_t test_smlald(int16x2_t a, int16x2_t b, int64_t c) {
   return __smlald(a, b, c);
 }
-// AArch32-LABEL: test_smlaldx
-// AArch32: call i64 @llvm.arm.smlaldx
+
+// AArch32-LABEL: @test_smlaldx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i64 @llvm.arm.smlaldx(i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i64 [[TMP0]]
+//
 int64_t test_smlaldx(int16x2_t a, int16x2_t b, int64_t c) {
   return __smlaldx(a, b, c);
 }
-// AArch32-LABEL: test_smlsd
-// AArch32: call i32 @llvm.arm.smlsd
+
+// AArch32-LABEL: @test_smlsd(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smlsd(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smlsd(int16x2_t a, int16x2_t b, int32_t c) {
   return __smlsd(a, b, c);
 }
-// AArch32-LABEL: test_smlsdx
-// AArch32: call i32 @llvm.arm.smlsdx
+
+// AArch32-LABEL: @test_smlsdx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smlsdx(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smlsdx(int16x2_t a, int16x2_t b, int32_t c) {
   return __smlsdx(a, b, c);
 }
-// AArch32-LABEL: test_smlsld
-// AArch32: call i64 @llvm.arm.smlsld
+
+// AArch32-LABEL: @test_smlsld(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i64 @llvm.arm.smlsld(i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i64 [[TMP0]]
+//
 int64_t test_smlsld(int16x2_t a, int16x2_t b, int64_t c) {
   return __smlsld(a, b, c);
 }
-// AArch32-LABEL: test_smlsldx
-// AArch32: call i64 @llvm.arm.smlsldx
+
+// AArch32-LABEL: @test_smlsldx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i64 @llvm.arm.smlsldx(i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i64 [[TMP0]]
+//
 int64_t test_smlsldx(int16x2_t a, int16x2_t b, int64_t c) {
   return __smlsldx(a, b, c);
 }
-// AArch32-LABEL: test_smuad
-// AArch32: call i32 @llvm.arm.smuad
+
+// AArch32-LABEL: @test_smuad(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smuad(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smuad(int16x2_t a, int16x2_t b) {
   return __smuad(a, b);
 }
-// AArch32-LABEL: test_smuadx
-// AArch32: call i32 @llvm.arm.smuadx
+
+// AArch32-LABEL: @test_smuadx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smuadx(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smuadx(int16x2_t a, int16x2_t b) {
   return __smuadx(a, b);
 }
-// AArch32-LABEL: test_smusd
-// AArch32: call i32 @llvm.arm.smusd
+
+// AArch32-LABEL: @test_smusd(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smusd(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smusd(int16x2_t a, int16x2_t b) {
   return __smusd(a, b);
 }
-// AArch32-LABEL: test_smusdx
-// AArch32: call i32 @llvm.arm.smusdx
+
+// AArch32-LABEL: @test_smusdx(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.smusdx(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_smusdx(int16x2_t a, int16x2_t b) {
   return __smusdx(a, b);
 }
 #endif
 
 /* 9.7 CRC32 intrinsics */
-// ARM-LABEL: test_crc32b
-// AArch32: call i32 @llvm.arm.crc32b
-// AArch64: call i32 @llvm.aarch64.crc32b
+// AArch32-LABEL: @test_crc32b(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = zext i8 [[B:%.*]] to i32
+// AArch32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.crc32b(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP1]]
+//
+// AArch64-LABEL: @test_crc32b(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = zext i8 [[B:%.*]] to i32
+// AArch64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32b(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP1]]
+//
 uint32_t test_crc32b(uint32_t a, uint8_t b) {
   return __crc32b(a, b);
 }
 
-// ARM-LABEL: test_crc32h
-// AArch32: call i32 @llvm.arm.crc32h
-// AArch64: call i32 @llvm.aarch64.crc32h
+// AArch32-LABEL: @test_crc32h(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = zext i16 [[B:%.*]] to i32
+// AArch32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.crc32h(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP1]]
+//
+// AArch64-LABEL: @test_crc32h(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = zext i16 [[B:%.*]] to i32
+// AArch64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32h(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP1]]
+//
 uint32_t test_crc32h(uint32_t a, uint16_t b) {
   return __crc32h(a, b);
 }
 
-// ARM-LABEL: test_crc32w
-// AArch32: call i32 @llvm.arm.crc32w
-// AArch64: call i32 @llvm.aarch64.crc32w
+// AArch32-LABEL: @test_crc32w(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.crc32w(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
+// AArch64-LABEL: @test_crc32w(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32w(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP0]]
+//
 uint32_t test_crc32w(uint32_t a, uint32_t b) {
   return __crc32w(a, b);
 }
 
-// ARM-LABEL: test_crc32d
-// AArch32: call i32 @llvm.arm.crc32w
-// AArch32: call i32 @llvm.arm.crc32w
-// AArch64: call i32 @llvm.aarch64.crc32x
+// AArch32-LABEL: @test_crc32d(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = trunc i64 [[B:%.*]] to i32
+// AArch32-NEXT:    [[TMP1:%.*]] = lshr i64 [[B]], 32
+// AArch32-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
+// AArch32-NEXT:    [[TMP3:%.*]] = call i32 @llvm.arm.crc32w(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR1]]
+// AArch32-NEXT:    [[TMP4:%.*]] = call i32 @llvm.arm.crc32w(i32 [[TMP3]], i32 [[TMP2]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP4]]
+//
+// AArch64-LABEL: @test_crc32d(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32x(i32 [[A:%.*]], i64 [[B:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP0]]
+//
 uint32_t test_crc32d(uint32_t a, uint64_t b) {
   return __crc32d(a, b);
 }
 
-// ARM-LABEL: test_crc32cb
-// AArch32: call i32 @llvm.arm.crc32cb
-// AArch64: call i32 @llvm.aarch64.crc32cb
+// AArch32-LABEL: @test_crc32cb(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = zext i8 [[B:%.*]] to i32
+// AArch32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.crc32cb(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP1]]
+//
+// AArch64-LABEL: @test_crc32cb(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = zext i8 [[B:%.*]] to i32
+// AArch64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32cb(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP1]]
+//
 uint32_t test_crc32cb(uint32_t a, uint8_t b) {
   return __crc32cb(a, b);
 }
 
-// ARM-LABEL: test_crc32ch
-// AArch32: call i32 @llvm.arm.crc32ch
-// AArch64: call i32 @llvm.aarch64.crc32ch
+// AArch32-LABEL: @test_crc32ch(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = zext i16 [[B:%.*]] to i32
+// AArch32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.crc32ch(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP1]]
+//
+// AArch64-LABEL: @test_crc32ch(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = zext i16 [[B:%.*]] to i32
+// AArch64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32ch(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP1]]
+//
 uint32_t test_crc32ch(uint32_t a, uint16_t b) {
   return __crc32ch(a, b);
 }
 
-// ARM-LABEL: test_crc32cw
-// AArch32: call i32 @llvm.arm.crc32cw
-// AArch64: call i32 @llvm.aarch64.crc32cw
+// AArch32-LABEL: @test_crc32cw(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.arm.crc32cw(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
+// AArch64-LABEL: @test_crc32cw(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32cw(i32 [[A:%.*]], i32 [[B:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP0]]
+//
 uint32_t test_crc32cw(uint32_t a, uint32_t b) {
   return __crc32cw(a, b);
 }
 
-// ARM-LABEL: test_crc32cd
-// AArch32: call i32 @llvm.arm.crc32cw
-// AArch32: call i32 @llvm.arm.crc32cw
-// AArch64: call i32 @llvm.aarch64.crc32cx
+// AArch32-LABEL: @test_crc32cd(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = trunc i64 [[B:%.*]] to i32
+// AArch32-NEXT:    [[TMP1:%.*]] = lshr i64 [[B]], 32
+// AArch32-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
+// AArch32-NEXT:    [[TMP3:%.*]] = call i32 @llvm.arm.crc32cw(i32 [[A:%.*]], i32 [[TMP0]]) [[ATTR1]]
+// AArch32-NEXT:    [[TMP4:%.*]] = call i32 @llvm.arm.crc32cw(i32 [[TMP3]], i32 [[TMP2]]) [[ATTR1]]
+// AArch32-NEXT:    ret i32 [[TMP4]]
+//
+// AArch64-LABEL: @test_crc32cd(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32cx(i32 [[A:%.*]], i64 [[B:%.*]]) [[ATTR3]]
+// AArch64-NEXT:    ret i32 [[TMP0]]
+//
 uint32_t test_crc32cd(uint32_t a, uint64_t b) {
   return __crc32cd(a, b);
 }
 
 /* 10.1 Special register intrinsics */
-// ARM-LABEL: test_rsr
-// AArch64: call i64 @llvm.read_register.i64(metadata ![[M0:[0-9]]])
-// AArch32: call i32 @llvm.read_register.i32(metadata ![[M2:[0-9]]])
+// AArch32-LABEL: @test_rsr(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata !5)
+// AArch32-NEXT:    ret i32 [[TMP0]]
+//
+// AArch64-LABEL: @test_rsr(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata !8)
+// AArch64-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
+// AArch64-NEXT:    ret i32 [[TMP1]]
+//
 uint32_t test_rsr() {
 #ifdef __ARM_32BIT_STATE
   return __arm_rsr("cp1:2:c3:c4:5");
@@ -793,9 +1550,16 @@ uint32_t test_rsr() {
 #endif
 }
 
-// ARM-LABEL: test_rsr64
-// AArch64: call i64 @llvm.read_register.i64(metadata ![[M0:[0-9]]])
-// AArch32: call i64 @llvm.read_register.i64(metadata ![[M3:[0-9]]])
+// AArch32-LABEL: @test_rsr64(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata !6)
+// AArch32-NEXT:    ret i64 [[TMP0]]
+//
+// AArch64-LABEL: @test_rsr64(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata !8)
+// AArch64-NEXT:    ret i64 [[TMP0]]
+//
 uint64_t test_rsr64() {
 #ifdef __ARM_32BIT_STATE
   return __arm_rsr64("cp1:2:c3");
@@ -804,16 +1568,33 @@ uint64_t test_rsr64() {
 #endif
 }
 
-// ARM-LABEL: test_rsrp
-// AArch64: call i64 @llvm.read_register.i64(metadata ![[M1:[0-9]]])
-// AArch32: call i32 @llvm.read_register.i32(metadata ![[M4:[0-9]]])
+// AArch32-LABEL: @test_rsrp(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata !7)
+// AArch32-NEXT:    [[TMP1:%.*]] = inttoptr i32 [[TMP0]] to i8*
+// AArch32-NEXT:    ret i8* [[TMP1]]
+//
+// AArch64-LABEL: @test_rsrp(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata !9)
+// AArch64-NEXT:    [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to i8*
+// AArch64-NEXT:    ret i8* [[TMP1]]
+//
 void *test_rsrp() {
   return __arm_rsrp("sysreg");
 }
 
-// ARM-LABEL: test_wsr
-// AArch64: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 %{{.*}})
-// AArch32: call void @llvm.write_register.i32(metadata ![[M2:[0-9]]], i32 %{{.*}})
+// AArch32-LABEL: @test_wsr(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.write_register.i32(metadata !5, i32 [[V:%.*]])
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_wsr(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = zext i32 [[V:%.*]] to i64
+// AArch64-NEXT:    call void @llvm.write_register.i64(metadata !8, i64 [[TMP0]])
+// AArch64-NEXT:    ret void
+//
 void test_wsr(uint32_t v) {
 #ifdef __ARM_32BIT_STATE
   __arm_wsr("cp1:2:c3:c4:5", v);
@@ -822,9 +1603,16 @@ void test_wsr(uint32_t v) {
 #endif
 }
 
-// ARM-LABEL: test_wsr64
-// AArch64: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 %{{.*}})
-// AArch32: call void @llvm.write_register.i64(metadata ![[M3:[0-9]]], i64 %{{.*}})
+// AArch32-LABEL: @test_wsr64(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    call void @llvm.write_register.i64(metadata !6, i64 [[V:%.*]])
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_wsr64(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    call void @llvm.write_register.i64(metadata !8, i64 [[V:%.*]])
+// AArch64-NEXT:    ret void
+//
 void test_wsr64(uint64_t v) {
 #ifdef __ARM_32BIT_STATE
   __arm_wsr64("cp1:2:c3", v);
@@ -833,18 +1621,41 @@ void test_wsr64(uint64_t v) {
 #endif
 }
 
-// ARM-LABEL: test_wsrp
-// AArch64: call void @llvm.write_register.i64(metadata ![[M1:[0-9]]], i64 %{{.*}})
-// AArch32: call void @llvm.write_register.i32(metadata ![[M4:[0-9]]], i32 %{{.*}})
+// AArch32-LABEL: @test_wsrp(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[TMP0:%.*]] = ptrtoint i8* [[V:%.*]] to i32
+// AArch32-NEXT:    call void @llvm.write_register.i32(metadata !7, i32 [[TMP0]])
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_wsrp(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[TMP0:%.*]] = ptrtoint i8* [[V:%.*]] to i64
+// AArch64-NEXT:    call void @llvm.write_register.i64(metadata !9, i64 [[TMP0]])
+// AArch64-NEXT:    ret void
+//
 void test_wsrp(void *v) {
   __arm_wsrp("sysreg", v);
 }
 
-// ARM-LABEL: test_rsrf
-// AArch64: call i64 @llvm.read_register.i64(metadata ![[M0:[0-9]]])
-// AArch32: call i32 @llvm.read_register.i32(metadata ![[M2:[0-9]]])
-// ARM-NOT: uitofp
-// ARM: bitcast
+// AArch32-LABEL: @test_rsrf(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[REF_TMP:%.*]] = alloca i32, align 4
+// AArch32-NEXT:    [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata !5)
+// AArch32-NEXT:    store i32 [[TMP0]], i32* [[REF_TMP]], align 4
+// AArch32-NEXT:    [[TMP1:%.*]] = bitcast i32* [[REF_TMP]] to float*
+// AArch32-NEXT:    [[TMP2:%.*]] = load float, float* [[TMP1]], align 4
+// AArch32-NEXT:    ret float [[TMP2]]
+//
+// AArch64-LABEL: @test_rsrf(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[REF_TMP:%.*]] = alloca i32, align 4
+// AArch64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata !8)
+// AArch64-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
+// AArch64-NEXT:    store i32 [[TMP1]], i32* [[REF_TMP]], align 4
+// AArch64-NEXT:    [[TMP2:%.*]] = bitcast i32* [[REF_TMP]] to float*
+// AArch64-NEXT:    [[TMP3:%.*]] = load float, float* [[TMP2]], align 4
+// AArch64-NEXT:    ret float [[TMP3]]
+//
 float test_rsrf() {
 #ifdef __ARM_32BIT_STATE
   return __arm_rsrf("cp1:2:c3:c4:5");
@@ -852,11 +1663,25 @@ float test_rsrf() {
   return __arm_rsrf("1:2:3:4:5");
 #endif
 }
-// ARM-LABEL: test_rsrf64
-// AArch64: call i64 @llvm.read_register.i64(metadata ![[M0:[0-9]]])
-// AArch32: call i64 @llvm.read_register.i64(metadata ![[M3:[0-9]]])
-// ARM-NOT: uitofp
-// ARM: bitcast
+
+// AArch32-LABEL: @test_rsrf64(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[REF_TMP:%.*]] = alloca i64, align 8
+// AArch32-NEXT:    [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata !6)
+// AArch32-NEXT:    store i64 [[TMP0]], i64* [[REF_TMP]], align 8
+// AArch32-NEXT:    [[TMP1:%.*]] = bitcast i64* [[REF_TMP]] to double*
+// AArch32-NEXT:    [[TMP2:%.*]] = load double, double* [[TMP1]], align 8
+// AArch32-NEXT:    ret double [[TMP2]]
+//
+// AArch64-LABEL: @test_rsrf64(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[REF_TMP:%.*]] = alloca i64, align 8
+// AArch64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata !8)
+// AArch64-NEXT:    store i64 [[TMP0]], i64* [[REF_TMP]], align 8
+// AArch64-NEXT:    [[TMP1:%.*]] = bitcast i64* [[REF_TMP]] to double*
+// AArch64-NEXT:    [[TMP2:%.*]] = load double, double* [[TMP1]], align 8
+// AArch64-NEXT:    ret double [[TMP2]]
+//
 double test_rsrf64() {
 #ifdef __ARM_32BIT_STATE
   return __arm_rsrf64("cp1:2:c3");
@@ -864,11 +1689,26 @@ double test_rsrf64() {
   return __arm_rsrf64("1:2:3:4:5");
 #endif
 }
-// ARM-LABEL: test_wsrf
-// ARM-NOT: fptoui
-// ARM: bitcast
-// AArch64: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 %{{.*}})
-// AArch32: call void @llvm.write_register.i32(metadata ![[M2:[0-9]]], i32 %{{.*}})
+
+// AArch32-LABEL: @test_wsrf(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[V_ADDR:%.*]] = alloca float, align 4
+// AArch32-NEXT:    store float [[V:%.*]], float* [[V_ADDR]], align 4
+// AArch32-NEXT:    [[TMP0:%.*]] = bitcast float* [[V_ADDR]] to i32*
+// AArch32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
+// AArch32-NEXT:    call void @llvm.write_register.i32(metadata !5, i32 [[TMP1]])
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_wsrf(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[V_ADDR:%.*]] = alloca float, align 4
+// AArch64-NEXT:    store float [[V:%.*]], float* [[V_ADDR]], align 4
+// AArch64-NEXT:    [[TMP0:%.*]] = bitcast float* [[V_ADDR]] to i32*
+// AArch64-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
+// AArch64-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+// AArch64-NEXT:    call void @llvm.write_register.i64(metadata !8, i64 [[TMP2]])
+// AArch64-NEXT:    ret void
+//
 void test_wsrf(float v) {
 #ifdef __ARM_32BIT_STATE
   __arm_wsrf("cp1:2:c3:c4:5", v);
@@ -876,11 +1716,25 @@ void test_wsrf(float v) {
   __arm_wsrf("1:2:3:4:5", v);
 #endif
 }
-// ARM-LABEL: test_wsrf64
-// ARM-NOT: fptoui
-// ARM: bitcast
-// AArch64: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 %{{.*}})
-// AArch32: call void @llvm.write_register.i64(metadata ![[M3:[0-9]]], i64 %{{.*}})
+
+// AArch32-LABEL: @test_wsrf64(
+// AArch32-NEXT:  entry:
+// AArch32-NEXT:    [[V_ADDR:%.*]] = alloca double, align 8
+// AArch32-NEXT:    store double [[V:%.*]], double* [[V_ADDR]], align 8
+// AArch32-NEXT:    [[TMP0:%.*]] = bitcast double* [[V_ADDR]] to i64*
+// AArch32-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
+// AArch32-NEXT:    call void @llvm.write_register.i64(metadata !6, i64 [[TMP1]])
+// AArch32-NEXT:    ret void
+//
+// AArch64-LABEL: @test_wsrf64(
+// AArch64-NEXT:  entry:
+// AArch64-NEXT:    [[V_ADDR:%.*]] = alloca double, align 8
+// AArch64-NEXT:    store double [[V:%.*]], double* [[V_ADDR]], align 8
+// AArch64-NEXT:    [[TMP0:%.*]] = bitcast double* [[V_ADDR]] to i64*
+// AArch64-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
+// AArch64-NEXT:    call void @llvm.write_register.i64(metadata !8, i64 [[TMP1]])
+// AArch64-NEXT:    ret void
+//
 void test_wsrf64(double v) {
 #ifdef __ARM_32BIT_STATE
   __arm_wsrf64("cp1:2:c3", v);
@@ -889,17 +1743,20 @@ void test_wsrf64(double v) {
 #endif
 }
 
-// AArch32: ![[M2]] = !{!"cp1:2:c3:c4:5"}
-// AArch32: ![[M3]] = !{!"cp1:2:c3"}
-// AArch32: ![[M4]] = !{!"sysreg"}
-
-// AArch64: ![[M0]] = !{!"1:2:3:4:5"}
-// AArch64: ![[M1]] = !{!"sysreg"}
-
-// AArch64-v8_3-LABEL: @test_jcvt(
-// AArch64-v8_3: call i32 @llvm.aarch64.fjcvtzs
 #ifdef __ARM_64BIT_STATE
+// AArch6483-LABEL: @test_jcvt(
+// AArch6483-NEXT:  entry:
+// AArch6483-NEXT:    [[TMP0:%.*]] = call i32 @llvm.aarch64.fjcvtzs(double [[V:%.*]]) [[ATTR3:#.*]]
+// AArch6483-NEXT:    ret i32 [[TMP0]]
+//
 int32_t test_jcvt(double v) {
   return __jcvt(v);
 }
 #endif
+
+// AArch32: !5 = !{!"cp1:2:c3:c4:5"}
+// AArch32: !6 = !{!"cp1:2:c3"}
+// AArch32: !7 = !{!"sysreg"}
+
+// AArch64: !8 = !{!"1:2:3:4:5"}
+// AArch64: !9 = !{!"sysreg"}
\ No newline at end of file


        


More information about the cfe-commits mailing list