[clang] d38c9d3 - [NFC][clang][SVE] Auto-generate SVE operator tests.

David Truby via cfe-commits cfe-commits at lists.llvm.org
Wed Mar 16 09:40:36 PDT 2022


Author: David Truby
Date: 2022-03-16T16:39:27Z
New Revision: d38c9d38348fa84042e272803a91c098483a3132

URL: https://github.com/llvm/llvm-project/commit/d38c9d38348fa84042e272803a91c098483a3132
DIFF: https://github.com/llvm/llvm-project/commit/d38c9d38348fa84042e272803a91c098483a3132.diff

LOG: [NFC][clang][SVE] Auto-generate SVE operator tests.

Added: 
    

Modified: 
    clang/test/CodeGen/aarch64-sve-vector-ops.c

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/aarch64-sve-vector-ops.c b/clang/test/CodeGen/aarch64-sve-vector-ops.c
index 7fb307cb497b0..57d943ec7891d 100644
--- a/clang/test/CodeGen/aarch64-sve-vector-ops.c
+++ b/clang/test/CodeGen/aarch64-sve-vector-ops.c
@@ -1,6 +1,7 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
-// RUN: -fallow-half-arguments-and-returns \
-// RUN:  -O1 -emit-llvm -o - %s 2>&1 | FileCheck %s
+// RUN: -fallow-half-arguments-and-returns -disable-O0-optnone \
+// RUN:  -emit-llvm -o - %s | opt -S -sroa | FileCheck %s
 
 // REQUIRES: aarch64-registered-target
 
@@ -8,738 +9,946 @@
 
 // ADDITION
 
+// CHECK-LABEL: @add_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[ADD]]
+//
 svint8_t add_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: add_i8
-  // CHECK: %add = add <vscale x 16 x i8> %b, %a
-  // CHECK-NEXT: ret <vscale x 16 x i8> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[ADD]]
+//
 svint16_t add_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: add_i16
-  // CHECK: %add = add <vscale x 8 x i16> %b, %a
-  // CHECK-NEXT: ret <vscale x 8 x i16> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
+//
 svint32_t add_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: add_i32
-  // CHECK: %add = add <vscale x 4 x i32> %b, %a
-  // CHECK-NEXT: ret <vscale x 4 x i32> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[ADD]]
+//
 svint64_t add_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: add_i64
-  // CHECK: %add = add <vscale x 2 x i64> %b, %a
-  // CHECK-NEXT: ret <vscale x 2 x i64> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[ADD]]
+//
 svuint8_t add_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: add_u8
-  // CHECK: %add = add <vscale x 16 x i8> %b, %a
-  // CHECK-NEXT: ret <vscale x 16 x i8> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[ADD]]
+//
 svuint16_t add_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: add_u16
-  // CHECK: %add = add <vscale x 8 x i16> %b, %a
-  // CHECK-NEXT: ret <vscale x 8 x i16> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
+//
 svuint32_t add_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: add_u32
-  // CHECK: %add = add <vscale x 4 x i32> %b, %a
-  // CHECK-NEXT: ret <vscale x 4 x i32> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[ADD]]
+//
 svuint64_t add_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: add_u64
-  // CHECK: %add = add <vscale x 2 x i64> %b, %a
-  // CHECK-NEXT: ret <vscale x 2 x i64> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x half> [[ADD]]
+//
 svfloat16_t add_f16(svfloat16_t a, svfloat16_t b) {
-  // CHECK-LABEL: add_f16
-  // CHECK: %add = fadd <vscale x 8 x half> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x half> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = fadd <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x float> [[ADD]]
+//
 svfloat32_t add_f32(svfloat32_t a, svfloat32_t b) {
-  // CHECK-LABEL: add_f32
-  // CHECK: %add = fadd <vscale x 4 x float> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x float> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[ADD]]
+//
 svfloat64_t add_f64(svfloat64_t a, svfloat64_t b) {
-  // CHECK-LABEL: add_f64
-  // CHECK: %add = fadd <vscale x 2 x double> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x double> %add
   return a + b;
 }
 
+// CHECK-LABEL: @add_inplace_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[ADD]]
+//
 svint8_t add_inplace_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: add_inplace_i8
-  // CHECK: %add = add <vscale x 16 x i8> %b, %a
-  // CHECK-NEXT: ret <vscale x 16 x i8> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[ADD]]
+//
 svint16_t add_inplace_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: add_inplace_i16
-  // CHECK: %add = add <vscale x 8 x i16> %b, %a
-  // CHECK-NEXT: ret <vscale x 8 x i16> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
+//
 svint32_t add_inplace_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: add_inplace_i32
-  // CHECK: %add = add <vscale x 4 x i32> %b, %a
-  // CHECK-NEXT: ret <vscale x 4 x i32> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[ADD]]
+//
 svint64_t add_inplace_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: add_inplace_i64
-  // CHECK: %add = add <vscale x 2 x i64> %b, %a
-  // CHECK-NEXT: ret <vscale x 2 x i64> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[ADD]]
+//
 svuint8_t add_inplace_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: add_inplace_u8
-  // CHECK: %add = add <vscale x 16 x i8> %b, %a
-  // CHECK-NEXT: ret <vscale x 16 x i8> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[ADD]]
+//
 svuint16_t add_inplace_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: add_inplace_u16
-  // CHECK: %add = add <vscale x 8 x i16> %b, %a
-  // CHECK-NEXT: ret <vscale x 8 x i16> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
+//
 svuint32_t add_inplace_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: add_inplace_u32
-  // CHECK: %add = add <vscale x 4 x i32> %b, %a
-  // CHECK-NEXT: ret <vscale x 4 x i32> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[ADD]]
+//
 svuint64_t add_inplace_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: add_inplace_u64
-  // CHECK: %add = add <vscale x 2 x i64> %b, %a
-  // CHECK-NEXT: ret <vscale x 2 x i64> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x half> [[ADD]]
+//
 svfloat16_t add_inplace_f16(svfloat16_t a, svfloat16_t b) {
-  // CHECK-LABEL: add_inplace_f16
-  // CHECK: %add = fadd <vscale x 8 x half> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x half> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = fadd <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x float> [[ADD]]
+//
 svfloat32_t add_inplace_f32(svfloat32_t a, svfloat32_t b) {
-  // CHECK-LABEL: add_inplace_f32
-  // CHECK: %add = fadd <vscale x 4 x float> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x float> %add
   return a += b;
 }
 
+// CHECK-LABEL: @add_inplace_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[ADD]]
+//
 svfloat64_t add_inplace_f64(svfloat64_t a, svfloat64_t b) {
-  // CHECK-LABEL: add_inplace_f64
-  // CHECK: %add = fadd <vscale x 2 x double> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x double> %add
   return a += b;
 }
 
 // SUBTRACTION
 
+// CHECK-LABEL: @sub_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[SUB]]
+//
 svint8_t sub_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: sub_i8
-  // CHECK: %sub = sub <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[SUB]]
+//
 svint16_t sub_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: sub_i16
-  // CHECK: %sub = sub <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[SUB]]
+//
 svint32_t sub_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: sub_i32
-  // CHECK: %sub = sub <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[SUB]]
+//
 svint64_t sub_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: sub_i64
-  // CHECK: %sub = sub <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[SUB]]
+//
 svuint8_t sub_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: sub_u8
-  // CHECK: %sub = sub <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[SUB]]
+//
 svuint16_t sub_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: sub_u16
-  // CHECK: %sub = sub <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[SUB]]
+//
 svuint32_t sub_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: sub_u32
-  // CHECK: %sub = sub <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[SUB]]
+//
 svuint64_t sub_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: sub_u64
-  // CHECK: %sub = sub <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = fsub <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x half> [[SUB]]
+//
 svfloat16_t sub_f16(svfloat16_t a, svfloat16_t b) {
-  // CHECK-LABEL: sub_f16
-  // CHECK: %sub = fsub <vscale x 8 x half> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x half> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = fsub <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x float> [[SUB]]
+//
 svfloat32_t sub_f32(svfloat32_t a, svfloat32_t b) {
-  // CHECK-LABEL: sub_f32
-  // CHECK: %sub = fsub <vscale x 4 x float> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x float> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[SUB]]
+//
 svfloat64_t sub_f64(svfloat64_t a, svfloat64_t b) {
-  // CHECK-LABEL: sub_f64
-  // CHECK: %sub = fsub <vscale x 2 x double> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x double> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[SUB]]
+//
 svint8_t sub_inplace_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: sub_inplace_i8
-  // CHECK: %sub = sub <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[SUB]]
+//
 svint16_t sub_inplace_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: sub_inplace_i16
-  // CHECK: %sub = sub <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[SUB]]
+//
 svint32_t sub_inplace_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: sub_inplace_i32
-  // CHECK: %sub = sub <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[SUB]]
+//
 svint64_t sub_inplace_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: sub_inplace_i64
-  // CHECK: %sub = sub <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[SUB]]
+//
 svuint8_t sub_inplace_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: sub_inplace_u8
-  // CHECK: %sub = sub <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[SUB]]
+//
 svuint16_t sub_inplace_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: sub_inplace_u16
-  // CHECK: %sub = sub <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[SUB]]
+//
 svuint32_t sub_inplace_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: sub_inplace_u32
-  // CHECK: %sub = sub <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[SUB]]
+//
 svuint64_t sub_inplace_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: sub_inplace_u64
-  // CHECK: %sub = sub <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = fsub <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x half> [[SUB]]
+//
 svfloat16_t sub_inplace_f16(svfloat16_t a, svfloat16_t b) {
-  // CHECK-LABEL: sub_inplace_f16
-  // CHECK: %sub = fsub <vscale x 8 x half> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x half> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = fsub <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x float> [[SUB]]
+//
 svfloat32_t sub_inplace_f32(svfloat32_t a, svfloat32_t b) {
-  // CHECK-LABEL: sub_inplace_f32
-  // CHECK: %sub = fsub <vscale x 4 x float> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x float> %sub
   return a - b;
 }
 
+// CHECK-LABEL: @sub_inplace_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SUB:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[SUB]]
+//
 svfloat64_t sub_inplace_f64(svfloat64_t a, svfloat64_t b) {
-  // CHECK-LABEL: sub_inplace_f64
-  // CHECK: %sub = fsub <vscale x 2 x double> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x double> %sub
   return a - b;
 }
 
 // MULTIPLICATION
 
+// CHECK-LABEL: @mul_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[MUL]]
+//
 svint8_t mul_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: mul_i8
-  // CHECK: %mul = mul <vscale x 16 x i8> %b, %a
-  // CHECK-NEXT: ret <vscale x 16 x i8> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[MUL]]
+//
 svint16_t mul_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: mul_i16
-  // CHECK: %mul = mul <vscale x 8 x i16> %b, %a
-  // CHECK-NEXT: ret <vscale x 8 x i16> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[MUL]]
+//
 svint32_t mul_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: mul_i32
-  // CHECK: %mul = mul <vscale x 4 x i32> %b, %a
-  // CHECK-NEXT: ret <vscale x 4 x i32> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[MUL]]
+//
 svint64_t mul_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: mul_i64
-  // CHECK: %mul = mul <vscale x 2 x i64> %b, %a
-  // CHECK-NEXT: ret <vscale x 2 x i64> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[MUL]]
+//
 svuint8_t mul_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: mul_u8
-  // CHECK: %mul = mul <vscale x 16 x i8> %b, %a
-  // CHECK-NEXT: ret <vscale x 16 x i8> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[MUL]]
+//
 svuint16_t mul_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: mul_u16
-  // CHECK: %mul = mul <vscale x 8 x i16> %b, %a
-  // CHECK-NEXT: ret <vscale x 8 x i16> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[MUL]]
+//
 svuint32_t mul_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: mul_u32
-  // CHECK: %mul = mul <vscale x 4 x i32> %b, %a
-  // CHECK-NEXT: ret <vscale x 4 x i32> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[MUL]]
+//
 svuint64_t mul_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: mul_u64
-  // CHECK: %mul = mul <vscale x 2 x i64> %b, %a
-  // CHECK-NEXT: ret <vscale x 2 x i64> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = fmul <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x half> [[MUL]]
+//
 svfloat16_t mul_f16(svfloat16_t a, svfloat16_t b) {
-  // CHECK-LABEL: mul_f16
-  // CHECK: %mul = fmul <vscale x 8 x half> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x half> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x float> [[MUL]]
+//
 svfloat32_t mul_f32(svfloat32_t a, svfloat32_t b) {
-  // CHECK-LABEL: mul_f32
-  // CHECK: %mul = fmul <vscale x 4 x float> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x float> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[MUL]]
+//
 svfloat64_t mul_f64(svfloat64_t a, svfloat64_t b) {
-  // CHECK-LABEL: mul_f64
-  // CHECK: %mul = fmul <vscale x 2 x double> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x double> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[MUL]]
+//
 svint8_t mul_inplace_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: mul_inplace_i8
-  // CHECK: %mul = mul <vscale x 16 x i8> %b, %a
-  // CHECK-NEXT: ret <vscale x 16 x i8> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[MUL]]
+//
 svint16_t mul_inplace_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: mul_inplace_i16
-  // CHECK: %mul = mul <vscale x 8 x i16> %b, %a
-  // CHECK-NEXT: ret <vscale x 8 x i16> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[MUL]]
+//
 svint32_t mul_inplace_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: mul_inplace_i32
-  // CHECK: %mul = mul <vscale x 4 x i32> %b, %a
-  // CHECK-NEXT: ret <vscale x 4 x i32> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[MUL]]
+//
 svint64_t mul_inplace_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: mul_inplace_i64
-  // CHECK: %mul = mul <vscale x 2 x i64> %b, %a
-  // CHECK-NEXT: ret <vscale x 2 x i64> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[MUL]]
+//
 svuint8_t mul_inplace_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: mul_inplace_u8
-  // CHECK: %mul = mul <vscale x 16 x i8> %b, %a
-  // CHECK-NEXT: ret <vscale x 16 x i8> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[MUL]]
+//
 svuint16_t mul_inplace_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: mul_inplace_u16
-  // CHECK: %mul = mul <vscale x 8 x i16> %b, %a
-  // CHECK-NEXT: ret <vscale x 8 x i16> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[MUL]]
+//
 svuint32_t mul_inplace_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: mul_inplace_u32
-  // CHECK: %mul = mul <vscale x 4 x i32> %b, %a
-  // CHECK-NEXT: ret <vscale x 4 x i32> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[MUL]]
+//
 svuint64_t mul_inplace_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: mul_inplace_u64
-  // CHECK: %mul = mul <vscale x 2 x i64> %b, %a
-  // CHECK-NEXT: ret <vscale x 2 x i64> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = fmul <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x half> [[MUL]]
+//
 svfloat16_t mul_inplace_f16(svfloat16_t a, svfloat16_t b) {
-  // CHECK-LABEL: mul_inplace_f16
-  // CHECK: %mul = fmul <vscale x 8 x half> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x half> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x float> [[MUL]]
+//
 svfloat32_t mul_inplace_f32(svfloat32_t a, svfloat32_t b) {
-  // CHECK-LABEL: mul_inplace_f32
-  // CHECK: %mul = fmul <vscale x 4 x float> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x float> %mul
   return a * b;
 }
 
+// CHECK-LABEL: @mul_inplace_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MUL:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[MUL]]
+//
 svfloat64_t mul_inplace_f64(svfloat64_t a, svfloat64_t b) {
-  // CHECK-LABEL: mul_inplace_f64
-  // CHECK: %mul = fmul <vscale x 2 x double> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x double> %mul
   return a * b;
 }
 
 // DIVISION
 
+// CHECK-LABEL: @div_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = sdiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[DIV]]
+//
 svint8_t div_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: div_i8
-  // CHECK: %div = sdiv <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = sdiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[DIV]]
+//
 svint16_t div_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: div_i16
-  // CHECK: %div = sdiv <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = sdiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
+//
 svint32_t div_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: div_i32
-  // CHECK: %div = sdiv <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = sdiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[DIV]]
+//
 svint64_t div_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: div_i64
-  // CHECK: %div = sdiv <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[DIV]]
+//
 svuint8_t div_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: div_u8
-  // CHECK: %div = udiv <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[DIV]]
+//
 svuint16_t div_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: div_u16
-  // CHECK: %div = udiv <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
+//
 svuint32_t div_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: div_u32
-  // CHECK: %div = udiv <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[DIV]]
+//
 svuint64_t div_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: div_u64
-  // CHECK: %div = udiv <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = fdiv <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x half> [[DIV]]
+//
 svfloat16_t div_f16(svfloat16_t a, svfloat16_t b) {
-  // CHECK-LABEL: div_f16
-  // CHECK: %div = fdiv <vscale x 8 x half> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x half> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = fdiv <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x float> [[DIV]]
+//
 svfloat32_t div_f32(svfloat32_t a, svfloat32_t b) {
-  // CHECK-LABEL: div_f32
-  // CHECK: %div = fdiv <vscale x 4 x float> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x float> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = fdiv <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[DIV]]
+//
 svfloat64_t div_f64(svfloat64_t a, svfloat64_t b) {
-  // CHECK-LABEL: div_f64
-  // CHECK: %div = fdiv <vscale x 2 x double> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x double> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = sdiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[DIV]]
+//
 svint8_t div_inplace_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: div_inplace_i8
-  // CHECK: %div = sdiv <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = sdiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[DIV]]
+//
 svint16_t div_inplace_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: div_inplace_i16
-  // CHECK: %div = sdiv <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = sdiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
+//
 svint32_t div_inplace_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: div_inplace_i32
-  // CHECK: %div = sdiv <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = sdiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[DIV]]
+//
 svint64_t div_inplace_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: div_inplace_i64
-  // CHECK: %div = sdiv <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[DIV]]
+//
 svuint8_t div_inplace_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: div_inplace_u8
-  // CHECK: %div = udiv <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[DIV]]
+//
 svuint16_t div_inplace_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: div_inplace_u16
-  // CHECK: %div = udiv <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
+//
 svuint32_t div_inplace_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: div_inplace_u32
-  // CHECK: %div = udiv <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[DIV]]
+//
 svuint64_t div_inplace_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: div_inplace_u64
-  // CHECK: %div = udiv <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = fdiv <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x half> [[DIV]]
+//
 svfloat16_t div_inplace_f16(svfloat16_t a, svfloat16_t b) {
-  // CHECK-LABEL: div_inplace_f16
-  // CHECK: %div = fdiv <vscale x 8 x half> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x half> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = fdiv <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x float> [[DIV]]
+//
 svfloat32_t div_inplace_f32(svfloat32_t a, svfloat32_t b) {
-  // CHECK-LABEL: div_inplace_f32
-  // CHECK: %div = fdiv <vscale x 4 x float> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x float> %div
   return a / b;
 }
 
+// CHECK-LABEL: @div_inplace_f64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DIV:%.*]] = fdiv <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[DIV]]
+//
 svfloat64_t div_inplace_f64(svfloat64_t a, svfloat64_t b) {
-  // CHECK-LABEL: div_inplace_f64
-  // CHECK: %div = fdiv <vscale x 2 x double> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x double> %div
   return a / b;
 }
 
 // REMAINDER
 
+// CHECK-LABEL: @rem_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = srem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[REM]]
+//
 svint8_t rem_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: rem_i8
-  // CHECK: %rem = srem <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = srem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[REM]]
+//
 svint16_t rem_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: rem_i16
-  // CHECK: %rem = srem <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = srem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[REM]]
+//
 svint32_t rem_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: rem_i32
-  // CHECK: %rem = srem <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = srem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[REM]]
+//
 svint64_t rem_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: rem_i64
-  // CHECK: %rem = srem <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = urem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[REM]]
+//
 svuint8_t rem_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: rem_u8
-  // CHECK: %rem = urem <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = urem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[REM]]
+//
 svuint16_t rem_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: rem_u16
-  // CHECK: %rem = urem <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = urem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[REM]]
+//
 svuint32_t rem_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: rem_u32
-  // CHECK: %rem = urem <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = urem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[REM]]
+//
 svuint64_t rem_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: rem_u64
-  // CHECK: %rem = urem <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_inplace_i8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = srem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[REM]]
+//
 svint8_t rem_inplace_i8(svint8_t a, svint8_t b) {
-  // CHECK-LABEL: rem_inplace_i8
-  // CHECK: %rem = srem <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_inplace_i16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = srem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[REM]]
+//
 svint16_t rem_inplace_i16(svint16_t a, svint16_t b) {
-  // CHECK-LABEL: rem_inplace_i16
-  // CHECK: %rem = srem <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_inplace_i32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = srem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[REM]]
+//
 svint32_t rem_inplace_i32(svint32_t a, svint32_t b) {
-  // CHECK-LABEL: rem_inplace_i32
-  // CHECK: %rem = srem <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_inplace_i64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = srem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[REM]]
+//
 svint64_t rem_inplace_i64(svint64_t a, svint64_t b) {
-  // CHECK-LABEL: rem_inplace_i64
-  // CHECK: %rem = srem <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_inplace_u8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = urem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[REM]]
+//
 svuint8_t rem_inplace_u8(svuint8_t a, svuint8_t b) {
-  // CHECK-LABEL: rem_inplace_u8
-  // CHECK: %rem = urem <vscale x 16 x i8> %a, %b
-  // CHECK-NEXT: ret <vscale x 16 x i8> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_inplace_u16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = urem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 8 x i16> [[REM]]
+//
 svuint16_t rem_inplace_u16(svuint16_t a, svuint16_t b) {
-  // CHECK-LABEL: rem_inplace_u16
-  // CHECK: %rem = urem <vscale x 8 x i16> %a, %b
-  // CHECK-NEXT: ret <vscale x 8 x i16> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_inplace_u32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = urem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[REM]]
+//
 svuint32_t rem_inplace_u32(svuint32_t a, svuint32_t b) {
-  // CHECK-LABEL: rem_inplace_u32
-  // CHECK: %rem = urem <vscale x 4 x i32> %a, %b
-  // CHECK-NEXT: ret <vscale x 4 x i32> %rem
   return a % b;
 }
 
+// CHECK-LABEL: @rem_inplace_u64(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[REM:%.*]] = urem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[REM]]
+//
 svuint64_t rem_inplace_u64(svuint64_t a, svuint64_t b) {
-  // CHECK-LABEL: rem_inplace_u64
-  // CHECK: %rem = urem <vscale x 2 x i64> %a, %b
-  // CHECK-NEXT: ret <vscale x 2 x i64> %rem
   return a % b;
 }


        


More information about the cfe-commits mailing list