[llvm] 8cb5663 - [AArch64][SVE] Guard bitcast patterns under IsLE predicate

Sander de Smalen via llvm-commits llvm-commits at lists.llvm.org
Tue May 5 05:19:13 PDT 2020


Author: Sander de Smalen
Date: 2020-05-05T13:18:35+01:00
New Revision: 8cb5663abd117f93b1e44c014f527ad44462d8b8

URL: https://github.com/llvm/llvm-project/commit/8cb5663abd117f93b1e44c014f527ad44462d8b8
DIFF: https://github.com/llvm/llvm-project/commit/8cb5663abd117f93b1e44c014f527ad44462d8b8.diff

LOG: [AArch64][SVE] Guard bitcast patterns under IsLE predicate
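
A bitcast between scalable vector types must leave the underlying bits
unchanged, as if the value were stored to memory as one type and reloaded
as the other. On little-endian targets the in-register layout matches the
in-memory layout for every SVE element type, so these bitcasts can simply
reuse the source Z register; on big-endian targets the per-element byte
order differs between element sizes, so an extra REV would be needed (see
the FIXME below). As a minimal illustration in LLVM IR, mirroring the
added test (the function name here is only illustrative):

  define <vscale x 2 x i64> @cast_i32_to_i64(<vscale x 4 x i32> %v) {
    %bc = bitcast <vscale x 4 x i32> %v to <vscale x 2 x i64>
    ret <vscale x 2 x i64> %bc
  }

With -mtriple=aarch64-linux-gnu -mattr=+sve this now lowers to a bare ret
instruction; the big-endian RUN line in the new test uses not --crash to
check that llc currently crashes rather than silently emitting wrong code.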

Reviewed By: efriedma

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79352

Added: 
    llvm/test/CodeGen/AArch64/sve-bitcast.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index f7b600f8f28e..1593a2c0b2f4 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1396,54 +1396,59 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
     def : Pat<(vscale (sve_cntd_imm_neg i32:$imm)), (SUBXrs XZR, (CNTD_XPiI 31, $imm), 0)>;
   }
 
-  def : Pat<(nxv16i8 (bitconvert (nxv8i16 ZPR:$src))), (nxv16i8 ZPR:$src)>;
-  def : Pat<(nxv16i8 (bitconvert (nxv4i32 ZPR:$src))), (nxv16i8 ZPR:$src)>;
-  def : Pat<(nxv16i8 (bitconvert (nxv2i64 ZPR:$src))), (nxv16i8 ZPR:$src)>;
-  def : Pat<(nxv16i8 (bitconvert (nxv8f16 ZPR:$src))), (nxv16i8 ZPR:$src)>;
-  def : Pat<(nxv16i8 (bitconvert (nxv4f32 ZPR:$src))), (nxv16i8 ZPR:$src)>;
-  def : Pat<(nxv16i8 (bitconvert (nxv2f64 ZPR:$src))), (nxv16i8 ZPR:$src)>;
-
-  def : Pat<(nxv8i16 (bitconvert (nxv16i8 ZPR:$src))), (nxv8i16 ZPR:$src)>;
-  def : Pat<(nxv8i16 (bitconvert (nxv4i32 ZPR:$src))), (nxv8i16 ZPR:$src)>;
-  def : Pat<(nxv8i16 (bitconvert (nxv2i64 ZPR:$src))), (nxv8i16 ZPR:$src)>;
-  def : Pat<(nxv8i16 (bitconvert (nxv8f16 ZPR:$src))), (nxv8i16 ZPR:$src)>;
-  def : Pat<(nxv8i16 (bitconvert (nxv4f32 ZPR:$src))), (nxv8i16 ZPR:$src)>;
-  def : Pat<(nxv8i16 (bitconvert (nxv2f64 ZPR:$src))), (nxv8i16 ZPR:$src)>;
-
-  def : Pat<(nxv4i32 (bitconvert (nxv16i8 ZPR:$src))), (nxv4i32 ZPR:$src)>;
-  def : Pat<(nxv4i32 (bitconvert (nxv8i16 ZPR:$src))), (nxv4i32 ZPR:$src)>;
-  def : Pat<(nxv4i32 (bitconvert (nxv2i64 ZPR:$src))), (nxv4i32 ZPR:$src)>;
-  def : Pat<(nxv4i32 (bitconvert (nxv8f16 ZPR:$src))), (nxv4i32 ZPR:$src)>;
-  def : Pat<(nxv4i32 (bitconvert (nxv4f32 ZPR:$src))), (nxv4i32 ZPR:$src)>;
-  def : Pat<(nxv4i32 (bitconvert (nxv2f64 ZPR:$src))), (nxv4i32 ZPR:$src)>;
-
-  def : Pat<(nxv2i64 (bitconvert (nxv16i8 ZPR:$src))), (nxv2i64 ZPR:$src)>;
-  def : Pat<(nxv2i64 (bitconvert (nxv8i16 ZPR:$src))), (nxv2i64 ZPR:$src)>;
-  def : Pat<(nxv2i64 (bitconvert (nxv4i32 ZPR:$src))), (nxv2i64 ZPR:$src)>;
-  def : Pat<(nxv2i64 (bitconvert (nxv8f16 ZPR:$src))), (nxv2i64 ZPR:$src)>;
-  def : Pat<(nxv2i64 (bitconvert (nxv4f32 ZPR:$src))), (nxv2i64 ZPR:$src)>;
-  def : Pat<(nxv2i64 (bitconvert (nxv2f64 ZPR:$src))), (nxv2i64 ZPR:$src)>;
-
-  def : Pat<(nxv8f16 (bitconvert (nxv16i8 ZPR:$src))), (nxv8f16 ZPR:$src)>;
-  def : Pat<(nxv8f16 (bitconvert (nxv8i16 ZPR:$src))), (nxv8f16 ZPR:$src)>;
-  def : Pat<(nxv8f16 (bitconvert (nxv4i32 ZPR:$src))), (nxv8f16 ZPR:$src)>;
-  def : Pat<(nxv8f16 (bitconvert (nxv2i64 ZPR:$src))), (nxv8f16 ZPR:$src)>;
-  def : Pat<(nxv8f16 (bitconvert (nxv4f32 ZPR:$src))), (nxv8f16 ZPR:$src)>;
-  def : Pat<(nxv8f16 (bitconvert (nxv2f64 ZPR:$src))), (nxv8f16 ZPR:$src)>;
-
-  def : Pat<(nxv4f32 (bitconvert (nxv16i8 ZPR:$src))), (nxv4f32 ZPR:$src)>;
-  def : Pat<(nxv4f32 (bitconvert (nxv8i16 ZPR:$src))), (nxv4f32 ZPR:$src)>;
-  def : Pat<(nxv4f32 (bitconvert (nxv4i32 ZPR:$src))), (nxv4f32 ZPR:$src)>;
-  def : Pat<(nxv4f32 (bitconvert (nxv2i64 ZPR:$src))), (nxv4f32 ZPR:$src)>;
-  def : Pat<(nxv4f32 (bitconvert (nxv8f16 ZPR:$src))), (nxv4f32 ZPR:$src)>;
-  def : Pat<(nxv4f32 (bitconvert (nxv2f64 ZPR:$src))), (nxv4f32 ZPR:$src)>;
-
-  def : Pat<(nxv2f64 (bitconvert (nxv16i8 ZPR:$src))), (nxv2f64 ZPR:$src)>;
-  def : Pat<(nxv2f64 (bitconvert (nxv8i16 ZPR:$src))), (nxv2f64 ZPR:$src)>;
-  def : Pat<(nxv2f64 (bitconvert (nxv4i32 ZPR:$src))), (nxv2f64 ZPR:$src)>;
-  def : Pat<(nxv2f64 (bitconvert (nxv2i64 ZPR:$src))), (nxv2f64 ZPR:$src)>;
-  def : Pat<(nxv2f64 (bitconvert (nxv8f16 ZPR:$src))), (nxv2f64 ZPR:$src)>;
-  def : Pat<(nxv2f64 (bitconvert (nxv4f32 ZPR:$src))), (nxv2f64 ZPR:$src)>;
+  // FIXME: BigEndian requires an additional REV instruction to satisfy the
+  // constraint that none of the bits change when stored to memory as one
+  // type, and reloaded as another type.
+  let Predicates = [IsLE] in {
+    def : Pat<(nxv16i8 (bitconvert (nxv8i16 ZPR:$src))), (nxv16i8 ZPR:$src)>;
+    def : Pat<(nxv16i8 (bitconvert (nxv4i32 ZPR:$src))), (nxv16i8 ZPR:$src)>;
+    def : Pat<(nxv16i8 (bitconvert (nxv2i64 ZPR:$src))), (nxv16i8 ZPR:$src)>;
+    def : Pat<(nxv16i8 (bitconvert (nxv8f16 ZPR:$src))), (nxv16i8 ZPR:$src)>;
+    def : Pat<(nxv16i8 (bitconvert (nxv4f32 ZPR:$src))), (nxv16i8 ZPR:$src)>;
+    def : Pat<(nxv16i8 (bitconvert (nxv2f64 ZPR:$src))), (nxv16i8 ZPR:$src)>;
+
+    def : Pat<(nxv8i16 (bitconvert (nxv16i8 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+    def : Pat<(nxv8i16 (bitconvert (nxv4i32 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+    def : Pat<(nxv8i16 (bitconvert (nxv2i64 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+    def : Pat<(nxv8i16 (bitconvert (nxv8f16 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+    def : Pat<(nxv8i16 (bitconvert (nxv4f32 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+    def : Pat<(nxv8i16 (bitconvert (nxv2f64 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+
+    def : Pat<(nxv4i32 (bitconvert (nxv16i8 ZPR:$src))), (nxv4i32 ZPR:$src)>;
+    def : Pat<(nxv4i32 (bitconvert (nxv8i16 ZPR:$src))), (nxv4i32 ZPR:$src)>;
+    def : Pat<(nxv4i32 (bitconvert (nxv2i64 ZPR:$src))), (nxv4i32 ZPR:$src)>;
+    def : Pat<(nxv4i32 (bitconvert (nxv8f16 ZPR:$src))), (nxv4i32 ZPR:$src)>;
+    def : Pat<(nxv4i32 (bitconvert (nxv4f32 ZPR:$src))), (nxv4i32 ZPR:$src)>;
+    def : Pat<(nxv4i32 (bitconvert (nxv2f64 ZPR:$src))), (nxv4i32 ZPR:$src)>;
+
+    def : Pat<(nxv2i64 (bitconvert (nxv16i8 ZPR:$src))), (nxv2i64 ZPR:$src)>;
+    def : Pat<(nxv2i64 (bitconvert (nxv8i16 ZPR:$src))), (nxv2i64 ZPR:$src)>;
+    def : Pat<(nxv2i64 (bitconvert (nxv4i32 ZPR:$src))), (nxv2i64 ZPR:$src)>;
+    def : Pat<(nxv2i64 (bitconvert (nxv8f16 ZPR:$src))), (nxv2i64 ZPR:$src)>;
+    def : Pat<(nxv2i64 (bitconvert (nxv4f32 ZPR:$src))), (nxv2i64 ZPR:$src)>;
+    def : Pat<(nxv2i64 (bitconvert (nxv2f64 ZPR:$src))), (nxv2i64 ZPR:$src)>;
+
+    def : Pat<(nxv8f16 (bitconvert (nxv16i8 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+    def : Pat<(nxv8f16 (bitconvert (nxv8i16 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+    def : Pat<(nxv8f16 (bitconvert (nxv4i32 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+    def : Pat<(nxv8f16 (bitconvert (nxv2i64 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+    def : Pat<(nxv8f16 (bitconvert (nxv4f32 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+    def : Pat<(nxv8f16 (bitconvert (nxv2f64 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+
+    def : Pat<(nxv4f32 (bitconvert (nxv16i8 ZPR:$src))), (nxv4f32 ZPR:$src)>;
+    def : Pat<(nxv4f32 (bitconvert (nxv8i16 ZPR:$src))), (nxv4f32 ZPR:$src)>;
+    def : Pat<(nxv4f32 (bitconvert (nxv4i32 ZPR:$src))), (nxv4f32 ZPR:$src)>;
+    def : Pat<(nxv4f32 (bitconvert (nxv2i64 ZPR:$src))), (nxv4f32 ZPR:$src)>;
+    def : Pat<(nxv4f32 (bitconvert (nxv8f16 ZPR:$src))), (nxv4f32 ZPR:$src)>;
+    def : Pat<(nxv4f32 (bitconvert (nxv2f64 ZPR:$src))), (nxv4f32 ZPR:$src)>;
+
+    def : Pat<(nxv2f64 (bitconvert (nxv16i8 ZPR:$src))), (nxv2f64 ZPR:$src)>;
+    def : Pat<(nxv2f64 (bitconvert (nxv8i16 ZPR:$src))), (nxv2f64 ZPR:$src)>;
+    def : Pat<(nxv2f64 (bitconvert (nxv4i32 ZPR:$src))), (nxv2f64 ZPR:$src)>;
+    def : Pat<(nxv2f64 (bitconvert (nxv2i64 ZPR:$src))), (nxv2f64 ZPR:$src)>;
+    def : Pat<(nxv2f64 (bitconvert (nxv8f16 ZPR:$src))), (nxv2f64 ZPR:$src)>;
+    def : Pat<(nxv2f64 (bitconvert (nxv4f32 ZPR:$src))), (nxv2f64 ZPR:$src)>;
+  }
 
   def : Pat<(nxv16i1 (reinterpret_cast (nxv16i1 PPR:$src))), (COPY_TO_REGCLASS PPR:$src, PPR)>;
   def : Pat<(nxv16i1 (reinterpret_cast (nxv8i1 PPR:$src))), (COPY_TO_REGCLASS PPR:$src, PPR)>;

diff --git a/llvm/test/CodeGen/AArch64/sve-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-bitcast.ll
new file mode 100644
index 000000000000..b98916585b24
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-bitcast.ll
@@ -0,0 +1,339 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: not --crash llc -mtriple=aarch64_be -mattr=+sve < %s
+
+define <vscale x 16 x i8> @bitcast_i16_to_i8(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: bitcast_i16_to_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x i16> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %bc
+}
+
+define <vscale x 16 x i8> @bitcast_i32_to_i8(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: bitcast_i32_to_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x i32> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %bc
+}
+
+define <vscale x 16 x i8> @bitcast_i64_to_i8(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: bitcast_i64_to_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x i64> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %bc
+}
+
+define <vscale x 16 x i8> @bitcast_half_to_i8(<vscale x 8 x half> %v) {
+; CHECK-LABEL: bitcast_half_to_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x half> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %bc
+}
+
+define <vscale x 16 x i8> @bitcast_float_to_i8(<vscale x 4 x float> %v) {
+; CHECK-LABEL: bitcast_float_to_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x float> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %bc
+}
+
+define <vscale x 16 x i8> @bitcast_double_to_i8(<vscale x 2 x double> %v) {
+; CHECK-LABEL: bitcast_double_to_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x double> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %bc
+}
+
+define <vscale x 8 x i16> @bitcast_i8_to_i16(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: bitcast_i8_to_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 16 x i8> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %bc
+}
+
+define <vscale x 8 x i16> @bitcast_i32_to_i16(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: bitcast_i32_to_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x i32> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %bc
+}
+
+define <vscale x 8 x i16> @bitcast_i64_to_i16(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: bitcast_i64_to_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x i64> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %bc
+}
+
+define <vscale x 8 x i16> @bitcast_half_to_i16(<vscale x 8 x half> %v) {
+; CHECK-LABEL: bitcast_half_to_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x half> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %bc
+}
+
+define <vscale x 8 x i16> @bitcast_float_to_i16(<vscale x 4 x float> %v) {
+; CHECK-LABEL: bitcast_float_to_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x float> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %bc
+}
+
+define <vscale x 8 x i16> @bitcast_double_to_i16(<vscale x 2 x double> %v) {
+; CHECK-LABEL: bitcast_double_to_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x double> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %bc
+}
+
+define <vscale x 4 x i32> @bitcast_i8_to_i32(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: bitcast_i8_to_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 16 x i8> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %bc
+}
+
+define <vscale x 4 x i32> @bitcast_i16_to_i32(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: bitcast_i16_to_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x i16> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %bc
+}
+
+define <vscale x 4 x i32> @bitcast_i64_to_i32(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: bitcast_i64_to_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x i64> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %bc
+}
+
+define <vscale x 4 x i32> @bitcast_half_to_i32(<vscale x 8 x half> %v) {
+; CHECK-LABEL: bitcast_half_to_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x half> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %bc
+}
+
+define <vscale x 4 x i32> @bitcast_float_to_i32(<vscale x 4 x float> %v) {
+; CHECK-LABEL: bitcast_float_to_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x float> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %bc
+}
+
+define <vscale x 4 x i32> @bitcast_double_to_i32(<vscale x 2 x double> %v) {
+; CHECK-LABEL: bitcast_double_to_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x double> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %bc
+}
+
+define <vscale x 2 x i64> @bitcast_i8_to_i64(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: bitcast_i8_to_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 16 x i8> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %bc
+}
+
+define <vscale x 2 x i64> @bitcast_i16_to_i64(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: bitcast_i16_to_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x i16> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %bc
+}
+
+define <vscale x 2 x i64> @bitcast_i32_to_i64(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: bitcast_i32_to_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x i32> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %bc
+}
+
+define <vscale x 2 x i64> @bitcast_half_to_i64(<vscale x 8 x half> %v) {
+; CHECK-LABEL: bitcast_half_to_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x half> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %bc
+}
+
+define <vscale x 2 x i64> @bitcast_float_to_i64(<vscale x 4 x float> %v) {
+; CHECK-LABEL: bitcast_float_to_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x float> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %bc
+}
+
+define <vscale x 2 x i64> @bitcast_double_to_i64(<vscale x 2 x double> %v) {
+; CHECK-LABEL: bitcast_double_to_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x double> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %bc
+}
+
+define <vscale x 8 x half> @bitcast_i8_to_half(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: bitcast_i8_to_half:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 16 x i8> %v to <vscale x 8 x half>
+  ret <vscale x 8 x half> %bc
+}
+
+define <vscale x 8 x half> @bitcast_i16_to_half(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: bitcast_i16_to_half:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x i16> %v to <vscale x 8 x half>
+  ret <vscale x 8 x half> %bc
+}
+
+define <vscale x 8 x half> @bitcast_i32_to_half(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: bitcast_i32_to_half:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x i32> %v to <vscale x 8 x half>
+  ret <vscale x 8 x half> %bc
+}
+
+define <vscale x 8 x half> @bitcast_i64_to_half(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: bitcast_i64_to_half:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x i64> %v to <vscale x 8 x half>
+  ret <vscale x 8 x half> %bc
+}
+
+define <vscale x 8 x half> @bitcast_float_to_half(<vscale x 4 x float> %v) {
+; CHECK-LABEL: bitcast_float_to_half:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x float> %v to <vscale x 8 x half>
+  ret <vscale x 8 x half> %bc
+}
+
+define <vscale x 8 x half> @bitcast_double_to_half(<vscale x 2 x double> %v) {
+; CHECK-LABEL: bitcast_double_to_half:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x double> %v to <vscale x 8 x half>
+  ret <vscale x 8 x half> %bc
+}
+
+define <vscale x 4 x float> @bitcast_i8_to_float(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: bitcast_i8_to_float:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 16 x i8> %v to <vscale x 4 x float>
+  ret <vscale x 4 x float> %bc
+}
+
+define <vscale x 4 x float> @bitcast_i16_to_float(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: bitcast_i16_to_float:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x i16> %v to <vscale x 4 x float>
+  ret <vscale x 4 x float> %bc
+}
+
+define <vscale x 4 x float> @bitcast_i32_to_float(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: bitcast_i32_to_float:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x i32> %v to <vscale x 4 x float>
+  ret <vscale x 4 x float> %bc
+}
+
+define <vscale x 4 x float> @bitcast_i64_to_float(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: bitcast_i64_to_float:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x i64> %v to <vscale x 4 x float>
+  ret <vscale x 4 x float> %bc
+}
+
+define <vscale x 4 x float> @bitcast_half_to_float(<vscale x 8 x half> %v) {
+; CHECK-LABEL: bitcast_half_to_float:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x half> %v to <vscale x 4 x float>
+  ret <vscale x 4 x float> %bc
+}
+
+define <vscale x 4 x float> @bitcast_double_to_float(<vscale x 2 x double> %v) {
+; CHECK-LABEL: bitcast_double_to_float:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x double> %v to <vscale x 4 x float>
+  ret <vscale x 4 x float> %bc
+}
+
+define <vscale x 2 x double> @bitcast_i8_to_double(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: bitcast_i8_to_double:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 16 x i8> %v to <vscale x 2 x double>
+  ret <vscale x 2 x double> %bc
+}
+
+define <vscale x 2 x double> @bitcast_i16_to_double(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: bitcast_i16_to_double:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x i16> %v to <vscale x 2 x double>
+  ret <vscale x 2 x double> %bc
+}
+
+define <vscale x 2 x double> @bitcast_i32_to_double(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: bitcast_i32_to_double:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x i32> %v to <vscale x 2 x double>
+  ret <vscale x 2 x double> %bc
+}
+
+define <vscale x 2 x double> @bitcast_i64_to_double(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: bitcast_i64_to_double:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 2 x i64> %v to <vscale x 2 x double>
+  ret <vscale x 2 x double> %bc
+}
+
+define <vscale x 2 x double> @bitcast_half_to_double(<vscale x 8 x half> %v) {
+; CHECK-LABEL: bitcast_half_to_double:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 8 x half> %v to <vscale x 2 x double>
+  ret <vscale x 2 x double> %bc
+}
+
+define <vscale x 2 x double> @bitcast_float_to_double(<vscale x 4 x float> %v) {
+; CHECK-LABEL: bitcast_float_to_double:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %bc = bitcast <vscale x 4 x float> %v to <vscale x 2 x double>
+  ret <vscale x 2 x double> %bc
+}

More information about the llvm-commits mailing list