[llvm] r249147 - Reapply r249121 : "[FastISel][x86] Teach how to select SSE2/AVX bitcasts between 128/256-bit vector types."

Andrea Di Biagio via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 2 09:08:06 PDT 2015


Author: adibiagio
Date: Fri Oct  2 11:08:05 2015
New Revision: 249147

URL: http://llvm.org/viewvc/llvm-project?rev=249147&view=rev
Log:
Reapply r249121 : "[FastISel][x86] Teach how to select SSE2/AVX bitcasts between 128/256-bit vector types."

This patch teaches FastIsel the following two things:
1) On SSE2, no instructions are needed for bitcasts between 128-bit vector types;
2) On AVX, no instructions are needed for bitcasts between 256-bit vector types.

Example:

  %1 = bitcast <4 x i32> %V to <2 x i64>

Before (-fast-isel -fast-isel-abort=1):

  FastIsel miss: %1 = bitcast <4 x i32> %V to <2 x i64>

Now we don't fall back to SelectionDAG and we correctly fold that computation
propagating the register associated to %V.

Originally reviewed here: http://reviews.llvm.org/D13347

Added:
    llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts-avx.ll
    llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts.ll
Modified:
    llvm/trunk/lib/Target/X86/X86FastISel.cpp

Modified: llvm/trunk/lib/Target/X86/X86FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FastISel.cpp?rev=249147&r1=249146&r2=249147&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp Fri Oct  2 11:08:05 2015
@@ -3234,6 +3234,30 @@ X86FastISel::fastSelectInstruction(const
     updateValueMap(I, Reg);
     return true;
   }
+  case Instruction::BitCast: {
+    // Select SSE2/AVX bitcasts between 128/256 bit vector types.
+    if (!Subtarget->hasSSE2())
+      return false;
+
+    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
+    EVT DstVT = TLI.getValueType(DL, I->getType());
+
+    if (!SrcVT.isSimple() || !DstVT.isSimple())
+      return false;
+
+    if (!SrcVT.is128BitVector() &&
+        !(Subtarget->hasAVX() && SrcVT.is256BitVector()))
+      return false;
+
+    unsigned Reg = getRegForValue(I->getOperand(0));
+    if (Reg == 0)
+      return false;
+      
+    // No instruction is needed for conversion. Reuse the register used by
+    // the first operand.
+    updateValueMap(I, Reg);
+    return true;
+  }
   }
 
   return false;

Added: llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts-avx.ll?rev=249147&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts-avx.ll (added)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts-avx.ll Fri Oct  2 11:08:05 2015
@@ -0,0 +1,244 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -fast-isel-abort=1 -asm-verbose=0 | FileCheck %s
+;
+; Bitcasts between 256-bit vector types are no-ops since no instruction is
+; needed for the conversion.
+
+define <4 x i64> @v8i32_to_v4i64(<8 x i32> %a) {
+;CHECK-LABEL: v8i32_to_v4i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i32> %a to <4 x i64>
+  ret <4 x i64> %1
+}
+
+define <4 x i64> @v16i16_to_v4i64(<16 x i16> %a) {
+;CHECK-LABEL: v16i16_to_v4i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i16> %a to <4 x i64>
+  ret <4 x i64> %1
+}
+
+define <4 x i64> @v32i8_to_v4i64(<32 x i8> %a) {
+;CHECK-LABEL: v32i8_to_v4i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <32 x i8> %a to <4 x i64>
+  ret <4 x i64> %1
+}
+
+define <4 x i64> @v4f64_to_v4i64(<4 x double> %a) {
+;CHECK-LABEL: v4f64_to_v4i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x double> %a to <4 x i64>
+  ret <4 x i64> %1
+}
+
+define <4 x i64> @v8f32_to_v4i64(<8 x float> %a) {
+;CHECK-LABEL: v8f32_to_v4i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x float> %a to <4 x i64>
+  ret <4 x i64> %1
+}
+
+define <8 x i32> @v4i64_to_v8i32(<4 x i64> %a) {
+;CHECK-LABEL: v4i64_to_v8i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i64> %a to <8 x i32>
+  ret <8 x i32> %1
+}
+
+define <8 x i32> @v16i16_to_v8i32(<16 x i16> %a) {
+;CHECK-LABEL: v16i16_to_v8i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i16> %a to <8 x i32>
+  ret <8 x i32> %1
+}
+
+define <8 x i32> @v32i8_to_v8i32(<32 x i8> %a) {
+;CHECK-LABEL: v32i8_to_v8i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <32 x i8> %a to <8 x i32>
+  ret <8 x i32> %1
+}
+
+define <8 x i32> @v4f64_to_v8i32(<4 x double> %a) {
+;CHECK-LABEL: v4f64_to_v8i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x double> %a to <8 x i32>
+  ret <8 x i32> %1
+}
+
+define <8 x i32> @v8f32_to_v8i32(<8 x float> %a) {
+;CHECK-LABEL: v8f32_to_v8i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x float> %a to <8 x i32>
+  ret <8 x i32> %1
+}
+
+define <16 x i16> @v4i64_to_v16i16(<4 x i64> %a) {
+;CHECK-LABEL: v4i64_to_v16i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i64> %a to <16 x i16>
+  ret <16 x i16> %1
+}
+
+define <16 x i16> @v8i32_to_v16i16(<8 x i32> %a) {
+;CHECK-LABEL: v8i32_to_v16i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i32> %a to <16 x i16>
+  ret <16 x i16> %1
+}
+
+define <16 x i16> @v32i8_to_v16i16(<32 x i8> %a) {
+;CHECK-LABEL: v32i8_to_v16i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <32 x i8> %a to <16 x i16>
+  ret <16 x i16> %1
+}
+
+define <16 x i16> @v4f64_to_v16i16(<4 x double> %a) {
+;CHECK-LABEL: v4f64_to_v16i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x double> %a to <16 x i16>
+  ret <16 x i16> %1
+}
+
+define <16 x i16> @v8f32_to_v16i16(<8 x float> %a) {
+;CHECK-LABEL: v8f32_to_v16i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x float> %a to <16 x i16>
+  ret <16 x i16> %1
+}
+
+define <32 x i8> @v16i16_to_v32i8(<16 x i16> %a) {
+;CHECK-LABEL: v16i16_to_v32i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i16> %a to <32 x i8>
+  ret <32 x i8> %1
+}
+
+define <32 x i8> @v4i64_to_v32i8(<4 x i64> %a) {
+;CHECK-LABEL: v4i64_to_v32i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i64> %a to <32 x i8>
+  ret <32 x i8> %1
+}
+
+define <32 x i8> @v8i32_to_v32i8(<8 x i32> %a) {
+;CHECK-LABEL: v8i32_to_v32i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i32> %a to <32 x i8>
+  ret <32 x i8> %1
+}
+
+define <32 x i8> @v4f64_to_v32i8(<4 x double> %a) {
+;CHECK-LABEL: v4f64_to_v32i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x double> %a to <32 x i8>
+  ret <32 x i8> %1
+}
+
+define <32 x i8> @v8f32_to_v32i8(<8 x float> %a) {
+;CHECK-LABEL: v8f32_to_v32i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x float> %a to <32 x i8>
+  ret <32 x i8> %1
+}
+
+define <8 x float> @v32i8_to_v8f32(<32 x i8> %a) {
+;CHECK-LABEL: v32i8_to_v8f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <32 x i8> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <8 x float> @v16i16_to_v8f32(<16 x i16> %a) {
+;CHECK-LABEL: v16i16_to_v8f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i16> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <8 x float> @v4i64_to_v8f32(<4 x i64> %a) {
+;CHECK-LABEL: v4i64_to_v8f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i64> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <8 x float> @v8i32_to_v8f32(<8 x i32> %a) {
+;CHECK-LABEL: v8i32_to_v8f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i32> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <8 x float> @v4f64_to_v8f32(<4 x double> %a) {
+;CHECK-LABEL: v4f64_to_v8f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x double> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <4 x double> @v8f32_to_v4f64(<8 x float> %a) {
+;CHECK-LABEL: v8f32_to_v4f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x float> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <4 x double> @v32i8_to_v4f64(<32 x i8> %a) {
+;CHECK-LABEL: v32i8_to_v4f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <32 x i8> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <4 x double> @v16i16_to_v4f64(<16 x i16> %a) {
+;CHECK-LABEL: v16i16_to_v4f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i16> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <4 x double> @v4i64_to_v4f64(<4 x i64> %a) {
+;CHECK-LABEL: v4i64_to_v4f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i64> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <4 x double> @v8i32_to_v4f64(<8 x i32> %a) {
+;CHECK-LABEL: v8i32_to_v4f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i32> %a to <4 x double>
+  ret <4 x double> %1
+}

Added: llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts.ll?rev=249147&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts.ll (added)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-bitcasts.ll Fri Oct  2 11:08:05 2015
@@ -0,0 +1,245 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -fast-isel-abort=1 -asm-verbose=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -fast-isel-abort=1 -asm-verbose=0 | FileCheck %s
+;
+; Bitcasts between 128-bit vector types are no-ops since no instruction is
+; needed for the conversion.
+
+define <2 x i64> @v4i32_to_v2i64(<4 x i32> %a) {
+;CHECK-LABEL: v4i32_to_v2i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i32> %a to <2 x i64>
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @v8i16_to_v2i64(<8 x i16> %a) {
+;CHECK-LABEL: v8i16_to_v2i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i16> %a to <2 x i64>
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @v16i8_to_v2i64(<16 x i8> %a) {
+;CHECK-LABEL: v16i8_to_v2i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i8> %a to <2 x i64>
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @v2f64_to_v2i64(<2 x double> %a) {
+;CHECK-LABEL: v2f64_to_v2i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x double> %a to <2 x i64>
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @v4f32_to_v2i64(<4 x float> %a) {
+;CHECK-LABEL: v4f32_to_v2i64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x float> %a to <2 x i64>
+  ret <2 x i64> %1
+}
+
+define <4 x i32> @v2i64_to_v4i32(<2 x i64> %a) {
+;CHECK-LABEL: v2i64_to_v4i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x i64> %a to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <4 x i32> @v8i16_to_v4i32(<8 x i16> %a) {
+;CHECK-LABEL: v8i16_to_v4i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i16> %a to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <4 x i32> @v16i8_to_v4i32(<16 x i8> %a) {
+;CHECK-LABEL: v16i8_to_v4i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i8> %a to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <4 x i32> @v2f64_to_v4i32(<2 x double> %a) {
+;CHECK-LABEL: v2f64_to_v4i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x double> %a to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <4 x i32> @v4f32_to_v4i32(<4 x float> %a) {
+;CHECK-LABEL: v4f32_to_v4i32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x float> %a to <4 x i32>
+  ret <4 x i32> %1
+}
+
+define <8 x i16> @v2i64_to_v8i16(<2 x i64> %a) {
+;CHECK-LABEL: v2i64_to_v8i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x i64> %a to <8 x i16>
+  ret <8 x i16> %1
+}
+
+define <8 x i16> @v4i32_to_v8i16(<4 x i32> %a) {
+;CHECK-LABEL: v4i32_to_v8i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i32> %a to <8 x i16>
+  ret <8 x i16> %1
+}
+
+define <8 x i16> @v16i8_to_v8i16(<16 x i8> %a) {
+;CHECK-LABEL: v16i8_to_v8i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i8> %a to <8 x i16>
+  ret <8 x i16> %1
+}
+
+define <8 x i16> @v2f64_to_v8i16(<2 x double> %a) {
+;CHECK-LABEL: v2f64_to_v8i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x double> %a to <8 x i16>
+  ret <8 x i16> %1
+}
+
+define <8 x i16> @v4f32_to_v8i16(<4 x float> %a) {
+;CHECK-LABEL: v4f32_to_v8i16:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x float> %a to <8 x i16>
+  ret <8 x i16> %1
+}
+
+define <16 x i8> @v8i16_to_v16i8(<8 x i16> %a) {
+;CHECK-LABEL: v8i16_to_v16i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i16> %a to <16 x i8>
+  ret <16 x i8> %1
+}
+
+define <16 x i8> @v2i64_to_v16i8(<2 x i64> %a) {
+;CHECK-LABEL: v2i64_to_v16i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x i64> %a to <16 x i8>
+  ret <16 x i8> %1
+}
+
+define <16 x i8> @v4i32_to_v16i8(<4 x i32> %a) {
+;CHECK-LABEL: v4i32_to_v16i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i32> %a to <16 x i8>
+  ret <16 x i8> %1
+}
+
+define <16 x i8> @v2f64_to_v16i8(<2 x double> %a) {
+;CHECK-LABEL: v2f64_to_v16i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x double> %a to <16 x i8>
+  ret <16 x i8> %1
+}
+
+define <16 x i8> @v4f32_to_v16i8(<4 x float> %a) {
+;CHECK-LABEL: v4f32_to_v16i8:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x float> %a to <16 x i8>
+  ret <16 x i8> %1
+}
+
+define <4 x float> @v16i8_to_v4f32(<16 x i8> %a) {
+;CHECK-LABEL: v16i8_to_v4f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i8> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <4 x float> @v8i16_to_v4f32(<8 x i16> %a) {
+;CHECK-LABEL: v8i16_to_v4f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i16> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <4 x float> @v2i64_to_v4f32(<2 x i64> %a) {
+;CHECK-LABEL: v2i64_to_v4f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x i64> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <4 x float> @v4i32_to_v4f32(<4 x i32> %a) {
+;CHECK-LABEL: v4i32_to_v4f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i32> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <4 x float> @v2f64_to_v4f32(<2 x double> %a) {
+;CHECK-LABEL: v2f64_to_v4f32:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x double> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <2 x double> @v4f32_to_v2f64(<4 x float> %a) {
+;CHECK-LABEL: v4f32_to_v2f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x float> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <2 x double> @v16i8_to_v2f64(<16 x i8> %a) {
+;CHECK-LABEL: v16i8_to_v2f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <16 x i8> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <2 x double> @v8i16_to_v2f64(<8 x i16> %a) {
+;CHECK-LABEL: v8i16_to_v2f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <8 x i16> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <2 x double> @v2i64_to_v2f64(<2 x i64> %a) {
+;CHECK-LABEL: v2i64_to_v2f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <2 x i64> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <2 x double> @v4i32_to_v2f64(<4 x i32> %a) {
+;CHECK-LABEL: v4i32_to_v2f64:
+;CHECK-NEXT: .cfi_startproc
+;CHECK-NEXT: ret
+  %1 = bitcast <4 x i32> %a to <2 x double>
+  ret <2 x double> %1
+}




More information about the llvm-commits mailing list