[llvm] r233270 - [X86][FastIsel] Teach how to select vector load instructions.

Andrea Di Biagio Andrea_DiBiagio at sn.scee.net
Thu Mar 26 04:29:03 PDT 2015


Author: adibiagio
Date: Thu Mar 26 06:29:02 2015
New Revision: 233270

URL: http://llvm.org/viewvc/llvm-project?rev=233270&view=rev
Log:
[X86][FastIsel] Teach how to select vector load instructions.

This patch teaches fast-isel how to select 128-bit vector load instructions,
using the load alignment to choose between the aligned and unaligned SSE/AVX
move forms.

Added test: CodeGen/X86/fast-isel-vecload.ll
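
For illustration, here is a minimal example of the kind of IR this now handles
(it mirrors the added test; the function name @foo is made up for this note).
On an x86-64 target with SSE2 or AVX, fast-isel now selects movaps/vmovaps for
a 16-byte aligned <4 x float> load and movups/vmovups otherwise, instead of
falling back to SelectionDAG:

  define <4 x float> @foo(<4 x float>* %V) {
  entry:
    %0 = load <4 x float>, <4 x float>* %V, align 16
    ret <4 x float> %0
  }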

Differential Revision: http://reviews.llvm.org/D8605

Added:
    llvm/trunk/test/CodeGen/X86/fast-isel-vecload.ll
Modified:
    llvm/trunk/lib/Target/X86/X86FastISel.cpp

Modified: llvm/trunk/lib/Target/X86/X86FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FastISel.cpp?rev=233270&r1=233269&r2=233270&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp Thu Mar 26 06:29:02 2015
@@ -84,7 +84,7 @@ private:
   bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT, DebugLoc DL);
 
   bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, MachineMemOperand *MMO,
-                       unsigned &ResultReg);
+                       unsigned &ResultReg, unsigned Alignment = 1);
 
   bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM,
                         MachineMemOperand *MMO = nullptr, bool Aligned = false);
@@ -327,7 +327,8 @@ bool X86FastISel::isTypeLegal(Type *Ty,
 /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
 /// Return true and the result register by reference if it is possible.
 bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
-                                  MachineMemOperand *MMO, unsigned &ResultReg) {
+                                  MachineMemOperand *MMO, unsigned &ResultReg,
+                                  unsigned Alignment) {
   // Get opcode and regclass of the output for the given load instruction.
   unsigned Opc = 0;
   const TargetRegisterClass *RC = nullptr;
@@ -372,6 +373,30 @@ bool X86FastISel::X86FastEmitLoad(EVT VT
   case MVT::f80:
     // No f80 support yet.
     return false;
+  case MVT::v4f32:
+    if (Alignment >= 16)
+      Opc = Subtarget->hasAVX() ? X86::VMOVAPSrm : X86::MOVAPSrm;
+    else
+      Opc = Subtarget->hasAVX() ? X86::VMOVUPSrm : X86::MOVUPSrm;
+    RC  = &X86::VR128RegClass;
+    break;
+  case MVT::v2f64:
+    if (Alignment >= 16)
+      Opc = Subtarget->hasAVX() ? X86::VMOVAPDrm : X86::MOVAPDrm;
+    else
+      Opc = Subtarget->hasAVX() ? X86::VMOVUPDrm : X86::MOVUPDrm;
+    RC  = &X86::VR128RegClass;
+    break;
+  case MVT::v4i32:
+  case MVT::v2i64:
+  case MVT::v8i16:
+  case MVT::v16i8:
+    if (Alignment >= 16)
+      Opc = Subtarget->hasAVX() ? X86::VMOVDQArm : X86::MOVDQArm;
+    else
+      Opc = Subtarget->hasAVX() ? X86::VMOVDQUrm : X86::MOVDQUrm;
+    RC  = &X86::VR128RegClass;
+    break;
   }
 
   ResultReg = createResultReg(RC);
@@ -1068,8 +1093,14 @@ bool X86FastISel::X86SelectLoad(const In
   if (!X86SelectAddress(Ptr, AM))
     return false;
 
+  unsigned Alignment = LI->getAlignment();
+  unsigned ABIAlignment = DL.getABITypeAlignment(LI->getType());
+  if (Alignment == 0) // Ensure that codegen never sees alignment 0
+    Alignment = ABIAlignment;
+
   unsigned ResultReg = 0;
-  if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg))
+  if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
+                       Alignment))
     return false;
 
   updateValueMap(I, ResultReg);

Added: llvm/trunk/test/CodeGen/X86/fast-isel-vecload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fast-isel-vecload.ll?rev=233270&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-vecload.ll (added)
+++ llvm/trunk/test/CodeGen/X86/fast-isel-vecload.ll Thu Mar 26 06:29:02 2015
@@ -0,0 +1,185 @@
+; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE --check-prefix=ALL
+; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=ALL
+
+; Verify that fast-isel knows how to select aligned/unaligned vector loads.
+; Also verify that the selected load instruction is in the correct domain.
+
+define <16 x i8> @test_v16i8(<16 x i8>* %V) {
+; ALL-LABEL: test_v16i8:
+; SSE: movdqa  (%rdi), %xmm0
+; AVX: vmovdqa  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %V, align 16
+  ret <16 x i8> %0
+}
+
+define <8 x i16> @test_v8i16(<8 x i16>* %V) {
+; ALL-LABEL: test_v8i16:
+; SSE: movdqa  (%rdi), %xmm0
+; AVX: vmovdqa  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %V, align 16
+  ret <8 x i16> %0
+}
+
+define <4 x i32> @test_v4i32(<4 x i32>* %V) {
+; ALL-LABEL: test_v4i32:
+; SSE: movdqa  (%rdi), %xmm0
+; AVX: vmovdqa  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %V, align 16
+  ret <4 x i32> %0
+}
+
+define <2 x i64> @test_v2i64(<2 x i64>* %V) {
+; ALL-LABEL: test_v2i64:
+; SSE: movdqa  (%rdi), %xmm0
+; AVX: vmovdqa  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %V, align 16
+  ret <2 x i64> %0
+}
+
+define <16 x i8> @test_v16i8_unaligned(<16 x i8>* %V) {
+; ALL-LABEL: test_v16i8_unaligned:
+; SSE: movdqu  (%rdi), %xmm0
+; AVX: vmovdqu  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %V, align 4
+  ret <16 x i8> %0
+}
+
+define <8 x i16> @test_v8i16_unaligned(<8 x i16>* %V) {
+; ALL-LABEL: test_v8i16_unaligned:
+; SSE: movdqu  (%rdi), %xmm0
+; AVX: vmovdqu  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %V, align 4
+  ret <8 x i16> %0
+}
+
+define <4 x i32> @test_v4i32_unaligned(<4 x i32>* %V) {
+; ALL-LABEL: test_v4i32_unaligned:
+; SSE: movdqu  (%rdi), %xmm0
+; AVX: vmovdqu  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %V, align 4
+  ret <4 x i32> %0
+}
+
+define <2 x i64> @test_v2i64_unaligned(<2 x i64>* %V) {
+; ALL-LABEL: test_v2i64_unaligned:
+; SSE: movdqu  (%rdi), %xmm0
+; AVX: vmovdqu  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %V, align 4
+  ret <2 x i64> %0
+}
+
+define <4 x float> @test_v4f32(<4 x float>* %V) {
+; ALL-LABEL: test_v4f32:
+; SSE: movaps  (%rdi), %xmm0
+; AVX: vmovaps  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x float>, <4 x float>* %V, align 16
+  ret <4 x float> %0
+}
+
+define <2 x double> @test_v2f64(<2 x double>* %V) {
+; ALL-LABEL: test_v2f64:
+; SSE: movapd  (%rdi), %xmm0
+; AVX: vmovapd  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x double>, <2 x double>* %V, align 16
+  ret <2 x double> %0
+}
+
+define <4 x float> @test_v4f32_unaligned(<4 x float>* %V) {
+; ALL-LABEL: test_v4f32_unaligned:
+; SSE: movups  (%rdi), %xmm0
+; AVX: vmovups  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x float>, <4 x float>* %V, align 4
+  ret <4 x float> %0
+}
+
+define <2 x double> @test_v2f64_unaligned(<2 x double>* %V) {
+; ALL-LABEL: test_v2f64_unaligned:
+; SSE: movupd  (%rdi), %xmm0
+; AVX: vmovupd  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x double>, <2 x double>* %V, align 4
+  ret <2 x double> %0
+}
+
+define <16 x i8> @test_v16i8_abi_alignment(<16 x i8>* %V) {
+; ALL-LABEL: test_v16i8_abi_alignment:
+; SSE: movdqa  (%rdi), %xmm0
+; AVX: vmovdqa  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %V
+  ret <16 x i8> %0
+}
+
+define <8 x i16> @test_v8i16_abi_alignment(<8 x i16>* %V) {
+; ALL-LABEL: test_v8i16_abi_alignment:
+; SSE: movdqa  (%rdi), %xmm0
+; AVX: vmovdqa  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %V
+  ret <8 x i16> %0
+}
+
+define <4 x i32> @test_v4i32_abi_alignment(<4 x i32>* %V) {
+; ALL-LABEL: test_v4i32_abi_alignment:
+; SSE: movdqa  (%rdi), %xmm0
+; AVX: vmovdqa  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %V
+  ret <4 x i32> %0
+}
+
+define <2 x i64> @test_v2i64_abi_alignment(<2 x i64>* %V) {
+; ALL-LABEL: test_v2i64_abi_alignment:
+; SSE: movdqa  (%rdi), %xmm0
+; AVX: vmovdqa  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %V
+  ret <2 x i64> %0
+}
+
+define <4 x float> @test_v4f32_abi_alignment(<4 x float>* %V) {
+; ALL-LABEL: test_v4f32_abi_alignment:
+; SSE: movaps  (%rdi), %xmm0
+; AVX: vmovaps  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x float>, <4 x float>* %V
+  ret <4 x float> %0
+}
+
+define <2 x double> @test_v2f64_abi_alignment(<2 x double>* %V) {
+; ALL-LABEL: test_v2f64_abi_alignment:
+; SSE: movapd  (%rdi), %xmm0
+; AVX: vmovapd  (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x double>, <2 x double>* %V
+  ret <2 x double> %0
+}




