[llvm] r206557 - This commit enables unaligned memory accesses of vector types on the AArch64 backend. This should boost vectorized code performance.

Jiangning Liu jiangning.liu at arm.com
Thu Apr 17 20:58:38 PDT 2014


Author: jiangning
Date: Thu Apr 17 22:58:38 2014
New Revision: 206557

URL: http://llvm.org/viewvc/llvm-project?rev=206557&view=rev
Log:
This commit enables unaligned memory accesses of vector types on the AArch64 backend. This should boost vectorized code performance.

Patch by Z. Zheng.
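
To illustrate the kind of access this change targets: a minimal,
hypothetical C++ snippet (not part of the patch; assumes clang -O2 with
-target aarch64-none-linux-gnu and NEON enabled) whose vector load and
store carry "align 1" in the IR, and which should now lower to a single
ld1/st1 pair instead of a byte-by-byte expansion:

  #include <cstdint>

  // A 128-bit integer vector type whose alignment is deliberately lowered to
  // 1 byte, so every access through a pointer to it is an unaligned vector
  // access at the IR level.
  typedef int32_t v4i32 __attribute__((vector_size(16), aligned(1)));

  void copy16(const v4i32 *src, v4i32 *dst) {
    *dst = *src; // load <4 x i32> ... align 1 + store <4 x i32> ... align 1
  }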


Added:
    llvm/trunk/test/CodeGen/AArch64/unaligned-vector-ld1-st1.ll
Modified:
    llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp
    llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=206557&r1=206556&r2=206557&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Thu Apr 17 22:58:38 2014
@@ -4412,6 +4412,50 @@ AArch64TargetLowering::isFMAFasterThanFM
 
   return false;
 }
+
+bool AArch64TargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+                                                          unsigned AddrSpace,
+                                                          bool *Fast) const {
+  const AArch64Subtarget *Subtarget = getSubtarget();
+  // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
+  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
+
+  switch (VT.getSimpleVT().SimpleTy) {
+  default:
+    return false;
+  // Scalar types
+  case MVT::i8:  case MVT::i16:
+  case MVT::i32: case MVT::i64:
+  case MVT::f32: case MVT::f64: {
+    // Unaligned access can use (for example) LDRB, LDRH, LDR.
+    if (AllowsUnaligned) {
+      if (Fast)
+        *Fast = true;
+      return true;
+    }
+    return false;
+  }
+  // 64-bit vector types
+  case MVT::v8i8:  case MVT::v4i16:
+  case MVT::v2i32: case MVT::v1i64:
+  case MVT::v2f32: case MVT::v1f64:
+  // 128-bit vector types
+  case MVT::v16i8: case MVT::v8i16:
+  case MVT::v4i32: case MVT::v2i64:
+  case MVT::v4f32: case MVT::v2f64: {
+    // Any little-endian target with NEON can support unaligned load/store of
+    // V registers using ld1/st1.
+    // A big-endian target may also explicitly allow unaligned accesses.
+    if (Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian())) {
+      if (Fast)
+        *Fast = true;
+      return true;
+    }
+    return false;
+  }
+  }
+}
+
 // Check whether a shuffle_vector could be presented as concat_vector.
 bool AArch64TargetLowering::isConcatVector(SDValue Op, SelectionDAG &DAG,
                                            SDValue V0, SDValue V1,
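
For readers skimming the hunk above, the following standalone sketch (a
deliberate simplification, not LLVM code: MVTs and the subtarget are
replaced with a plain enum and bools, and the names are made up for
illustration) mirrors the decision table the new hook implements:

  #include <cstdio>

  enum class AccessKind { Scalar, Vector64, Vector128, Other };

  // Scalars require the subtarget to tolerate unaligned accesses (SCTLR.A
  // clear); 64- and 128-bit vectors are additionally accepted on any
  // little-endian target with NEON, where ld1/st1 can be used.
  static bool allowsUnaligned(AccessKind Kind, bool AllowsUnalignedMem,
                              bool HasNEON, bool IsLittleEndian) {
    switch (Kind) {
    case AccessKind::Scalar:
      return AllowsUnalignedMem;                                // ldrb/ldrh/ldr
    case AccessKind::Vector64:
    case AccessKind::Vector128:
      return HasNEON && (AllowsUnalignedMem || IsLittleEndian); // ld1/st1
    default:
      return false;
    }
  }

  int main() {
    // Big-endian, strict alignment: unaligned vector accesses are rejected.
    std::printf("%d\n", allowsUnaligned(AccessKind::Vector128, false, true, false));
    // Little-endian with NEON: accepted even without AllowsUnalignedMem.
    std::printf("%d\n", allowsUnaligned(AccessKind::Vector128, false, true, true));
  }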

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h?rev=206557&r1=206556&r2=206557&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h Thu Apr 17 22:58:38 2014
@@ -355,6 +355,12 @@ public:
   /// expanded to fmul + fadd.
   virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
 
+  /// allowsUnalignedMemoryAccesses - Returns true if the target allows
+  /// unaligned memory accesses of the specified type. Returns whether it
+  /// is "fast" by reference in the last argument.
+  virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
+                                             bool *Fast) const;
+
   ConstraintType getConstraintType(const std::string &Constraint) const;
 
   ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &Info,

Modified: llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp?rev=206557&r1=206556&r2=206557&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp Thu Apr 17 22:58:38 2014
@@ -25,6 +25,25 @@
 
 using namespace llvm;
 
+enum AlignMode {
+  DefaultAlign,
+  StrictAlign,
+  NoStrictAlign
+};
+
+static cl::opt<AlignMode>
+Align(cl::desc("Load/store alignment support"),
+      cl::Hidden, cl::init(DefaultAlign),
+      cl::values(
+          clEnumValN(DefaultAlign,  "aarch64-default-align",
+                     "Generate unaligned accesses only on hardware/OS "
+                     "combinations that are known to support them"),
+          clEnumValN(StrictAlign,   "aarch64-strict-align",
+                     "Disallow all unaligned memory accesses"),
+          clEnumValN(NoStrictAlign, "aarch64-no-strict-align",
+                     "Allow unaligned memory accesses"),
+          clEnumValEnd));
+
 // Pin the vtable to this file.
 void AArch64Subtarget::anchor() {}
 
@@ -39,6 +58,8 @@ AArch64Subtarget::AArch64Subtarget(Strin
 
 void AArch64Subtarget::initializeSubtargetFeatures(StringRef CPU,
                                                    StringRef FS) {
+  AllowsUnalignedMem = false;
+
   if (CPU.empty())
     CPUString = "generic";
 
@@ -52,6 +73,19 @@ void AArch64Subtarget::initializeSubtarg
   }
 
   ParseSubtargetFeatures(CPU, FullFS);
+
+  switch (Align) {
+    case DefaultAlign:
+      // Linux targets support unaligned accesses on AArch64.
+      AllowsUnalignedMem = isTargetLinux();
+      break;
+    case StrictAlign:
+      AllowsUnalignedMem = false;
+      break;
+    case NoStrictAlign:
+      AllowsUnalignedMem = true;
+      break;
+  }
 }
 
 bool AArch64Subtarget::GVIsIndirectSymbol(const GlobalValue *GV,
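
A note on the three alignment modes above: with the default setting,
unaligned accesses are enabled only when the target OS is Linux
(AllowsUnalignedMem = isTargetLinux()); -aarch64-strict-align forces the
element-by-element expansion even there, and -aarch64-no-strict-align
enables unaligned accesses unconditionally. The RUN lines of the test
added below (e.g. "llc -mtriple=aarch64_be-none-linux-gnu
-aarch64-strict-align -mattr=+neon") exercise all three modes for both
endiannesses.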

Modified: llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h?rev=206557&r1=206556&r2=206557&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h Thu Apr 17 22:58:38 2014
@@ -38,6 +38,11 @@ protected:
   bool HasNEON;
   bool HasCrypto;
 
+  /// AllowsUnalignedMem - If true, the subtarget allows unaligned memory
+  /// accesses for some types.  For details, see
+  /// AArch64TargetLowering::allowsUnalignedMemoryAccesses().
+  bool AllowsUnalignedMem;
+
   /// TargetTriple - What processor and OS we're targeting.
   Triple TargetTriple;
 
@@ -74,6 +79,8 @@ public:
   bool hasNEON() const { return HasNEON; }
   bool hasCrypto() const { return HasCrypto; }
 
+  bool allowsUnalignedMem() const { return AllowsUnalignedMem; }
+
   bool isLittle() const { return IsLittleEndian; }
 
   const std::string & getCPUString() const { return CPUString; }

Added: llvm/trunk/test/CodeGen/AArch64/unaligned-vector-ld1-st1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/unaligned-vector-ld1-st1.ll?rev=206557&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/unaligned-vector-ld1-st1.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/unaligned-vector-ld1-st1.ll Thu Apr 17 22:58:38 2014
@@ -0,0 +1,172 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon -o - | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64_be-none-linux-gnu -mattr=+neon -o - | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -aarch64-no-strict-align -mattr=+neon -o - | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64_be-none-linux-gnu -aarch64-no-strict-align -mattr=+neon -o - | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -aarch64-strict-align -mattr=+neon -o - | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64_be-none-linux-gnu -aarch64-strict-align -mattr=+neon -o - | FileCheck %s --check-prefix=BE-STRICT-ALIGN
+
+;; Check element-aligned 128-bit vector load/store - integer
+define <16 x i8> @qwordint (<16 x i8>* %head.v16i8,   <8 x i16>* %head.v8i16,   <4 x i32>* %head.v4i32, <2 x i64>* %head.v2i64,
+                            <16 x i8>* %tail.v16i8,   <8 x i16>* %tail.v8i16,   <4 x i32>* %tail.v4i32, <2 x i64>* %tail.v2i64) {
+; CHECK-LABEL: qwordint
+; CHECK: ld1     {v0.16b}, [x0]
+; CHECK: ld1     {v1.8h}, [x1]
+; CHECK: ld1     {v2.4s}, [x2]
+; CHECK: ld1     {v3.2d}, [x3]
+; CHECK: st1     {v0.16b}, [x4]
+; CHECK: st1     {v1.8h}, [x5]
+; CHECK: st1     {v2.4s}, [x6]
+; CHECK: st1     {v3.2d}, [x7]
+; BE-STRICT-ALIGN-LABEL: qwordint
+; BE-STRICT-ALIGN: ldrb
+; BE-STRICT-ALIGN: ldrh
+; BE-STRICT-ALIGN: ldr
+; BE-STRICT-ALIGN: ldr
+; BE-STRICT-ALIGN: strb
+; BE-STRICT-ALIGN: strh
+; BE-STRICT-ALIGN: str
+; BE-STRICT-ALIGN: str
+entry:
+  %val.v16i8 = load <16 x i8>* %head.v16i8, align 1
+  %val.v8i16 = load <8 x i16>* %head.v8i16, align 2
+  %val.v4i32 = load <4 x i32>* %head.v4i32, align 4
+  %val.v2i64 = load <2 x i64>* %head.v2i64, align 8
+  store <16 x i8> %val.v16i8, <16 x i8>* %tail.v16i8, align 1
+  store <8 x i16> %val.v8i16, <8 x i16>* %tail.v8i16, align 2
+  store <4 x i32> %val.v4i32, <4 x i32>* %tail.v4i32, align 4
+  store <2 x i64> %val.v2i64, <2 x i64>* %tail.v2i64, align 8
+  ret <16 x i8> %val.v16i8
+}
+
+;; Check element-aligned 128-bit vector load/store - floating point
+define <4 x float> @qwordfloat (<4 x float>* %head.v4f32,   <2 x double>* %head.v2f64,
+                                <4 x float>* %tail.v4f32,   <2 x double>* %tail.v2f64) {
+; CHECK-LABEL: qwordfloat
+; CHECK: ld1     {v0.4s}, [x0]
+; CHECK: ld1     {v1.2d}, [x1]
+; CHECK: st1     {v0.4s}, [x2]
+; CHECK: st1     {v1.2d}, [x3]
+; BE-STRICT-ALIGN-LABEL: qwordfloat
+; BE-STRICT-ALIGN: ldr
+; BE-STRICT-ALIGN: ldr
+; BE-STRICT-ALIGN: str
+; BE-STRICT-ALIGN: str
+entry:
+  %val.v4f32 = load <4 x float>*  %head.v4f32, align 4
+  %val.v2f64 = load <2 x double>* %head.v2f64, align 8
+  store <4 x float>  %val.v4f32, <4 x float>*  %tail.v4f32, align 4
+  store <2 x double> %val.v2f64, <2 x double>* %tail.v2f64, align 8
+  ret <4 x float> %val.v4f32
+}
+
+;; Check element-aligned 64-bit vector load/store - integer
+define <8 x i8> @dwordint (<8 x i8>* %head.v8i8,   <4 x i16>* %head.v4i16,   <2 x i32>* %head.v2i32, <1 x i64>* %head.v1i64,
+                           <8 x i8>* %tail.v8i8,   <4 x i16>* %tail.v4i16,   <2 x i32>* %tail.v2i32, <1 x i64>* %tail.v1i64) {
+; CHECK-LABEL: dwordint
+; CHECK: ld1     {v0.8b}, [x0]
+; CHECK: ld1     {v1.4h}, [x1]
+; CHECK: ld1     {v2.2s}, [x2]
+; CHECK: ld1     {v3.1d}, [x3]
+; CHECK: st1     {v0.8b}, [x4]
+; CHECK: st1     {v1.4h}, [x5]
+; CHECK: st1     {v2.2s}, [x6]
+; CHECK: st1     {v3.1d}, [x7]
+; BE-STRICT-ALIGN-LABEL: dwordint
+; BE-STRICT-ALIGN: ldrb
+; BE-STRICT-ALIGN: ldrh
+; BE-STRICT-ALIGN: ldr
+; BE-STRICT-ALIGN: ld1     {v1.1d}, [x3]
+; BE-STRICT-ALIGN: strb
+; BE-STRICT-ALIGN: strh
+; BE-STRICT-ALIGN: str
+; BE-STRICT-ALIGN: st1     {v1.1d}, [x7]
+entry:
+  %val.v8i8  = load <8 x i8>*  %head.v8i8,  align 1
+  %val.v4i16 = load <4 x i16>* %head.v4i16, align 2
+  %val.v2i32 = load <2 x i32>* %head.v2i32, align 4
+  %val.v1i64 = load <1 x i64>* %head.v1i64, align 8
+  store <8 x i8>  %val.v8i8,  <8 x i8>*  %tail.v8i8 , align 1
+  store <4 x i16> %val.v4i16, <4 x i16>* %tail.v4i16, align 2
+  store <2 x i32> %val.v2i32, <2 x i32>* %tail.v2i32, align 4
+  store <1 x i64> %val.v1i64, <1 x i64>* %tail.v1i64, align 8
+  ret <8 x i8> %val.v8i8
+}
+
+;; Check element-aligned 64-bit vector load/store - floating point
+define <2 x float> @dwordfloat (<2 x float>* %head.v2f32,   <1 x double>* %head.v1f64,
+                                <2 x float>* %tail.v2f32,   <1 x double>* %tail.v1f64) {
+; CHECK-LABEL: dwordfloat
+; CHECK: ld1     {v0.2s}, [x0]
+; CHECK: ld1     {v1.1d}, [x1]
+; CHECK: st1     {v0.2s}, [x2]
+; CHECK: st1     {v1.1d}, [x3]
+; BE-STRICT-ALIGN-LABEL: dwordfloat
+; BE-STRICT-ALIGN: ldr
+; BE-STRICT-ALIGN: ld1     {v1.1d}, [x1]
+; BE-STRICT-ALIGN: str
+; BE-STRICT-ALIGN: st1     {v1.1d}, [x3]
+entry:
+  %val.v2f32 = load <2 x float>*  %head.v2f32, align 4
+  %val.v1f64 = load <1 x double>* %head.v1f64, align 8
+  store <2 x float>  %val.v2f32, <2 x float>* %tail.v2f32, align 4
+  store <1 x double> %val.v1f64, <1 x double>* %tail.v1f64, align 8
+  ret <2 x float> %val.v2f32
+}
+
+;; Check load/store of 128-bit vectors with less-than 16-byte alignment
+define <2 x i64> @align2vi64 (<2 x i64>* %head.byte, <2 x i64>* %head.half, <2 x i64>* %head.word, <2 x i64>* %head.dword,
+                              <2 x i64>* %tail.byte, <2 x i64>* %tail.half, <2 x i64>* %tail.word, <2 x i64>* %tail.dword) {
+; CHECK-LABEL: align2vi64
+; CHECK: ld1     {v0.2d}, [x0]
+; CHECK: ld1     {v1.2d}, [x1]
+; CHECK: ld1     {v2.2d}, [x2]
+; CHECK: ld1     {v3.2d}, [x3]
+; CHECK: st1     {v0.2d}, [x4]
+; CHECK: st1     {v1.2d}, [x5]
+; CHECK: st1     {v2.2d}, [x6]
+; CHECK: st1     {v3.2d}, [x7]
+; BE-STRICT-ALIGN-LABEL: align2vi64
+; BE-STRICT-ALIGN: ldrb     
+; BE-STRICT-ALIGN: ldrh     
+; BE-STRICT-ALIGN: ldr
+; BE-STRICT-ALIGN: strb     
+; BE-STRICT-ALIGN: strh     
+; BE-STRICT-ALIGN: str
+entry:
+  %val.byte  = load <2 x i64>* %head.byte,  align 1
+  %val.half  = load <2 x i64>* %head.half,  align 2
+  %val.word  = load <2 x i64>* %head.word,  align 4
+  %val.dword = load <2 x i64>* %head.dword, align 8
+  store <2 x i64> %val.byte,  <2 x i64>* %tail.byte,  align 1
+  store <2 x i64> %val.half,  <2 x i64>* %tail.half,  align 2
+  store <2 x i64> %val.word,  <2 x i64>* %tail.word,  align 4
+  store <2 x i64> %val.dword, <2 x i64>* %tail.dword, align 8
+  ret <2 x i64> %val.byte
+}
+
+;; Check load/store of 64-bit vectors with less-than 8-byte alignment
+define <2 x float> @align2vf32 (<2 x float>* %head.byte, <2 x float>* %head.half, <2 x float>* %head.word, <2 x float>* %head.dword,
+                                <2 x float>* %tail.byte, <2 x float>* %tail.half, <2 x float>* %tail.word, <2 x float>* %tail.dword) {
+; CHECK-LABEL: align2vf32
+; CHECK: ld1     {v0.2s}, [x0]
+; CHECK: ld1     {v1.2s}, [x1]
+; CHECK: ld1     {v2.2s}, [x2]
+; CHECK: st1     {v0.2s}, [x4]
+; CHECK: st1     {v1.2s}, [x5]
+; CHECK: st1     {v2.2s}, [x6]
+; BE-STRICT-ALIGN-LABEL: align2vf32
+; BE-STRICT-ALIGN: ldrb 
+; BE-STRICT-ALIGN: ldrh    
+; BE-STRICT-ALIGN: ldr
+; BE-STRICT-ALIGN: strb    
+; BE-STRICT-ALIGN: strh    
+; BE-STRICT-ALIGN: str
+entry:
+  %val.byte  = load <2 x float>* %head.byte,  align 1
+  %val.half  = load <2 x float>* %head.half,  align 2
+  %val.word  = load <2 x float>* %head.word,  align 4
+  store <2 x float> %val.byte,  <2 x float>* %tail.byte,  align 1
+  store <2 x float> %val.half,  <2 x float>* %tail.half,  align 2
+  store <2 x float> %val.word,  <2 x float>* %tail.word,  align 4
+  ret <2 x float> %val.byte
+}





More information about the llvm-commits mailing list