[llvm] r283763 - [ARM] Fix invalid VLDM/VSTM access when targeting Big Endian with NEON

Alexandros Lamprineas via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 10 09:01:55 PDT 2016


Author: alelab01
Date: Mon Oct 10 11:01:54 2016
New Revision: 283763

URL: http://llvm.org/viewvc/llvm-project?rev=283763&view=rev
Log:
[ARM] Fix invalid VLDM/VSTM access when targeting Big Endian with NEON

The VLDM/VSTM instructions can only access word-aligned memory
locations and raise an alignment fault if that condition is not met.

The compiler currently generates VLDM/VSTM for v2f64 loads/stores
regardless of the alignment of the memory access. Instead, if a v2f64
load/store is not word-aligned, the compiler should generate
VLD1/VST1. When targeting Big Endian, a VREV instruction should
additionally be emitted for each such VLD1/VST1 that is not
double-word-aligned.
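As an illustration (a minimal sketch, not part of this commit; the function
name is hypothetical), IR of the kind affected by this change:

  ; A <2 x double> load/store with only 1-byte alignment. Previously the
  ; backend could select VLDM/VSTM here; with this patch, little-endian
  ; targets are expected to use VLD1/VST1, and big-endian targets
  ; VLD1.8/VST1.8 plus VREV64.8.
  define void @copy_v2f64_unaligned(<2 x double>* %src, <2 x double>* %dst) {
    %v = load <2 x double>, <2 x double>* %src, align 1
    store <2 x double> %v, <2 x double>* %dst, align 1
    ret void
  }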

Differential Revision: https://reviews.llvm.org/D25281

Added:
    llvm/trunk/test/CodeGen/ARM/load_store_multiple.ll
Modified:
    llvm/trunk/lib/Target/ARM/ARMInstrNEON.td

Modified: llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrNEON.td?rev=283763&r1=283762&r2=283763&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrNEON.td Mon Oct 10 11:01:54 2016
@@ -610,14 +610,14 @@ def NEONimmAllOnesV: PatLeaf<(NEONvmovIm
 def VLDMQIA
   : PseudoVFPLdStM<(outs DPair:$dst), (ins GPR:$Rn),
                     IIC_fpLoad_m, "",
-                   [(set DPair:$dst, (v2f64 (load GPR:$Rn)))]>;
+                   [(set DPair:$dst, (v2f64 (word_alignedload GPR:$Rn)))]>;
 
 // Use VSTM to store a Q register as a D register pair.
 // This is a pseudo instruction that is expanded to VSTMD after reg alloc.
 def VSTMQIA
   : PseudoVFPLdStM<(outs), (ins DPair:$src, GPR:$Rn),
                     IIC_fpStore_m, "",
-                   [(store (v2f64 DPair:$src), GPR:$Rn)]>;
+                   [(word_alignedstore (v2f64 DPair:$src), GPR:$Rn)]>;
 
 // Classes for VLD* pseudo-instructions with multi-register operands.
 // These are expanded to real instructions after register allocation.
@@ -6849,6 +6849,16 @@ let Predicates = [IsBE] in {
   def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (VREV64q32 QPR:$src)>;
 }
 
+// Use VLD1/VST1 + VREV for non-word-aligned v2f64 load/store on Big Endian
+def : Pat<(v2f64 (byte_alignedload addrmode6:$addr)),
+          (VREV64q8 (VLD1q8 addrmode6:$addr))>, Requires<[IsBE]>;
+def : Pat<(byte_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
+          (VST1q8 addrmode6:$addr, (VREV64q8 QPR:$value))>, Requires<[IsBE]>;
+def : Pat<(v2f64 (hword_alignedload addrmode6:$addr)),
+          (VREV64q16 (VLD1q16 addrmode6:$addr))>, Requires<[IsBE]>;
+def : Pat<(hword_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
+          (VST1q16 addrmode6:$addr, (VREV64q16 QPR:$value))>, Requires<[IsBE]>;
+
 // Fold extracting an element out of a v2i32 into a vfp register.
 def : Pat<(f32 (bitconvert (i32 (extractelt (v2i32 DPR:$src), imm:$lane)))),
           (f32 (EXTRACT_SUBREG DPR:$src, (SSubReg_f32_reg imm:$lane)))>;

Added: llvm/trunk/test/CodeGen/ARM/load_store_multiple.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/load_store_multiple.ll?rev=283763&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/load_store_multiple.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/load_store_multiple.ll Mon Oct 10 11:01:54 2016
@@ -0,0 +1,68 @@
+; RUN: llc -mtriple=armv7-eabi -mattr=+neon %s -o - | FileCheck %s --check-prefix=CHECK-LE
+; RUN: llc -mtriple=armv7eb-eabi -mattr=+neon %s -o - | FileCheck %s --check-prefix=CHECK-BE
+
+define void @ld_st_vec_i8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK-LE-LABEL: ld_st_vec_i8:
+;CHECK-LE: vld1.8 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [{{r[0-9]+}}]
+;CHECK-LE-NOT: vrev
+;CHECK-LE: vst1.8 {[[D1]], [[D2]]}, [{{r[0-9]+}}]
+
+;CHECK-BE-LABEL: ld_st_vec_i8:
+;CHECK-BE: vld1.8 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [{{r[0-9]+}}]
+;CHECK-BE: vrev64.8 [[Q1:q[0-9]+]], [[Q2:q[0-9]+]]
+;CHECK-BE: vrev64.8 [[Q1]], [[Q2]]
+;CHECK-BE: vst1.8 {[[D1]], [[D2]]}, [{{r[0-9]+}}]
+
+%load = load <16 x i8>, <16 x i8>* %A, align 1
+store <16 x i8> %load, <16 x i8>* %B, align 1
+ret void
+}
+
+define void @ld_st_vec_i16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK-LE-LABEL: ld_st_vec_i16:
+;CHECK-LE: vld1.16 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [{{r[0-9]+}}]
+;CHECK-LE-NOT: vrev
+;CHECK-LE: vst1.16 {[[D1]], [[D2]]}, [{{r[0-9]+}}]
+
+;CHECK-BE-LABEL: ld_st_vec_i16:
+;CHECK-BE: vld1.16 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [{{r[0-9]+}}]
+;CHECK-BE: vrev64.16 [[Q1:q[0-9]+]], [[Q2:q[0-9]+]]
+;CHECK-BE: vrev64.16 [[Q1]], [[Q2]]
+;CHECK-BE: vst1.16 {[[D1]], [[D2]]}, [{{r[0-9]+}}]
+
+%load = load <8 x i16>, <8 x i16>* %A, align 2
+store <8 x i16> %load, <8 x i16>* %B, align 2
+ret void
+}
+
+define void @ld_st_vec_i32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK-LE-LABEL: ld_st_vec_i32:
+;CHECK-LE: vld1.32 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [{{r[0-9]+}}]
+;CHECK-LE-NOT: vrev
+;CHECK-LE: vst1.32 {[[D1]], [[D2]]}, [{{r[0-9]+}}]
+
+;CHECK-BE-LABEL: ld_st_vec_i32:
+;CHECK-BE: vldmia {{r[0-9]+}}, {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}
+;CHECK-BE-NOT: vrev
+;CHECK-BE: vstmia {{r[0-9]+}}, {[[D1]], [[D2]]}
+
+%load = load <4 x i32>, <4 x i32>* %A, align 4
+store <4 x i32> %load, <4 x i32>* %B, align 4
+ret void
+}
+
+define void @ld_st_vec_double(<2 x double>* %A, <2 x double>* %B) nounwind {
+;CHECK-LE-LABEL: ld_st_vec_double:
+;CHECK-LE: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [{{r[0-9]+}}]
+;CHECK-LE-NOT: vrev
+;CHECK-LE: vst1.64 {[[D1]], [[D2]]}, [{{r[0-9]+}}]
+
+;CHECK-BE-LABEL: ld_st_vec_double:
+;CHECK-BE: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [{{r[0-9]+}}]
+;CHECK-BE-NOT: vrev
+;CHECK-BE: vst1.64 {[[D1]], [[D2]]}, [{{r[0-9]+}}]
+
+%load = load <2 x double>, <2 x double>* %A, align 8
+store <2 x double> %load, <2 x double>* %B, align 8
+ret void
+}