<div dir="ltr">Sure - updated the CHECK at r228086.<br></div><div class="gmail_extra"><br><div class="gmail_quote">On Tue, Feb 3, 2015 at 4:42 PM, Nadav Rotem <span dir="ltr"><<a href="mailto:nrotem@apple.com" target="_blank">nrotem@apple.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"><div class="HOEnZb"><div class="h5"><br>
> On Feb 3, 2015, at 9:13 AM, Sanjay Patel <<a href="mailto:spatel@rotateright.com">spatel@rotateright.com</a>> wrote:<br>
><br>
> Author: spatel<br>
> Date: Tue Feb  3 11:13:04 2015<br>
> New Revision: 227983<br>
><br>
> URL: <a href="http://llvm.org/viewvc/llvm-project?rev=227983&view=rev" target="_blank">http://llvm.org/viewvc/llvm-project?rev=227983&view=rev</a><br>
> Log:<br>
> Fix program crashes due to alignment exceptions generated for SSE memop instructions (PR22371).<br>
><br>
> r224330 introduced a bug by misinterpreting the "FeatureVectorUAMem" bit.<br>
> The commit log says that change did not affect anything, but that's not correct.<br>
> That change allowed SSE instructions to have unaligned mem operands folded into<br>
> math ops, and that's not allowed in the default specification for any SSE variant.<br>
><br>
> The bug is exposed when compiling for an AVX-capable CPU that had this feature<br>
> flag but without enabling AVX codegen. Another mistake in r224330 was not adding<br>
> the feature flag to all AVX CPUs; the AMD chips were excluded.<br>
><br>
> This is part of the fix for PR22371 ( <a href="http://llvm.org/bugs/show_bug.cgi?id=22371" target="_blank">http://llvm.org/bugs/show_bug.cgi?id=22371</a> ).<br>
><br>
> This feature bit is SSE-specific, so I've renamed it to "FeatureSSEUnalignedMem".<br>
> Changed the existing test case for the feature bit to reflect the new name and<br>
> renamed the test file itself to better reflect the feature.<br>
> Added runs to fold-vex.ll to check for the failing codegen.<br>
><br>
> Note that the feature bit is not set by default on any CPU because it may require a<br>
> configuration register setting to enable the enhanced unaligned behavior.<br>
><br>
><br>
> Added:<br>
>    llvm/trunk/test/CodeGen/X86/sse-unaligned-mem-feature.ll<br>
> Removed:<br>
>    llvm/trunk/test/CodeGen/X86/2010-01-07-UAMemFeature.ll<br>
> Modified:<br>
>    llvm/trunk/lib/Target/X86/X86.td<br>
>    llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td<br>
>    llvm/trunk/lib/Target/X86/X86Subtarget.cpp<br>
>    llvm/trunk/lib/Target/X86/X86Subtarget.h<br>
>    llvm/trunk/test/CodeGen/X86/fold-vex.ll<br>
><br>
> Modified: llvm/trunk/lib/Target/X86/X86.td<br>
> URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86.td?rev=227983&r1=227982&r2=227983&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86.td?rev=227983&r1=227982&r2=227983&view=diff</a><br>
> ==============================================================================<br>
> --- llvm/trunk/lib/Target/X86/X86.td (original)<br>
> +++ llvm/trunk/lib/Target/X86/X86.td Tue Feb  3 11:13:04 2015<br>
> @@ -132,9 +132,9 @@ def FeatureFMA4    : SubtargetFeature<"f<br>
> def FeatureXOP     : SubtargetFeature<"xop", "HasXOP", "true",<br>
>                                       "Enable XOP instructions",<br>
>                                       [FeatureFMA4]>;<br>
> -def FeatureVectorUAMem : SubtargetFeature<"vector-unaligned-mem",<br>
> -                                          "HasVectorUAMem", "true",<br>
> -                 "Allow unaligned memory operands on vector/SIMD instructions">;<br>
> +def FeatureSSEUnalignedMem : SubtargetFeature<"sse-unaligned-mem",<br>
> +                                          "HasSSEUnalignedMem", "true",<br>
> +                      "Allow unaligned memory operands with SSE instructions">;<br>
> def FeatureAES     : SubtargetFeature<"aes", "HasAES", "true",<br>
>                                       "Enable AES instructions",<br>
>                                       [FeatureSSE2]>;<br>
> @@ -309,7 +309,6 @@ class SandyBridgeProc<string Name> : Pro<br>
>                                        FeatureCMPXCHG16B,<br>
>                                        FeatureFastUAMem,<br>
>                                        FeatureSlowUAMem32,<br>
> -                                       FeatureVectorUAMem,<br>
>                                        FeaturePOPCNT,<br>
>                                        FeatureAES,<br>
>                                        FeaturePCLMUL<br>
> @@ -322,7 +321,6 @@ class IvyBridgeProc<string Name> : Proce<br>
>                                      FeatureCMPXCHG16B,<br>
>                                      FeatureFastUAMem,<br>
>                                      FeatureSlowUAMem32,<br>
> -                                     FeatureVectorUAMem,<br>
>                                      FeaturePOPCNT,<br>
>                                      FeatureAES,<br>
>                                      FeaturePCLMUL,<br>
> @@ -337,7 +335,6 @@ class HaswellProc<string Name> : Process<br>
>                                    FeatureAVX2,<br>
>                                    FeatureCMPXCHG16B,<br>
>                                    FeatureFastUAMem,<br>
> -                                   FeatureVectorUAMem,<br>
>                                    FeaturePOPCNT,<br>
>                                    FeatureAES,<br>
>                                    FeaturePCLMUL,<br>
> @@ -360,7 +357,6 @@ class BroadwellProc<string Name> : Proce<br>
>                                      FeatureAVX2,<br>
>                                      FeatureCMPXCHG16B,<br>
>                                      FeatureFastUAMem,<br>
> -                                     FeatureVectorUAMem,<br>
>                                      FeaturePOPCNT,<br>
>                                      FeatureAES,<br>
>                                      FeaturePCLMUL,<br>
> @@ -388,7 +384,7 @@ class KnightsLandingProc<string Name> :<br>
>                       FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,<br>
>                       FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,<br>
>                       FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,<br>
> -                      FeatureSlowIncDec, FeatureVectorUAMem]>;<br>
> +                      FeatureSlowIncDec]>;<br>
> def : KnightsLandingProc<"knl">;<br>
><br>
> // FIXME: define SKX model<br>
> @@ -399,7 +395,7 @@ class SkylakeProc<string Name> : Process<br>
>                       FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,<br>
>                       FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,<br>
>                       FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,<br>
> -                      FeatureSlowIncDec, FeatureSGX, FeatureVectorUAMem]>;<br>
> +                      FeatureSlowIncDec, FeatureSGX]>;<br>
> def : SkylakeProc<"skylake">;<br>
> def : SkylakeProc<"skx">; // Legacy alias.<br>
><br>
><br>
> Modified: llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td<br>
> URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td?rev=227983&r1=227982&r2=227983&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td?rev=227983&r1=227982&r2=227983&view=diff</a><br>
> ==============================================================================<br>
> --- llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td (original)<br>
> +++ llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td Tue Feb  3 11:13:04 2015<br>
> @@ -433,7 +433,7 @@ def alignedloadv8i64  : PatFrag<(ops nod<br>
> // setting a feature bit in the processor (on startup, for example).<br>
> // Opteron 10h and later implement such a feature.<br>
> def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{<br>
> -  return    Subtarget->hasVectorUAMem()<br>
> +  return    Subtarget->hasSSEUnalignedMem()<br>
>          || cast<LoadSDNode>(N)->getAlignment() >= 16;<br>
> }]>;<br>
><br>
><br>
> Modified: llvm/trunk/lib/Target/X86/X86Subtarget.cpp<br>
> URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Subtarget.cpp?rev=227983&r1=227982&r2=227983&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Subtarget.cpp?rev=227983&r1=227982&r2=227983&view=diff</a><br>
> ==============================================================================<br>
> --- llvm/trunk/lib/Target/X86/X86Subtarget.cpp (original)<br>
> +++ llvm/trunk/lib/Target/X86/X86Subtarget.cpp Tue Feb  3 11:13:04 2015<br>
> @@ -265,7 +265,7 @@ void X86Subtarget::initializeEnvironment<br>
>   IsSHLDSlow = false;<br>
>   IsUAMemFast = false;<br>
>   IsUAMem32Slow = false;<br>
> -  HasVectorUAMem = false;<br>
> +  HasSSEUnalignedMem = false;<br>
>   HasCmpxchg16b = false;<br>
>   UseLeaForSP = false;<br>
>   HasSlowDivide32 = false;<br>
><br>
> Modified: llvm/trunk/lib/Target/X86/X86Subtarget.h<br>
> URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Subtarget.h?rev=227983&r1=227982&r2=227983&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Subtarget.h?rev=227983&r1=227982&r2=227983&view=diff</a><br>
> ==============================================================================<br>
> --- llvm/trunk/lib/Target/X86/X86Subtarget.h (original)<br>
> +++ llvm/trunk/lib/Target/X86/X86Subtarget.h Tue Feb  3 11:13:04 2015<br>
> @@ -162,9 +162,9 @@ protected:<br>
>   /// True if unaligned 32-byte memory accesses are slow.<br>
>   bool IsUAMem32Slow;<br>
><br>
> -  /// HasVectorUAMem - True if SIMD operations can have unaligned memory<br>
> -  /// operands. This may require setting a feature bit in the processor.<br>
> -  bool HasVectorUAMem;<br>
> +  /// True if SSE operations can have unaligned memory operands.<br>
> +  /// This may require setting a configuration bit in the processor.<br>
> +  bool HasSSEUnalignedMem;<br>
><br>
>   /// HasCmpxchg16b - True if this processor has the CMPXCHG16B instruction;<br>
>   /// this is true for most x86-64 chips, but not the first AMD chips.<br>
> @@ -375,7 +375,7 @@ public:<br>
>   bool isSHLDSlow() const { return IsSHLDSlow; }<br>
>   bool isUnalignedMemAccessFast() const { return IsUAMemFast; }<br>
>   bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }<br>
> -  bool hasVectorUAMem() const { return HasVectorUAMem; }<br>
> +  bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }<br>
>   bool hasCmpxchg16b() const { return HasCmpxchg16b; }<br>
>   bool useLeaForSP() const { return UseLeaForSP; }<br>
>   bool hasSlowDivide32() const { return HasSlowDivide32; }<br>
><br>
> Removed: llvm/trunk/test/CodeGen/X86/2010-01-07-UAMemFeature.ll<br>
> URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-01-07-UAMemFeature.ll?rev=227982&view=auto" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-01-07-UAMemFeature.ll?rev=227982&view=auto</a><br>
> ==============================================================================<br>
> --- llvm/trunk/test/CodeGen/X86/2010-01-07-UAMemFeature.ll (original)<br>
> +++ llvm/trunk/test/CodeGen/X86/2010-01-07-UAMemFeature.ll (removed)<br>
> @@ -1,11 +0,0 @@<br>
> -; RUN: llc -mcpu=yonah -mattr=vector-unaligned-mem -march=x86 < %s | FileCheck %s<br>
> -; CHECK: addps (<br>
> -<br>
> -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"<br>
> -target triple = "x86_64-unknown-linux-gnu"<br>
> -<br>
> -define <4 x float> @foo(<4 x float>* %P, <4 x float> %In) nounwind {<br>
> -     %A = load <4 x float>* %P, align 4<br>
> -     %B = fadd <4 x float> %A, %In<br>
> -     ret <4 x float> %B<br>
> -}<br>
><br>
> Modified: llvm/trunk/test/CodeGen/X86/fold-vex.ll<br>
> URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-vex.ll?rev=227983&r1=227982&r2=227983&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-vex.ll?rev=227983&r1=227982&r2=227983&view=diff</a><br>
> ==============================================================================<br>
> --- llvm/trunk/test/CodeGen/X86/fold-vex.ll (original)<br>
> +++ llvm/trunk/test/CodeGen/X86/fold-vex.ll Tue Feb  3 11:13:04 2015<br>
> @@ -1,12 +1,18 @@<br>
> ; Use CPU parameters to ensure that a CPU-specific attribute is not overriding the AVX definition.<br>
><br>
> -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s<br>
> -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 | FileCheck %s<br>
> -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s<br>
> +; RUN: llc < %s -mtriple=x86_64-unknown-unknown                  -mattr=+avx | FileCheck %s<br>
> +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx             | FileCheck %s<br>
> +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2                 | FileCheck %s<br>
> +; RUN: llc < %s -mtriple=x86_64-unknown-unknown                  -mattr=-avx | FileCheck %s --check-prefix=SSE<br>
> +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=-avx | FileCheck %s --check-prefix=SSE<br>
> +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2     -mattr=-avx | FileCheck %s --check-prefix=SSE<br>
><br>
> ; No need to load unaligned operand from memory using an explicit instruction with AVX.<br>
> ; The operand should be folded into the AND instr.<br>
><br>
> +; With SSE, folding memory operands into math/logic ops requires 16-byte alignment<br>
> +; unless specially configured on some CPUs such as AMD Family 10H.<br>
> +<br>
> define <4 x i32> @test1(<4 x i32>* %p0, <4 x i32> %in1) nounwind {<br>
>   %in0 = load <4 x i32>* %p0, align 2<br>
>   %a = and <4 x i32> %in0, %in1<br>
> @@ -16,5 +22,10 @@ define <4 x i32> @test1(<4 x i32>* %p0,<br>
> ; CHECK-NOT:   vmovups<br>
> ; CHECK:       vandps (%rdi), %xmm0, %xmm0<br>
> ; CHECK-NEXT:  ret<br>
> +<br>
> +; SSE-LABEL: @test1<br>
> +; SSE:       movups (%rdi), %xmm1<br>
> +; SSE-NEXT:  andps %xmm1, %xmm0<br>
> +; SSE-NEXT:  ret<br>
> }<br>
><br>
><br>
> Added: llvm/trunk/test/CodeGen/X86/sse-unaligned-mem-feature.ll<br>
> URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-unaligned-mem-feature.ll?rev=227983&view=auto" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-unaligned-mem-feature.ll?rev=227983&view=auto</a><br>
> ==============================================================================<br>
> --- llvm/trunk/test/CodeGen/X86/sse-unaligned-mem-feature.ll (added)<br>
> +++ llvm/trunk/test/CodeGen/X86/sse-unaligned-mem-feature.ll Tue Feb  3 11:13:04 2015<br>
> @@ -0,0 +1,13 @@<br>
> +; RUN: llc -mcpu=yonah -mattr=sse-unaligned-mem -march=x86 < %s | FileCheck %s<br>
> +<br>
> +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"<br>
> +target triple = "x86_64-unknown-linux-gnu"<br>
> +<br>
> +define <4 x float> @foo(<4 x float>* %P, <4 x float> %In) nounwind {<br>
> +     %A = load <4 x float>* %P, align 4<br>
> +     %B = fadd <4 x float> %A, %In<br>
> +     ret <4 x float> %B<br>
> +<br>
> +; CHECK-LABEL: @foo<br>
> +; CHECK:       addps (<br>
> +}<br>
><br>
<br>
</div></div>Sanjay, can you please complete the CHECK line?  You can write something like “addps (%rdi)”.<br>
<div class="HOEnZb"><div class="h5"><br>
><br>
> _______________________________________________<br>
> llvm-commits mailing list<br>
> <a href="mailto:llvm-commits@cs.uiuc.edu">llvm-commits@cs.uiuc.edu</a><br>
> <a href="http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits" target="_blank">http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits</a><br>
<br>
</div></div></blockquote></div><br></div>