[llvm] r253773 - [X86][SSE] Legal XMM Register Class ordering for SSE1

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Nov 21 04:38:37 PST 2015


Author: rksimon
Date: Sat Nov 21 06:38:34 2015
New Revision: 253773

URL: http://llvm.org/viewvc/llvm-project?rev=253773&view=rev
Log:
[X86][SSE] Legal XMM Register Class ordering for SSE1

It turns out we have a number of places that just grab the first type attached to a register class for various reasons. This is fine unless that type isn't legal on the current target, as is the case for SSE1, which doesn't support v16i8/v8i16/v4i32/v2i64 - all of which were listed before v4f32 in the class.
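For illustration only (not part of the patch), the pattern in question looks roughly like the C++ sketch below; these names are hypothetical stand-ins rather than LLVM's actual API, but the point is that callers take whatever type sits at index 0 of the register class's type list, so that slot needs to hold a type that is legal wherever the class is used:

  // Hypothetical C++ sketch of the "first type of a register class" pattern.
  // None of these names are LLVM's real API; they only model the TableGen
  // type ordering that this patch changes.
  #include <cassert>
  #include <vector>

  enum class SimpleVT { v16i8, v8i16, v4i32, v2i64, v4f32, v2f64 };

  struct RegClassInfo {
    // Value types in the same order as the RegisterClass definition in
    // X86RegisterInfo.td.
    std::vector<SimpleVT> Types;
  };

  // Callers that just need "a" type for the class tend to take the first
  // entry. With the old VR128 ordering this handed back v16i8, which is not
  // legal on SSE1; with the new ordering it hands back v4f32, which is.
  SimpleVT getDefaultTypeForClass(const RegClassInfo &RC) {
    assert(!RC.Types.empty() && "register class with no value types");
    return RC.Types.front();
  }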

Given that this is such a rare situation, I've simply re-ordered the types so that the float types are declared first.

Fix for PR16133

Differential Revision: http://reviews.llvm.org/D14787
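For reference, the kind of source that trips this (see the new inline-sse.ll test below) is an inline-asm statement whose output is constrained to an XMM register while only SSE1 is enabled. A minimal C++ sketch, assuming a clang invocation along the lines of -m32 -msse -mno-sse2, would be:

  // Sketch of a PR16133-style trigger: an inline-asm output tied to an
  // XMM register ("=x") on a target where only SSE1 vector types are legal.
  #include <xmmintrin.h> // __m128, _mm_storeu_ps (SSE1)

  void repro(float *out) {
    __m128 v;
    // "=x" forces the backend to choose a value type for the XMM/VR128
    // register class; on an SSE1-only target only v4f32 is legal, so the
    // class's first listed type must be a float type.
    __asm__("" : "=x"(v));
    _mm_storeu_ps(out, v); // keep the asm result live
  }

The IR form of this is what the added test checks on both SSE1-only and SSE2 configurations.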

Added:
    llvm/trunk/test/CodeGen/X86/inline-sse.ll
Modified:
    llvm/trunk/lib/Target/X86/X86RegisterInfo.td

Modified: llvm/trunk/lib/Target/X86/X86RegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86RegisterInfo.td?rev=253773&r1=253772&r2=253773&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86RegisterInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86RegisterInfo.td Sat Nov 21 06:38:34 2015
@@ -442,10 +442,11 @@ def RST : RegisterClass<"X86", [f80, f64
 }
 
 // Generic vector registers: VR64 and VR128.
+// Ensure that float types are declared first - only float is legal on SSE1.
 def VR64: RegisterClass<"X86", [x86mmx], 64, (sequence "MM%u", 0, 7)>;
-def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
+def VR128 : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
                           128, (add FR32)>;
-def VR256 : RegisterClass<"X86", [v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+def VR256 : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
                           256, (sequence "YMM%u", 0, 15)>;
 
 // Status flags registers.
@@ -468,9 +469,9 @@ def FR32X : RegisterClass<"X86", [f32],
 def FR64X : RegisterClass<"X86", [f64], 64, (add FR32X)>;
 
 // Extended VR128 and VR256 for AVX-512 instructions
-def VR128X : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
+def VR128X : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
                            128, (add FR32X)>;
-def VR256X : RegisterClass<"X86", [v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+def VR256X : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
                            256, (sequence "YMM%u", 0, 31)>;
 
 // Mask registers

Added: llvm/trunk/test/CodeGen/X86/inline-sse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/inline-sse.ll?rev=253773&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/inline-sse.ll (added)
+++ llvm/trunk/test/CodeGen/X86/inline-sse.ll Sat Nov 21 06:38:34 2015
@@ -0,0 +1,32 @@
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
+
+; PR16133 - we must treat XMM registers as v4f32 as SSE1 targets don't permit other vector types.
+
+define void @nop() nounwind {
+; X32-LABEL: nop:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:    movl %esp, %ebp
+; X32-NEXT:    andl $-16, %esp
+; X32-NEXT:    subl $32, %esp
+; X32-NEXT:    #APP
+; X32-NEXT:    #NO_APP
+; X32-NEXT:    movaps %xmm0, (%esp)
+; X32-NEXT:    movl %ebp, %esp
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    retl
+;
+; X64-LABEL: nop:
+; X64:       # BB#0:
+; X64-NEXT:    #APP
+; X64-NEXT:    #NO_APP
+; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    retq
+  %1 = alloca <4 x float>, align 16
+  %2 = call <4 x float> asm "", "=x,~{dirflag},~{fpsr},~{flags}"()
+  store <4 x float> %2, <4 x float>* %1, align 16
+  ret void
+}



