[llvm] Add support for x87 registers on GISel register selection (PR #83528)

via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 29 23:55:55 PST 2024


================
@@ -0,0 +1,237 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=i686-- -mattr=+x87,-sse,-sse2 -global-isel -stop-after=regbankselect | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+x87,-sse,-sse2 -global-isel -stop-after=regbankselect | FileCheck %s --check-prefix=X64
+
+define x86_fp80 @f0(x86_fp80 noundef %a) {
+  ; X32-LABEL: name: f0
+  ; X32: bb.1.entry:
+  ; X32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X32-NEXT:   [[LOAD:%[0-9]+]]:psr(s80) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s80) from %fixed-stack.0, align 4)
+  ; X32-NEXT:   [[C:%[0-9]+]]:psr(s80) = G_FCONSTANT x86_fp80 0xK400A8000000000000000
+  ; X32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %stack.0.a.addr
+  ; X32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %stack.1.x
+  ; X32-NEXT:   G_STORE [[LOAD]](s80), [[FRAME_INDEX1]](p0) :: (store (s80) into %ir.a.addr, align 16)
+  ; X32-NEXT:   G_STORE [[C]](s80), [[FRAME_INDEX2]](p0) :: (store (s80) into %ir.x, align 16)
+  ; X32-NEXT:   [[LOAD1:%[0-9]+]]:psr(s80) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s80) from %ir.a.addr, align 16)
+  ; X32-NEXT:   [[LOAD2:%[0-9]+]]:psr(s80) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (s80) from %ir.x, align 16)
+  ; X32-NEXT:   [[FADD:%[0-9]+]]:psr(s80) = G_FADD [[LOAD1]], [[LOAD2]]
+  ; X32-NEXT:   $fp0 = COPY [[FADD]](s80)
+  ; X32-NEXT:   RET 0, implicit $fp0
+  ;
+  ; X64-LABEL: name: f0
+  ; X64: bb.1.entry:
+  ; X64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X64-NEXT:   [[LOAD:%[0-9]+]]:psr(s80) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s80) from %fixed-stack.0, align 16)
+  ; X64-NEXT:   [[C:%[0-9]+]]:psr(s80) = G_FCONSTANT x86_fp80 0xK400A8000000000000000
+  ; X64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %stack.0.a.addr
+  ; X64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %stack.1.x
+  ; X64-NEXT:   G_STORE [[LOAD]](s80), [[FRAME_INDEX1]](p0) :: (store (s80) into %ir.a.addr, align 16)
+  ; X64-NEXT:   G_STORE [[C]](s80), [[FRAME_INDEX2]](p0) :: (store (s80) into %ir.x, align 16)
+  ; X64-NEXT:   [[LOAD1:%[0-9]+]]:psr(s80) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s80) from %ir.a.addr, align 16)
+  ; X64-NEXT:   [[LOAD2:%[0-9]+]]:psr(s80) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (s80) from %ir.x, align 16)
+  ; X64-NEXT:   [[FADD:%[0-9]+]]:psr(s80) = G_FADD [[LOAD1]], [[LOAD2]]
+  ; X64-NEXT:   $fp0 = COPY [[FADD]](s80)
+  ; X64-NEXT:   RET 0, implicit $fp0
+entry:
+  %a.addr = alloca x86_fp80, align 16
+  %x = alloca x86_fp80, align 16
+  store x86_fp80 %a, ptr %a.addr, align 16
+  store x86_fp80 0xK400A8000000000000000, ptr %x, align 16
+  %0 = load x86_fp80, ptr %a.addr, align 16
+  %1 = load x86_fp80, ptr %x, align 16
+  %add = fadd x86_fp80 %0, %1
+  ret x86_fp80 %add
+}
+
+declare x86_fp80 @llvm.sqrt.f80(x86_fp80)
+
+define void @f1(ptr %a, ptr %b) {
+  ; X32-LABEL: name: f1
+  ; X32: bb.1 (%ir-block.0):
+  ; X32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X32-NEXT:   [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+  ; X32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X32-NEXT:   [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
+  ; X32-NEXT:   [[LOAD2:%[0-9]+]]:psr(s80) = G_LOAD [[LOAD]](p0) :: (load (s80) from %ir.a, align 4)
+  ; X32-NEXT:   [[LOAD3:%[0-9]+]]:psr(s80) = G_LOAD [[LOAD1]](p0) :: (load (s80) from %ir.b, align 4)
+  ; X32-NEXT:   [[FSUB:%[0-9]+]]:psr(s80) = G_FSUB [[LOAD2]], [[LOAD3]]
+  ; X32-NEXT:   G_STORE [[FSUB]](s80), [[LOAD]](p0) :: (store (s80) into %ir.a, align 4)
+  ; X32-NEXT:   RET 0
+  ;
+  ; X64-LABEL: name: f1
+  ; X64: bb.1 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi, $rsi
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
+  ; X64-NEXT:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
+  ; X64-NEXT:   [[LOAD:%[0-9]+]]:psr(s80) = G_LOAD [[COPY]](p0) :: (load (s80) from %ir.a, align 4)
+  ; X64-NEXT:   [[LOAD1:%[0-9]+]]:psr(s80) = G_LOAD [[COPY1]](p0) :: (load (s80) from %ir.b, align 4)
+  ; X64-NEXT:   [[FSUB:%[0-9]+]]:psr(s80) = G_FSUB [[LOAD]], [[LOAD1]]
+  ; X64-NEXT:   G_STORE [[FSUB]](s80), [[COPY]](p0) :: (store (s80) into %ir.a, align 4)
+  ; X64-NEXT:   RET 0
+  %1 = load x86_fp80, ptr %a, align 4
+  %2 = load x86_fp80, ptr %b, align 4
+  %sub = fsub x86_fp80 %1, %2
+  store x86_fp80 %sub, ptr %a, align 4
+  ret void
+}
+
+define void @f2(ptr %a, ptr %b) {
+  ; X32-LABEL: name: f2
+  ; X32: bb.1 (%ir-block.0):
+  ; X32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X32-NEXT:   [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+  ; X32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X32-NEXT:   [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
+  ; X32-NEXT:   [[LOAD2:%[0-9]+]]:psr(s80) = G_LOAD [[LOAD]](p0) :: (load (s80) from %ir.a, align 16)
+  ; X32-NEXT:   [[LOAD3:%[0-9]+]]:psr(s80) = G_LOAD [[LOAD1]](p0) :: (load (s80) from %ir.b, align 16)
+  ; X32-NEXT:   [[FMUL:%[0-9]+]]:psr(s80) = G_FMUL [[LOAD2]], [[LOAD3]]
+  ; X32-NEXT:   G_STORE [[FMUL]](s80), [[LOAD]](p0) :: (store (s80) into %ir.a, align 16)
+  ; X32-NEXT:   RET 0
+  ;
+  ; X64-LABEL: name: f2
+  ; X64: bb.1 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi, $rsi
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
+  ; X64-NEXT:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
+  ; X64-NEXT:   [[LOAD:%[0-9]+]]:psr(s80) = G_LOAD [[COPY]](p0) :: (load (s80) from %ir.a, align 16)
+  ; X64-NEXT:   [[LOAD1:%[0-9]+]]:psr(s80) = G_LOAD [[COPY1]](p0) :: (load (s80) from %ir.b, align 16)
+  ; X64-NEXT:   [[FMUL:%[0-9]+]]:psr(s80) = G_FMUL [[LOAD]], [[LOAD1]]
+  ; X64-NEXT:   G_STORE [[FMUL]](s80), [[COPY]](p0) :: (store (s80) into %ir.a, align 16)
+  ; X64-NEXT:   RET 0
+  %1 = load x86_fp80, ptr %a, align 16
+  %2 = load x86_fp80, ptr %b, align 16
+  %mul = fmul x86_fp80 %1, %2
+  store x86_fp80 %mul, ptr %a, align 16
+  ret void
+}
+
+define void @f3(ptr %a, ptr %b) {
+  ; X32-LABEL: name: f3
+  ; X32: bb.1 (%ir-block.0):
+  ; X32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X32-NEXT:   [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+  ; X32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X32-NEXT:   [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
+  ; X32-NEXT:   [[LOAD2:%[0-9]+]]:psr(s80) = G_LOAD [[LOAD]](p0) :: (load (s80) from %ir.a, align 4)
+  ; X32-NEXT:   [[LOAD3:%[0-9]+]]:psr(s80) = G_LOAD [[LOAD1]](p0) :: (load (s80) from %ir.b, align 4)
+  ; X32-NEXT:   [[FDIV:%[0-9]+]]:psr(s80) = G_FDIV [[LOAD2]], [[LOAD3]]
+  ; X32-NEXT:   G_STORE [[FDIV]](s80), [[LOAD]](p0) :: (store (s80) into %ir.a, align 4)
+  ; X32-NEXT:   RET 0
+  ;
+  ; X64-LABEL: name: f3
+  ; X64: bb.1 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi, $rsi
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
+  ; X64-NEXT:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
+  ; X64-NEXT:   [[LOAD:%[0-9]+]]:psr(s80) = G_LOAD [[COPY]](p0) :: (load (s80) from %ir.a, align 4)
+  ; X64-NEXT:   [[LOAD1:%[0-9]+]]:psr(s80) = G_LOAD [[COPY1]](p0) :: (load (s80) from %ir.b, align 4)
+  ; X64-NEXT:   [[FDIV:%[0-9]+]]:psr(s80) = G_FDIV [[LOAD]], [[LOAD1]]
+  ; X64-NEXT:   G_STORE [[FDIV]](s80), [[COPY]](p0) :: (store (s80) into %ir.a, align 4)
+  ; X64-NEXT:   RET 0
+  %1 = load x86_fp80, ptr %a, align 4
+  %2 = load x86_fp80, ptr %b, align 4
+  %div = fdiv x86_fp80 %1, %2
+  store x86_fp80 %div, ptr %a, align 4
+  ret void
+}
+
+define float @f4(float %val) {
+  ; X32-LABEL: name: f4
+  ; X32: bb.1 (%ir-block.0):
+  ; X32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X32-NEXT:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s32) from %fixed-stack.0)
+  ; X32-NEXT:   $fp0 = COPY [[LOAD]](s32)
+  ; X32-NEXT:   RET 0, implicit $fp0
+  ;
+  ; X64-LABEL: name: f4
+  ; X64: bb.1 (%ir-block.0):
+  ; X64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X64-NEXT:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s32) from %fixed-stack.0, align 16)
+  ; X64-NEXT:   $xmm0 = COPY [[LOAD]](s32)
+  ; X64-NEXT:   RET 0, implicit $xmm0
+  ret float %val
+}
+
+define void @f5(ptr %a, ptr %b) {
+  ; X32-LABEL: name: f5
+  ; X32: bb.1 (%ir-block.0):
+  ; X32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X32-NEXT:   [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+  ; X32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X32-NEXT:   [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
+  ; X32-NEXT:   [[LOAD2:%[0-9]+]]:gpr(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.a, align 8)
+  ; X32-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 4
+  ; X32-NEXT:   [[PTR_ADD:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[LOAD]], [[C]](s32)
+  ; X32-NEXT:   [[COPY:%[0-9]+]]:gpr(p0) = COPY [[PTR_ADD]](p0)
+  ; X32-NEXT:   [[LOAD3:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
+  ; X32-NEXT:   [[MV:%[0-9]+]]:gpr(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
+  ; X32-NEXT:   [[LOAD4:%[0-9]+]]:gpr(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.b, align 8)
+  ; X32-NEXT:   [[PTR_ADD1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[LOAD1]], [[C]](s32)
+  ; X32-NEXT:   [[LOAD5:%[0-9]+]]:gpr(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
+  ; X32-NEXT:   [[MV1:%[0-9]+]]:gpr(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
+  ; X32-NEXT:   [[COPY1:%[0-9]+]]:psr(s64) = COPY [[MV]](s64)
+  ; X32-NEXT:   [[COPY2:%[0-9]+]]:psr(s64) = COPY [[MV1]](s64)
+  ; X32-NEXT:   [[FADD:%[0-9]+]]:psr(s64) = G_FADD [[COPY1]], [[COPY2]]
+  ; X32-NEXT:   [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[FADD]](s64)
+  ; X32-NEXT:   [[UV:%[0-9]+]]:gpr(s32), [[UV1:%[0-9]+]]:gpr(s32) = G_UNMERGE_VALUES [[COPY3]](s64)
+  ; X32-NEXT:   G_STORE [[UV]](s32), [[LOAD]](p0) :: (store (s32) into %ir.a, align 8)
+  ; X32-NEXT:   G_STORE [[UV1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.a + 4, basealign 8)
+  ; X32-NEXT:   RET 0
+  ;
+  ; X64-LABEL: name: f5
+  ; X64: bb.1 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi, $rsi
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
+  ; X64-NEXT:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
+  ; X64-NEXT:   [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.a)
+  ; X64-NEXT:   [[LOAD1:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY1]](p0) :: (load (s64) from %ir.b)
+  ; X64-NEXT:   [[COPY2:%[0-9]+]]:psr(s64) = COPY [[LOAD]](s64)
+  ; X64-NEXT:   [[COPY3:%[0-9]+]]:psr(s64) = COPY [[LOAD1]](s64)
+  ; X64-NEXT:   [[FADD:%[0-9]+]]:psr(s64) = G_FADD [[COPY2]], [[COPY3]]
+  ; X64-NEXT:   [[COPY4:%[0-9]+]]:gpr(s64) = COPY [[FADD]](s64)
+  ; X64-NEXT:   G_STORE [[COPY4]](s64), [[COPY]](p0) :: (store (s64) into %ir.a)
+  ; X64-NEXT:   RET 0
+  %1 = load double, ptr %a, align 8
+  %2 = load double, ptr %b, align 8
+  %add = fadd double %1, %2
+  store double %add, ptr %a, align 8
+  ret void
+}
+
+define void @f6(ptr %0, ptr %1) {
+  ; X32-LABEL: name: f6
+  ; X32: bb.1 (%ir-block.2):
+  ; X32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X32-NEXT:   [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+  ; X32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X32-NEXT:   [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
+  ; X32-NEXT:   [[C:%[0-9]+]]:psr(s32) = G_FCONSTANT float 2.000000e+01
+  ; X32-NEXT:   [[LOAD2:%[0-9]+]]:gpr(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.0)
+  ; X32-NEXT:   [[COPY:%[0-9]+]]:psr(s32) = COPY [[LOAD2]](s32)
+  ; X32-NEXT:   [[FADD:%[0-9]+]]:psr(s32) = G_FADD [[COPY]], [[C]]
+  ; X32-NEXT:   [[COPY1:%[0-9]+]]:gpr(s32) = COPY [[FADD]](s32)
+  ; X32-NEXT:   G_STORE [[COPY1]](s32), [[LOAD1]](p0) :: (store (s32) into %ir.1)
+  ; X32-NEXT:   RET 0
+  ;
+  ; X64-LABEL: name: f6
+  ; X64: bb.1 (%ir-block.2):
+  ; X64-NEXT:   liveins: $rdi, $rsi
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
+  ; X64-NEXT:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
+  ; X64-NEXT:   [[C:%[0-9]+]]:psr(s32) = G_FCONSTANT float 2.000000e+01
+  ; X64-NEXT:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.0)
+  ; X64-NEXT:   [[COPY2:%[0-9]+]]:psr(s32) = COPY [[LOAD]](s32)
+  ; X64-NEXT:   [[FADD:%[0-9]+]]:psr(s32) = G_FADD [[COPY2]], [[C]]
+  ; X64-NEXT:   [[COPY3:%[0-9]+]]:gpr(s32) = COPY [[FADD]](s32)
+  ; X64-NEXT:   G_STORE [[COPY3]](s32), [[COPY1]](p0) :: (store (s32) into %ir.1)
+  ; X64-NEXT:   RET 0
+  %3 = load float, ptr %0
+  %4 = fadd float %3, 20.0
+  store float %4, ptr %1
+  ret void
+}
----------------
MalaySanghiIntel wrote:

No. We (@e-kud and I) plan to add support for more instructions in a separate patch.

https://github.com/llvm/llvm-project/pull/83528

