[llvm] r278599 - [x86] add tests to show missed 64-bit immediate merging

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Sat Aug 13 11:42:16 PDT 2016


Author: spatel
Date: Sat Aug 13 13:42:14 2016
New Revision: 278599

URL: http://llvm.org/viewvc/llvm-project?rev=278599&view=rev
Log:
[x86] add tests to show missed 64-bit immediate merging

Tests are slightly modified versions of those written by
Sunita Marathe in D23391.
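For context, a 64-bit constant that fits in a sign-extended 32-bit immediate
costs 4 bytes every time it is encoded directly into an instruction. Merging
means materializing such a constant in a register once and reusing that
register at the remaining use sites when optimizing for size. A rough sketch
of the intended saving, using hypothetical globals a and b and the usual
encodings' byte counts; the exact sequence D23391 settles on may differ:

    # Per-use immediate form, as in the current output checked below:
    movq $-1, a(%rip)       # 11 bytes: REX.W + opcode + ModRM + disp32 + imm32
    movq $-1, b(%rip)       # another 11 bytes

    # Merged form: pay for the constant once, then use register stores:
    movq $-1, %rax          # 7 bytes
    movq %rax, a(%rip)      # 7 bytes, no imm32 needed
    movq %rax, b(%rip)      # 7 bytes

In this example the register form is already smaller with two stores, and the
gap grows with each additional user of the constant.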


Added:
    llvm/trunk/test/CodeGen/X86/immediate_merging64.ll

Added: llvm/trunk/test/CodeGen/X86/immediate_merging64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/immediate_merging64.ll?rev=278599&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/immediate_merging64.ll (added)
+++ llvm/trunk/test/CodeGen/X86/immediate_merging64.ll Sat Aug 13 13:42:14 2016
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+; Check that multiple instances of 64-bit constants encodable as
+; 32-bit immediates are merged for code size savings.
+
+@g1 = common global i64 0, align 8
+@g2 = common global i64 0, align 8
+@g3 = common global i64 0, align 8
+@g4 = common global i64 0, align 8
+
+; Immediates with multiple users should not be pulled into instructions when
+; optimizing for code size.
+define void @imm_multiple_users(i64 %l1, i64 %l2, i64 %l3, i64 %l4) optsize {
+; CHECK-LABEL: imm_multiple_users:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movq $-1, {{.*}}(%rip)
+; CHECK-NEXT:    cmpq $-1, %rdx
+; CHECK-NEXT:    cmovneq %rsi, %rdi
+; CHECK-NEXT:    movq %rdi, {{.*}}(%rip)
+; CHECK-NEXT:    movq $-1, %rax
+; CHECK-NEXT:    # kill: %CL<def> %CL<kill> %RCX<kill>
+; CHECK-NEXT:    shlq %cl, %rax
+; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
+; CHECK-NEXT:    movq $0, {{.*}}(%rip)
+; CHECK-NEXT:    retq
+;
+  store i64 -1, i64* @g1, align 8
+  %cmp = icmp eq i64 %l3, -1
+  %sel = select i1 %cmp, i64 %l1, i64 %l2
+  store i64 %sel, i64* @g2, align 8
+  %and = and i64 %l4, 63
+  %shl = shl i64 -1, %and
+  store i64 %shl, i64* @g3, align 8
+  store i64 0, i64* @g4, align 8
+  ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+
+; Inlined memsets requiring multiple same-sized stores should be lowered using
+; the register, rather than immediate, form of stores when optimizing for
+; code size.
+define void @memset_zero(i8* noalias nocapture %D) optsize {
+; CHECK-LABEL: memset_zero:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movq $0, 7(%rdi)
+; CHECK-NEXT:    movq $0, (%rdi)
+; CHECK-NEXT:    retq
+;
+  tail call void @llvm.memset.p0i8.i64(i8* %D, i8 0, i64 15, i32 1, i1 false)
+  ret void
+}
+
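The memset_zero CHECK lines above pin down the current lowering: two movq $0
stores, each carrying a 4-byte immediate (8 + 7 = 15 bytes). One plausible
size-optimized sequence, shown here only to illustrate the register form the
test comment asks for and not necessarily what the final patch will emit,
zeroes a register once and uses the shorter register stores:

    xorl %eax, %eax         # 2 bytes; writing %eax clears all of %rax
    movq %rax, 7(%rdi)      # 4 bytes
    movq %rax, (%rdi)       # 3 bytes

That is 9 bytes in place of 15, which is the code-size saving the test
comment is getting at.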



