[llvm] r331800 - Changing constants in a test (NFC)

Daniel Neilson via llvm-commits llvm-commits at lists.llvm.org
Tue May 8 12:08:12 PDT 2018


Author: dneilson
Date: Tue May  8 12:08:12 2018
New Revision: 331800

URL: http://llvm.org/viewvc/llvm-project?rev=331800&view=rev
Log:
Changing constants in a test (NFC)

Summary:
Changing the lengths of the atomic memory intrinsics in a test to make sure
that they don't get lowered into loads/stores if/when expansion of these
intrinsics occurs in SelectionDAG.

Modified:
    llvm/trunk/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll

Modified: llvm/trunk/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll?rev=331800&r1=331799&r2=331800&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll Tue May  8 12:08:12 2018
@@ -2,46 +2,46 @@
 
 define i8* @test_memcpy1(i8* %P, i8* %Q) {
   ; CHECK: test_memcpy
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1, i32 1)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 1)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $1, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memcpy_element_unordered_atomic_1
 }
 
 define i8* @test_memcpy2(i8* %P, i8* %Q) {
   ; CHECK: test_memcpy2
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 2, i32 2)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 2)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $2, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memcpy_element_unordered_atomic_2
 }
 
 define i8* @test_memcpy4(i8* %P, i8* %Q) {
   ; CHECK: test_memcpy4
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 4, i32 4)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 4)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $4, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memcpy_element_unordered_atomic_4
 }
 
 define i8* @test_memcpy8(i8* %P, i8* %Q) {
   ; CHECK: test_memcpy8
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 8, i32 8)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 1024, i32 8)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $8, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memcpy_element_unordered_atomic_8
 }
 
 define i8* @test_memcpy16(i8* %P, i8* %Q) {
   ; CHECK: test_memcpy16
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 16, i32 16)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 1024, i32 16)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $16, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memcpy_element_unordered_atomic_16
 }
 
@@ -57,53 +57,54 @@ define void @test_memcpy_args(i8** %Stor
   ; 2nd arg (%rsi)
   ; CHECK-DAG: movq 8(%rdi), %rsi
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $4, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memcpy_element_unordered_atomic_4
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 4, i32 4)  ret void
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 1024, i32 4)
+  ret void
 }
 
 define i8* @test_memmove1(i8* %P, i8* %Q) {
   ; CHECK: test_memmove
-  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1, i32 1)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 1)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $1, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memmove_element_unordered_atomic_1
 }
 
 define i8* @test_memmove2(i8* %P, i8* %Q) {
   ; CHECK: test_memmove2
-  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 2, i32 2)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 2)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $2, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memmove_element_unordered_atomic_2
 }
 
 define i8* @test_memmove4(i8* %P, i8* %Q) {
   ; CHECK: test_memmove4
-  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 4, i32 4)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 4)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $4, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memmove_element_unordered_atomic_4
 }
 
 define i8* @test_memmove8(i8* %P, i8* %Q) {
   ; CHECK: test_memmove8
-  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 8, i32 8)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 1024, i32 8)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $8, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memmove_element_unordered_atomic_8
 }
 
 define i8* @test_memmove16(i8* %P, i8* %Q) {
   ; CHECK: test_memmove16
-  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 16, i32 16)
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 1024, i32 16)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $16, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memmove_element_unordered_atomic_16
 }
 
@@ -119,53 +120,54 @@ define void @test_memmove_args(i8** %Sto
   ; 2nd arg (%rsi)
   ; CHECK-DAG: movq 8(%rdi), %rsi
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $4, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memmove_element_unordered_atomic_4
-  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 4, i32 4)  ret void
+  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 1024, i32 4)
+  ret void
 }
 
 define i8* @test_memset1(i8* %P, i8 %V) {
   ; CHECK: test_memset
-  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 1, i32 1)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %P, i8 %V, i32 1024, i32 1)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $1, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memset_element_unordered_atomic_1
 }
 
 define i8* @test_memset2(i8* %P, i8 %V) {
   ; CHECK: test_memset2
-  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 2, i32 2)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %P, i8 %V, i32 1024, i32 2)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $2, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memset_element_unordered_atomic_2
 }
 
 define i8* @test_memset4(i8* %P, i8 %V) {
   ; CHECK: test_memset4
-  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 4, i32 4)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 1024, i32 4)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $4, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memset_element_unordered_atomic_4
 }
 
 define i8* @test_memset8(i8* %P, i8 %V) {
   ; CHECK: test_memset8
-  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %P, i8 %V, i32 8, i32 8)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %P, i8 %V, i32 1024, i32 8)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $8, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memset_element_unordered_atomic_8
 }
 
 define i8* @test_memset16(i8* %P, i8 %V) {
   ; CHECK: test_memset16
-  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %P, i8 %V, i32 16, i32 16)
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %P, i8 %V, i32 1024, i32 16)
   ret i8* %P
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $16, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memset_element_unordered_atomic_16
 }
 
@@ -179,9 +181,10 @@ define void @test_memset_args(i8** %Stor
   ; 2nd arg (%rsi)
   ; CHECK-DAG: movzbl (%rsi), %esi
   ; 3rd arg (%edx) -- length
-  ; CHECK-DAG: movl $4, %edx
+  ; CHECK-DAG: movl $1024, %edx
   ; CHECK: __llvm_memset_element_unordered_atomic_4
-  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %Dst, i8 %Val, i32 4, i32 4)  ret void
+  call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %Dst, i8 %Val, i32 1024, i32 4)
+  ret void
 }
 
 declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind




More information about the llvm-commits mailing list