[llvm] 23b2613 - [X86] Add test coverage for Issue #59217

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 27 08:37:26 PST 2022


Author: Simon Pilgrim
Date: 2022-11-27T16:37:17Z
New Revision: 23b261377e10f44be8b5ee4c3567a90dbcf20b62

URL: https://github.com/llvm/llvm-project/commit/23b261377e10f44be8b5ee4c3567a90dbcf20b62
DIFF: https://github.com/llvm/llvm-project/commit/23b261377e10f44be8b5ee4c3567a90dbcf20b62.diff

LOG: [X86] Add test coverage for Issue #59217

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-mul.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll
index 461b4c40be103..b7b90b72d19ae 100644
--- a/llvm/test/CodeGen/X86/combine-mul.ll
+++ b/llvm/test/CodeGen/X86/combine-mul.ll
@@ -430,6 +430,130 @@ define <4 x i32> @combine_mul_self_demandedbits_vector(<4 x i32> %x) {
   ret <4 x i32> %3
 }
 
+; PR59217 - Reuse umul_lohi/smul_lohi node
+
+define i64 @combine_mul_umul_lohi_i64(i64 %a, i64 %b) {
+; SSE-LABEL: combine_mul_umul_lohi_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    mulq %rsi
+; SSE-NEXT:    imulq %rsi, %rdi
+; SSE-NEXT:    xorq %rdx, %rdi
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_mul_umul_lohi_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    mulq %rsi
+; AVX-NEXT:    imulq %rsi, %rdi
+; AVX-NEXT:    xorq %rdx, %rdi
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    retq
+  %a128 = zext i64 %a to i128
+  %b128 = zext i64 %b to i128
+  %m128 = mul nuw i128 %a128, %b128
+  %hi128 = lshr i128 %m128, 64
+  %hi = trunc i128 %hi128 to i64
+  %lo = mul i64 %a, %b
+  %r = xor i64 %lo, %hi
+  ret i64 %r
+}
+
+define i64 @combine_mul_smul_lohi_commute_i64(i64 %a, i64 %b) {
+; SSE-LABEL: combine_mul_smul_lohi_commute_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    imulq %rsi
+; SSE-NEXT:    imulq %rdi, %rsi
+; SSE-NEXT:    xorq %rdx, %rsi
+; SSE-NEXT:    movq %rsi, %rax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_mul_smul_lohi_commute_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    imulq %rsi
+; AVX-NEXT:    imulq %rdi, %rsi
+; AVX-NEXT:    xorq %rdx, %rsi
+; AVX-NEXT:    movq %rsi, %rax
+; AVX-NEXT:    retq
+  %a128 = sext i64 %a to i128
+  %b128 = sext i64 %b to i128
+  %m128 = mul nsw i128 %a128, %b128
+  %hi128 = lshr i128 %m128, 64
+  %hi = trunc i128 %hi128 to i64
+  %lo = mul i64 %b, %a
+  %r = xor i64 %lo, %hi
+  ret i64 %r
+}
+
+define i64 @combine_mul_umul_lohi_const_i64(i64 %h) {
+; SSE-LABEL: combine_mul_umul_lohi_const_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movabsq $-4265267296055464877, %rcx # imm = 0xC4CEB9FE1A85EC53
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    mulq %rcx
+; SSE-NEXT:    imulq %rdi, %rcx
+; SSE-NEXT:    xorq %rdx, %rcx
+; SSE-NEXT:    movq %rcx, %rax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_mul_umul_lohi_const_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movabsq $-4265267296055464877, %rcx # imm = 0xC4CEB9FE1A85EC53
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    mulq %rcx
+; AVX-NEXT:    imulq %rdi, %rcx
+; AVX-NEXT:    xorq %rdx, %rcx
+; AVX-NEXT:    movq %rcx, %rax
+; AVX-NEXT:    retq
+  %h128 = zext i64 %h to i128
+  %m128 = mul nuw i128 %h128, 14181476777654086739
+  %hi128 = lshr i128 %m128, 64
+  %hi = trunc i128 %hi128 to i64
+  %lo = mul i64 %h, 14181476777654086739
+  %r = xor i64 %lo, %hi
+  ret i64 %r
+}
+
+define i64 @combine_mul_smul_lohi_const_i64(i64 %h) {
+; SSE-LABEL: combine_mul_smul_lohi_const_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq %rdi, %rsi
+; SSE-NEXT:    sarq $63, %rsi
+; SSE-NEXT:    movabsq $-4265267296055464877, %rcx # imm = 0xC4CEB9FE1A85EC53
+; SSE-NEXT:    movq %rdi, %rax
+; SSE-NEXT:    mulq %rcx
+; SSE-NEXT:    imulq %rcx, %rsi
+; SSE-NEXT:    addq %rdx, %rsi
+; SSE-NEXT:    imulq %rdi, %rcx
+; SSE-NEXT:    xorq %rsi, %rcx
+; SSE-NEXT:    movq %rcx, %rax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_mul_smul_lohi_const_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq %rdi, %rsi
+; AVX-NEXT:    sarq $63, %rsi
+; AVX-NEXT:    movabsq $-4265267296055464877, %rcx # imm = 0xC4CEB9FE1A85EC53
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    mulq %rcx
+; AVX-NEXT:    imulq %rcx, %rsi
+; AVX-NEXT:    addq %rdx, %rsi
+; AVX-NEXT:    imulq %rdi, %rcx
+; AVX-NEXT:    xorq %rsi, %rcx
+; AVX-NEXT:    movq %rcx, %rax
+; AVX-NEXT:    retq
+  %h128 = sext i64 %h to i128
+  %m128 = mul nsw i128 %h128, 14181476777654086739
+  %hi128 = lshr i128 %m128, 64
+  %hi = trunc i128 %hi128 to i64
+  %lo = mul i64 %h, 14181476777654086739
+  %r = xor i64 %lo, %hi
+  ret i64 %r
+}
+
 ; This would infinite loop because DAGCombiner wants to turn this into a shift,
 ; but x86 lowering wants to avoid non-uniform vector shift amounts.
 
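Context note (not part of the commit): the new tests cover the case where both the high and low 64 bits of a widening 64x64-bit multiply of the same operands are computed; per the "PR59217 - Reuse umul_lohi/smul_lohi node" comment above, the goal is to reuse a single umul_lohi/smul_lohi node rather than emit a separate low-half multiply. A rough source-level equivalent of combine_mul_umul_lohi_i64, assuming the Clang/GCC __int128 extension and using an illustrative function name, looks like:

#include <stdint.h>

/* Illustrative sketch only. The widening multiply supplies the high 64 bits
   and the plain 64-bit multiply the low 64 bits; on x86-64 a single mulq
   already produces both halves in RDX:RAX. */
uint64_t mul_hi_xor_lo(uint64_t a, uint64_t b) {
    uint64_t hi = (uint64_t)(((unsigned __int128)a * b) >> 64); /* high half */
    uint64_t lo = a * b;                                        /* low half  */
    return hi ^ lo;
}

In the SSE/AVX check lines above, the low half is currently recomputed with imulq even though mulq already leaves it in %rax, which is the reuse opportunity the comment refers to.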

More information about the llvm-commits mailing list