[llvm] 86bd9a4 - [AArch64] Additional tests for creating BIC from known bits. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 5 07:42:39 PDT 2023


Author: David Green
Date: 2023-07-05T15:42:33+01:00
New Revision: 86bd9a420fbbd6bad9c59b57d53c25a0222da753

URL: https://github.com/llvm/llvm-project/commit/86bd9a420fbbd6bad9c59b57d53c25a0222da753
DIFF: https://github.com/llvm/llvm-project/commit/86bd9a420fbbd6bad9c59b57d53c25a0222da753.diff

LOG: [AArch64] Additional tests for creating BIC from known bits. NFC
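For context: AArch64's vector BIC clears bits (vn & ~vm), and its immediate form takes an 8-bit value optionally shifted left by a multiple of 8. When known-bits analysis proves that every bit outside such an immediate is already zero, an AND with a wider constant can be selected as a single BIC rather than a movi + and pair. A minimal IR sketch of the pattern these tests exercise (hypothetical function name, not part of the commit):

define <8 x i16> @bic_from_knownbits_sketch(<8 x i16> %v) {
  ; The shift leaves bits 8..15 of each lane known zero, so the AND mask
  ; 0x00fe is equivalent to 0xfffe and could be matched as bic v0.8h, #1.
  %shr = lshr <8 x i16> %v, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %and = and <8 x i16> %shr, <i16 254, i16 254, i16 254, i16 254, i16 254, i16 254, i16 254, i16 254>
  ret <8 x i16> %and
}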

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
    llvm/test/CodeGen/AArch64/shiftregister-from-and.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
index 4bd190bf751ed2..6fc392a0aa563d 100644
--- a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -1480,6 +1480,63 @@ define <2 x i64> @and64imm8h_lsl8(<2 x i64> %a) {
 	ret <2 x i64> %tmp1
 }
 
+define <8 x i16> @bic_shifted_knownbits(<8 x i16> %v) {
+; CHECK-LABEL: bic_shifted_knownbits:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.8h, #1
+; CHECK-NEXT:    ushr v0.8h, v0.8h, #9
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ret
+entry:
+  %vshr_n = lshr <8 x i16> %v, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
+  %and.i = and <8 x i16> %vshr_n, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  ret <8 x i16> %and.i
+}
+
+define <8 x i32> @bic_shifted_knownbits2(<8 x i16> %v) {
+; CHECK-LABEL: bic_shifted_knownbits2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #-1048321 // =0xfff000ff
+; CHECK-NEXT:    ushll2 v1.4s, v0.8h, #0
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    dup v2.4s, w8
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vshr_n = zext <8 x i16> %v to <8 x i32>
+  %and.i = and <8 x i32> %vshr_n, <i32 4293918975, i32 4293918975, i32 4293918975, i32 4293918975, i32 4293918975, i32 4293918975, i32 4293918975, i32 4293918975>
+  ret <8 x i32> %and.i
+}
+
+define <8 x i32> @bic_shifted_knownbits3(<8 x i16> %v) {
+; CHECK-LABEL: bic_shifted_knownbits3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    ushll2 v1.4s, v0.8h, #0
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    ret
+  %a = and <8 x i16> %v, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+  %and.i = zext <8 x i16> %a to <8 x i32>
+  ret <8 x i32> %and.i
+}
+
+
+define <8 x i32> @bic_shifted_knownbits4(<8 x i32> %v) {
+; CHECK-LABEL: bic_shifted_knownbits4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v2.2d, #0xffff0000ffff0000
+; CHECK-NEXT:    shl v0.4s, v0.4s, #8
+; CHECK-NEXT:    shl v1.4s, v1.4s, #8
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vshr_n = shl <8 x i32> %v, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+  %and.i = and <8 x i32> %vshr_n, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
+  ret <8 x i32> %and.i
+}
+
 define <8 x i8> @orr8imm2s_lsl0(<8 x i8> %a) {
 ; CHECK-LABEL: orr8imm2s_lsl0:
 ; CHECK:       // %bb.0:

diff --git a/llvm/test/CodeGen/AArch64/shiftregister-from-and.ll b/llvm/test/CodeGen/AArch64/shiftregister-from-and.ll
index 91011ec66048f8..ec4e3b3e42b7f9 100644
--- a/llvm/test/CodeGen/AArch64/shiftregister-from-and.ll
+++ b/llvm/test/CodeGen/AArch64/shiftregister-from-and.ll
@@ -21,7 +21,7 @@ define i64 @and_shiftedreg_from_and(i64 %a, i64 %b) {
 define i64 @bic_shiftedreg_from_and(i64 %a, i64 %b) {
 ; CHECK-LABEL: bic_shiftedreg_from_and:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16777215
+; CHECK-NEXT:    mov w8, #16777215 // =0xffffff
 ; CHECK-NEXT:    orn x8, x8, x0, asr #23
 ; CHECK-NEXT:    and x0, x1, x8
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define i64 @eor_shiftedreg_from_and(i64 %a, i64 %b) {
 define i64 @mvn_shiftedreg_from_and(i64 %a) {
 ; CHECK-LABEL: mvn_shiftedreg_from_and:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9007199254740991
+; CHECK-NEXT:    mov x8, #9007199254740991 // =0x1fffffffffffff
 ; CHECK-NEXT:    orn x0, x8, x0, lsl #36
 ; CHECK-NEXT:    ret
   %shl = shl i64 %a, 36
@@ -205,7 +205,7 @@ define i32 @shiftedreg_from_and_negative_oneuse2(i32 %a, i32 %b) {
 define i32 @shiftedreg_from_and_negative_andc1(i32 %a, i32 %b) {
 ; CHECK-LABEL: shiftedreg_from_and_negative_andc1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #26215
+; CHECK-NEXT:    mov w8, #26215 // =0x6667
 ; CHECK-NEXT:    movk w8, #65510, lsl #16
 ; CHECK-NEXT:    and w8, w8, w0, asr #23
 ; CHECK-NEXT:    add w0, w8, w1
@@ -221,7 +221,7 @@ define i32 @shiftedreg_from_and_negative_andc1(i32 %a, i32 %b) {
 define i32 @shiftedreg_from_and_negative_andc2(i32 %a, i32 %b) {
 ; CHECK-LABEL: shiftedreg_from_and_negative_andc2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-285212672
+; CHECK-NEXT:    mov w8, #-285212672 // =0xef000000
 ; CHECK-NEXT:    and w8, w8, w0, asr #23
 ; CHECK-NEXT:    add w0, w8, w1
 ; CHECK-NEXT:    ret
