[llvm] 911df1e - [AArch64] Pre-commit test for D153575

Dhruv Chawla via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 23 00:48:16 PDT 2023


Author: Dhruv Chawla
Date: 2023-06-23T13:08:26+05:30
New Revision: 911df1e8ddfe31fdd2536f8a73628d53b3dd5ef6

URL: https://github.com/llvm/llvm-project/commit/911df1e8ddfe31fdd2536f8a73628d53b3dd5ef6
DIFF: https://github.com/llvm/llvm-project/commit/911df1e8ddfe31fdd2536f8a73628d53b3dd5ef6.diff

LOG: [AArch64] Pre-commit test for D153575

Added: 
    llvm/test/CodeGen/AArch64/aarch64-saturating-arithmetic.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/aarch64-saturating-arithmetic.ll b/llvm/test/CodeGen/AArch64/aarch64-saturating-arithmetic.ll
new file mode 100644
index 0000000000000..9c6cdeeb82d72
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-saturating-arithmetic.ll
@@ -0,0 +1,225 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=aarch64-linux-gnu -O2 -o - %s | FileCheck %s
+
+define i64 @test_ssub_nonneg_rhs(i64 %x) {
+; CHECK-LABEL: test_ssub_nonneg_rhs:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, #1
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %sat = call i64 @llvm.ssub.sat.i64(i64 %x, i64 1)
+  ret i64 %sat
+}
+
+define i64 @test_ssub_neg_rhs(i64 %x) {
+; CHECK-LABEL: test_ssub_neg_rhs:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adds x8, x0, #1
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %sat = call i64 @llvm.ssub.sat.i64(i64 %x, i64 -1)
+  ret i64 %sat
+}
+
+define i64 @test_sadd_nonneg_rhs(i64 %x) {
+; CHECK-LABEL: test_sadd_nonneg_rhs:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adds x8, x0, #1
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %sat = call i64 @llvm.sadd.sat.i64(i64 %x, i64 1)
+  ret i64 %sat
+}
+
+
+define i64 @test_sadd_neg_rhs(i64 %x) {
+; CHECK-LABEL: test_sadd_neg_rhs:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, #1
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %sat = call i64 @llvm.sadd.sat.i64(i64 %x, i64 -1)
+  ret i64 %sat
+}
+
+define i64 @test_ssub_nonneg_lhs(i64 %x) {
+; CHECK-LABEL: test_ssub_nonneg_lhs:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    subs x8, x8, x0
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %sat = call i64 @llvm.ssub.sat.i64(i64 1, i64 %x)
+  ret i64 %sat
+}
+
+define i64 @test_ssub_neg_lhs(i64 %x) {
+; CHECK-LABEL: test_ssub_neg_lhs:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    subs x8, x8, x0
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %sat = call i64 @llvm.ssub.sat.i64(i64 -1, i64 %x)
+  ret i64 %sat
+}
+
+define i64 @test_sadd_nonneg_lhs(i64 %x) {
+; CHECK-LABEL: test_sadd_nonneg_lhs:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adds x8, x0, #1
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %sat = call i64 @llvm.sadd.sat.i64(i64 1, i64 %x)
+  ret i64 %sat
+}
+
+define i64 @test_sadd_neg_lhs(i64 %x) {
+; CHECK-LABEL: test_sadd_neg_lhs:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    subs x8, x0, #1
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %sat = call i64 @llvm.sadd.sat.i64(i64 -1, i64 %x)
+  ret i64 %sat
+}
+
+define i64 @test_ssub_nonneg_rhs_nonconst(i64 %x) {
+; CHECK-LABEL: test_ssub_nonneg_rhs_nonconst:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #123 // =0x7b
+; CHECK-NEXT:    and x8, x0, x8
+; CHECK-NEXT:    subs x8, x0, x8
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %y = and i64 %x, 123
+  %sat = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y)
+  ret i64 %sat
+}
+
+define i64 @test_ssub_neg_rhs_nonconst(i64 %x) {
+; CHECK-LABEL: test_ssub_neg_rhs_nonconst:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmn x0, #1
+; CHECK-NEXT:    csinv x8, x0, xzr, lt
+; CHECK-NEXT:    subs x8, x0, x8
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %y = call i64 @llvm.smin(i64 %x, i64 -1)
+  %sat = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y)
+  ret i64 %sat
+}
+
+define i64 @test_sadd_nonneg_rhs_nonconst(i64 %x) {
+; CHECK-LABEL: test_sadd_nonneg_rhs_nonconst:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, #1
+; CHECK-NEXT:    csinc x8, x0, xzr, gt
+; CHECK-NEXT:    adds x8, x0, x8
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %y = call i64 @llvm.smax(i64 %x, i64 1)
+  %sat = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y)
+  ret i64 %sat
+}
+
+
+define i64 @test_sadd_neg_rhs_nonconst(i64 %x) {
+; CHECK-LABEL: test_sadd_neg_rhs_nonconst:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr x8, x0, #0x8000000000000000
+; CHECK-NEXT:    adds x8, x0, x8
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %y = or i64 %x, u0x8000000000000000
+  %sat = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y)
+  ret i64 %sat
+}
+
+define i64 @test_ssub_nonneg_lhs_nonconst(i64 %x) {
+; CHECK-LABEL: test_ssub_nonneg_lhs_nonconst:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #123 // =0x7b
+; CHECK-NEXT:    and x8, x0, x8
+; CHECK-NEXT:    subs x8, x8, x0
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %y = and i64 %x, 123
+  %sat = call i64 @llvm.ssub.sat.i64(i64 %y, i64 %x)
+  ret i64 %sat
+}
+
+define i64 @test_ssub_neg_lhs_nonconst(i64 %x) {
+; CHECK-LABEL: test_ssub_neg_lhs_nonconst:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmn x0, #1
+; CHECK-NEXT:    csinv x8, x0, xzr, lt
+; CHECK-NEXT:    subs x8, x8, x0
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %y = call i64 @llvm.smin(i64 %x, i64 -1)
+  %sat = call i64 @llvm.ssub.sat.i64(i64 %y, i64 %x)
+  ret i64 %sat
+}
+
+define i64 @test_sadd_nonneg_lhs_nonconst(i64 %x) {
+; CHECK-LABEL: test_sadd_nonneg_lhs_nonconst:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, #1
+; CHECK-NEXT:    csinc x8, x0, xzr, gt
+; CHECK-NEXT:    adds x8, x8, x0
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %y = call i64 @llvm.smax(i64 %x, i64 1)
+  %sat = call i64 @llvm.sadd.sat.i64(i64 %y, i64 %x)
+  ret i64 %sat
+}
+
+define i64 @test_sadd_neg_lhs_nonconst(i64 %x) {
+; CHECK-LABEL: test_sadd_neg_lhs_nonconst:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr x8, x0, #0x8000000000000000
+; CHECK-NEXT:    adds x8, x8, x0
+; CHECK-NEXT:    asr x9, x8, #63
+; CHECK-NEXT:    eor x9, x9, #0x8000000000000000
+; CHECK-NEXT:    csel x0, x9, x8, vs
+; CHECK-NEXT:    ret
+  %y = or i64 %x, u0x8000000000000000
+  %sat = call i64 @llvm.sadd.sat.i64(i64 %y, i64 %x)
+  ret i64 %sat
+}
+
+declare i64 @llvm.sadd.sat.i64(i64, i64)
+declare i64 @llvm.ssub.sat.i64(i64, i64)
+declare i64 @llvm.smax(i64, i64)
+declare i64 @llvm.smin(i64, i64)


        


More information about the llvm-commits mailing list