[llvm] 16fb1c7 - [RISCV] Add i8/i16 test cases to div.ll and i8/i16/i64 to rem.ll. NFC

Craig Topper via llvm-commits llvm-commits@lists.llvm.org
Thu Feb 4 16:48:48 PST 2021


Author: Craig Topper
Date: 2021-02-04T16:46:23-08:00
New Revision: 16fb1c7aaeba9a7f22e4dcc8761b1cbf1fe524c1

URL: https://github.com/llvm/llvm-project/commit/16fb1c7aaeba9a7f22e4dcc8761b1cbf1fe524c1
DIFF: https://github.com/llvm/llvm-project/commit/16fb1c7aaeba9a7f22e4dcc8761b1cbf1fe524c1.diff

LOG: [RISCV] Add i8/i16 test cases to div.ll and i8/i16/i64 to rem.ll. NFC

This improves our coverage of these operations and shows that we
use really large constants for division by constant on i8/i16,
especially on RV64. The issue is that BuildSDIV/BuildUDIV are
limited to legal types, so we have to promote to i64 before they
kick in. At that point we've lost the range information for the
original type.
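
As a rough illustration of why the constants get so big (this is the
textbook unsigned magic-number computation, a Python sketch rather than
LLVM's actual BuildSDIV/BuildUDIV code, and the helper name is just for
illustration), the multiplier is chosen at whatever width the transform
runs at, so running it after promotion to i64 produces a 64-bit constant
even though the dividend is known to fit in 8 bits:

    # Sketch: smallest shift s and multiplier m = ceil(2**(bits+s) / d)
    # such that (x * m) >> (bits + s) == x // d for every x < 2**bits.
    def udiv_magic(d, bits):
        for s in range(bits + 1):
            m = -(-(1 << (bits + s)) // d)             # ceiling division
            if m * d - (1 << (bits + s)) <= (1 << s):  # rounding error small enough
                return m, s

    print(udiv_magic(5, 8))           # -> (205, 2): multiplier 0xcd fits in a byte
    print(hex(udiv_magic(5, 64)[0]))  # -> 0xcccccccccccccccd, the constant the
                                      #    RV64IM checks below have to materialize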

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/div.ll
    llvm/test/CodeGen/RISCV/rem.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index fea3149ba230..3d2f6b7f0a47 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -204,6 +204,258 @@ define i64 @udiv64_constant(i64 %a) nounwind {
   ret i64 %1
 }
 
+define i8 @udiv8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: udiv8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: udiv8:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    andi a1, a1, 255
+; RV32IM-NEXT:    andi a0, a0, 255
+; RV32IM-NEXT:    divu a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv8:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    andi a1, a1, 255
+; RV64IM-NEXT:    andi a0, a0, 255
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i8 %a, %b
+  ret i8 %1
+}
+
+define i8 @udiv8_constant(i8 %a) nounwind {
+; RV32I-LABEL: udiv8_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: udiv8_constant:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    andi a0, a0, 255
+; RV32IM-NEXT:    lui a1, 838861
+; RV32IM-NEXT:    addi a1, a1, -819
+; RV32IM-NEXT:    mulhu a0, a0, a1
+; RV32IM-NEXT:    srli a0, a0, 2
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv8_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv8_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    andi a0, a0, 255
+; RV64IM-NEXT:    lui a1, 1035469
+; RV64IM-NEXT:    addiw a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    mulhu a0, a0, a1
+; RV64IM-NEXT:    srli a0, a0, 2
+; RV64IM-NEXT:    ret
+  %1 = udiv i8 %a, 5
+  ret i8 %1
+}
+
+define i8 @udiv8_pow2(i8 %a) nounwind {
+; RV32I-LABEL: udiv8_pow2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 248
+; RV32I-NEXT:    srli a0, a0, 3
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: udiv8_pow2:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    andi a0, a0, 248
+; RV32IM-NEXT:    srli a0, a0, 3
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv8_pow2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 248
+; RV64I-NEXT:    srli a0, a0, 3
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv8_pow2:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    andi a0, a0, 248
+; RV64IM-NEXT:    srli a0, a0, 3
+; RV64IM-NEXT:    ret
+  %1 = udiv i8 %a, 8
+  ret i8 %1
+}
+
+define i16 @udiv16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: udiv16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: udiv16:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    lui a2, 16
+; RV32IM-NEXT:    addi a2, a2, -1
+; RV32IM-NEXT:    and a1, a1, a2
+; RV32IM-NEXT:    and a0, a0, a2
+; RV32IM-NEXT:    divu a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv16:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    lui a2, 16
+; RV64IM-NEXT:    addiw a2, a2, -1
+; RV64IM-NEXT:    and a1, a1, a2
+; RV64IM-NEXT:    and a0, a0, a2
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = udiv i16 %a, %b
+  ret i16 %1
+}
+
+define i16 @udiv16_constant(i16 %a) nounwind {
+; RV32I-LABEL: udiv16_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    call __udivsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: udiv16_constant:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    lui a1, 16
+; RV32IM-NEXT:    addi a1, a1, -1
+; RV32IM-NEXT:    and a0, a0, a1
+; RV32IM-NEXT:    lui a1, 838861
+; RV32IM-NEXT:    addi a1, a1, -819
+; RV32IM-NEXT:    mulhu a0, a0, a1
+; RV32IM-NEXT:    srli a0, a0, 2
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv16_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __udivdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv16_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    lui a1, 16
+; RV64IM-NEXT:    addiw a1, a1, -1
+; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    lui a1, 1035469
+; RV64IM-NEXT:    addiw a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, -819
+; RV64IM-NEXT:    mulhu a0, a0, a1
+; RV64IM-NEXT:    srli a0, a0, 2
+; RV64IM-NEXT:    ret
+  %1 = udiv i16 %a, 5
+  ret i16 %1
+}
+
+define i16 @udiv16_pow2(i16 %a) nounwind {
+; RV32I-LABEL: udiv16_pow2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 19
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: udiv16_pow2:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a0, a0, 16
+; RV32IM-NEXT:    srli a0, a0, 19
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: udiv16_pow2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slliw a0, a0, 16
+; RV64I-NEXT:    srliw a0, a0, 19
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: udiv16_pow2:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slliw a0, a0, 16
+; RV64IM-NEXT:    srliw a0, a0, 19
+; RV64IM-NEXT:    ret
+  %1 = udiv i16 %a, 8
+  ret i16 %1
+}
+
 define i32 @sdiv(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sdiv:
 ; RV32I:       # %bb.0:
@@ -466,3 +718,311 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
   %3 = sdiv i64 %1, %2
   ret i64 %3
 }
+
+define i8 @sdiv8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: sdiv8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: sdiv8:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a1, a1, 24
+; RV32IM-NEXT:    srai a1, a1, 24
+; RV32IM-NEXT:    slli a0, a0, 24
+; RV32IM-NEXT:    srai a0, a0, 24
+; RV32IM-NEXT:    div a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv8:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a1, 56
+; RV64IM-NEXT:    srai a1, a1, 56
+; RV64IM-NEXT:    slli a0, a0, 56
+; RV64IM-NEXT:    srai a0, a0, 56
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i8 %a, %b
+  ret i8 %1
+}
+
+define i8 @sdiv8_constant(i8 %a) nounwind {
+; RV32I-LABEL: sdiv8_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: sdiv8_constant:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a0, a0, 24
+; RV32IM-NEXT:    srai a0, a0, 24
+; RV32IM-NEXT:    lui a1, 419430
+; RV32IM-NEXT:    addi a1, a1, 1639
+; RV32IM-NEXT:    mulh a0, a0, a1
+; RV32IM-NEXT:    srli a1, a0, 31
+; RV32IM-NEXT:    srai a0, a0, 1
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv8_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv8_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a0, a0, 56
+; RV64IM-NEXT:    srai a0, a0, 56
+; RV64IM-NEXT:    lui a1, 13107
+; RV64IM-NEXT:    addiw a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 13
+; RV64IM-NEXT:    addi a1, a1, 1639
+; RV64IM-NEXT:    mulh a0, a0, a1
+; RV64IM-NEXT:    srli a1, a0, 63
+; RV64IM-NEXT:    srai a0, a0, 1
+; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i8 %a, 5
+  ret i8 %1
+}
+
+define i8 @sdiv8_pow2(i8 %a) nounwind {
+; RV32I-LABEL: sdiv8_pow2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    srli a1, a1, 12
+; RV32I-NEXT:    andi a1, a1, 7
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 27
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: sdiv8_pow2:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a1, a0, 24
+; RV32IM-NEXT:    srai a1, a1, 24
+; RV32IM-NEXT:    srli a1, a1, 12
+; RV32IM-NEXT:    andi a1, a1, 7
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    slli a0, a0, 24
+; RV32IM-NEXT:    srai a0, a0, 27
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv8_pow2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    srli a1, a1, 12
+; RV64I-NEXT:    andi a1, a1, 7
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 59
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv8_pow2:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a0, 56
+; RV64IM-NEXT:    srai a1, a1, 56
+; RV64IM-NEXT:    srli a1, a1, 12
+; RV64IM-NEXT:    andi a1, a1, 7
+; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 56
+; RV64IM-NEXT:    srai a0, a0, 59
+; RV64IM-NEXT:    ret
+  %1 = sdiv i8 %a, 8
+  ret i8 %1
+}
+
+define i16 @sdiv16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: sdiv16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: sdiv16:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a1, a1, 16
+; RV32IM-NEXT:    srai a1, a1, 16
+; RV32IM-NEXT:    slli a0, a0, 16
+; RV32IM-NEXT:    srai a0, a0, 16
+; RV32IM-NEXT:    div a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv16:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a1, 48
+; RV64IM-NEXT:    srai a1, a1, 48
+; RV64IM-NEXT:    slli a0, a0, 48
+; RV64IM-NEXT:    srai a0, a0, 48
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i16 %a, %b
+  ret i16 %1
+}
+
+define i16 @sdiv16_constant(i16 %a) nounwind {
+; RV32I-LABEL: sdiv16_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    call __divsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: sdiv16_constant:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a0, a0, 16
+; RV32IM-NEXT:    srai a0, a0, 16
+; RV32IM-NEXT:    lui a1, 419430
+; RV32IM-NEXT:    addi a1, a1, 1639
+; RV32IM-NEXT:    mulh a0, a0, a1
+; RV32IM-NEXT:    srli a1, a0, 31
+; RV32IM-NEXT:    srai a0, a0, 1
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv16_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    call __divdi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv16_constant:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a0, a0, 48
+; RV64IM-NEXT:    srai a0, a0, 48
+; RV64IM-NEXT:    lui a1, 13107
+; RV64IM-NEXT:    addiw a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 12
+; RV64IM-NEXT:    addi a1, a1, 819
+; RV64IM-NEXT:    slli a1, a1, 13
+; RV64IM-NEXT:    addi a1, a1, 1639
+; RV64IM-NEXT:    mulh a0, a0, a1
+; RV64IM-NEXT:    srli a1, a0, 63
+; RV64IM-NEXT:    srai a0, a0, 1
+; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = sdiv i16 %a, 5
+  ret i16 %1
+}
+
+define i16 @sdiv16_pow2(i16 %a) nounwind {
+; RV32I-LABEL: sdiv16_pow2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    srli a1, a1, 28
+; RV32I-NEXT:    andi a1, a1, 7
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 19
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: sdiv16_pow2:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a1, a0, 16
+; RV32IM-NEXT:    srai a1, a1, 16
+; RV32IM-NEXT:    srli a1, a1, 28
+; RV32IM-NEXT:    andi a1, a1, 7
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    slli a0, a0, 16
+; RV32IM-NEXT:    srai a0, a0, 19
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: sdiv16_pow2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    srli a1, a1, 28
+; RV64I-NEXT:    andi a1, a1, 7
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 51
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv16_pow2:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a0, 48
+; RV64IM-NEXT:    srai a1, a1, 48
+; RV64IM-NEXT:    srli a1, a1, 28
+; RV64IM-NEXT:    andi a1, a1, 7
+; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 48
+; RV64IM-NEXT:    srai a0, a0, 51
+; RV64IM-NEXT:    ret
+  %1 = sdiv i16 %a, 8
+  ret i16 %1
+}

diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
index fbff2dc965c3..15f2025195d9 100644
--- a/llvm/test/CodeGen/RISCV/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -77,3 +77,259 @@ define i32 @srem(i32 %a, i32 %b) nounwind {
   %1 = srem i32 %a, %b
   ret i32 %1
 }
+
+define i64 @urem64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: urem64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __umoddi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: urem64:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    addi sp, sp, -16
+; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    call __umoddi3@plt
+; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    addi sp, sp, 16
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: urem64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: urem64:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    remu a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @srem64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: srem64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __moddi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: srem64:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    addi sp, sp, -16
+; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IM-NEXT:    call __moddi3@plt
+; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IM-NEXT:    addi sp, sp, 16
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: srem64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: srem64:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    rem a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i64 %a, %b
+  ret i64 %1
+}
+
+define i8 @urem8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: urem8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: urem8:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    andi a1, a1, 255
+; RV32IM-NEXT:    andi a0, a0, 255
+; RV32IM-NEXT:    remu a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: urem8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: urem8:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    andi a1, a1, 255
+; RV64IM-NEXT:    andi a0, a0, 255
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i8 %a, %b
+  ret i8 %1
+}
+
+define i8 @srem8(i8 %a, i8 %b) nounwind {
+; RV32I-LABEL: srem8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: srem8:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a1, a1, 24
+; RV32IM-NEXT:    srai a1, a1, 24
+; RV32IM-NEXT:    slli a0, a0, 24
+; RV32IM-NEXT:    srai a0, a0, 24
+; RV32IM-NEXT:    rem a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: srem8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: srem8:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a1, 56
+; RV64IM-NEXT:    srai a1, a1, 56
+; RV64IM-NEXT:    slli a0, a0, 56
+; RV64IM-NEXT:    srai a0, a0, 56
+; RV64IM-NEXT:    rem a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i8 %a, %b
+  ret i8 %1
+}
+
+define i16 @urem16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: urem16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    call __umodsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: urem16:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    lui a2, 16
+; RV32IM-NEXT:    addi a2, a2, -1
+; RV32IM-NEXT:    and a1, a1, a2
+; RV32IM-NEXT:    and a0, a0, a2
+; RV32IM-NEXT:    remu a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: urem16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    call __umoddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: urem16:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    lui a2, 16
+; RV64IM-NEXT:    addiw a2, a2, -1
+; RV64IM-NEXT:    and a1, a1, a2
+; RV64IM-NEXT:    and a0, a0, a2
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = urem i16 %a, %b
+  ret i16 %1
+}
+
+define i16 @srem16(i16 %a, i16 %b) nounwind {
+; RV32I-LABEL: srem16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    call __modsi3@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: srem16:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a1, a1, 16
+; RV32IM-NEXT:    srai a1, a1, 16
+; RV32IM-NEXT:    slli a0, a0, 16
+; RV32IM-NEXT:    srai a0, a0, 16
+; RV32IM-NEXT:    rem a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: srem16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    call __moddi3@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: srem16:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a1, 48
+; RV64IM-NEXT:    srai a1, a1, 48
+; RV64IM-NEXT:    slli a0, a0, 48
+; RV64IM-NEXT:    srai a0, a0, 48
+; RV64IM-NEXT:    rem a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = srem i16 %a, %b
+  ret i16 %1
+}
