[llvm] a574f0e - [RISCV] Disable use of i128 shift libcalls on RV32.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Wed Sep 8 14:42:28 PDT 2021


Author: Craig Topper
Date: 2021-09-08T14:26:07-07:00
New Revision: a574f0e0c347cd8bee97e6770022aeb537542b22

URL: https://github.com/llvm/llvm-project/commit/a574f0e0c347cd8bee97e6770022aeb537542b22
DIFF: https://github.com/llvm/llvm-project/commit/a574f0e0c347cd8bee97e6770022aeb537542b22.diff

LOG: [RISCV] Disable use of i128 shift libcalls on RV32.

Since i128 isn't a legal C type on RV32, I don't believe
libgcc implements these functions for RV32. compiler-rt
does implement them because i128 support is enabled
in order to handle long double.

This is consistent with 32-bit X86 and ARM.

Reviewed By: efriedma

Differential Revision: https://reviews.llvm.org/D109383
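
The effect shows up directly in the regenerated test below. A reduced form of
the first affected function (a sketch; the body is implied by the function
signature and the old CHECK lines):

    ; With this patch, llc -mtriple=riscv32 expands the shift inline
    ; instead of emitting "call __lshrti3@plt".
    define i128 @lshr128(i128 %a, i128 %b) nounwind {
      %1 = lshr i128 %a, %b
      ret i128 %1
    }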

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/shifts.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7299a38006a53..9929edf2fee69 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -200,6 +200,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
   } else {
+    setLibcallName(RTLIB::SHL_I128, nullptr);
+    setLibcallName(RTLIB::SRL_I128, nullptr);
+    setLibcallName(RTLIB::SRA_I128, nullptr);
     setLibcallName(RTLIB::MUL_I128, nullptr);
     setLibcallName(RTLIB::MULO_I64, nullptr);
   }

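Setting a libcall name to nullptr tells SelectionDAG legalization that the
routine is unavailable, so the 128-bit shifts are expanded inline from 32-bit
operations instead of being lowered to calls. All three shift flavors are
affected; reduced forms of the other two updated tests (sketches, with bodies
implied by the CHECK lines below):

    define i128 @ashr128(i128 %a, i128 %b) nounwind {
      %1 = ashr i128 %a, %b   ; previously lowered to "call __ashrti3@plt"
      ret i128 %1
    }

    define i128 @shl128(i128 %a, i128 %b) nounwind {
      %1 = shl i128 %a, %b    ; previously lowered to "call __ashlti3@plt"
      ret i128 %1
    }
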
diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
index 934ea94fbbe63..08ea1a02ffd01 100644
--- a/llvm/test/CodeGen/RISCV/shifts.ll
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -145,33 +145,120 @@ define i64 @shl64_minsize(i64 %a, i64 %b) minsize nounwind {
 define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: lshr128:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a2, 0(a2)
-; RV32I-NEXT:    lw a3, 0(a1)
-; RV32I-NEXT:    lw a4, 4(a1)
-; RV32I-NEXT:    lw a5, 8(a1)
-; RV32I-NEXT:    lw a1, 12(a1)
-; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    sw a1, 20(sp)
-; RV32I-NEXT:    sw a5, 16(sp)
-; RV32I-NEXT:    sw a4, 12(sp)
-; RV32I-NEXT:    addi a0, sp, 24
-; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    sw a3, 8(sp)
-; RV32I-NEXT:    call __lshrti3@plt
-; RV32I-NEXT:    lw a0, 36(sp)
-; RV32I-NEXT:    lw a1, 32(sp)
-; RV32I-NEXT:    lw a2, 28(sp)
-; RV32I-NEXT:    lw a3, 24(sp)
-; RV32I-NEXT:    sw a0, 12(s0)
-; RV32I-NEXT:    sw a1, 8(s0)
-; RV32I-NEXT:    sw a2, 4(s0)
-; RV32I-NEXT:    sw a3, 0(s0)
-; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    lw t1, 8(a1)
+; RV32I-NEXT:    lw t6, 12(a1)
+; RV32I-NEXT:    addi a6, zero, 64
+; RV32I-NEXT:    sub t5, a6, a2
+; RV32I-NEXT:    addi a3, zero, 32
+; RV32I-NEXT:    sub s0, a3, a2
+; RV32I-NEXT:    addi t2, zero, 31
+; RV32I-NEXT:    bltz s0, .LBB6_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sll a4, t1, s0
+; RV32I-NEXT:    j .LBB6_3
+; RV32I-NEXT:  .LBB6_2:
+; RV32I-NEXT:    sll a3, t6, t5
+; RV32I-NEXT:    sub a4, t2, t5
+; RV32I-NEXT:    srli a5, t1, 1
+; RV32I-NEXT:    srl a4, a5, a4
+; RV32I-NEXT:    or a4, a3, a4
+; RV32I-NEXT:  .LBB6_3:
+; RV32I-NEXT:    lw a3, 4(a1)
+; RV32I-NEXT:    addi t0, a2, -32
+; RV32I-NEXT:    bgez t0, .LBB6_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    srl a5, a3, a2
+; RV32I-NEXT:    or a4, a4, a5
+; RV32I-NEXT:  .LBB6_5:
+; RV32I-NEXT:    addi t4, a2, -96
+; RV32I-NEXT:    addi t3, a2, -64
+; RV32I-NEXT:    bltz t4, .LBB6_7
+; RV32I-NEXT:  # %bb.6:
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    bgeu a2, a6, .LBB6_8
+; RV32I-NEXT:    j .LBB6_9
+; RV32I-NEXT:  .LBB6_7:
+; RV32I-NEXT:    srl a5, t6, t3
+; RV32I-NEXT:    bltu a2, a6, .LBB6_9
+; RV32I-NEXT:  .LBB6_8:
+; RV32I-NEXT:    mv a4, a5
+; RV32I-NEXT:  .LBB6_9:
+; RV32I-NEXT:    mv a7, a3
+; RV32I-NEXT:    beqz a2, .LBB6_11
+; RV32I-NEXT:  # %bb.10:
+; RV32I-NEXT:    mv a7, a4
+; RV32I-NEXT:  .LBB6_11:
+; RV32I-NEXT:    lw a1, 0(a1)
+; RV32I-NEXT:    sub t2, t2, a2
+; RV32I-NEXT:    bltz t0, .LBB6_13
+; RV32I-NEXT:  # %bb.12:
+; RV32I-NEXT:    srl a3, a3, t0
+; RV32I-NEXT:    bltz s0, .LBB6_14
+; RV32I-NEXT:    j .LBB6_15
+; RV32I-NEXT:  .LBB6_13:
+; RV32I-NEXT:    srl a4, a1, a2
+; RV32I-NEXT:    slli a3, a3, 1
+; RV32I-NEXT:    sll a3, a3, t2
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    bgez s0, .LBB6_15
+; RV32I-NEXT:  .LBB6_14:
+; RV32I-NEXT:    sll a4, t1, t5
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:  .LBB6_15:
+; RV32I-NEXT:    slli a4, t6, 1
+; RV32I-NEXT:    bltz t4, .LBB6_17
+; RV32I-NEXT:  # %bb.16:
+; RV32I-NEXT:    srl a5, t6, t4
+; RV32I-NEXT:    bgeu a2, a6, .LBB6_18
+; RV32I-NEXT:    j .LBB6_19
+; RV32I-NEXT:  .LBB6_17:
+; RV32I-NEXT:    addi a5, zero, 95
+; RV32I-NEXT:    sub a5, a5, a2
+; RV32I-NEXT:    sll a5, a4, a5
+; RV32I-NEXT:    srl s0, t1, t3
+; RV32I-NEXT:    or a5, s0, a5
+; RV32I-NEXT:    bltu a2, a6, .LBB6_19
+; RV32I-NEXT:  .LBB6_18:
+; RV32I-NEXT:    mv a3, a5
+; RV32I-NEXT:  .LBB6_19:
+; RV32I-NEXT:    bnez a2, .LBB6_22
+; RV32I-NEXT:  # %bb.20:
+; RV32I-NEXT:    bltz t0, .LBB6_23
+; RV32I-NEXT:  .LBB6_21:
+; RV32I-NEXT:    srl a3, t6, t0
+; RV32I-NEXT:    bgeu a2, a6, .LBB6_24
+; RV32I-NEXT:    j .LBB6_25
+; RV32I-NEXT:  .LBB6_22:
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:    bgez t0, .LBB6_21
+; RV32I-NEXT:  .LBB6_23:
+; RV32I-NEXT:    srl a3, t1, a2
+; RV32I-NEXT:    sll a4, a4, t2
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    bltu a2, a6, .LBB6_25
+; RV32I-NEXT:  .LBB6_24:
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:  .LBB6_25:
+; RV32I-NEXT:    bltz t0, .LBB6_27
+; RV32I-NEXT:  # %bb.26:
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    bgeu a2, a6, .LBB6_28
+; RV32I-NEXT:    j .LBB6_29
+; RV32I-NEXT:  .LBB6_27:
+; RV32I-NEXT:    srl a4, t6, a2
+; RV32I-NEXT:    bltu a2, a6, .LBB6_29
+; RV32I-NEXT:  .LBB6_28:
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:  .LBB6_29:
+; RV32I-NEXT:    sw a4, 12(a0)
+; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sw a7, 4(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: lshr128:
@@ -198,33 +285,123 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
 define i128 @ashr128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: ashr128:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a2, 0(a2)
-; RV32I-NEXT:    lw a3, 0(a1)
-; RV32I-NEXT:    lw a4, 4(a1)
-; RV32I-NEXT:    lw a5, 8(a1)
-; RV32I-NEXT:    lw a1, 12(a1)
-; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    sw a1, 20(sp)
-; RV32I-NEXT:    sw a5, 16(sp)
-; RV32I-NEXT:    sw a4, 12(sp)
-; RV32I-NEXT:    addi a0, sp, 24
-; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    sw a3, 8(sp)
-; RV32I-NEXT:    call __ashrti3@plt
-; RV32I-NEXT:    lw a0, 36(sp)
-; RV32I-NEXT:    lw a1, 32(sp)
-; RV32I-NEXT:    lw a2, 28(sp)
-; RV32I-NEXT:    lw a3, 24(sp)
-; RV32I-NEXT:    sw a0, 12(s0)
-; RV32I-NEXT:    sw a1, 8(s0)
-; RV32I-NEXT:    sw a2, 4(s0)
-; RV32I-NEXT:    sw a3, 0(s0)
-; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    lw t6, 8(a1)
+; RV32I-NEXT:    lw s1, 12(a1)
+; RV32I-NEXT:    addi a6, zero, 64
+; RV32I-NEXT:    sub t2, a6, a2
+; RV32I-NEXT:    addi a3, zero, 32
+; RV32I-NEXT:    sub a3, a3, a2
+; RV32I-NEXT:    addi t4, zero, 31
+; RV32I-NEXT:    bltz a3, .LBB7_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sll s0, t6, a3
+; RV32I-NEXT:    j .LBB7_3
+; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    sll a4, s1, t2
+; RV32I-NEXT:    sub a5, t4, t2
+; RV32I-NEXT:    srli s0, t6, 1
+; RV32I-NEXT:    srl a5, s0, a5
+; RV32I-NEXT:    or s0, a4, a5
+; RV32I-NEXT:  .LBB7_3:
+; RV32I-NEXT:    lw a5, 4(a1)
+; RV32I-NEXT:    addi a7, a2, -32
+; RV32I-NEXT:    bgez a7, .LBB7_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    srl a4, a5, a2
+; RV32I-NEXT:    or s0, s0, a4
+; RV32I-NEXT:  .LBB7_5:
+; RV32I-NEXT:    addi t3, a2, -64
+; RV32I-NEXT:    addi t5, a2, -96
+; RV32I-NEXT:    srai t1, s1, 31
+; RV32I-NEXT:    bltz t5, .LBB7_7
+; RV32I-NEXT:  # %bb.6:
+; RV32I-NEXT:    mv a4, t1
+; RV32I-NEXT:    bgeu a2, a6, .LBB7_8
+; RV32I-NEXT:    j .LBB7_9
+; RV32I-NEXT:  .LBB7_7:
+; RV32I-NEXT:    sra a4, s1, t3
+; RV32I-NEXT:    bltu a2, a6, .LBB7_9
+; RV32I-NEXT:  .LBB7_8:
+; RV32I-NEXT:    mv s0, a4
+; RV32I-NEXT:  .LBB7_9:
+; RV32I-NEXT:    mv t0, a5
+; RV32I-NEXT:    beqz a2, .LBB7_11
+; RV32I-NEXT:  # %bb.10:
+; RV32I-NEXT:    mv t0, s0
+; RV32I-NEXT:  .LBB7_11:
+; RV32I-NEXT:    lw a1, 0(a1)
+; RV32I-NEXT:    sub t4, t4, a2
+; RV32I-NEXT:    bltz a7, .LBB7_13
+; RV32I-NEXT:  # %bb.12:
+; RV32I-NEXT:    srl a5, a5, a7
+; RV32I-NEXT:    bltz a3, .LBB7_14
+; RV32I-NEXT:    j .LBB7_15
+; RV32I-NEXT:  .LBB7_13:
+; RV32I-NEXT:    srl a4, a1, a2
+; RV32I-NEXT:    slli a5, a5, 1
+; RV32I-NEXT:    sll a5, a5, t4
+; RV32I-NEXT:    or a5, a4, a5
+; RV32I-NEXT:    bgez a3, .LBB7_15
+; RV32I-NEXT:  .LBB7_14:
+; RV32I-NEXT:    sll a3, t6, t2
+; RV32I-NEXT:    or a5, a5, a3
+; RV32I-NEXT:  .LBB7_15:
+; RV32I-NEXT:    slli a3, s1, 1
+; RV32I-NEXT:    bltz t5, .LBB7_17
+; RV32I-NEXT:  # %bb.16:
+; RV32I-NEXT:    sra s0, s1, t5
+; RV32I-NEXT:    bgeu a2, a6, .LBB7_18
+; RV32I-NEXT:    j .LBB7_19
+; RV32I-NEXT:  .LBB7_17:
+; RV32I-NEXT:    addi a4, zero, 95
+; RV32I-NEXT:    sub a4, a4, a2
+; RV32I-NEXT:    sll a4, a3, a4
+; RV32I-NEXT:    srl s0, t6, t3
+; RV32I-NEXT:    or s0, s0, a4
+; RV32I-NEXT:    bltu a2, a6, .LBB7_19
+; RV32I-NEXT:  .LBB7_18:
+; RV32I-NEXT:    mv a5, s0
+; RV32I-NEXT:  .LBB7_19:
+; RV32I-NEXT:    bnez a2, .LBB7_22
+; RV32I-NEXT:  # %bb.20:
+; RV32I-NEXT:    bltz a7, .LBB7_23
+; RV32I-NEXT:  .LBB7_21:
+; RV32I-NEXT:    sra a3, s1, a7
+; RV32I-NEXT:    bgeu a2, a6, .LBB7_24
+; RV32I-NEXT:    j .LBB7_25
+; RV32I-NEXT:  .LBB7_22:
+; RV32I-NEXT:    mv a1, a5
+; RV32I-NEXT:    bgez a7, .LBB7_21
+; RV32I-NEXT:  .LBB7_23:
+; RV32I-NEXT:    srl a4, t6, a2
+; RV32I-NEXT:    sll a3, a3, t4
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    bltu a2, a6, .LBB7_25
+; RV32I-NEXT:  .LBB7_24:
+; RV32I-NEXT:    mv a3, t1
+; RV32I-NEXT:  .LBB7_25:
+; RV32I-NEXT:    bltz a7, .LBB7_27
+; RV32I-NEXT:  # %bb.26:
+; RV32I-NEXT:    mv a4, t1
+; RV32I-NEXT:    bgeu a2, a6, .LBB7_28
+; RV32I-NEXT:    j .LBB7_29
+; RV32I-NEXT:  .LBB7_27:
+; RV32I-NEXT:    sra a4, s1, a2
+; RV32I-NEXT:    bltu a2, a6, .LBB7_29
+; RV32I-NEXT:  .LBB7_28:
+; RV32I-NEXT:    mv a4, t1
+; RV32I-NEXT:  .LBB7_29:
+; RV32I-NEXT:    sw a4, 12(a0)
+; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sw t0, 4(a0)
+; RV32I-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: ashr128:
@@ -251,33 +428,120 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind {
 define i128 @shl128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: shl128:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -48
-; RV32I-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lw a2, 0(a2)
-; RV32I-NEXT:    lw a3, 0(a1)
-; RV32I-NEXT:    lw a4, 4(a1)
-; RV32I-NEXT:    lw a5, 8(a1)
+; RV32I-NEXT:    lw t1, 4(a1)
+; RV32I-NEXT:    lw t6, 0(a1)
+; RV32I-NEXT:    addi a6, zero, 64
+; RV32I-NEXT:    sub t5, a6, a2
+; RV32I-NEXT:    addi a3, zero, 32
+; RV32I-NEXT:    sub s0, a3, a2
+; RV32I-NEXT:    addi t2, zero, 31
+; RV32I-NEXT:    bltz s0, .LBB8_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    srl a4, t1, s0
+; RV32I-NEXT:    j .LBB8_3
+; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    srl a3, t6, t5
+; RV32I-NEXT:    sub a4, t2, t5
+; RV32I-NEXT:    slli a5, t1, 1
+; RV32I-NEXT:    sll a4, a5, a4
+; RV32I-NEXT:    or a4, a3, a4
+; RV32I-NEXT:  .LBB8_3:
+; RV32I-NEXT:    lw a3, 8(a1)
+; RV32I-NEXT:    addi t0, a2, -32
+; RV32I-NEXT:    bgez t0, .LBB8_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    sll a5, a3, a2
+; RV32I-NEXT:    or a4, a4, a5
+; RV32I-NEXT:  .LBB8_5:
+; RV32I-NEXT:    addi t4, a2, -96
+; RV32I-NEXT:    addi t3, a2, -64
+; RV32I-NEXT:    bltz t4, .LBB8_7
+; RV32I-NEXT:  # %bb.6:
+; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    bgeu a2, a6, .LBB8_8
+; RV32I-NEXT:    j .LBB8_9
+; RV32I-NEXT:  .LBB8_7:
+; RV32I-NEXT:    sll a5, t6, t3
+; RV32I-NEXT:    bltu a2, a6, .LBB8_9
+; RV32I-NEXT:  .LBB8_8:
+; RV32I-NEXT:    mv a4, a5
+; RV32I-NEXT:  .LBB8_9:
+; RV32I-NEXT:    mv a7, a3
+; RV32I-NEXT:    beqz a2, .LBB8_11
+; RV32I-NEXT:  # %bb.10:
+; RV32I-NEXT:    mv a7, a4
+; RV32I-NEXT:  .LBB8_11:
 ; RV32I-NEXT:    lw a1, 12(a1)
-; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    sw a1, 20(sp)
-; RV32I-NEXT:    sw a5, 16(sp)
-; RV32I-NEXT:    sw a4, 12(sp)
-; RV32I-NEXT:    addi a0, sp, 24
-; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    sw a3, 8(sp)
-; RV32I-NEXT:    call __ashlti3@plt
-; RV32I-NEXT:    lw a0, 36(sp)
-; RV32I-NEXT:    lw a1, 32(sp)
-; RV32I-NEXT:    lw a2, 28(sp)
-; RV32I-NEXT:    lw a3, 24(sp)
-; RV32I-NEXT:    sw a0, 12(s0)
-; RV32I-NEXT:    sw a1, 8(s0)
-; RV32I-NEXT:    sw a2, 4(s0)
-; RV32I-NEXT:    sw a3, 0(s0)
-; RV32I-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 48
+; RV32I-NEXT:    sub t2, t2, a2
+; RV32I-NEXT:    bltz t0, .LBB8_13
+; RV32I-NEXT:  # %bb.12:
+; RV32I-NEXT:    sll a3, a3, t0
+; RV32I-NEXT:    bltz s0, .LBB8_14
+; RV32I-NEXT:    j .LBB8_15
+; RV32I-NEXT:  .LBB8_13:
+; RV32I-NEXT:    sll a4, a1, a2
+; RV32I-NEXT:    srli a3, a3, 1
+; RV32I-NEXT:    srl a3, a3, t2
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    bgez s0, .LBB8_15
+; RV32I-NEXT:  .LBB8_14:
+; RV32I-NEXT:    srl a4, t1, t5
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:  .LBB8_15:
+; RV32I-NEXT:    srli a4, t6, 1
+; RV32I-NEXT:    bltz t4, .LBB8_17
+; RV32I-NEXT:  # %bb.16:
+; RV32I-NEXT:    sll a5, t6, t4
+; RV32I-NEXT:    bgeu a2, a6, .LBB8_18
+; RV32I-NEXT:    j .LBB8_19
+; RV32I-NEXT:  .LBB8_17:
+; RV32I-NEXT:    addi a5, zero, 95
+; RV32I-NEXT:    sub a5, a5, a2
+; RV32I-NEXT:    srl a5, a4, a5
+; RV32I-NEXT:    sll s0, t1, t3
+; RV32I-NEXT:    or a5, s0, a5
+; RV32I-NEXT:    bltu a2, a6, .LBB8_19
+; RV32I-NEXT:  .LBB8_18:
+; RV32I-NEXT:    mv a3, a5
+; RV32I-NEXT:  .LBB8_19:
+; RV32I-NEXT:    bnez a2, .LBB8_22
+; RV32I-NEXT:  # %bb.20:
+; RV32I-NEXT:    bltz t0, .LBB8_23
+; RV32I-NEXT:  .LBB8_21:
+; RV32I-NEXT:    sll a3, t6, t0
+; RV32I-NEXT:    bgeu a2, a6, .LBB8_24
+; RV32I-NEXT:    j .LBB8_25
+; RV32I-NEXT:  .LBB8_22:
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:    bgez t0, .LBB8_21
+; RV32I-NEXT:  .LBB8_23:
+; RV32I-NEXT:    sll a3, t1, a2
+; RV32I-NEXT:    srl a4, a4, t2
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    bltu a2, a6, .LBB8_25
+; RV32I-NEXT:  .LBB8_24:
+; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:  .LBB8_25:
+; RV32I-NEXT:    bltz t0, .LBB8_27
+; RV32I-NEXT:  # %bb.26:
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    bgeu a2, a6, .LBB8_28
+; RV32I-NEXT:    j .LBB8_29
+; RV32I-NEXT:  .LBB8_27:
+; RV32I-NEXT:    sll a4, t6, a2
+; RV32I-NEXT:    bltu a2, a6, .LBB8_29
+; RV32I-NEXT:  .LBB8_28:
+; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:  .LBB8_29:
+; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    sw a3, 4(a0)
+; RV32I-NEXT:    sw a1, 12(a0)
+; RV32I-NEXT:    sw a7, 8(a0)
+; RV32I-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: shl128:

