[llvm] d47300f - [RISCV] Correct the operand order for fshl/fshr to fsl/fsr instructions.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 4 11:14:05 PST 2020


Author: Craig Topper
Date: 2020-11-04T11:13:25-08:00
New Revision: d47300f503c90511304e4999182060b8e1e86b0f

URL: https://github.com/llvm/llvm-project/commit/d47300f503c90511304e4999182060b8e1e86b0f
DIFF: https://github.com/llvm/llvm-project/commit/d47300f503c90511304e4999182060b8e1e86b0f.diff

LOG: [RISCV] Correct the operand order for fshl/fshr to fsl/fsr instructions.

fsl/fsr take their shift amount in $rs2 or an immediate. The
sources are $rs1 and $rs3.

fshl/fshr ISD opcodes both concatenate operand 0 in the high bits and
operand 1 in the lower bits. fshl returns the high bits after
shifting and fshr returns the low bits. So a shift amount of 0
returns operand 0 for fshl and operand 1 for fshr.

fsl/fsr concatenate their operands in different orders such that
$rs1 will be returned for a shift amount of 0. So $rs1 needs to
come from operand 0 of fshl and operand 1 of fshr.

Differential Revision: https://reviews.llvm.org/D90735

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoB.td
    llvm/test/CodeGen/RISCV/rv32Zbb.ll
    llvm/test/CodeGen/RISCV/rv32Zbbp.ll
    llvm/test/CodeGen/RISCV/rv32Zbt.ll
    llvm/test/CodeGen/RISCV/rv64Zbt.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
index dac8eea5a925..632e2f0f6031 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -816,12 +816,17 @@ def : Pat<(or (and (xor GPR:$rs2, -1), GPR:$rs3), (and GPR:$rs2, GPR:$rs1)),
           (CMIX GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
 def : Pat<(riscv_selectcc GPR:$rs2, (XLenVT 0), (XLenVT 17), GPR:$rs3, GPR:$rs1),
           (CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(fshl GPR:$rs1, GPR:$rs2, GPR:$rs3),
+
+// fshl and fshr concatenate their operands in the same order. fsr and fsl
+// instructions use different orders. fshl will return its first operand for
+// shift of zero, fshr will return its second operand. fsl and fsr both return
+// $rs1 so the patterns need to have different operand orders.
+def : Pat<(fshl GPR:$rs1, GPR:$rs3, GPR:$rs2),
           (FSL GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(fshr GPR:$rs1, GPR:$rs2, GPR:$rs3),
+def : Pat<(fshr GPR:$rs3, GPR:$rs1, GPR:$rs2),
           (FSR GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(fshr GPR:$rs1, GPR:$rs2, uimmlog2xlen:$shamt),
-          (FSRI GPR:$rs1, GPR:$rs2, uimmlog2xlen:$shamt)>;
+def : Pat<(fshr GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
+          (FSRI GPR:$rs1, GPR:$rs3, uimmlog2xlen:$shamt)>;
 } // Predicates = [HasStdExtZbt]
 
 let Predicates = [HasStdExtZbb] in {
@@ -1020,20 +1025,20 @@ def : Pat<(sra (bitreverse GPR:$rs1), (i64 32)), (GREVIW GPR:$rs1, (i64 31))>;
 
 let Predicates = [HasStdExtZbt, IsRV64] in {
 def : Pat<(sext_inreg (fshl (assertsexti32 GPR:$rs1),
-                            (shl (assertsexti32 GPR:$rs2), (i64 32)),
-                            (and (assertsexti32 GPR:$rs3), (i64 31))),
+                            (shl (assertsexti32 GPR:$rs3), (i64 32)),
+                            (and (assertsexti32 GPR:$rs2), (i64 31))),
                       i32),
           (FSLW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(sext_inreg (fshr (assertsexti32 GPR:$rs1),
-                            (shl (assertsexti32 GPR:$rs2), (i64 32)),
-                            (or (assertsexti32 GPR:$rs3), (i64 32))),
+def : Pat<(sext_inreg (fshr (assertsexti32 GPR:$rs3),
+                            (shl (assertsexti32 GPR:$rs1), (i64 32)),
+                            (or (assertsexti32 GPR:$rs2), (i64 32))),
                       i32),
           (FSRW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(sext_inreg (fshr (assertsexti32 GPR:$rs1),
-                            (shl (assertsexti32 GPR:$rs2), (i64 32)),
+def : Pat<(sext_inreg (fshr (assertsexti32 GPR:$rs3),
+                            (shl (assertsexti32 GPR:$rs1), (i64 32)),
                             uimm6gt32:$shamt),
                       i32),
-          (FSRIW GPR:$rs1, GPR:$rs2, (ImmSub32 uimm6gt32:$shamt))>;
+          (FSRIW GPR:$rs1, GPR:$rs3, (ImmSub32 uimm6gt32:$shamt))>;
 } // Predicates = [HasStdExtZbt, IsRV64]
 
 let Predicates = [HasStdExtZbb, IsRV64] in {

diff --git a/llvm/test/CodeGen/RISCV/rv32Zbb.ll b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
index 6933bad1f8cd..d5585f89ec89 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
@@ -246,7 +246,7 @@ define i64 @sloi_i64(i64 %a) nounwind {
 ; RV32IB-LABEL: sloi_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a2, zero, 1
-; RV32IB-NEXT:    fsl a1, a1, a2, a0
+; RV32IB-NEXT:    fsl a1, a1, a0, a2
 ; RV32IB-NEXT:    sloi a0, a0, 1
 ; RV32IB-NEXT:    ret
 ;
@@ -298,7 +298,7 @@ define i64 @sroi_i64(i64 %a) nounwind {
 ; RV32IB-LABEL: sroi_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a2, zero, 31
-; RV32IB-NEXT:    fsl a0, a1, a2, a0
+; RV32IB-NEXT:    fsl a0, a1, a0, a2
 ; RV32IB-NEXT:    sroi a1, a1, 1
 ; RV32IB-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rv32Zbbp.ll b/llvm/test/CodeGen/RISCV/rv32Zbbp.ll
index 4457945c7f6a..3ab76197151e 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbbp.ll
@@ -694,8 +694,8 @@ define i64 @rori_i64(i64 %a) nounwind {
 ; RV32IB-LABEL: rori_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a3, zero, 31
-; RV32IB-NEXT:    fsl a2, a1, a3, a0
-; RV32IB-NEXT:    fsl a1, a0, a3, a1
+; RV32IB-NEXT:    fsl a2, a1, a0, a3
+; RV32IB-NEXT:    fsl a1, a0, a1, a3
 ; RV32IB-NEXT:    mv a0, a2
 ; RV32IB-NEXT:    ret
 ;
@@ -739,8 +739,8 @@ define i64 @rori_i64_fshr(i64 %a) nounwind {
 ; RV32IB-LABEL: rori_i64_fshr:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a3, zero, 1
-; RV32IB-NEXT:    fsl a2, a0, a3, a1
-; RV32IB-NEXT:    fsl a1, a1, a3, a0
+; RV32IB-NEXT:    fsl a2, a0, a1, a3
+; RV32IB-NEXT:    fsl a1, a1, a0, a3
 ; RV32IB-NEXT:    mv a0, a2
 ; RV32IB-NEXT:    ret
 ;

diff --git a/llvm/test/CodeGen/RISCV/rv32Zbt.ll b/llvm/test/CodeGen/RISCV/rv32Zbt.ll
index 3de8d1dcf449..52a1c164529e 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbt.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbt.ll
@@ -131,12 +131,12 @@ define i32 @fshl_i32(i32 %a, i32 %b, i32 %c) nounwind {
 ;
 ; RV32IB-LABEL: fshl_i32:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    fsl a0, a0, a2, a1
+; RV32IB-NEXT:    fsl a0, a0, a1, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshl_i32:
 ; RV32IBT:       # %bb.0:
-; RV32IBT-NEXT:    fsl a0, a0, a2, a1
+; RV32IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
   ret i32 %1
@@ -227,7 +227,7 @@ define i64 @fshl_i64(i64 %a, i64 %b, i64 %c) nounwind {
 ; RV32IB-NEXT:    mv t0, zero
 ; RV32IB-NEXT:    bgez a1, .LBB5_8
 ; RV32IB-NEXT:  .LBB5_5:
-; RV32IB-NEXT:    fsl a1, a3, a6, a2
+; RV32IB-NEXT:    fsl a1, a3, a2, a6
 ; RV32IB-NEXT:    srl a1, a1, t1
 ; RV32IB-NEXT:    sub a2, a6, a5
 ; RV32IB-NEXT:    slli a3, t3, 1
@@ -275,7 +275,7 @@ define i64 @fshl_i64(i64 %a, i64 %b, i64 %c) nounwind {
 ; RV32IBT-NEXT:    mv t0, zero
 ; RV32IBT-NEXT:    bgez a5, .LBB5_8
 ; RV32IBT-NEXT:  .LBB5_5:
-; RV32IBT-NEXT:    fsl a2, a3, a6, a2
+; RV32IBT-NEXT:    fsl a2, a3, a2, a6
 ; RV32IBT-NEXT:    srl a1, a2, a1
 ; RV32IBT-NEXT:    sub a2, a6, t3
 ; RV32IBT-NEXT:    slli a3, t2, 1
@@ -315,12 +315,12 @@ define i32 @fshr_i32(i32 %a, i32 %b, i32 %c) nounwind {
 ;
 ; RV32IB-LABEL: fshr_i32:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    fsr a0, a0, a2, a1
+; RV32IB-NEXT:    fsr a0, a1, a0, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshr_i32:
 ; RV32IBT:       # %bb.0:
-; RV32IBT-NEXT:    fsr a0, a0, a2, a1
+; RV32IBT-NEXT:    fsr a0, a1, a0, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
   ret i32 %1
@@ -414,7 +414,7 @@ define i64 @fshr_i64(i64 %a, i64 %b, i64 %c) nounwind {
 ; RV32IB-NEXT:    bgez a5, .LBB7_8
 ; RV32IB-NEXT:  .LBB7_5:
 ; RV32IB-NEXT:    addi a5, zero, 1
-; RV32IB-NEXT:    fsl a1, a1, a5, a0
+; RV32IB-NEXT:    fsl a1, a1, a0, a5
 ; RV32IB-NEXT:    sll a1, a1, t1
 ; RV32IB-NEXT:    sub a2, a6, a2
 ; RV32IB-NEXT:    lui a5, 524288
@@ -452,7 +452,7 @@ define i64 @fshr_i64(i64 %a, i64 %b, i64 %c) nounwind {
 ; RV32IBT-NEXT:    j .LBB7_3
 ; RV32IBT-NEXT:  .LBB7_2:
 ; RV32IBT-NEXT:    addi a5, zero, 1
-; RV32IBT-NEXT:    fsl a1, a1, a5, a0
+; RV32IBT-NEXT:    fsl a1, a1, a0, a5
 ; RV32IBT-NEXT:    sll a1, a1, a7
 ; RV32IBT-NEXT:    lui a5, 524288
 ; RV32IBT-NEXT:    addi a5, a5, -1
@@ -503,12 +503,12 @@ define i32 @fshri_i32(i32 %a, i32 %b) nounwind {
 ;
 ; RV32IB-LABEL: fshri_i32:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    fsri a0, a0, a1, 5
+; RV32IB-NEXT:    fsri a0, a1, a0, 5
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshri_i32:
 ; RV32IBT:       # %bb.0:
-; RV32IBT-NEXT:    fsri a0, a0, a1, 5
+; RV32IBT-NEXT:    fsri a0, a1, a0, 5
 ; RV32IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 5)
   ret i32 %1
@@ -529,16 +529,16 @@ define i64 @fshri_i64(i64 %a, i64 %b) nounwind {
 ; RV32IB-LABEL: fshri_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a1, zero, 27
-; RV32IB-NEXT:    fsl a2, a3, a1, a2
-; RV32IB-NEXT:    fsl a1, a0, a1, a3
+; RV32IB-NEXT:    fsl a2, a3, a2, a1
+; RV32IB-NEXT:    fsl a1, a0, a3, a1
 ; RV32IB-NEXT:    mv a0, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshri_i64:
 ; RV32IBT:       # %bb.0:
 ; RV32IBT-NEXT:    addi a1, zero, 27
-; RV32IBT-NEXT:    fsl a2, a3, a1, a2
-; RV32IBT-NEXT:    fsl a1, a0, a1, a3
+; RV32IBT-NEXT:    fsl a2, a3, a2, a1
+; RV32IBT-NEXT:    fsl a1, a0, a3, a1
 ; RV32IBT-NEXT:    mv a0, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 5)
@@ -556,13 +556,13 @@ define i32 @fshli_i32(i32 %a, i32 %b) nounwind {
 ; RV32IB-LABEL: fshli_i32:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a2, zero, 5
-; RV32IB-NEXT:    fsl a0, a0, a2, a1
+; RV32IB-NEXT:    fsl a0, a0, a1, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshli_i32:
 ; RV32IBT:       # %bb.0:
 ; RV32IBT-NEXT:    addi a2, zero, 5
-; RV32IBT-NEXT:    fsl a0, a0, a2, a1
+; RV32IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 5)
   ret i32 %1
@@ -583,16 +583,16 @@ define i64 @fshli_i64(i64 %a, i64 %b) nounwind {
 ; RV32IB-LABEL: fshli_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a4, zero, 5
-; RV32IB-NEXT:    fsl a2, a0, a4, a3
-; RV32IB-NEXT:    fsl a1, a1, a4, a0
+; RV32IB-NEXT:    fsl a2, a0, a3, a4
+; RV32IB-NEXT:    fsl a1, a1, a0, a4
 ; RV32IB-NEXT:    mv a0, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshli_i64:
 ; RV32IBT:       # %bb.0:
 ; RV32IBT-NEXT:    addi a4, zero, 5
-; RV32IBT-NEXT:    fsl a2, a0, a4, a3
-; RV32IBT-NEXT:    fsl a1, a1, a4, a0
+; RV32IBT-NEXT:    fsl a2, a0, a3, a4
+; RV32IBT-NEXT:    fsl a1, a1, a0, a4
 ; RV32IBT-NEXT:    mv a0, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 5)

diff --git a/llvm/test/CodeGen/RISCV/rv64Zbt.ll b/llvm/test/CodeGen/RISCV/rv64Zbt.ll
index bb8e4639f34e..0bc752638cef 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zbt.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbt.ll
@@ -120,12 +120,12 @@ define signext i32 @fshl_i32(i32 signext %a, i32 signext %b, i32 signext %c) nou
 ;
 ; RV64IB-LABEL: fshl_i32:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fslw a0, a0, a2, a1
+; RV64IB-NEXT:    fslw a0, a0, a1, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshl_i32:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fslw a0, a0, a2, a1
+; RV64IBT-NEXT:    fslw a0, a0, a1, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
   ret i32 %1
@@ -145,12 +145,12 @@ define i64 @fshl_i64(i64 %a, i64 %b, i64 %c) nounwind {
 ;
 ; RV64IB-LABEL: fshl_i64:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsl a0, a0, a2, a1
+; RV64IB-NEXT:    fsl a0, a0, a1, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshl_i64:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsl a0, a0, a2, a1
+; RV64IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %c)
   ret i64 %1
@@ -172,12 +172,12 @@ define signext i32 @fshr_i32(i32 signext %a, i32 signext %b, i32 signext %c) nou
 ;
 ; RV64IB-LABEL: fshr_i32:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsrw a0, a0, a2, a1
+; RV64IB-NEXT:    fsrw a0, a1, a0, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshr_i32:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsrw a0, a0, a2, a1
+; RV64IBT-NEXT:    fsrw a0, a1, a0, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
   ret i32 %1
@@ -197,12 +197,12 @@ define i64 @fshr_i64(i64 %a, i64 %b, i64 %c) nounwind {
 ;
 ; RV64IB-LABEL: fshr_i64:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsr a0, a0, a2, a1
+; RV64IB-NEXT:    fsr a0, a1, a0, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshr_i64:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsr a0, a0, a2, a1
+; RV64IBT-NEXT:    fsr a0, a1, a0, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %c)
   ret i64 %1
@@ -219,12 +219,12 @@ define signext i32 @fshri_i32(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64IB-LABEL: fshri_i32:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsriw a0, a0, a1, 5
+; RV64IB-NEXT:    fsriw a0, a1, a0, 5
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshri_i32:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsriw a0, a0, a1, 5
+; RV64IBT-NEXT:    fsriw a0, a1, a0, 5
 ; RV64IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 5)
   ret i32 %1
@@ -240,12 +240,12 @@ define i64 @fshri_i64(i64 %a, i64 %b) nounwind {
 ;
 ; RV64IB-LABEL: fshri_i64:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsri a0, a0, a1, 5
+; RV64IB-NEXT:    fsri a0, a1, a0, 5
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshri_i64:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsri a0, a0, a1, 5
+; RV64IBT-NEXT:    fsri a0, a1, a0, 5
 ; RV64IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 5)
   ret i64 %1
@@ -264,7 +264,7 @@ define signext i32 @fshli_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64IB:       # %bb.0:
 ; RV64IB-NEXT:    slli a1, a1, 32
 ; RV64IB-NEXT:    addi a2, zero, 5
-; RV64IB-NEXT:    fsl a0, a0, a2, a1
+; RV64IB-NEXT:    fsl a0, a0, a1, a2
 ; RV64IB-NEXT:    sext.w a0, a0
 ; RV64IB-NEXT:    ret
 ;
@@ -272,7 +272,7 @@ define signext i32 @fshli_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64IBT:       # %bb.0:
 ; RV64IBT-NEXT:    slli a1, a1, 32
 ; RV64IBT-NEXT:    addi a2, zero, 5
-; RV64IBT-NEXT:    fsl a0, a0, a2, a1
+; RV64IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV64IBT-NEXT:    sext.w a0, a0
 ; RV64IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 5)
@@ -290,13 +290,13 @@ define i64 @fshli_i64(i64 %a, i64 %b) nounwind {
 ; RV64IB-LABEL: fshli_i64:
 ; RV64IB:       # %bb.0:
 ; RV64IB-NEXT:    addi a2, zero, 5
-; RV64IB-NEXT:    fsl a0, a0, a2, a1
+; RV64IB-NEXT:    fsl a0, a0, a1, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshli_i64:
 ; RV64IBT:       # %bb.0:
 ; RV64IBT-NEXT:    addi a2, zero, 5
-; RV64IBT-NEXT:    fsl a0, a0, a2, a1
+; RV64IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 5)
   ret i64 %1


        


More information about the llvm-commits mailing list