[llvm] [RISCV] Handle codegen for Big Endian (PR #172668)
Djordje Todorovic via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 18 04:56:45 PST 2025
https://github.com/djtodoro updated https://github.com/llvm/llvm-project/pull/172668
>From 924cf8188aa9a5a6e4a0e4603d09905fd9010a4a Mon Sep 17 00:00:00 2001
From: Djordje Todorovic <djordje.todorovic at htecgroup.com>
Date: Fri, 29 Aug 2025 17:34:07 +0200
Subject: [PATCH 1/3] [RISCV] Handle codegen for Big Endian
- Handle BE in RISCVSubtarget
- Handle big-endian f64 lowering (SplitF64/BuildPairF64 word order)
- Handle split f64 loads/stores
- Add tests comparing LE and BE codegen
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 125 +++--
llvm/lib/Target/RISCV/RISCVSubtarget.cpp | 2 +-
llvm/lib/Target/RISCV/RISCVSubtarget.h | 3 +
.../RISCV/bigendian-double-bitmanip.ll | 78 ++++
llvm/test/CodeGen/RISCV/bigendian-f64-call.ll | 94 ++++
.../CodeGen/RISCV/bigendian-load-store.ll | 435 ++++++++++++++++++
6 files changed, 706 insertions(+), 31 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/bigendian-double-bitmanip.ll
create mode 100644 llvm/test/CodeGen/RISCV/bigendian-f64-call.ll
create mode 100644 llvm/test/CodeGen/RISCV/bigendian-load-store.ll
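Before the diffs, a standalone host-side illustration (not part of the patch) of the invariant every hunk below enforces: the two 32-bit words of an f64 sit in opposite order in memory on little- and big-endian machines, so any code that models a double as a (Lo, Hi) register pair must swap the halves for BE. A minimal C++ sketch:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double D = 1.0; // IEEE-754 bit pattern 0x3FF0000000000000
      uint32_t Words[2];
      std::memcpy(Words, &D, sizeof(D));
      // Little-endian host: Words[0] = 0x00000000 (low half),
      //                     Words[1] = 0x3FF00000 (high half).
      // Big-endian host: the two halves land in the opposite slots,
      // which is why SplitF64/BuildPairF64 must swap Lo and Hi.
      std::printf("Words[0]=0x%08X Words[1]=0x%08X\n",
                  (unsigned)Words[0], (unsigned)Words[1]);
      return 0;
    }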
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2d6bb06d689c3..154776d92fad6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8347,6 +8347,10 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
+ // For big-endian, swap the order of Lo and Hi.
+ if (!Subtarget.isLittleEndian())
+ std::swap(Lo, Hi);
+
SDValue Pair = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
return DAG.getMergeValues({Pair, Chain}, DL);
}
@@ -8419,15 +8423,21 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
SDValue Split = DAG.getNode(RISCVISD::SplitF64, DL,
DAG.getVTList(MVT::i32, MVT::i32), StoredVal);
- SDValue Lo = DAG.getStore(Chain, DL, Split.getValue(0), BasePtr,
- Store->getPointerInfo(), Store->getBaseAlign(),
- Store->getMemOperand()->getFlags());
+ SDValue Lo = Split.getValue(0);
+ SDValue Hi = Split.getValue(1);
+
+ // For big-endian, swap the order of Lo and Hi before storing.
+ if (!Subtarget.isLittleEndian())
+ std::swap(Lo, Hi);
+
+ SDValue LoStore = DAG.getStore(
+ Chain, DL, Lo, BasePtr, Store->getPointerInfo(),
+ Store->getBaseAlign(), Store->getMemOperand()->getFlags());
BasePtr = DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(4));
- SDValue Hi = DAG.getStore(Chain, DL, Split.getValue(1), BasePtr,
- Store->getPointerInfo().getWithOffset(4),
- Store->getBaseAlign(),
- Store->getMemOperand()->getFlags());
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
+ SDValue HiStore = DAG.getStore(
+ Chain, DL, Hi, BasePtr, Store->getPointerInfo().getWithOffset(4),
+ Store->getBaseAlign(), Store->getMemOperand()->getFlags());
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, LoStore, HiStore);
}
if (VT == MVT::i64) {
assert(Subtarget.hasStdExtZilsd() && !Subtarget.is64Bit() &&
@@ -15160,8 +15170,12 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
Subtarget.hasStdExtDOrZdinx()) {
SDValue NewReg = DAG.getNode(RISCVISD::SplitF64, DL,
DAG.getVTList(MVT::i32, MVT::i32), Op0);
- SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64,
- NewReg.getValue(0), NewReg.getValue(1));
+ SDValue Lo = NewReg.getValue(0);
+ SDValue Hi = NewReg.getValue(1);
+ // For big-endian, swap the order when building the i64 pair.
+ if (!Subtarget.isLittleEndian())
+ std::swap(Lo, Hi);
+ SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
Results.push_back(RetReg);
} else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
isTypeLegal(Op0VT)) {
@@ -22538,14 +22552,27 @@ static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
MachineMemOperand *MMOHi = MF.getMachineMemOperand(
MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
- BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
- .addFrameIndex(FI)
- .addImm(0)
- .addMemOperand(MMOLo);
- BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
- .addFrameIndex(FI)
- .addImm(4)
- .addMemOperand(MMOHi);
+
+ // For big-endian, the high part is at offset 0 and the low part at offset 4.
+ if (!Subtarget.isLittleEndian()) {
+ BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addMemOperand(MMOLo);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
+ .addFrameIndex(FI)
+ .addImm(4)
+ .addMemOperand(MMOHi);
+ } else {
+ BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addMemOperand(MMOLo);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
+ .addFrameIndex(FI)
+ .addImm(4)
+ .addMemOperand(MMOHi);
+ }
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -22571,16 +22598,32 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
MachineMemOperand *MMOHi = MF.getMachineMemOperand(
MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
- BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
- .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
- .addFrameIndex(FI)
- .addImm(0)
- .addMemOperand(MMOLo);
- BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
- .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
- .addFrameIndex(FI)
- .addImm(4)
- .addMemOperand(MMOHi);
+
+ // For big-endian, store the high part at offset 0 and the low part at
+ // offset 4.
+ if (!Subtarget.isLittleEndian()) {
+ BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
+ .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addMemOperand(MMOLo);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
+ .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
+ .addFrameIndex(FI)
+ .addImm(4)
+ .addMemOperand(MMOHi);
+ } else {
+ BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
+ .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addMemOperand(MMOLo);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
+ .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
+ .addFrameIndex(FI)
+ .addImm(4)
+ .addMemOperand(MMOHi);
+ }
TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, Register());
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
@@ -23407,6 +23450,13 @@ static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
RegInfo.addLiveIn(HiVA.getLocReg(), HiVReg);
Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
}
+
+  // For big-endian, swap the order of Lo and Hi when building the pair
+  // (exercised by CodeGen/RISCV/bigendian-double-bitmanip.ll).
+  const RISCVSubtarget &Subtarget = DAG.getSubtarget<RISCVSubtarget>();
+  if (!Subtarget.isLittleEndian())
+    std::swap(Lo, Hi);
+
return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
}
@@ -23778,6 +23828,10 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
SDValue Lo = SplitF64.getValue(0);
SDValue Hi = SplitF64.getValue(1);
+ // For big-endian, swap the order of Lo and Hi when passing.
+ if (!Subtarget.isLittleEndian())
+ std::swap(Lo, Hi);
+
Register RegLo = VA.getLocReg();
RegsToPass.push_back(std::make_pair(RegLo, Lo));
@@ -24005,8 +24059,14 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
MVT::i32, Glue);
Chain = RetValue2.getValue(1);
Glue = RetValue2.getValue(2);
- RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
- RetValue2);
+
+ // For big-endian, swap the order when building the pair.
+ SDValue Lo = RetValue;
+ SDValue Hi = RetValue2;
+ if (!Subtarget.isLittleEndian())
+ std::swap(Lo, Hi);
+
+ RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
} else
RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
@@ -24071,6 +24131,11 @@ RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
DAG.getVTList(MVT::i32, MVT::i32), Val);
SDValue Lo = SplitF64.getValue(0);
SDValue Hi = SplitF64.getValue(1);
+
+ // For big-endian, swap the order of Lo and Hi when returning.
+ if (!Subtarget.isLittleEndian())
+ std::swap(Lo, Hi);
+
Register RegLo = VA.getLocReg();
Register RegHi = RVLocs[++i].getLocReg();
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
index f86265a21d17e..87227a9d14504 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -108,7 +108,7 @@ RISCVSubtarget::RISCVSubtarget(const Triple &TT, StringRef CPU,
StringRef ABIName, unsigned RVVVectorBitsMin,
unsigned RVVVectorBitsMax,
const TargetMachine &TM)
- : RISCVGenSubtargetInfo(TT, CPU, TuneCPU, FS),
+ : RISCVGenSubtargetInfo(TT, CPU, TuneCPU, FS), TargetTriple(TT),
RVVVectorBitsMin(RVVVectorBitsMin), RVVVectorBitsMax(RVVVectorBitsMax),
FrameLowering(
initializeSubtargetDependencies(TT, CPU, TuneCPU, FS, ABIName)),
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 29df53c6c9893..792def5fe91a0 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -97,6 +97,8 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
RISCVProcFamilyEnum RISCVProcFamily = Others;
RISCVVRGatherCostModelEnum RISCVVRGatherCostModel = Quadratic;
+ Triple TargetTriple;
+
#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
bool ATTRIBUTE = DEFAULT;
#include "RISCVGenSubtargetInfo.inc"
@@ -220,6 +222,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
}
bool is64Bit() const { return IsRV64; }
+ bool isLittleEndian() const { return TargetTriple.isLittleEndian(); }
MVT getXLenVT() const {
return is64Bit() ? MVT::i64 : MVT::i32;
}
diff --git a/llvm/test/CodeGen/RISCV/bigendian-double-bitmanip.ll b/llvm/test/CodeGen/RISCV/bigendian-double-bitmanip.ll
new file mode 100644
index 0000000000000..c85fd5d4c55da
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/bigendian-double-bitmanip.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32 -mattr=+d -verify-machineinstrs < %s | FileCheck -check-prefix=RV32IFD-LE %s
+; RUN: llc -mtriple=riscv32be -target-abi=ilp32 -mattr=+d -verify-machineinstrs < %s | FileCheck -check-prefix=RV32IFD-BE %s
+
+; Test operations that involve SplitF64/BuildPairF64 on RV32 with D extension
+; but soft-float ABI. This configuration triggers the special handling for
+; big-endian.
+
+define double @fneg(double %a) nounwind {
+; RV32IFD-LE-LABEL: fneg:
+; RV32IFD-LE: # %bb.0:
+; RV32IFD-LE-NEXT: lui a2, 524288
+; RV32IFD-LE-NEXT: xor a1, a1, a2
+; RV32IFD-LE-NEXT: ret
+;
+; RV32IFD-BE-LABEL: fneg:
+; RV32IFD-BE: # %bb.0:
+; RV32IFD-BE-NEXT: lui a2, 524288
+; RV32IFD-BE-NEXT: xor a0, a0, a2
+; RV32IFD-BE-NEXT: ret
+ %1 = fneg double %a
+ ret double %1
+}
+
+define double @fabs(double %a) nounwind {
+; RV32IFD-LE-LABEL: fabs:
+; RV32IFD-LE: # %bb.0:
+; RV32IFD-LE-NEXT: slli a1, a1, 1
+; RV32IFD-LE-NEXT: srli a1, a1, 1
+; RV32IFD-LE-NEXT: ret
+;
+; RV32IFD-BE-LABEL: fabs:
+; RV32IFD-BE: # %bb.0:
+; RV32IFD-BE-NEXT: slli a0, a0, 1
+; RV32IFD-BE-NEXT: srli a0, a0, 1
+; RV32IFD-BE-NEXT: ret
+ %1 = call double @llvm.fabs.f64(double %a)
+ ret double %1
+}
+
+define double @fcopysign(double %a, double %b) nounwind {
+; RV32IFD-LE-LABEL: fcopysign:
+; RV32IFD-LE: # %bb.0:
+; RV32IFD-LE-NEXT: addi sp, sp, -16
+; RV32IFD-LE-NEXT: sw a2, 8(sp)
+; RV32IFD-LE-NEXT: sw a3, 12(sp)
+; RV32IFD-LE-NEXT: fld fa5, 8(sp)
+; RV32IFD-LE-NEXT: sw a0, 8(sp)
+; RV32IFD-LE-NEXT: sw a1, 12(sp)
+; RV32IFD-LE-NEXT: fld fa4, 8(sp)
+; RV32IFD-LE-NEXT: fsgnj.d fa5, fa4, fa5
+; RV32IFD-LE-NEXT: fsd fa5, 8(sp)
+; RV32IFD-LE-NEXT: lw a0, 8(sp)
+; RV32IFD-LE-NEXT: lw a1, 12(sp)
+; RV32IFD-LE-NEXT: addi sp, sp, 16
+; RV32IFD-LE-NEXT: ret
+;
+; RV32IFD-BE-LABEL: fcopysign:
+; RV32IFD-BE: # %bb.0:
+; RV32IFD-BE-NEXT: addi sp, sp, -16
+; RV32IFD-BE-NEXT: sw a2, 8(sp)
+; RV32IFD-BE-NEXT: sw a3, 12(sp)
+; RV32IFD-BE-NEXT: fld fa5, 8(sp)
+; RV32IFD-BE-NEXT: sw a0, 8(sp)
+; RV32IFD-BE-NEXT: sw a1, 12(sp)
+; RV32IFD-BE-NEXT: fld fa4, 8(sp)
+; RV32IFD-BE-NEXT: fsgnj.d fa5, fa4, fa5
+; RV32IFD-BE-NEXT: fsd fa5, 8(sp)
+; RV32IFD-BE-NEXT: lw a0, 8(sp)
+; RV32IFD-BE-NEXT: lw a1, 12(sp)
+; RV32IFD-BE-NEXT: addi sp, sp, 16
+; RV32IFD-BE-NEXT: ret
+ %1 = call double @llvm.copysign.f64(double %a, double %b)
+ ret double %1
+}
+
+declare double @llvm.fabs.f64(double)
+declare double @llvm.copysign.f64(double, double)
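The fneg/fabs checks above differ only in which argument register carries the IEEE-754 sign word: a1 on LE, a0 on BE (the `lui a2, 524288` materializes exactly the 0x80000000 high-word mask). As a host-side analogue, again a sketch rather than the patch's actual lowering, fneg is just an XOR of the sign bit in the double's bit pattern:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double D = 42.0;
      uint64_t Bits;
      std::memcpy(&Bits, &D, sizeof(D));
      Bits ^= 1ULL << 63; // fneg: flip the IEEE-754 sign bit
      std::memcpy(&D, &Bits, sizeof(D));
      std::printf("%f\n", D); // prints -42.000000
      return 0;
    }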
diff --git a/llvm/test/CodeGen/RISCV/bigendian-f64-call.ll b/llvm/test/CodeGen/RISCV/bigendian-f64-call.ll
new file mode 100644
index 0000000000000..83057e23a0d85
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/bigendian-f64-call.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32 -mattr=+d -verify-machineinstrs < %s | FileCheck -check-prefix=RV32LE %s
+; RUN: llc -mtriple=riscv32be -target-abi=ilp32 -mattr=+d -verify-machineinstrs < %s | FileCheck -check-prefix=RV32BE %s
+
+; Test f64 function calls with the D extension and the soft-float ABI.
+; This specifically exercises the LowerCall path that must swap Lo/Hi for BE.
+
+declare double @external_func(double, double)
+
+define double @test_f64_call(double %a, double %b) {
+; RV32LE-LABEL: test_f64_call:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: addi sp, sp, -16
+; RV32LE-NEXT: .cfi_def_cfa_offset 16
+; RV32LE-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32LE-NEXT: .cfi_offset ra, -4
+; RV32LE-NEXT: call external_func
+; RV32LE-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32LE-NEXT: .cfi_restore ra
+; RV32LE-NEXT: addi sp, sp, 16
+; RV32LE-NEXT: .cfi_def_cfa_offset 0
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: test_f64_call:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: addi sp, sp, -16
+; RV32BE-NEXT: .cfi_def_cfa_offset 16
+; RV32BE-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32BE-NEXT: .cfi_offset ra, -4
+; RV32BE-NEXT: call external_func
+; RV32BE-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32BE-NEXT: .cfi_restore ra
+; RV32BE-NEXT: addi sp, sp, 16
+; RV32BE-NEXT: .cfi_def_cfa_offset 0
+; RV32BE-NEXT: ret
+ %result = call double @external_func(double %a, double %b)
+ ret double %result
+}
+
+; Test with a computation before the call to force SplitF64
+define double @test_f64_call_with_fadd(double %a, double %b) {
+; RV32LE-LABEL: test_f64_call_with_fadd:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: addi sp, sp, -16
+; RV32LE-NEXT: .cfi_def_cfa_offset 16
+; RV32LE-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32LE-NEXT: .cfi_offset ra, -4
+; RV32LE-NEXT: sw a2, 0(sp)
+; RV32LE-NEXT: sw a3, 4(sp)
+; RV32LE-NEXT: fld fa5, 0(sp)
+; RV32LE-NEXT: sw a0, 0(sp)
+; RV32LE-NEXT: sw a1, 4(sp)
+; RV32LE-NEXT: fld fa4, 0(sp)
+; RV32LE-NEXT: fadd.d fa5, fa4, fa5
+; RV32LE-NEXT: fsd fa5, 0(sp)
+; RV32LE-NEXT: lw a0, 0(sp)
+; RV32LE-NEXT: lw a1, 4(sp)
+; RV32LE-NEXT: mv a2, a0
+; RV32LE-NEXT: mv a3, a1
+; RV32LE-NEXT: call external_func
+; RV32LE-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32LE-NEXT: .cfi_restore ra
+; RV32LE-NEXT: addi sp, sp, 16
+; RV32LE-NEXT: .cfi_def_cfa_offset 0
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: test_f64_call_with_fadd:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: addi sp, sp, -16
+; RV32BE-NEXT: .cfi_def_cfa_offset 16
+; RV32BE-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32BE-NEXT: .cfi_offset ra, -4
+; RV32BE-NEXT: sw a2, 0(sp)
+; RV32BE-NEXT: sw a3, 4(sp)
+; RV32BE-NEXT: fld fa5, 0(sp)
+; RV32BE-NEXT: sw a0, 0(sp)
+; RV32BE-NEXT: sw a1, 4(sp)
+; RV32BE-NEXT: fld fa4, 0(sp)
+; RV32BE-NEXT: fadd.d fa5, fa4, fa5
+; RV32BE-NEXT: fsd fa5, 0(sp)
+; RV32BE-NEXT: lw a0, 0(sp)
+; RV32BE-NEXT: lw a1, 4(sp)
+; RV32BE-NEXT: mv a2, a0
+; RV32BE-NEXT: mv a3, a1
+; RV32BE-NEXT: call external_func
+; RV32BE-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32BE-NEXT: .cfi_restore ra
+; RV32BE-NEXT: addi sp, sp, 16
+; RV32BE-NEXT: .cfi_def_cfa_offset 0
+; RV32BE-NEXT: ret
+ %sum = fadd double %a, %b
+ %result = call double @external_func(double %sum, double %sum)
+ ret double %result
+}
diff --git a/llvm/test/CodeGen/RISCV/bigendian-load-store.ll b/llvm/test/CodeGen/RISCV/bigendian-load-store.ll
new file mode 100644
index 0000000000000..175346d5ab0f5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/bigendian-load-store.ll
@@ -0,0 +1,435 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32LE
+; RUN: llc -mtriple=riscv32be -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32BE
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64LE
+; RUN: llc -mtriple=riscv64be -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64BE
+
+; Test basic load/store operations on both little-endian and big-endian RISC-V
+
+define i32 @load_i32(ptr %p) {
+; RV32LE-LABEL: load_i32:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: lw a0, 0(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: load_i32:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: lw a0, 0(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: load_i32:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: lw a0, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: load_i32:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: lw a0, 0(a0)
+; RV64BE-NEXT: ret
+ %v = load i32, ptr %p
+ ret i32 %v
+}
+
+define void @store_i32(ptr %p, i32 %v) {
+; RV32LE-LABEL: store_i32:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: sw a1, 0(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: store_i32:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: sw a1, 0(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: store_i32:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: sw a1, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: store_i32:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: sw a1, 0(a0)
+; RV64BE-NEXT: ret
+ store i32 %v, ptr %p
+ ret void
+}
+
+define i16 @load_i16(ptr %p) {
+; RV32LE-LABEL: load_i16:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: lh a0, 0(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: load_i16:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: lh a0, 0(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: load_i16:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: lh a0, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: load_i16:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: lh a0, 0(a0)
+; RV64BE-NEXT: ret
+ %v = load i16, ptr %p
+ ret i16 %v
+}
+
+define void @store_i16(ptr %p, i16 %v) {
+; RV32LE-LABEL: store_i16:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: sh a1, 0(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: store_i16:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: sh a1, 0(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: store_i16:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: sh a1, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: store_i16:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: sh a1, 0(a0)
+; RV64BE-NEXT: ret
+ store i16 %v, ptr %p
+ ret void
+}
+
+define i8 @load_i8(ptr %p) {
+; RV32LE-LABEL: load_i8:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: lbu a0, 0(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: load_i8:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: lbu a0, 0(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: load_i8:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: lbu a0, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: load_i8:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: lbu a0, 0(a0)
+; RV64BE-NEXT: ret
+ %v = load i8, ptr %p
+ ret i8 %v
+}
+
+define void @store_i8(ptr %p, i8 %v) {
+; RV32LE-LABEL: store_i8:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: sb a1, 0(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: store_i8:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: sb a1, 0(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: store_i8:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: sb a1, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: store_i8:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: sb a1, 0(a0)
+; RV64BE-NEXT: ret
+ store i8 %v, ptr %p
+ ret void
+}
+
+define i64 @load_i64(ptr %p) {
+; RV32LE-LABEL: load_i64:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: lw a2, 0(a0)
+; RV32LE-NEXT: lw a1, 4(a0)
+; RV32LE-NEXT: mv a0, a2
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: load_i64:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: lw a2, 0(a0)
+; RV32BE-NEXT: lw a1, 4(a0)
+; RV32BE-NEXT: mv a0, a2
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: load_i64:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: ld a0, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: load_i64:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: ld a0, 0(a0)
+; RV64BE-NEXT: ret
+ %v = load i64, ptr %p
+ ret i64 %v
+}
+
+define void @store_i64(ptr %p, i64 %v) {
+; RV32LE-LABEL: store_i64:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: sw a1, 0(a0)
+; RV32LE-NEXT: sw a2, 4(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: store_i64:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: sw a1, 0(a0)
+; RV32BE-NEXT: sw a2, 4(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: store_i64:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: sd a1, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: store_i64:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: sd a1, 0(a0)
+; RV64BE-NEXT: ret
+ store i64 %v, ptr %p
+ ret void
+}
+
+; Test float/double loads and stores
+define float @load_float(ptr %p) {
+; RV32LE-LABEL: load_float:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: lw a0, 0(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: load_float:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: lw a0, 0(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: load_float:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: lw a0, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: load_float:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: lw a0, 0(a0)
+; RV64BE-NEXT: ret
+ %v = load float, ptr %p
+ ret float %v
+}
+
+define void @store_float(ptr %p, float %v) {
+; RV32LE-LABEL: store_float:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: sw a1, 0(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: store_float:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: sw a1, 0(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: store_float:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: sw a1, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: store_float:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: sw a1, 0(a0)
+; RV64BE-NEXT: ret
+ store float %v, ptr %p
+ ret void
+}
+
+define double @load_double(ptr %p) {
+; RV32LE-LABEL: load_double:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: lw a2, 0(a0)
+; RV32LE-NEXT: lw a1, 4(a0)
+; RV32LE-NEXT: mv a0, a2
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: load_double:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: lw a2, 0(a0)
+; RV32BE-NEXT: lw a1, 4(a0)
+; RV32BE-NEXT: mv a0, a2
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: load_double:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: ld a0, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: load_double:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: ld a0, 0(a0)
+; RV64BE-NEXT: ret
+ %v = load double, ptr %p
+ ret double %v
+}
+
+define void @store_double(ptr %p, double %v) {
+; RV32LE-LABEL: store_double:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: sw a1, 0(a0)
+; RV32LE-NEXT: sw a2, 4(a0)
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: store_double:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: sw a1, 0(a0)
+; RV32BE-NEXT: sw a2, 4(a0)
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: store_double:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: sd a1, 0(a0)
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: store_double:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: sd a1, 0(a0)
+; RV64BE-NEXT: ret
+ store double %v, ptr %p
+ ret void
+}
+
+; Test f64 argument passing and returns
+declare double @external_f64_func(double, double)
+
+define double @test_f64_arg_return(double %a, double %b) {
+; RV32LE-LABEL: test_f64_arg_return:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: addi sp, sp, -16
+; RV32LE-NEXT: .cfi_def_cfa_offset 16
+; RV32LE-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32LE-NEXT: .cfi_offset ra, -4
+; RV32LE-NEXT: call external_f64_func
+; RV32LE-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32LE-NEXT: .cfi_restore ra
+; RV32LE-NEXT: addi sp, sp, 16
+; RV32LE-NEXT: .cfi_def_cfa_offset 0
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: test_f64_arg_return:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: addi sp, sp, -16
+; RV32BE-NEXT: .cfi_def_cfa_offset 16
+; RV32BE-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32BE-NEXT: .cfi_offset ra, -4
+; RV32BE-NEXT: call external_f64_func
+; RV32BE-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32BE-NEXT: .cfi_restore ra
+; RV32BE-NEXT: addi sp, sp, 16
+; RV32BE-NEXT: .cfi_def_cfa_offset 0
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: test_f64_arg_return:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: addi sp, sp, -16
+; RV64LE-NEXT: .cfi_def_cfa_offset 16
+; RV64LE-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64LE-NEXT: .cfi_offset ra, -8
+; RV64LE-NEXT: call external_f64_func
+; RV64LE-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64LE-NEXT: .cfi_restore ra
+; RV64LE-NEXT: addi sp, sp, 16
+; RV64LE-NEXT: .cfi_def_cfa_offset 0
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: test_f64_arg_return:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: addi sp, sp, -16
+; RV64BE-NEXT: .cfi_def_cfa_offset 16
+; RV64BE-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64BE-NEXT: .cfi_offset ra, -8
+; RV64BE-NEXT: call external_f64_func
+; RV64BE-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64BE-NEXT: .cfi_restore ra
+; RV64BE-NEXT: addi sp, sp, 16
+; RV64BE-NEXT: .cfi_def_cfa_offset 0
+; RV64BE-NEXT: ret
+ %result = call double @external_f64_func(double %a, double %b)
+ ret double %result
+}
+
+; Test bitcast from f64 to i64
+define i64 @bitcast_f64_to_i64(double %x) {
+; RV32LE-LABEL: bitcast_f64_to_i64:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: bitcast_f64_to_i64:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: bitcast_f64_to_i64:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: bitcast_f64_to_i64:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: ret
+ %y = bitcast double %x to i64
+ ret i64 %y
+}
+
+; Test bitcast from i64 to f64
+define double @bitcast_i64_to_f64(i64 %x) {
+; RV32LE-LABEL: bitcast_i64_to_f64:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: bitcast_i64_to_f64:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: bitcast_i64_to_f64:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: bitcast_i64_to_f64:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: ret
+ %y = bitcast i64 %x to double
+ ret double %y
+}
+
+; Test i64 return value register order (a0=low/a1=high for LE, a0=high/a1=low for BE)
+define i64 @return_i64_const() {
+; RV32LE-LABEL: return_i64_const:
+; RV32LE: # %bb.0:
+; RV32LE-NEXT: li a0, 1
+; RV32LE-NEXT: li a1, 0
+; RV32LE-NEXT: ret
+;
+; RV32BE-LABEL: return_i64_const:
+; RV32BE: # %bb.0:
+; RV32BE-NEXT: li a1, 1
+; RV32BE-NEXT: li a0, 0
+; RV32BE-NEXT: ret
+;
+; RV64LE-LABEL: return_i64_const:
+; RV64LE: # %bb.0:
+; RV64LE-NEXT: li a0, 1
+; RV64LE-NEXT: ret
+;
+; RV64BE-LABEL: return_i64_const:
+; RV64BE: # %bb.0:
+; RV64BE-NEXT: li a0, 1
+; RV64BE-NEXT: ret
+ ret i64 1
+}
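The second patch below collapses the duplicated BuildMI if/else blocks into a single emission path by swapping the registers (and kill flags) once up front. A minimal sketch of that pattern in plain C++, where Reg and the EmitStoreAt callback are simplified stand-ins and not the LLVM MachineInstr API:

    #include <cstdio>
    #include <utility>

    struct Reg { unsigned Id; };

    template <typename EmitFn>
    void emitPairStores(Reg Lo, Reg Hi, bool IsLittleEndian, EmitFn EmitStoreAt) {
      // On big-endian targets the high half lives at the lower offset, so one
      // swap up front keeps the emission sequence endian-agnostic.
      if (!IsLittleEndian)
        std::swap(Lo, Hi);
      EmitStoreAt(Lo, /*Offset=*/0);
      EmitStoreAt(Hi, /*Offset=*/4);
    }

    int main() {
      auto Print = [](Reg R, int Offset) {
        std::printf("sw x%u, %d(sp)\n", R.Id, Offset);
      };
      emitPairStores(Reg{10}, Reg{11}, /*IsLittleEndian=*/false, Print);
      return 0;
    }

With IsLittleEndian = false this emits x11 at offset 0 and x10 at offset 4, matching what emitBuildPairF64Pseudo does after the cleanup.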
>From 3e847a55a1d15fba95cae54c369ef5a093becca6 Mon Sep 17 00:00:00 2001
From: Djordje Todorovic <djordje.todorovic at htecgroup.com>
Date: Thu, 18 Dec 2025 13:53:13 +0100
Subject: [PATCH 2/3] Address comments
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 68 ++++++++-------------
llvm/lib/Target/RISCV/RISCVSubtarget.cpp | 1 +
llvm/lib/Target/RISCV/RISCVSubtarget.h | 3 +-
3 files changed, 30 insertions(+), 42 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 154776d92fad6..5c91badc049be 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22554,25 +22554,17 @@ static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
// For big-endian, the high part is at offset 0 and the low part at offset 4.
- if (!Subtarget.isLittleEndian()) {
- BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
- .addFrameIndex(FI)
- .addImm(0)
- .addMemOperand(MMOLo);
- BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
- .addFrameIndex(FI)
- .addImm(4)
- .addMemOperand(MMOHi);
- } else {
- BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
- .addFrameIndex(FI)
- .addImm(0)
- .addMemOperand(MMOLo);
- BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
- .addFrameIndex(FI)
- .addImm(4)
- .addMemOperand(MMOHi);
- }
+ if (!Subtarget.isLittleEndian())
+ std::swap(LoReg, HiReg);
+
+ BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addMemOperand(MMOLo);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
+ .addFrameIndex(FI)
+ .addImm(4)
+ .addMemOperand(MMOHi);
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -22589,6 +22581,8 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
Register DstReg = MI.getOperand(0).getReg();
Register LoReg = MI.getOperand(1).getReg();
Register HiReg = MI.getOperand(2).getReg();
+ bool KillLo = MI.getOperand(1).isKill();
+ bool KillHi = MI.getOperand(2).isKill();
const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
@@ -22602,28 +22596,20 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
// For big-endian, store the high part at offset 0 and the low part at
// offset 4.
if (!Subtarget.isLittleEndian()) {
- BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
- .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
- .addFrameIndex(FI)
- .addImm(0)
- .addMemOperand(MMOLo);
- BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
- .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
- .addFrameIndex(FI)
- .addImm(4)
- .addMemOperand(MMOHi);
- } else {
- BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
- .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
- .addFrameIndex(FI)
- .addImm(0)
- .addMemOperand(MMOLo);
- BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
- .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
- .addFrameIndex(FI)
- .addImm(4)
- .addMemOperand(MMOHi);
- }
+ std::swap(LoReg, HiReg);
+ std::swap(KillLo, KillHi);
+ }
+
+ BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
+ .addReg(LoReg, getKillRegState(KillLo))
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addMemOperand(MMOLo);
+ BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
+ .addReg(HiReg, getKillRegState(KillHi))
+ .addFrameIndex(FI)
+ .addImm(4)
+ .addMemOperand(MMOHi);
TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, Register());
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
index 87227a9d14504..89dc8fb2fee1a 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -109,6 +109,7 @@ RISCVSubtarget::RISCVSubtarget(const Triple &TT, StringRef CPU,
unsigned RVVVectorBitsMax,
const TargetMachine &TM)
: RISCVGenSubtargetInfo(TT, CPU, TuneCPU, FS), TargetTriple(TT),
+ IsLittleEndian(TT.isLittleEndian()),
RVVVectorBitsMin(RVVVectorBitsMin), RVVVectorBitsMax(RVVVectorBitsMax),
FrameLowering(
initializeSubtargetDependencies(TT, CPU, TuneCPU, FS, ABIName)),
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 792def5fe91a0..41a63857c4e3d 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -98,6 +98,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
RISCVVRGatherCostModelEnum RISCVVRGatherCostModel = Quadratic;
Triple TargetTriple;
+ bool IsLittleEndian = true;
#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
bool ATTRIBUTE = DEFAULT;
@@ -222,7 +223,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
}
bool is64Bit() const { return IsRV64; }
- bool isLittleEndian() const { return TargetTriple.isLittleEndian(); }
+ bool isLittleEndian() const { return IsLittleEndian; }
MVT getXLenVT() const {
return is64Bit() ? MVT::i64 : MVT::i32;
}
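Patch 2 also stops consulting the Triple on every isLittleEndian() query by caching the byte order in a bool at construction time. A sketch of that pattern with a stand-in Triple type, simplified to just the members involved:

    #include <cassert>

    struct TripleStub {
      bool BigEndian = false;
      bool isLittleEndian() const { return !BigEndian; }
    };

    class SubtargetSketch {
      TripleStub TargetTriple;
      bool IsLittleEndian = true; // cached once; never recomputed per query
    public:
      explicit SubtargetSketch(const TripleStub &TT)
          : TargetTriple(TT), IsLittleEndian(TT.isLittleEndian()) {}
      bool isLittleEndian() const { return IsLittleEndian; }
    };

    int main() {
      SubtargetSketch BE(TripleStub{/*BigEndian=*/true});
      assert(!BE.isLittleEndian());
      return 0;
    }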
>From 7a0a8cfb7b454a7d6cb737096e5786aaf75a74b0 Mon Sep 17 00:00:00 2001
From: Djordje Todorovic <djordje.todorovic at htecgroup.com>
Date: Thu, 18 Dec 2025 13:56:31 +0100
Subject: [PATCH 3/3] Apply clang-format
---
llvm/lib/Target/RISCV/RISCVSubtarget.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
index 89dc8fb2fee1a..ca0359d665564 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -109,8 +109,8 @@ RISCVSubtarget::RISCVSubtarget(const Triple &TT, StringRef CPU,
unsigned RVVVectorBitsMax,
const TargetMachine &TM)
: RISCVGenSubtargetInfo(TT, CPU, TuneCPU, FS), TargetTriple(TT),
- IsLittleEndian(TT.isLittleEndian()),
- RVVVectorBitsMin(RVVVectorBitsMin), RVVVectorBitsMax(RVVVectorBitsMax),
+ IsLittleEndian(TT.isLittleEndian()), RVVVectorBitsMin(RVVVectorBitsMin),
+ RVVVectorBitsMax(RVVVectorBitsMax),
FrameLowering(
initializeSubtargetDependencies(TT, CPU, TuneCPU, FS, ABIName)),
InstrInfo(*this), TLInfo(TM, *this) {