[clang] [clang][nfc] Remove `else` after `return` in ARM.cpp (PR #180733)
Andrzej Warzyński via cfe-commits
cfe-commits at lists.llvm.org
Tue Feb 10 04:47:33 PST 2026
https://github.com/banach-space created https://github.com/llvm/llvm-project/pull/180733
Align with the LLVM coding standard:
* https://llvm.org/docs/CodingStandards.html#don-t-use-else-after-a-return
>From 89c20253ebeb6d15d79a3fdcf7aa6af5d13b843e Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Tue, 10 Feb 2026 12:43:40 +0000
Subject: [PATCH] [clang][nfc] Remove `else` after `return` in ARM.cpp
Align with the LLVM coding standard:
* https://llvm.org/docs/CodingStandards.html#don-t-use-else-after-a-return
---
clang/lib/CodeGen/TargetBuiltins/ARM.cpp | 84 ++++++++++++------------
1 file changed, 41 insertions(+), 43 deletions(-)
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index c45e819a03855..a4a43e8a1a94e 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -352,8 +352,8 @@ static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
if (CGF.Builder.getIsFPConstrained())
return CGF.Builder.CreateConstrainedFPCall(F, Args);
- else
- return CGF.Builder.CreateCall(F, Args);
+
+ return CGF.Builder.CreateCall(F, Args);
}
static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
@@ -373,13 +373,11 @@ static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
case NeonTypeFlags::BFloat16:
if (AllowBFloatArgsAndRet)
return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
- else
- return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Float16:
if (HasFastHalfType)
return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
- else
- return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Int32:
return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Int64:
@@ -441,8 +439,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
if (F->isConstrainedFPIntrinsic())
return Builder.CreateConstrainedFPCall(F, Ops, name);
- else
- return Builder.CreateCall(F, Ops, name);
+ return Builder.CreateCall(F, Ops, name);
}
Value *CodeGenFunction::EmitFP8NeonCall(unsigned IID,
@@ -535,8 +532,7 @@ Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
Shift = EmitNeonShiftVector(Shift, Ty, false);
if (usgn)
return Builder.CreateLShr(Vec, Shift, name);
- else
- return Builder.CreateAShr(Vec, Shift, name);
+ return Builder.CreateAShr(Vec, Shift, name);
}
enum {
@@ -4656,27 +4652,30 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
if (TypeFlags.isLoad())
return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
TypeFlags.isZExtReturn());
- else if (TypeFlags.isStore())
+ if (TypeFlags.isStore())
return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isGatherLoad())
+ if (TypeFlags.isGatherLoad())
return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isScatterStore())
+ if (TypeFlags.isScatterStore())
return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isPrefetch())
+ if (TypeFlags.isPrefetch())
return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isGatherPrefetch())
+ if (TypeFlags.isGatherPrefetch())
return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isStructLoad())
+ if (TypeFlags.isStructLoad())
return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isStructStore())
+ if (TypeFlags.isStructStore())
return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isTupleSet() || TypeFlags.isTupleGet())
+ if (TypeFlags.isTupleSet() || TypeFlags.isTupleGet())
return EmitSVETupleSetOrGet(TypeFlags, Ops);
- else if (TypeFlags.isTupleCreate())
+ if (TypeFlags.isTupleCreate())
return EmitSVETupleCreate(TypeFlags, Ty, Ops);
- else if (TypeFlags.isUndef())
+ if (TypeFlags.isUndef())
return UndefValue::get(Ty);
- else if (Builtin->LLVMIntrinsic != 0) {
+
+ // Handle built-ins for which there is a corresponding LLVM Intrinsic.
+ // -------------------------------------------------------------------
+ if (Builtin->LLVMIntrinsic != 0) {
// Emit set FPMR for intrinsics that require it
if (TypeFlags.setsFPMR())
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_set_fpmr),
@@ -5035,12 +5034,12 @@ Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID,
if (TypeFlags.isLoad() || TypeFlags.isStore())
return EmitSMELd1St1(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isReadZA() || TypeFlags.isWriteZA())
+ if (TypeFlags.isReadZA() || TypeFlags.isWriteZA())
return EmitSMEReadWrite(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (BuiltinID == SME::BI__builtin_sme_svzero_mask_za ||
+ if (BuiltinID == SME::BI__builtin_sme_svzero_mask_za ||
BuiltinID == SME::BI__builtin_sme_svzero_za)
return EmitSMEZero(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (BuiltinID == SME::BI__builtin_sme_svldr_vnum_za ||
+ if (BuiltinID == SME::BI__builtin_sme_svldr_vnum_za ||
BuiltinID == SME::BI__builtin_sme_svstr_vnum_za ||
BuiltinID == SME::BI__builtin_sme_svldr_za ||
BuiltinID == SME::BI__builtin_sme_svstr_za)
@@ -5272,27 +5271,26 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
}
return ToRet;
- } else {
- // Load 8 i64 words from ValPtr, and store them to the address
- // via an LLVM intrinsic.
- SmallVector<llvm::Value *, 9> Args;
- Args.push_back(MemAddr);
- for (size_t i = 0; i < 8; i++) {
- llvm::Value *ValOffsetPtr =
- Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
- Address Addr =
- Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
- Args.push_back(Builder.CreateLoad(Addr));
- }
+ }
- auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b
- ? Intrinsic::aarch64_st64b
- : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv
- ? Intrinsic::aarch64_st64bv
- : Intrinsic::aarch64_st64bv0);
- Function *F = CGM.getIntrinsic(Intr);
- return Builder.CreateCall(F, Args);
+ // Load 8 i64 words from ValPtr, and store them to the address
+ // via an LLVM intrinsic.
+ SmallVector<llvm::Value *, 9> Args;
+ Args.push_back(MemAddr);
+ for (size_t i = 0; i < 8; i++) {
+ llvm::Value *ValOffsetPtr =
+ Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
+ Address Addr = Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
+ Args.push_back(Builder.CreateLoad(Addr));
}
+
+ auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b
+ ? Intrinsic::aarch64_st64b
+ : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv
+ ? Intrinsic::aarch64_st64bv
+ : Intrinsic::aarch64_st64bv0);
+ Function *F = CGM.getIntrinsic(Intr);
+ return Builder.CreateCall(F, Args);
}
if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr ||
More information about the cfe-commits
mailing list