[llvm] de9d80c - [llvm] LLVM_FALLTHROUGH => [[fallthrough]]. NFC
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 8 11:24:23 PDT 2022
Author: Fangrui Song
Date: 2022-08-08T11:24:15-07:00
New Revision: de9d80c1c579e39cc658a508f1d4ba1cd792e4d5
URL: https://github.com/llvm/llvm-project/commit/de9d80c1c579e39cc658a508f1d4ba1cd792e4d5
DIFF: https://github.com/llvm/llvm-project/commit/de9d80c1c579e39cc658a508f1d4ba1cd792e4d5.diff
LOG: [llvm] LLVM_FALLTHROUGH => [[fallthrough]]. NFC
With C++17, spelling [[fallthrough]] directly no longer triggers Clang's -pedantic warning or MSVC warning C5051, so the LLVM_FALLTHROUGH macro is no longer needed.
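
For context, a minimal self-contained sketch of the pattern being migrated; the direction_of() function below is hypothetical and not code from this commit. LLVM_FALLTHROUGH is the portability macro (defined in llvm/Support/Compiler.h) that previously wrapped the attribute; with the codebase now on C++17, the standard [[fallthrough]] attribute can be written directly:

  // Hypothetical, self-contained example -- not code from this patch.
  // Old spelling, via the portability macro from llvm/Support/Compiler.h:
  //
  //     case '-':
  //       direction = -1;
  //       LLVM_FALLTHROUGH;   // expanded to [[fallthrough]] (or a vendor builtin)
  //     case '+':
  //       return direction;
  //
  // New spelling, now that the codebase builds as C++17:
  int direction_of(char c) {
    int direction = 1;
    switch (c) {
    case '-':
      direction = -1;
      [[fallthrough]];        // intentional: '-' shares the '+' handling below
    case '+':
      return direction;       // '+' yields 1, '-' yields -1
    default:
      return 0;               // not a sign character
    }
  }

The attribute must appear as its own empty statement immediately before the next case label; the two spellings are equivalent under C++17, which is why the change is NFC.
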
Added:
Modified:
llvm/examples/BrainF/BrainF.cpp
llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
llvm/include/llvm/CodeGen/BasicTTIImpl.h
llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
llvm/include/llvm/Support/VirtualFileSystem.h
llvm/include/llvm/Target/CodeGenCWrappers.h
llvm/lib/Analysis/BasicAliasAnalysis.cpp
llvm/lib/Analysis/ConstantFolding.cpp
llvm/lib/Analysis/IVDescriptors.cpp
llvm/lib/Analysis/InstructionSimplify.cpp
llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
llvm/lib/Analysis/ScalarEvolution.cpp
llvm/lib/Analysis/TargetLibraryInfo.cpp
llvm/lib/Analysis/ValueTracking.cpp
llvm/lib/BinaryFormat/Magic.cpp
llvm/lib/Bitcode/Reader/BitcodeReader.cpp
llvm/lib/Bitcode/Reader/MetadataLoader.cpp
llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
llvm/lib/CodeGen/ExpandVectorPredication.cpp
llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
llvm/lib/CodeGen/GlobalISel/Utils.cpp
llvm/lib/CodeGen/MIRParser/MIParser.cpp
llvm/lib/CodeGen/MIRPrinter.cpp
llvm/lib/CodeGen/RegisterCoalescer.cpp
llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/lib/CodeGen/TargetLoweringBase.cpp
llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
llvm/lib/CodeGen/TargetPassConfig.cpp
llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp
llvm/lib/DebugInfo/PDB/Native/InfoStream.cpp
llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
llvm/lib/IR/ConstantFold.cpp
llvm/lib/IR/Function.cpp
llvm/lib/IR/InlineAsm.cpp
llvm/lib/IR/Value.cpp
llvm/lib/IR/Verifier.cpp
llvm/lib/MC/MCParser/AsmLexer.cpp
llvm/lib/MC/MCParser/MasmParser.cpp
llvm/lib/MC/MCStreamer.cpp
llvm/lib/MC/MCWin64EH.cpp
llvm/lib/Object/ELFObjectFile.cpp
llvm/lib/ObjectYAML/ELFYAML.cpp
llvm/lib/Option/OptTable.cpp
llvm/lib/ProfileData/InstrProf.cpp
llvm/lib/Support/APFloat.cpp
llvm/lib/Support/CommandLine.cpp
llvm/lib/Support/FoldingSet.cpp
llvm/lib/Support/FormattedStream.cpp
llvm/lib/Support/GlobPattern.cpp
llvm/lib/Support/GraphWriter.cpp
llvm/lib/Support/Host.cpp
llvm/lib/Support/Triple.cpp
llvm/lib/Support/VirtualFileSystem.cpp
llvm/lib/Support/YAMLParser.cpp
llvm/lib/Support/raw_ostream.cpp
llvm/lib/TableGen/TGLexer.cpp
llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp
llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
llvm/lib/Target/AArch64/AArch64FastISel.cpp
llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
llvm/lib/Target/AArch64/AArch64MachineScheduler.cpp
llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
llvm/lib/Target/ARC/ARCOptAddrMode.cpp
llvm/lib/Target/ARC/ARCRegisterInfo.cpp
llvm/lib/Target/ARM/ARMAsmPrinter.cpp
llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
llvm/lib/Target/ARM/ARMFastISel.cpp
llvm/lib/Target/ARM/ARMFrameLowering.cpp
llvm/lib/Target/ARM/ARMISelLowering.cpp
llvm/lib/Target/ARM/ARMInstructionSelector.cpp
llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
llvm/lib/Target/AVR/AVRRegisterInfo.cpp
llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
llvm/lib/Target/M68k/M68kISelLowering.cpp
llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
llvm/lib/Target/Mips/MipsExpandPseudo.cpp
llvm/lib/Target/Mips/MipsISelLowering.cpp
llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
llvm/lib/Target/PowerPC/PPCFastISel.cpp
llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp
llvm/lib/Target/Sparc/SparcInstrInfo.cpp
llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
llvm/lib/Target/VE/VEISelLowering.cpp
llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
llvm/lib/Target/X86/X86AsmPrinter.cpp
llvm/lib/Target/X86/X86DynAllocaExpander.cpp
llvm/lib/Target/X86/X86FastISel.cpp
llvm/lib/Target/X86/X86FrameLowering.cpp
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
llvm/lib/Target/X86/X86InstrInfo.cpp
llvm/lib/Target/X86/X86MCInstLower.cpp
llvm/lib/Target/X86/X86TargetTransformInfo.cpp
llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
llvm/lib/TextAPI/TextStubCommon.cpp
llvm/lib/Transforms/IPO/Attributor.cpp
llvm/lib/Transforms/IPO/AttributorAttributes.cpp
llvm/lib/Transforms/IPO/FunctionAttrs.cpp
llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
llvm/lib/Transforms/ObjCARC/PtrState.cpp
llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
llvm/lib/Transforms/Utils/BuildLibCalls.cpp
llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
llvm/tools/llvm-ar/llvm-ar.cpp
llvm/tools/llvm-config/llvm-config.cpp
llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp
llvm/tools/llvm-mc/Disassembler.cpp
llvm/tools/llvm-ml/Disassembler.cpp
llvm/tools/llvm-readobj/ELFDumper.cpp
llvm/tools/llvm-remark-size-diff/RemarkSizeDiff.cpp
llvm/tools/obj2yaml/macho2yaml.cpp
llvm/utils/TableGen/IntrinsicEmitter.cpp
Removed:
################################################################################
diff --git a/llvm/examples/BrainF/BrainF.cpp b/llvm/examples/BrainF/BrainF.cpp
index 11a752b1a5474..6075c85d06d11 100644
--- a/llvm/examples/BrainF/BrainF.cpp
+++ b/llvm/examples/BrainF/BrainF.cpp
@@ -335,7 +335,7 @@ void BrainF::readloop(PHINode *phi, BasicBlock *oldbb, BasicBlock *testbb,
switch(c) {
case '-':
direction = -1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case '+':
if (cursym == SYM_CHANGE) {
@@ -356,7 +356,7 @@ void BrainF::readloop(PHINode *phi, BasicBlock *oldbb, BasicBlock *testbb,
case '<':
direction = -1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case '>':
if (cursym == SYM_MOVE) {
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 1364e7ab9fb32..cf0995a9b0fe0 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -449,7 +449,7 @@ class TargetTransformInfoImplBase {
getCacheSize(TargetTransformInfo::CacheLevel Level) const {
switch (Level) {
case TargetTransformInfo::CacheLevel::L1D:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetTransformInfo::CacheLevel::L2D:
return llvm::Optional<unsigned>();
}
@@ -460,7 +460,7 @@ class TargetTransformInfoImplBase {
getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
switch (Level) {
case TargetTransformInfo::CacheLevel::L1D:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetTransformInfo::CacheLevel::L2D:
return llvm::Optional<unsigned>();
}
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 22ad98151dc9b..4bc3e6f686506 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -957,7 +957,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
// Check for NOOP conversions.
if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
return 0;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::BitCast:
// Bitcast between types that are legalized to the same type are free and
// assume int to/from ptr of the same size is also free.
@@ -972,7 +972,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
case Instruction::ZExt:
if (TLI->isZExtFree(SrcLT.second, DstLT.second))
return 0;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::SExt:
if (I && getTLI()->isExtFree(I))
return 0;
diff --git a/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h b/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
index f4b1980b9ede4..15f46cf46ba7e 100644
--- a/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
+++ b/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h
@@ -685,7 +685,7 @@ void CodeGenPassBuilder<Derived>::addPassesToHandleExceptions(
// pad is shared by multiple invokes and is also a target of a normal
// edge from elsewhere.
addPass(SjLjEHPreparePass());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
case ExceptionHandling::AIX:
diff --git a/llvm/include/llvm/Support/VirtualFileSystem.h b/llvm/include/llvm/Support/VirtualFileSystem.h
index 90ca3fb5e9ff0..447437f46ebd1 100644
--- a/llvm/include/llvm/Support/VirtualFileSystem.h
+++ b/llvm/include/llvm/Support/VirtualFileSystem.h
@@ -816,7 +816,7 @@ class RedirectingFileSystem : public vfs::FileSystem {
static bool classof(const Entry *E) {
switch (E->getKind()) {
case EK_DirectoryRemap:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case EK_File:
return true;
case EK_Directory:
diff --git a/llvm/include/llvm/Target/CodeGenCWrappers.h b/llvm/include/llvm/Target/CodeGenCWrappers.h
index a995463570535..6f589b80c9b2b 100644
--- a/llvm/include/llvm/Target/CodeGenCWrappers.h
+++ b/llvm/include/llvm/Target/CodeGenCWrappers.h
@@ -27,7 +27,7 @@ inline Optional<CodeModel::Model> unwrap(LLVMCodeModel Model, bool &JIT) {
switch (Model) {
case LLVMCodeModelJITDefault:
JIT = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LLVMCodeModelDefault:
return None;
case LLVMCodeModelTiny:
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 96a80fbf1047c..b9746ebbc75f4 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -387,7 +387,7 @@ static LinearExpression GetLinearExpression(
BOp, DT))
return Val;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Add: {
E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
Depth + 1, AC, DT);
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 95ad90b8c765f..1803a69bee741 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -2641,7 +2641,7 @@ static Constant *ConstantFoldScalarCall2(StringRef Name,
// undef - X -> { 0, false }
if (!C0 || !C1)
return Constant::getNullValue(Ty);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
// X + undef -> { -1, false }
@@ -2652,7 +2652,7 @@ static Constant *ConstantFoldScalarCall2(StringRef Name,
{Constant::getAllOnesValue(Ty->getStructElementType(0)),
Constant::getNullValue(Ty->getStructElementType(1))});
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
// undef * X -> { 0, false }
@@ -2944,7 +2944,7 @@ static Constant *ConstantFoldScalarCall3(StringRef Name,
// wrong result if C3 was -0.0.
return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::fma:
case Intrinsic::fmuladd: {
diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index a51e974003f62..6d20043759c8e 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -789,7 +789,7 @@ RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
case Instruction::Select:
if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul)
return isConditionalRdxPattern(Kind, I);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::FCmp:
case Instruction::ICmp:
case Instruction::Call:
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index a4f26c842dbc4..62d44004114ae 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -3027,7 +3027,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (!Known.isNonNegative())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_UGT:
@@ -3038,7 +3038,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (!Known.isNonNegative())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_ULT:
@@ -5869,7 +5869,7 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
// sat(X + MAX) -> MAX
if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
return Constant::getAllOnesValue(ReturnType);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::sadd_sat:
// sat(X + undef) -> -1
// sat(undef + X) -> -1
@@ -5889,7 +5889,7 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
// sat(0 - X) -> 0, sat(X - MAX) -> 0
if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
return Constant::getNullValue(ReturnType);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::ssub_sat:
// X - X -> 0, X - undef -> 0, undef - X -> 0
if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 288a99b257ed6..d9bae54b05be1 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -621,7 +621,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// load query, we can safely ignore it (scan past it).
if (isLoad)
continue;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
// Otherwise, there is a potential dependence. Return a clobber.
return MemDepResult::getClobber(Inst);
@@ -993,7 +993,7 @@ SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
Cache.insert(Entry, Val);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case 1:
// One new entry, Just insert the new value at the appropriate position.
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index a3965d073cb45..5bd110942597e 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6013,7 +6013,7 @@ const SCEV *ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
case ICmpInst::ICMP_UGT:
@@ -6066,7 +6066,7 @@ const SCEV *ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
case ICmpInst::ICMP_NE:
// x != 0 ? x+y : C+y -> x == 0 ? C+y : x+y
std::swap(TrueVal, FalseVal);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_EQ:
// x == 0 ? C+y : x+y -> umax(x, C)+y iff C u<= 1
if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
@@ -10958,7 +10958,7 @@ bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
case ICmpInst::ICMP_SGE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SLE:
// (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2.
if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2))
@@ -10968,7 +10968,7 @@ bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
case ICmpInst::ICMP_SGT:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SLT:
// (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2.
if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2))
@@ -10978,7 +10978,7 @@ bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
case ICmpInst::ICMP_UGE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_ULE:
// (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2.
if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2))
@@ -10988,7 +10988,7 @@ bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
case ICmpInst::ICMP_UGT:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_ULT:
// (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2.
if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2))
@@ -11515,7 +11515,7 @@ bool ScalarEvolution::isImpliedCondBalancedTypes(
if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
CtxI))
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_UGT:
@@ -11538,7 +11538,7 @@ bool ScalarEvolution::isImpliedCondBalancedTypes(
if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
LHS, V, getConstant(SharperMin), CtxI))
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_ULT:
@@ -11994,7 +11994,7 @@ static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
case ICmpInst::ICMP_SGE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SLE:
return
// min(A, ...) <= A
@@ -12004,7 +12004,7 @@ static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
case ICmpInst::ICMP_UGE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_ULE:
return
// min(A, ...) <= A
@@ -12191,7 +12191,7 @@ static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
switch (Pred) {
case ICmpInst::ICMP_SGE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SLE: {
// If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
@@ -12202,7 +12202,7 @@ static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
}
case ICmpInst::ICMP_UGE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_ULE: {
// If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
@@ -13503,7 +13503,7 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
return DoesNotDominateBlock;
// Fall through into SCEVNAryExpr handling.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case scAddExpr:
case scMulExpr:
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 8ebdb65e88dc6..ef20a75ef8800 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -514,7 +514,7 @@ static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T,
// on Linux.
//
// Fall through to disable all of them.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
TLI.setUnavailable(LibFunc_exp10);
TLI.setUnavailable(LibFunc_exp10f);
@@ -945,7 +945,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_strlen:
return NumParams == 1 && FTy.getParamType(0)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy(SizeTBits);
@@ -970,7 +970,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_strcat:
return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0) == FTy.getReturnType() &&
@@ -980,7 +980,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_strncat:
return (NumParams == 3 && FTy.getReturnType()->isPointerTy() &&
FTy.getParamType(0) == FTy.getReturnType() &&
@@ -992,7 +992,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_strcpy:
case LibFunc_stpcpy:
return (NumParams == 2 && FTy.getReturnType() == FTy.getParamType(0) &&
@@ -1004,7 +1004,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_strlcat:
case LibFunc_strlcpy:
return NumParams == 3 && FTy.getReturnType()->isIntegerTy(SizeTBits) &&
@@ -1017,7 +1017,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_strncpy:
case LibFunc_stpncpy:
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
@@ -1138,7 +1138,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_memcpy:
case LibFunc_mempcpy:
case LibFunc_memmove:
@@ -1151,7 +1151,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_memset:
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
FTy.getParamType(0)->isPointerTy() &&
@@ -1162,7 +1162,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
--NumParams;
if (!FTy.getParamType(NumParams)->isIntegerTy(SizeTBits))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_memccpy:
return (NumParams >= 2 && FTy.getParamType(1)->isPointerTy());
case LibFunc_memalign:
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index f6d0faf29ca81..65975d2274799 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1200,7 +1200,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
case Instruction::PtrToInt:
case Instruction::IntToPtr:
// Fall through and handle them the same as zext/trunc.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::ZExt:
case Instruction::Trunc: {
Type *SrcTy = I->getOperand(0)->getType();
@@ -2073,7 +2073,7 @@ static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
// power of two is not sufficient, and it has to be a constant.
if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::UDiv:
// Divisor must be a power of two.
// If OrZero is false, cannot guarantee induction variable is non-zero after
@@ -2085,7 +2085,7 @@ static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
case Instruction::AShr:
if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::LShr:
return OrZero || Q.IIQ.isExact(BO);
default:
@@ -3601,7 +3601,7 @@ static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
(!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::FAdd:
case Instruction::FRem:
return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
@@ -5137,7 +5137,7 @@ static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
return false;
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::CallBr:
case Instruction::Invoke: {
const auto *CB = cast<CallBase>(Op);
diff --git a/llvm/lib/BinaryFormat/Magic.cpp b/llvm/lib/BinaryFormat/Magic.cpp
index d45195fb95c5c..951668f68c0da 100644
--- a/llvm/lib/BinaryFormat/Magic.cpp
+++ b/llvm/lib/BinaryFormat/Magic.cpp
@@ -192,13 +192,13 @@ file_magic llvm::identify_magic(StringRef Magic) {
case 0x50: // mc68K
if (startswith(Magic, "\x50\xed\x55\xba"))
return file_magic::cuda_fatbinary;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 0x4c: // 80386 Windows
case 0xc4: // ARMNT Windows
if (Magic[1] == 0x01)
return file_magic::coff_object;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 0x90: // PA-RISC Windows
case 0x68: // mc68K Windows
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 884ea21b26113..321f2b0d64208 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -3445,7 +3445,7 @@ Error BitcodeReader::parseUseLists() {
break;
case bitc::USELIST_CODE_BB:
IsBB = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case bitc::USELIST_CODE_DEFAULT: {
unsigned RecordLength = Record.size();
if (RecordLength < 3)
diff --git a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
index 13d53a35084da..02d76f61695af 100644
--- a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
+++ b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp
@@ -552,7 +552,7 @@ class MetadataLoader::MetadataLoaderImpl {
case 0:
if (N >= 3 && Expr[N - 3] == dwarf::DW_OP_bit_piece)
Expr[N - 3] = dwarf::DW_OP_LLVM_fragment;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 1:
// Move DW_OP_deref to the end.
if (N && Expr[0] == dwarf::DW_OP_deref) {
@@ -564,7 +564,7 @@ class MetadataLoader::MetadataLoaderImpl {
*std::prev(End) = dwarf::DW_OP_deref;
}
NeedDeclareExpressionUpgrade = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 2: {
// Change DW_OP_plus to DW_OP_plus_uconst.
// Change DW_OP_minus to DW_OP_uconst, DW_OP_minus
@@ -613,7 +613,7 @@ class MetadataLoader::MetadataLoaderImpl {
SubExpr = SubExpr.slice(HistoricSize);
}
Expr = MutableArrayRef<uint64_t>(Buffer);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case 3:
// Up-to-date!
@@ -1285,7 +1285,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
}
case bitc::METADATA_DISTINCT_NODE:
IsDistinct = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case bitc::METADATA_NODE: {
SmallVector<Metadata *, 8> Elts;
Elts.reserve(Record.size());
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 32a10ad41d1fd..fdbac92414f4a 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -529,7 +529,7 @@ bool AsmPrinter::doInitialization(Module &M) {
switch (MAI->getExceptionHandlingType()) {
case ExceptionHandling::None:
// We may want to emit CFI for debug.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ExceptionHandling::SjLj:
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
@@ -553,7 +553,7 @@ bool AsmPrinter::doInitialization(Module &M) {
case ExceptionHandling::None:
if (!needsCFIForDebug())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ExceptionHandling::SjLj:
case ExceptionHandling::DwarfCFI:
ES = new DwarfCFIException(this);
@@ -2761,7 +2761,7 @@ const MCExpr *AsmPrinter::lowerConstant(const Constant *CV) {
// expression properly. This is important for differences between
// blockaddress labels. Since the two labels are in the same function, it
// is reasonable to treat their delta as a 32-bit value.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::BitCast:
return lowerConstant(CE->getOperand(0));
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index 88c82cbc958bd..57c7a0d2ba53b 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -480,7 +480,7 @@ bool AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
PrintAsmMemoryOperand(MI, OpNo, nullptr, O);
return false;
}
- LLVM_FALLTHROUGH; // GCC allows '%a' to behave like '%c' with immediates.
+ [[fallthrough]]; // GCC allows '%a' to behave like '%c' with immediates.
case 'c': // Substitute immediate value without immediate syntax
if (MO.isImm()) {
O << MO.getImm();
diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 701c0affdfa65..f34e92278ad22 100644
--- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -1620,7 +1620,7 @@ TypeIndex CodeViewDebug::lowerType(const DIType *Ty, const DIType *ClassTy) {
case dwarf::DW_TAG_pointer_type:
if (cast<DIDerivedType>(Ty)->getName() == "__vtbl_ptr_type")
return lowerTypeVFTableShape(cast<DIDerivedType>(Ty));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case dwarf::DW_TAG_reference_type:
case dwarf::DW_TAG_rvalue_reference_type:
return lowerTypePointer(cast<DIDerivedType>(Ty));
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index db4d42bf3ca47..a0029fb71ff18 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -293,7 +293,7 @@ static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
APInt::getSignedMinValue(EltBits));
case Intrinsic::vp_reduce_fmax:
Negative = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::vp_reduce_fmin: {
FastMathFlags Flags = VPI.getFastMathFlags();
const fltSemantics &Semantics = EltTy->getFltSemantics();
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
index 4f03af0fce82d..5abce2aa9464d 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -286,7 +286,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
LLT Ty = MRI.getType(MI.getOperand(1).getReg());
if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case TargetOpcode::G_ADD: {
computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
@@ -447,7 +447,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
if (DstTy.isVector())
break;
// Fall through and handle them the same as zext/trunc.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetOpcode::G_ASSERT_ZEXT:
case TargetOpcode::G_ZEXT:
case TargetOpcode::G_TRUNC: {
diff --git a/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
index 6271a4514c272..8cfb1b786c243 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
@@ -264,7 +264,7 @@ LegacyLegalizerInfo::findAction(const SizeAndActionsVec &Vec, const uint32_t Siz
// Special case for scalarization:
if (Vec == SizeAndActionsVec({{1, FewerElements}}))
return {1, FewerElements};
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NarrowScalar: {
// The following needs to be a loop, as for now, we do allow needing to
// go over "Unsupported" bit sizes before finding a legalizable bit size.
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index 6adb7ddb5b66d..5caffe59ca436 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -126,7 +126,7 @@ static bool mutationIsSane(const LegalizeRule &Rule,
case FewerElements:
if (!OldTy.isVector())
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MoreElements: {
// MoreElements can go from scalar to vector.
const ElementCount OldElts = OldTy.isVector() ?
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index e0bcc2dd6a4e8..e1ee9f8c3502b 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -321,7 +321,7 @@ Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
case TargetOpcode::G_ANYEXT:
if (!LookThroughAnyExt)
return None;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetOpcode::G_TRUNC:
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ZEXT:
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index e3d6b59c50774..6ddc8e960836b 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -2848,7 +2848,7 @@ bool MIParser::parseMachineOperand(const unsigned OpCode, const unsigned OpIdx,
if (const auto *Formatter = TII->getMIRFormatter()) {
return parseTargetImmMnemonic(OpCode, OpIdx, Dest, *Formatter);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default:
// FIXME: Parse the MCSymbol machine operand.
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 25823b1567f7d..0054e165f3f1e 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -880,7 +880,7 @@ void MIPrinter::print(const MachineInstr &MI, unsigned OpIdx,
MachineOperand::printSubRegIdx(OS, Op.getImm(), TRI);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MachineOperand::MO_Register:
case MachineOperand::MO_CImmediate:
case MachineOperand::MO_FPImmediate:
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index aac98cf924e10..3003c4911b417 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -2956,7 +2956,7 @@ void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) {
}
OtherV.Pruned = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default:
// This value number needs to go in the final joined live range.
@@ -3399,7 +3399,7 @@ void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
if (LI != nullptr)
dbgs() << "\t\t LHS = " << *LI << '\n';
});
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case CR_Erase: {
diff --git a/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp b/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
index a61a2b2728fa6..209c6d81f6020 100644
--- a/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
+++ b/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
@@ -147,7 +147,7 @@ ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[StageCycle];
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case InstrStage::Reserved:
// Reserved FUs can conflict only with required ones.
freeUnits &= ~RequiredScoreboard[StageCycle];
@@ -198,7 +198,7 @@ void ScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[cycle + i];
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case InstrStage::Reserved:
// Reserved FUs can conflict only with required ones.
freeUnits &= ~RequiredScoreboard[cycle + i];
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 8f2b1089129b7..4981dd2149184 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1962,7 +1962,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
Changed = true;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
// Only add if it isn't already in the list.
@@ -14989,7 +14989,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
case ISD::SETLT:
case ISD::SETLE:
std::swap(TrueOpnd, FalseOpnd);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOGT:
case ISD::SETUGT:
case ISD::SETOGE:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 73b8c8d2ea00b..89800009861fe 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -850,7 +850,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Custom:
isCustom = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetLowering::Legal:
Value = SDValue(Node, 0);
Chain = SDValue(Node, 1);
@@ -1317,11 +1317,11 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
return;
}
LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetLowering::Expand:
if (ExpandNode(Node))
return;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetLowering::LibCall:
ConvertNodeToLibcall(Node);
return;
@@ -2961,7 +2961,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Results.push_back(Tmp2);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SINT_TO_FP:
case ISD::STRICT_SINT_TO_FP:
if ((Tmp1 = ExpandLegalINT_TO_FP(Node, Tmp2))) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index ff26d0fe77264..dc087679fa631 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2973,7 +2973,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
switch (BoolType) {
case TargetLoweringBase::UndefinedBooleanContent:
OVF = DAG.getNode(ISD::AND, dl, OvfVT, DAG.getConstant(1, dl, OvfVT), OVF);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetLoweringBase::ZeroOrOneBooleanContent:
OVF = DAG.getZExtOrTrunc(OVF, dl, NVT);
Hi = DAG.getNode(N->getOpcode(), dl, NVT, Hi, OVF);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 175b5eaba5bee..4eafb8e9013fc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -492,7 +492,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
if (LowerOperationWrapper(Node, ResultVals))
break;
LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetLowering::Expand:
LLVM_DEBUG(dbgs() << "Expanding\n");
Expand(Node, ResultVals);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 6752164a5903b..853b8690c5837 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -3755,7 +3755,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
// If the target has custom/legal support for the scalar FP intrinsic ops
// (they are probably not destined to become libcalls), then widen those
// like any other binary ops.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::FADD:
case ISD::FMUL:
@@ -3858,7 +3858,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
// If the target has custom/legal support for the scalar FP intrinsic ops
// (they are probably not destined to become libcalls), then widen those
// like any other unary ops.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ABS:
case ISD::BITREVERSE:
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index cecc59deac520..6d45afe6fb74b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2381,34 +2381,34 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
default: break;
case ISD::SETEQ: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
OpVT);
case ISD::SETNE: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
R==APFloat::cmpLessThan, dl, VT,
OpVT);
case ISD::SETLT: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
OpVT);
case ISD::SETGT: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
VT, OpVT);
case ISD::SETLE: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
R==APFloat::cmpEqual, dl, VT,
OpVT);
case ISD::SETGE: if (R==APFloat::cmpUnordered)
return getUNDEF(VT);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
R==APFloat::cmpEqual, dl, VT, OpVT);
case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
@@ -3495,7 +3495,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.Zero.setBitsFrom(1);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SUB:
case ISD::SUBC: {
assert(Op.getResNo() == 0 &&
@@ -3523,7 +3523,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.Zero.setBitsFrom(1);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ADD:
case ISD::ADDC:
case ISD::ADDE: {
@@ -3738,7 +3738,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.Zero.setBitsFrom(1);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ATOMIC_CMP_SWAP:
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
@@ -3771,7 +3771,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
default:
if (Opcode < ISD::BUILTIN_OP_END)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
@@ -4983,7 +4983,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
case ISD::TRUNCATE:
if (C->isOpaque())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ZERO_EXTEND:
return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
C->isTargetOpcode(), C->isOpaque());
@@ -5842,7 +5842,7 @@ SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true))
if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())
return getUNDEF(VT);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::FADD:
case ISD::FMUL:
@@ -6053,12 +6053,12 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
const APInt &ShiftImm = N2C->getAPIntValue();
return getVScale(DL, VT, MulImm << ShiftImm);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SRA:
case ISD::SRL:
if (SDValue V = simplifyShift(N1, N2))
return V;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ROTL:
case ISD::ROTR:
assert(VT == N1.getValueType() &&
@@ -6348,7 +6348,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
// Handle undef ^ undef -> 0 special case. This is a common
// idiom (misuse).
return getConstant(0, DL, VT);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ADD:
case ISD::SUB:
case ISD::UDIV:
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index c0ffc73712001..5b0fa7bf3d73e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3346,7 +3346,7 @@ void SelectionDAGBuilder::visitSelect(const User &I) {
break;
case SPF_NABS:
Negate = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SPF_ABS:
IsUnaryAbs = true;
Opc = ISD::ABS;
@@ -7297,7 +7297,7 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
// The only reason why ebIgnore nodes still need to be chained is that
// they might depend on the current rounding mode, and therefore must
// not be moved across instruction that may change that mode.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case fp::ExceptionBehavior::ebMayTrap:
// These must not be moved across calls or instructions that may change
// floating-point exception masks.
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 8aefb5b363d5a..5bfb91a1271e5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -351,7 +351,7 @@ void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
break;
case ISD::SETO:
ShouldInvertCC = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETUO:
LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
(VT == MVT::f64) ? RTLIB::UO_F64 :
@@ -360,7 +360,7 @@ void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
case ISD::SETONE:
// SETONE = O && UNE
ShouldInvertCC = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETUEQ:
LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
(VT == MVT::f64) ? RTLIB::UO_F64 :
@@ -2583,7 +2583,7 @@ bool TargetLowering::SimplifyDemandedBits(
SDValue And1 = TLO.DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), One);
return TLO.CombineTo(Op, And1);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ADD:
case ISD::SUB: {
// Add, Sub, and Mul don't demand any bits in positions beyond that
@@ -2686,7 +2686,7 @@ bool TargetLowering::SimplifyDemandedBits(
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default:
if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
@@ -3321,7 +3321,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
Depth + 1, /*AssumeSingleUse*/ true))
return true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::OR:
case ISD::XOR:
@@ -9834,7 +9834,7 @@ bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT,
assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
"If SETUE is expanded, SETOEQ or SETUNE must be legal!");
NeedInvert = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETO:
assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
"If SETO is expanded, SETOEQ must be legal!");
@@ -9858,7 +9858,7 @@ bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT,
NeedInvert = ((unsigned)CCCode & 0x8U);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOEQ:
case ISD::SETOGT:
case ISD::SETOGE:
@@ -9879,7 +9879,7 @@ bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT,
break;
}
// Fallthrough if we are unsigned integer.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETLE:
case ISD::SETGT:
case ISD::SETGE:
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index f7c10008bf119..75ed8ba29a9a3 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1424,7 +1424,7 @@ void TargetLoweringBase::computeRegisterProperties(
}
if (IsLegalWiderType)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case TypeWidenVector:
@@ -1458,7 +1458,7 @@ void TargetLoweringBase::computeRegisterProperties(
break;
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TypeSplitVector:
case TypeScalarizeVector: {
diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 2badbe34ae6a7..b114f42fbb285 100644
--- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -127,7 +127,7 @@ void TargetLoweringObjectFileELF::Initialize(MCContext &Ctx,
if (Ctx.getAsmInfo()->getExceptionHandlingType() == ExceptionHandling::ARM)
break;
// Fallthrough if not using EHABI
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Triple::ppc:
case Triple::ppcle:
case Triple::x86:
diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp
index 0bd229f4fc682..ab8a872699ed2 100644
--- a/llvm/lib/CodeGen/TargetPassConfig.cpp
+++ b/llvm/lib/CodeGen/TargetPassConfig.cpp
@@ -960,7 +960,7 @@ void TargetPassConfig::addPassesToHandleExceptions() {
// pad is shared by multiple invokes and is also a target of a normal
// edge from elsewhere.
addPass(createSjLjEHPreparePass(TM));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
case ExceptionHandling::AIX:
diff --git a/llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp b/llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp
index dfdfc5857569a..015a4f9e8ac6b 100644
--- a/llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp
+++ b/llvm/lib/DWARFLinker/DWARFLinkerDeclContext.cpp
@@ -64,7 +64,7 @@ DeclContextTree::getChildDeclContext(DeclContext &Context, const DWARFDie &DIE,
Context.getTag() == dwarf::DW_TAG_compile_unit) &&
!dwarf::toUnsigned(DIE.find(dwarf::DW_AT_external), 0))
return PointerIntPair<DeclContext *, 1>(nullptr);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case dwarf::DW_TAG_member:
case dwarf::DW_TAG_namespace:
case dwarf::DW_TAG_structure_type:
diff --git a/llvm/lib/DebugInfo/PDB/Native/InfoStream.cpp b/llvm/lib/DebugInfo/PDB/Native/InfoStream.cpp
index 927a0ffee28c4..de16241bb21ac 100644
--- a/llvm/lib/DebugInfo/PDB/Native/InfoStream.cpp
+++ b/llvm/lib/DebugInfo/PDB/Native/InfoStream.cpp
@@ -63,7 +63,7 @@ Error InfoStream::reload() {
case uint32_t(PdbRaw_FeatureSig::VC110):
// No other flags for VC110 PDB.
Stop = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case uint32_t(PdbRaw_FeatureSig::VC140):
Features |= PdbFeatureContainsIdStream;
break;
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index c702584b7a33e..b5a64a70a89a4 100644
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -2027,7 +2027,7 @@ void RuntimeDyldELF::processX86_64TLSRelocation(
case ELF::R_X86_64_REX_GOTPCRELX:
case ELF::R_X86_64_GOTPCRELX:
IsGOTPCRel = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ELF::R_X86_64_PLT32:
IsSmallCodeModel = true;
break;
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
index f2ee1b06d4943..ddf1c3724a0a6 100644
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -118,7 +118,7 @@ class RuntimeDyldMachOAArch64
(void)p;
assert((*p & 0x3B000000) == 0x39000000 &&
"Only expected load / store instructions.");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case MachO::ARM64_RELOC_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store
@@ -222,7 +222,7 @@ class RuntimeDyldMachOAArch64
assert((*p & 0x3B000000) == 0x39000000 &&
"Only expected load / store instructions.");
(void)p;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case MachO::ARM64_RELOC_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 736976d406438..d504a16738500 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -2293,7 +2293,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoop(
case OMPScheduleType::BaseRuntimeSimd:
assert(!ChunkSize &&
"schedule type does not support user-defined chunk sizes");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OMPScheduleType::BaseDynamicChunked:
case OMPScheduleType::BaseGuidedChunked:
case OMPScheduleType::BaseGuidedIterativeChunked:
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index 98adff107cecb..f0c41b3232525 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -903,7 +903,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
// Handle undef ^ undef -> 0 special case. This is a common
// idiom (misuse).
return Constant::getNullValue(C1->getType());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Add:
case Instruction::Sub:
return UndefValue::get(C1->getType());
@@ -979,7 +979,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
// -0.0 - undef --> undef (consistent with "fneg undef")
if (match(C1, m_NegZeroFP()) && isa<UndefValue>(C2))
return C2;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::FAdd:
case Instruction::FMul:
case Instruction::FDiv:
@@ -1513,7 +1513,7 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0))
if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2))
return areGlobalsPotentiallyEqual(GV, GV2);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::ZExt:
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
index 85f54c76cba09..cbf8ecd5b5984 100644
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -1186,13 +1186,13 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
case IIT_EMPTYSTRUCT:
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
return;
- case IIT_STRUCT9: ++StructElts; LLVM_FALLTHROUGH;
- case IIT_STRUCT8: ++StructElts; LLVM_FALLTHROUGH;
- case IIT_STRUCT7: ++StructElts; LLVM_FALLTHROUGH;
- case IIT_STRUCT6: ++StructElts; LLVM_FALLTHROUGH;
- case IIT_STRUCT5: ++StructElts; LLVM_FALLTHROUGH;
- case IIT_STRUCT4: ++StructElts; LLVM_FALLTHROUGH;
- case IIT_STRUCT3: ++StructElts; LLVM_FALLTHROUGH;
+ case IIT_STRUCT9: ++StructElts; [[fallthrough]];
+ case IIT_STRUCT8: ++StructElts; [[fallthrough]];
+ case IIT_STRUCT7: ++StructElts; [[fallthrough]];
+ case IIT_STRUCT6: ++StructElts; [[fallthrough]];
+ case IIT_STRUCT5: ++StructElts; [[fallthrough]];
+ case IIT_STRUCT4: ++StructElts; [[fallthrough]];
+ case IIT_STRUCT3: ++StructElts; [[fallthrough]];
case IIT_STRUCT2: {
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct,StructElts));
diff --git a/llvm/lib/IR/InlineAsm.cpp b/llvm/lib/IR/InlineAsm.cpp
index 088fcfdec742a..ef3e40d8aaed6 100644
--- a/llvm/lib/IR/InlineAsm.cpp
+++ b/llvm/lib/IR/InlineAsm.cpp
@@ -282,7 +282,7 @@ Error InlineAsm::verify(FunctionType *Ty, StringRef ConstStr) {
break;
}
++NumIndirect;
- LLVM_FALLTHROUGH; // We fall through for Indirect Outputs.
+ [[fallthrough]]; // We fall through for Indirect Outputs.
case InlineAsm::isInput:
if (NumClobbers)
return makeStringError("input constraint occurs after clobber "
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index c70700491f4ed..13600e337f5f4 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -636,7 +636,7 @@ static const Value *stripPointerCastsAndOffsets(
case PSK_InBoundsConstantIndices:
if (!GEP->hasAllConstantIndices())
return V;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PSK_InBounds:
if (!GEP->isInBounds())
return V;
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index e3ea256af16de..24e013c562144 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -2494,7 +2494,7 @@ void Verifier::visitFunction(const Function &F) {
case CallingConv::SPIR_KERNEL:
Check(F.getReturnType()->isVoidTy(),
"Calling convention requires void return type", &F);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CallingConv::AMDGPU_VS:
case CallingConv::AMDGPU_HS:
case CallingConv::AMDGPU_GS:
@@ -2523,7 +2523,7 @@ void Verifier::visitFunction(const Function &F) {
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CallingConv::Fast:
case CallingConv::Cold:
case CallingConv::Intel_OCL_BI:
diff --git a/llvm/lib/MC/MCParser/AsmLexer.cpp b/llvm/lib/MC/MCParser/AsmLexer.cpp
index d03fb9c91efe2..73f73c5d58838 100644
--- a/llvm/lib/MC/MCParser/AsmLexer.cpp
+++ b/llvm/lib/MC/MCParser/AsmLexer.cpp
@@ -339,7 +339,7 @@ AsmToken AsmLexer::LexDigit() {
if (!FirstNonDecimal) {
FirstNonDecimal = CurPtr;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case '9':
case '8':
case '7':
diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp
index af80d83272100..76533b2917cfb 100644
--- a/llvm/lib/MC/MCParser/MasmParser.cpp
+++ b/llvm/lib/MC/MCParser/MasmParser.cpp
@@ -2576,7 +2576,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DK_SBYTE:
case DK_DB:
Lex();
@@ -2587,7 +2587,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DK_SWORD:
case DK_DW:
Lex();
@@ -2598,7 +2598,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DK_SDWORD:
case DK_DD:
Lex();
@@ -2609,7 +2609,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DK_DF:
Lex();
return parseDirectiveNamedValue(nextVal, 6, IDVal, IDLoc);
@@ -2619,7 +2619,7 @@ bool MasmParser::parseStatement(ParseStatementInfo &Info,
// Size directive; part of an instruction.
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DK_SQWORD:
case DK_DQ:
Lex();
diff --git a/llvm/lib/MC/MCStreamer.cpp b/llvm/lib/MC/MCStreamer.cpp
index a229d282dabe1..e93787c09c7e4 100644
--- a/llvm/lib/MC/MCStreamer.cpp
+++ b/llvm/lib/MC/MCStreamer.cpp
@@ -1286,7 +1286,7 @@ static VersionTuple getMachoBuildVersionSupportedOS(const Triple &Target) {
// Mac Catalyst always uses the build version load command.
if (Target.isMacCatalystEnvironment())
return VersionTuple();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Triple::TvOS:
return VersionTuple(12);
case Triple::WatchOS:
diff --git a/llvm/lib/MC/MCWin64EH.cpp b/llvm/lib/MC/MCWin64EH.cpp
index e77cc70352d56..8f53ab1534cb7 100644
--- a/llvm/lib/MC/MCWin64EH.cpp
+++ b/llvm/lib/MC/MCWin64EH.cpp
@@ -1779,7 +1779,7 @@ static bool tryARMPackedUnwind(MCStreamer &streamer, WinEH::FrameInfo *info,
Step = 2;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Win64EH::UOP_WideSaveRegMask:
if (Step != 1 && Step != 2)
return false;
@@ -2043,7 +2043,7 @@ static bool tryARMPackedUnwind(MCStreamer &streamer, WinEH::FrameInfo *info,
case Win64EH::UOP_WideEndNop:
GotReturn = true;
Ret = (Inst.Operation == Win64EH::UOP_EndNop) ? 1 : 2;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Win64EH::UOP_End:
if (Step != 6 && Step != 7 && Step != 8 && Step != 9 && Step != 10)
return false;
diff --git a/llvm/lib/Object/ELFObjectFile.cpp b/llvm/lib/Object/ELFObjectFile.cpp
index 1f342e55e77fe..359fbe81989cc 100644
--- a/llvm/lib/Object/ELFObjectFile.cpp
+++ b/llvm/lib/Object/ELFObjectFile.cpp
@@ -322,7 +322,7 @@ SubtargetFeatures ELFObjectFileBase::getRISCVFeatures() const {
break;
case 'd':
Features.AddFeature("f"); // D-ext will imply F-ext.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'e':
case 'm':
case 'a':
diff --git a/llvm/lib/ObjectYAML/ELFYAML.cpp b/llvm/lib/ObjectYAML/ELFYAML.cpp
index 4308276c55a15..82cb8cbb7c06d 100644
--- a/llvm/lib/ObjectYAML/ELFYAML.cpp
+++ b/llvm/lib/ObjectYAML/ELFYAML.cpp
@@ -595,7 +595,7 @@ void ScalarBitSetTraits<ELFYAML::ELF_EF>::bitset(IO &IO,
switch (Object->Header.ABIVersion) {
default:
// ELFOSABI_AMDGPU_PAL, ELFOSABI_AMDGPU_MESA3D support *_V3 flags.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
BCase(EF_AMDGPU_FEATURE_XNACK_V3);
BCase(EF_AMDGPU_FEATURE_SRAMECC_V3);
diff --git a/llvm/lib/Option/OptTable.cpp b/llvm/lib/Option/OptTable.cpp
index c93b7ad7f5fa8..ef4873eb7f9c4 100644
--- a/llvm/lib/Option/OptTable.cpp
+++ b/llvm/lib/Option/OptTable.cpp
@@ -565,7 +565,7 @@ static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) {
case Option::SeparateClass: case Option::JoinedOrSeparateClass:
case Option::RemainingArgsClass: case Option::RemainingArgsJoinedClass:
Name += ' ';
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Option::JoinedClass: case Option::CommaJoinedClass:
case Option::JoinedAndSeparateClass:
if (const char *MetaVarName = Opts.getOptionMetaVar(Id))
diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp
index f8d7c4d36481d..ee7bdda7e75e4 100644
--- a/llvm/lib/ProfileData/InstrProf.cpp
+++ b/llvm/lib/ProfileData/InstrProf.cpp
@@ -1349,7 +1349,7 @@ Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) {
"if not add a case statement to fall through to the latest version.");
case 8ull:
H.MemProfOffset = read(Buffer, offsetOf(&Header::MemProfOffset));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default: // Version7 (when the backwards compatible header was introduced).
H.HashType = read(Buffer, offsetOf(&Header::HashType));
H.HashOffset = read(Buffer, offsetOf(&Header::HashOffset));
diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp
index 2ae28fe066cd7..6f888e314f13a 100644
--- a/llvm/lib/Support/APFloat.cpp
+++ b/llvm/lib/Support/APFloat.cpp
@@ -1485,7 +1485,7 @@ IEEEFloat::opStatus IEEEFloat::addOrSubtractSpecials(const IEEEFloat &rhs,
case PackCategoriesIntoKey(fcNormal, fcNaN):
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):
@@ -1610,7 +1610,7 @@ IEEEFloat::opStatus IEEEFloat::multiplySpecials(const IEEEFloat &rhs) {
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
sign = false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):
@@ -1654,7 +1654,7 @@ IEEEFloat::opStatus IEEEFloat::divideSpecials(const IEEEFloat &rhs) {
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
sign = false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):
@@ -1699,7 +1699,7 @@ IEEEFloat::opStatus IEEEFloat::modSpecials(const IEEEFloat &rhs) {
case PackCategoriesIntoKey(fcNormal, fcNaN):
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):
@@ -1737,7 +1737,7 @@ IEEEFloat::opStatus IEEEFloat::remainderSpecials(const IEEEFloat &rhs) {
case PackCategoriesIntoKey(fcNormal, fcNaN):
case PackCategoriesIntoKey(fcInfinity, fcNaN):
assign(rhs);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PackCategoriesIntoKey(fcNaN, fcZero):
case PackCategoriesIntoKey(fcNaN, fcNormal):
case PackCategoriesIntoKey(fcNaN, fcInfinity):
diff --git a/llvm/lib/Support/CommandLine.cpp b/llvm/lib/Support/CommandLine.cpp
index b86be5ea4219e..b8375f64be813 100644
--- a/llvm/lib/Support/CommandLine.cpp
+++ b/llvm/lib/Support/CommandLine.cpp
@@ -1679,7 +1679,7 @@ bool CommandLineParser::ParseCommandLineOptions(int argc,
switch (PositionalOpts[i]->getNumOccurrencesFlag()) {
case cl::Optional:
Done = true; // Optional arguments want _at most_ one value
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case cl::ZeroOrMore: // Zero or more will take all they can get...
case cl::OneOrMore: // One or more will take all they can get...
ProvidePositionalOption(PositionalOpts[i],
@@ -1733,7 +1733,7 @@ bool CommandLineParser::ParseCommandLineOptions(int argc,
Opt.second->error("must be specified at least once!");
ErrorParsing = true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
break;
}
diff --git a/llvm/lib/Support/FoldingSet.cpp b/llvm/lib/Support/FoldingSet.cpp
index 178855289fe8a..443a06ef7da83 100644
--- a/llvm/lib/Support/FoldingSet.cpp
+++ b/llvm/lib/Support/FoldingSet.cpp
@@ -89,8 +89,8 @@ void FoldingSetNodeID::AddString(StringRef String) {
// Pos will have overshot size by 4 - #bytes left over.
// No need to take endianness into account here - this is always executed.
switch (Pos - Size) {
- case 1: V = (V << 8) | (unsigned char)String[Size - 3]; LLVM_FALLTHROUGH;
- case 2: V = (V << 8) | (unsigned char)String[Size - 2]; LLVM_FALLTHROUGH;
+ case 1: V = (V << 8) | (unsigned char)String[Size - 3]; [[fallthrough]];
+ case 2: V = (V << 8) | (unsigned char)String[Size - 2]; [[fallthrough]];
case 3: V = (V << 8) | (unsigned char)String[Size - 1]; break;
default: return; // Nothing left.
}
diff --git a/llvm/lib/Support/FormattedStream.cpp b/llvm/lib/Support/FormattedStream.cpp
index 5716afc187e48..c0d2843509957 100644
--- a/llvm/lib/Support/FormattedStream.cpp
+++ b/llvm/lib/Support/FormattedStream.cpp
@@ -39,7 +39,7 @@ void formatted_raw_ostream::UpdatePosition(const char *Ptr, size_t Size) {
switch (CP[0]) {
case '\n':
Line += 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case '\r':
Column = 0;
break;
diff --git a/llvm/lib/Support/GlobPattern.cpp b/llvm/lib/Support/GlobPattern.cpp
index 8dae6941ec770..c02d47d1f5502 100644
--- a/llvm/lib/Support/GlobPattern.cpp
+++ b/llvm/lib/Support/GlobPattern.cpp
@@ -96,7 +96,7 @@ static Expected<BitVector> scan(StringRef &S, StringRef Original) {
// Eat this character and fall through below to treat it like a non-meta
// character.
S = S.substr(1);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
BitVector BV(256, false);
BV[(uint8_t)S[0]] = true;
diff --git a/llvm/lib/Support/GraphWriter.cpp b/llvm/lib/Support/GraphWriter.cpp
index e875e18a7e92e..c28d4a001fc72 100644
--- a/llvm/lib/Support/GraphWriter.cpp
+++ b/llvm/lib/Support/GraphWriter.cpp
@@ -74,7 +74,7 @@ std::string llvm::DOT::EscapeString(const std::string &Label) {
Str.erase(Str.begin()+i); continue;
default: break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case '{': case '}':
case '<': case '>':
case '|': case '"':
diff --git a/llvm/lib/Support/Host.cpp b/llvm/lib/Support/Host.cpp
index c97f273b07398..40d85924de41c 100644
--- a/llvm/lib/Support/Host.cpp
+++ b/llvm/lib/Support/Host.cpp
@@ -291,7 +291,7 @@ StringRef sys::detail::getHostCPUNameForARM(StringRef ProcCpuinfoContent) {
switch (Exynos) {
default:
// Default by falling through to Exynos M3.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 0x1002:
return "exynos-m3";
case 0x1003:
diff --git a/llvm/lib/Support/Triple.cpp b/llvm/lib/Support/Triple.cpp
index 6696d158b2c1a..3766f265223a2 100644
--- a/llvm/lib/Support/Triple.cpp
+++ b/llvm/lib/Support/Triple.cpp
@@ -1926,7 +1926,7 @@ VersionTuple Triple::getCanonicalVersionForOS(OSType OSKind,
// macOS 10.16 is canonicalized to macOS 11.
if (Version == VersionTuple(10, 16))
return VersionTuple(11, 0);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return Version;
}
diff --git a/llvm/lib/Support/VirtualFileSystem.cpp b/llvm/lib/Support/VirtualFileSystem.cpp
index 97d63fff1069c..14742f727411c 100644
--- a/llvm/lib/Support/VirtualFileSystem.cpp
+++ b/llvm/lib/Support/VirtualFileSystem.cpp
@@ -1246,7 +1246,7 @@ class llvm::vfs::RedirectingFSDirIterImpl
sys::fs::file_type Type = sys::fs::file_type::type_unknown;
switch ((*Current)->getKind()) {
case RedirectingFileSystem::EK_Directory:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case RedirectingFileSystem::EK_DirectoryRemap:
Type = sys::fs::file_type::directory_file;
break;
diff --git a/llvm/lib/Support/YAMLParser.cpp b/llvm/lib/Support/YAMLParser.cpp
index 578ce228079b2..e9619f578b8b7 100644
--- a/llvm/lib/Support/YAMLParser.cpp
+++ b/llvm/lib/Support/YAMLParser.cpp
@@ -778,7 +778,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'O':
if (S[1] == 'N') // ON
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'o':
if (S[1] == 'n') //[Oo]n
return true;
@@ -786,7 +786,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'N':
if (S[1] == 'O') // NO
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'n':
if (S[1] == 'o') //[Nn]o
return false;
@@ -799,7 +799,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'O':
if (S.drop_front() == "FF") // OFF
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'o':
if (S.drop_front() == "ff") //[Oo]ff
return false;
@@ -807,7 +807,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'Y':
if (S.drop_front() == "ES") // YES
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'y':
if (S.drop_front() == "es") //[Yy]es
return true;
@@ -820,7 +820,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'T':
if (S.drop_front() == "RUE") // TRUE
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 't':
if (S.drop_front() == "rue") //[Tt]rue
return true;
@@ -833,7 +833,7 @@ llvm::Optional<bool> yaml::parseBool(StringRef S) {
case 'F':
if (S.drop_front() == "ALSE") // FALSE
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'f':
if (S.drop_front() == "alse") //[Ff]alse
return false;
@@ -2285,7 +2285,7 @@ void MappingNode::increment() {
break;
default:
setError("Unexpected token. Expected Key or Block End", T);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Token::TK_Error:
IsAtEnd = true;
CurrentEntry = nullptr;
@@ -2298,7 +2298,7 @@ void MappingNode::increment() {
return increment();
case Token::TK_FlowMappingEnd:
getNext();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Token::TK_Error:
// Set this to end iterator.
IsAtEnd = true;
@@ -2341,7 +2341,7 @@ void SequenceNode::increment() {
default:
setError( "Unexpected token. Expected Block Entry or Block End."
, T);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Token::TK_Error:
IsAtEnd = true;
CurrentEntry = nullptr;
@@ -2370,7 +2370,7 @@ void SequenceNode::increment() {
return increment();
case Token::TK_FlowSequenceEnd:
getNext();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Token::TK_Error:
// Set this to end iterator.
IsAtEnd = true;
diff --git a/llvm/lib/Support/raw_ostream.cpp b/llvm/lib/Support/raw_ostream.cpp
index 651949ad5765b..71d0b593b4778 100644
--- a/llvm/lib/Support/raw_ostream.cpp
+++ b/llvm/lib/Support/raw_ostream.cpp
@@ -285,10 +285,10 @@ void raw_ostream::copy_to_buffer(const char *Ptr, size_t Size) {
// Handle short strings specially, memcpy isn't very good at very short
// strings.
switch (Size) {
- case 4: OutBufCur[3] = Ptr[3]; LLVM_FALLTHROUGH;
- case 3: OutBufCur[2] = Ptr[2]; LLVM_FALLTHROUGH;
- case 2: OutBufCur[1] = Ptr[1]; LLVM_FALLTHROUGH;
- case 1: OutBufCur[0] = Ptr[0]; LLVM_FALLTHROUGH;
+ case 4: OutBufCur[3] = Ptr[3]; [[fallthrough]];
+ case 3: OutBufCur[2] = Ptr[2]; [[fallthrough]];
+ case 2: OutBufCur[1] = Ptr[1]; [[fallthrough]];
+ case 1: OutBufCur[0] = Ptr[0]; [[fallthrough]];
case 0: break;
default:
memcpy(OutBufCur, Ptr, Size);
diff --git a/llvm/lib/TableGen/TGLexer.cpp b/llvm/lib/TableGen/TGLexer.cpp
index 2a4ee4473b56c..a33ed81dcf29e 100644
--- a/llvm/lib/TableGen/TGLexer.cpp
+++ b/llvm/lib/TableGen/TGLexer.cpp
@@ -239,7 +239,7 @@ tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
case '0': case '1':
if (NextChar == 'b')
return LexNumber();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case '2': case '3': case '4': case '5':
case '6': case '7': case '8': case '9':
case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
@@ -306,7 +306,7 @@ tgtok::TokKind TGLexer::LexString() {
case '\0':
if (CurPtr == CurBuf.end())
return ReturnError(StrStart, "End of file in string literal");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return ReturnError(CurPtr, "invalid escape in string literal");
}
diff --git a/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp b/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp
index ff4a4dfc1b95d..a307ba14dc8cd 100644
--- a/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp
@@ -162,7 +162,7 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
case AArch64::SUBWrs:
case AArch64::SUBWrx:
IsFlagSetting = false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::ADDSWri:
case AArch64::ADDSWrr:
case AArch64::ADDSWrs:
@@ -218,7 +218,7 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
case AArch64::SUBXrs:
case AArch64::SUBXrx:
IsFlagSetting = false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::ADDSXri:
case AArch64::ADDSXrr:
case AArch64::ADDSXrs:
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index 343f888b7552e..e540434ae88c8 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -333,7 +333,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
++NumImmRangeRejs;
return nullptr;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::SUBSWrr:
case AArch64::SUBSXrr:
case AArch64::ADDSWrr:
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 910f8cdede753..0922a1926c605 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -461,7 +461,7 @@ bool AArch64ExpandPseudo::expand_DestructiveOp(
UseRev = true;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::DestructiveBinary:
case AArch64::DestructiveBinaryImm:
std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 2, 3);
@@ -1086,7 +1086,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
return true;
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::MOVaddr:
case AArch64::MOVaddrJT:
case AArch64::MOVaddrCP:
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 49fffa01a974d..f51b3534f11a4 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -2098,7 +2098,7 @@ bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type.");
- case MVT::i1: VTIsi1 = true; LLVM_FALLTHROUGH;
+ case MVT::i1: VTIsi1 = true; [[fallthrough]];
case MVT::i8: Opc = OpcTable[Idx][0]; break;
case MVT::i16: Opc = OpcTable[Idx][1]; break;
case MVT::i32: Opc = OpcTable[Idx][2]; break;
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index c1230301cdf3a..611748ad3b701 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -993,7 +993,7 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
llvm_unreachable("No SEH Opcode for this instruction");
case AArch64::LDPDpost:
Imm = -Imm;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::STPDpre: {
unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
@@ -1006,7 +1006,7 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
}
case AArch64::LDPXpost:
Imm = -Imm;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::STPXpre: {
Register Reg0 = MBBI->getOperand(1).getReg();
Register Reg1 = MBBI->getOperand(2).getReg();
@@ -1024,7 +1024,7 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
}
case AArch64::LDRDpost:
Imm = -Imm;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::STRDpre: {
unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
@@ -1035,7 +1035,7 @@ static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
}
case AArch64::LDRXpost:
Imm = -Imm;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::STRXpre: {
unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X))
@@ -1452,7 +1452,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
.addImm(Subtarget.isTargetILP32() ? 32 : 0);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SwiftAsyncFramePointerMode::Always:
// ORR x29, x29, #0x1000_0000_0000_0000
@@ -2025,7 +2025,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// Avoid the reload as it is GOT relative, and instead fall back to the
// hardcoded value below. This allows a mismatch between the OS and
 // application without immediately terminating on the difference.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SwiftAsyncFramePointerMode::Always:
// We need to reset FP to its untagged state on return. Bit 60 is
// currently used to show the presence of an extended frame.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 00621b84d2f23..6e358c2193e82 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -3565,7 +3565,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
return;
if (tryBitfieldInsertInZeroOp(Node))
return;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ROTR:
case ISD::SHL:
if (tryShiftAmountMod(Node))
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index cb1adc57696c2..d3bca4f61e2f7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2531,7 +2531,7 @@ MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
AArch64::LR, /*isDef*/ true,
/*isImp*/ true, /*isKill*/ false, /*isDead*/ true,
/*isUndef*/ false, /*isEarlyClobber*/ true));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
return emitPatchPoint(MI, BB);
@@ -2821,7 +2821,7 @@ static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
break;
case ISD::SETUO:
Invert = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETO:
CondCode = AArch64CC::MI;
CondCode2 = AArch64CC::GE;
@@ -6247,9 +6247,9 @@ SDValue AArch64TargetLowering::LowerCallResult(
case CCValAssign::AExtUpper:
Val = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Val,
DAG.getConstant(32, DL, VA.getLocVT()));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::AExt:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::ZExt:
Val = DAG.getZExtOrTrunc(Val, DL, VA.getValVT());
break;
@@ -12073,7 +12073,7 @@ static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
if (!NoNans)
return SDValue();
// If we ignore NaNs then we can use to the LS implementation.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64CC::LS:
if (IsZero)
return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
@@ -12082,7 +12082,7 @@ static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
if (!NoNans)
return SDValue();
// If we ignore NaNs then we can use to the MI implementation.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64CC::MI:
if (IsZero)
return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
@@ -12727,7 +12727,7 @@ bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
// trunc(sext ty1 to ty2) to ty1.
if (Instr->getType() == Ext->getOperand(0)->getType())
continue;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return false;
}
@@ -12832,14 +12832,14 @@ bool AArch64TargetLowering::shouldSinkOperands(
Ops.push_back(&II->getOperandUse(1));
return true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::fma:
if (isa<VectorType>(I->getType()) &&
cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
!Subtarget->hasFullFP16())
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::aarch64_neon_sqdmull:
case Intrinsic::aarch64_neon_sqdmulh:
case Intrinsic::aarch64_neon_sqrdmulh:
@@ -21335,12 +21335,12 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
Val = DAG.getNode(ExtendOpc, DL, MVT::nxv8i16, Val);
if (VT.getVectorElementType() == MVT::i16)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::nxv8i16:
Val = DAG.getNode(ExtendOpc, DL, MVT::nxv4i32, Val);
if (VT.getVectorElementType() == MVT::i32)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::nxv4i32:
Val = DAG.getNode(ExtendOpc, DL, MVT::nxv2i64, Val);
assert(VT.getVectorElementType() == MVT::i64 && "Unexpected element type!");
@@ -21369,13 +21369,13 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv4i32, Val, Val);
if (VT.getVectorElementType() == MVT::i32)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::nxv4i32:
Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv8i16, Val);
Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv8i16, Val, Val);
if (VT.getVectorElementType() == MVT::i16)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::nxv8i16:
Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i8, Val);
Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv16i8, Val, Val);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 1bcca7761c645..bd135288f42e9 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -549,7 +549,7 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
return 0;
// fall-through to ADDXri and ADDWri.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::ADDXri:
case AArch64::ADDWri:
// add x, 1 -> csinc.
@@ -577,7 +577,7 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
return 0;
// fall-through to SUBXrr and SUBWrr.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::SUBXrr:
case AArch64::SUBWrr: {
// neg x -> csneg, represented as sub dst, xzr, src.
@@ -1576,7 +1576,7 @@ static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
case AArch64CC::HI: // Z clear and C set
case AArch64CC::LS: // Z set or C clear
UsedFlags.Z = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64CC::HS: // C set
case AArch64CC::LO: // C clear
UsedFlags.C = true;
@@ -1595,7 +1595,7 @@ static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
case AArch64CC::GT: // Z clear, N and V the same
 case AArch64CC::LE: // Z set, N and V differ
UsedFlags.Z = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64CC::GE: // N and V the same
 case AArch64CC::LT: // N and V differ
UsedFlags.N = true;
@@ -8021,7 +8021,7 @@ Optional<RegImmPair> AArch64InstrInfo::isAddImmediate(const MachineInstr &MI,
case AArch64::SUBSWri:
case AArch64::SUBSXri:
Sign *= -1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::ADDSWri:
case AArch64::ADDSXri:
case AArch64::ADDWri:
diff --git a/llvm/lib/Target/AArch64/AArch64MachineScheduler.cpp b/llvm/lib/Target/AArch64/AArch64MachineScheduler.cpp
index 79866c9b0a053..4d8fd1495dce6 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineScheduler.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MachineScheduler.cpp
@@ -24,7 +24,7 @@ static bool needReorderStoreMI(const MachineInstr *MI) {
case AArch64::STRQui:
if (!MI->getMF()->getSubtarget<AArch64Subtarget>().isStoreAddressAscend())
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::STPQi:
return AArch64InstrInfo::getLdStOffsetOp(*MI).isImm();
}
diff --git a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
index 019220e3a5272..631ff0f1dc6f0 100644
--- a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -176,7 +176,7 @@ bool AArch64RedundantCopyElimination::knownRegValInBlock(
case AArch64::ADDSWri:
case AArch64::ADDSXri:
IsCMN = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// CMP is an alias for SUBS with a dead destination register.
case AArch64::SUBSWri:
case AArch64::SUBSXri: {
diff --git a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
index 87be7bb6d113e..841275cfe82f6 100644
--- a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
@@ -556,7 +556,7 @@ bool AArch64SpeculationHardening::expandSpeculationSafeValue(
break;
case AArch64::SpeculationSafeValueW:
Is64Bit = false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::SpeculationSafeValueX:
// Just remove the SpeculationSafe pseudo's if control flow
// miss-speculation isn't happening because we're already inserting barriers
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index e6e6ad0318e6d..e55a7593fc8a9 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -2017,7 +2017,7 @@ InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
TargetTransformInfo::OP_None);
return Cost;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::UDIV: {
if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
auto VT = TLI->getValueType(DL, Ty);
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index ade23f6435384..a206d10caf660 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -2313,7 +2313,7 @@ void AArch64Operand::print(raw_ostream &OS) const {
OS << "<register " << getReg() << ">";
if (!getShiftExtendAmount() && !hasShiftExtendAmount())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case k_ShiftExtend:
OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
<< getShiftExtendAmount();
@@ -4745,7 +4745,7 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
if (RI->isSubRegisterEq(Rn, Rt2))
return Error(Loc[1], "unpredictable LDP instruction, writeback base "
"is also a destination");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AArch64::LDPDi:
case AArch64::LDPQi:
diff --git a/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index 2f20232e452db..d7f4d4e92a701 100644
--- a/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -908,7 +908,7 @@ DecodeThreeAddrSRegInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
// if shift == '11' then ReservedValue()
if (shiftHi == 0x3)
return Fail;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::ANDWrs:
case AArch64::ANDSWrs:
case AArch64::BICWrs:
@@ -932,7 +932,7 @@ DecodeThreeAddrSRegInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
// if shift == '11' then ReservedValue()
if (shiftHi == 0x3)
return Fail;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::ANDXrs:
case AArch64::ANDSXrs:
case AArch64::BICXrs:
@@ -1260,7 +1260,7 @@ DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
case AArch64::STXRB:
case AArch64::STXRH:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::LDARW:
case AArch64::LDARB:
case AArch64::LDARH:
@@ -1284,7 +1284,7 @@ DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
case AArch64::STLXRX:
case AArch64::STXRX:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::LDARX:
case AArch64::LDAXRX:
case AArch64::LDXRX:
@@ -1296,7 +1296,7 @@ DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
case AArch64::STLXPW:
case AArch64::STXPW:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::LDAXPW:
case AArch64::LDXPW:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
@@ -1305,7 +1305,7 @@ DecodeExclusiveLdStInstruction(MCInst &Inst, uint32_t insn, uint64_t Addr,
case AArch64::STLXPX:
case AArch64::STXPX:
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::LDAXPX:
case AArch64::LDXPX:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
@@ -1385,7 +1385,7 @@ static DecodeStatus DecodePairLdStInstruction(MCInst &Inst, uint32_t insn,
case AArch64::STGPpre:
case AArch64::STGPpost:
NeedsDisjointWritebackTransfer = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::LDNPXi:
case AArch64::STNPXi:
case AArch64::LDPXi:
@@ -1400,7 +1400,7 @@ static DecodeStatus DecodePairLdStInstruction(MCInst &Inst, uint32_t insn,
case AArch64::LDPWpre:
case AArch64::STPWpre:
NeedsDisjointWritebackTransfer = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AArch64::LDNPWi:
case AArch64::STNPWi:
case AArch64::LDPWi:
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
index 38afc5deb42f3..1f671682cb3bb 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
@@ -161,7 +161,7 @@ void AArch64GISelUtils::changeVectorFCMPPredToAArch64CC(
break;
case CmpInst::FCMP_UNO:
Invert = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CmpInst::FCMP_ORD:
CondCode = AArch64CC::MI;
CondCode2 = AArch64CC::GE;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 1b1c2c4690d62..870c8e27f9949 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2569,7 +2569,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
// For s32, use a cp load if we have optsize/minsize.
if (!shouldOptForSize(&MF))
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 16:
case 64:
case 128: {
@@ -2972,7 +2972,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_ASHR:
if (MRI.getType(I.getOperand(0).getReg()).isVector())
return selectVectorAshrLshr(I, MRI);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetOpcode::G_SHL:
if (Opcode == TargetOpcode::G_SHL &&
MRI.getType(I.getOperand(0).getReg()).isVector())
@@ -2997,7 +2997,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
I.getOperand(2).setReg(Trunc.getReg(0));
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetOpcode::G_OR: {
// Reject the various things we don't support yet.
if (unsupportedBinOp(I, RBI, MRI, TRI))
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index f0b311289c419..c48ab31a429f9 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -648,7 +648,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
/*NumOperands*/ 1);
}
// Both registers are generic, use G_BITCAST.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case TargetOpcode::G_BITCAST: {
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 13a65f1ad6017..27677391ab6b0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -949,7 +949,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
static unsigned getRsrcReg(CallingConv::ID CallConv) {
switch (CallConv) {
- default: LLVM_FALLTHROUGH;
+ default: [[fallthrough]];
case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1;
case CallingConv::AMDGPU_LS: return R_00B528_SPI_SHADER_PGM_RSRC1_LS;
case CallingConv::AMDGPU_HS: return R_00B428_SPI_SHADER_PGM_RSRC1_HS;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 887341e67454e..04f413e463840 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -471,7 +471,7 @@ AMDGPURegisterBankInfo::getInstrAlternativeMappings(
return addMappingFromTable<1>(MI, MRI, {{ 0 }}, Table);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case TargetOpcode::G_FCONSTANT:
case TargetOpcode::G_FRAME_INDEX:
@@ -2367,7 +2367,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
llvm_unreachable("lowerAbsToMaxNeg should have succeeded");
return;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AMDGPU::G_ADD:
case AMDGPU::G_SUB:
@@ -3717,7 +3717,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AMDGPU::G_PTR_ADD:
case AMDGPU::G_PTRMASK:
@@ -3743,7 +3743,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_UBFX:
if (isSALUMapping(MI))
return getDefaultMappingSOP(MI);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AMDGPU::G_SADDSAT: // FIXME: Could lower sat ops for SALU
case AMDGPU::G_SSUBSAT:
@@ -3906,7 +3906,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AMDGPU::G_MERGE_VALUES:
case AMDGPU::G_CONCAT_VECTORS: {
@@ -4353,7 +4353,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits();
unsigned IdxBank = getRegBankID(IdxReg, MRI, AMDGPU::SGPRRegBankID);
OpdsMapping[3] = AMDGPU::getValueMapping(IdxBank, IdxSize);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::amdgcn_readfirstlane: {
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index cf4826d81b4bb..a15f3a26a3f7e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -460,21 +460,21 @@ bool AMDGPUSubtarget::makeLIDRangeMetadata(Instruction *I) const {
case Intrinsic::amdgcn_workitem_id_x:
case Intrinsic::r600_read_tidig_x:
IdQuery = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::r600_read_local_size_x:
Dim = 0;
break;
case Intrinsic::amdgcn_workitem_id_y:
case Intrinsic::r600_read_tidig_y:
IdQuery = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::r600_read_local_size_y:
Dim = 1;
break;
case Intrinsic::amdgcn_workitem_id_z:
case Intrinsic::r600_read_tidig_z:
IdQuery = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::r600_read_local_size_z:
Dim = 2;
break;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index a79cd2e9499e9..8feff8fbb9bbf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -588,7 +588,7 @@ InstructionCost GCNTTIImpl::getArithmeticInstrCost(
return TargetTransformInfo::TCC_Free;
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::FADD:
case ISD::FSUB:
if (ST->hasPackedFP32Ops() && SLT == MVT::f32)
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 44542b352b585..9416328bd0ce1 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -977,7 +977,7 @@ int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteVGPRReadlaneRead);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AMDGPU::V_WRITELANE_B32: {
UseReg = AMDGPU::EXEC;
int WaitStatesNeededForDef =
@@ -1913,7 +1913,7 @@ int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
break;
case 8: NeedWaitStates = MFMA16x16WritesAGPRAccVgprReadWaitStates;
break;
- case 16: LLVM_FALLTHROUGH;
+ case 16: [[fallthrough]];
default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprReadWaitStates;
break;
}
@@ -1923,7 +1923,7 @@ int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
break;
case 8: NeedWaitStates = MFMA16x16WritesAGPRAccVgprWriteWaitStates;
break;
- case 16: LLVM_FALLTHROUGH;
+ case 16: [[fallthrough]];
default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprWriteWaitStates;
break;
}
@@ -1984,7 +1984,7 @@ int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
break;
case 8: NeedWaitStates = MFMA16x16ReadSrcCAccVgprWriteWaitStates;
break;
- case 16: LLVM_FALLTHROUGH;
+ case 16: [[fallthrough]];
default: NeedWaitStates = MFMA32x32ReadSrcCAccVgprWriteWaitStates;
break;
}
@@ -2140,7 +2140,7 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
? SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates
: SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates;
break;
- case 16: LLVM_FALLTHROUGH;
+ case 16: [[fallthrough]];
default:
NeedWaitStates = ST.hasGFX940Insts()
? isXDL(ST, *MI1)
@@ -2186,7 +2186,7 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
: GFX940_SMFMA8PassWritesVGPROverlappedSrcABWaitStates
: SMFMA16x16WritesVGPROverlappedSrcABWaitStates;
break;
- case 16: LLVM_FALLTHROUGH;
+ case 16: [[fallthrough]];
default:
NeedWaitStates = ST.hasGFX940Insts()
? isXDL(ST, *MI1)
@@ -2407,7 +2407,7 @@ int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
: GFX940_SMFMA8PassWriteVgprVALUMemExpReadWaitStates
: SMFMA16x16WriteVgprVALUMemExpReadWaitStates;
break;
- case 16: LLVM_FALLTHROUGH;
+ case 16: [[fallthrough]];
default:
NeedWaitStates =
isDGEMM(MFMA->getOpcode())
@@ -2502,7 +2502,7 @@ int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
: GFX940_SMFMA8PassWriteVgprVALUWawWaitStates
: SMFMA16x16WriteVgprVALUWawWaitStates;
break;
- case 16: LLVM_FALLTHROUGH;
+ case 16: [[fallthrough]];
default:
NeedWaitStates = isDGEMM(MFMA->getOpcode())
? DMFMA16x16WriteVgprVALUWriteWaitStates
@@ -2555,7 +2555,7 @@ int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
break;
case 8: NeedWaitStates = SMFMA16x16ReadVgprVALUWarWaitStates;
break;
- case 16: LLVM_FALLTHROUGH;
+ case 16: [[fallthrough]];
default: NeedWaitStates = SMFMA32x32ReadVgprVALUWarWaitStates;
break;
}
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index 21ff2744e5b4c..15069a0378d99 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -715,7 +715,7 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
printImmediate16(static_cast<uint16_t>(Op.getImm()), STI, O);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
printImmediateInt16(static_cast<uint16_t>(Op.getImm()), STI, O);
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
index 745b4bc1fc8eb..1ba05e765bc31 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
@@ -274,7 +274,7 @@ uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
diff --git a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
index afcb6b4d65f8a..23a996914f948 100644
--- a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp
@@ -71,7 +71,7 @@ void R600AsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) {
// Evergreen / Northern Islands
switch (MF.getFunction().getCallingConv()) {
- default: LLVM_FALLTHROUGH;
+ default: [[fallthrough]];
case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
case CallingConv::AMDGPU_PS: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break;
@@ -80,9 +80,9 @@ void R600AsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
} else {
// R600 / R700
switch (MF.getFunction().getCallingConv()) {
- default: LLVM_FALLTHROUGH;
- case CallingConv::AMDGPU_GS: LLVM_FALLTHROUGH;
- case CallingConv::AMDGPU_CS: LLVM_FALLTHROUGH;
+ default: [[fallthrough]];
+ case CallingConv::AMDGPU_GS: [[fallthrough]];
+ case CallingConv::AMDGPU_CS: [[fallthrough]];
case CallingConv::AMDGPU_VS: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break;
case CallingConv::AMDGPU_PS: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break;
}
diff --git a/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
index 4bf38a3c6ceb1..4e26bc8a4b52c 100644
--- a/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
@@ -527,7 +527,7 @@ class R600ControlFlowFinalizer : public MachineFunctionPass {
CFStack.pushBranch(R600::CF_PUSH_EG);
} else
CFStack.pushBranch(R600::CF_ALU_PUSH_BEFORE);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case R600::CF_ALU:
I = MI;
AluClauses.push_back(MakeALUClause(MBB, I));
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 5199a37a0519c..bdcdc9c0ef1a2 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -589,7 +589,7 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N,
// Since we don't care about out of bounds values we can use FP_TO_SINT for
// uints too. The DAGLegalizer code for uint considers some extra cases
// which are not necessary here.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::FP_TO_SINT: {
if (N->getValueType(0) == MVT::i1) {
Results.push_back(lowerFP_TO_SINT(N->getOperand(0), DAG));
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 36eeca8739c4d..bf03730bffc50 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4338,7 +4338,7 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::DS_GWS_SEMA_BR:
case AMDGPU::DS_GWS_BARRIER:
TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::data0);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AMDGPU::DS_GWS_SEMA_V:
case AMDGPU::DS_GWS_SEMA_P:
case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
@@ -7807,7 +7807,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
DAG.getContext()->diagnose(NoFpRet);
return SDValue();
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::amdgcn_global_atomic_fmin:
case Intrinsic::amdgcn_global_atomic_fmax:
case Intrinsic::amdgcn_flat_atomic_fadd:
@@ -7824,7 +7824,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_global_atomic_fadd:
if (!Subtarget->hasAtomicFaddNoRtnInsts())
return makeV_ILLEGAL(Op, DAG);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::amdgcn_flat_atomic_fadd: {
EVT VT = Op.getOperand(3).getValueType();
return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT,
@@ -10168,7 +10168,7 @@ bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default:
return denormalsEnabledForType(DAG, Op.getValueType()) &&
@@ -11554,7 +11554,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
case ISD::LOAD: {
if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI))
return Widended;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default: {
if (!DCI.isBeforeLegalize()) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 0f4c77838763b..93269da8b8147 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4082,7 +4082,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
ErrInfo = "Expected immediate, but got non-immediate";
return false;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
continue;
}
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 9872703acadaa..4c2509c51e70d 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -362,14 +362,14 @@ static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
case AMDGPU::S_BUFFER_LOAD_DWORDX8_IMM:
case AMDGPU::S_LOAD_DWORDX8_IMM:
return 8;
- case AMDGPU::DS_READ_B32: LLVM_FALLTHROUGH;
- case AMDGPU::DS_READ_B32_gfx9: LLVM_FALLTHROUGH;
- case AMDGPU::DS_WRITE_B32: LLVM_FALLTHROUGH;
+ case AMDGPU::DS_READ_B32: [[fallthrough]];
+ case AMDGPU::DS_READ_B32_gfx9: [[fallthrough]];
+ case AMDGPU::DS_WRITE_B32: [[fallthrough]];
case AMDGPU::DS_WRITE_B32_gfx9:
return 1;
- case AMDGPU::DS_READ_B64: LLVM_FALLTHROUGH;
- case AMDGPU::DS_READ_B64_gfx9: LLVM_FALLTHROUGH;
- case AMDGPU::DS_WRITE_B64: LLVM_FALLTHROUGH;
+ case AMDGPU::DS_READ_B64: [[fallthrough]];
+ case AMDGPU::DS_READ_B64_gfx9: [[fallthrough]];
+ case AMDGPU::DS_WRITE_B64: [[fallthrough]];
case AMDGPU::DS_WRITE_B64_gfx9:
return 2;
default:
@@ -635,7 +635,7 @@ static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) {
case AMDGPU::GLOBAL_STORE_DWORDX3_SADDR:
case AMDGPU::GLOBAL_STORE_DWORDX4_SADDR:
Result.SAddr = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AMDGPU::GLOBAL_LOAD_DWORD:
case AMDGPU::GLOBAL_LOAD_DWORDX2:
case AMDGPU::GLOBAL_LOAD_DWORDX3:
diff --git a/llvm/lib/Target/ARC/ARCOptAddrMode.cpp b/llvm/lib/Target/ARC/ARCOptAddrMode.cpp
index 589c58e285bb0..358f9039ae22e 100644
--- a/llvm/lib/Target/ARC/ARCOptAddrMode.cpp
+++ b/llvm/lib/Target/ARC/ARCOptAddrMode.cpp
@@ -136,7 +136,7 @@ static bool isAddConstantOp(const MachineInstr &MI, int64_t &Amount) {
switch (MI.getOpcode()) {
case ARC::SUB_rru6:
Sign = -1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARC::ADD_rru6:
assert(MI.getOperand(2).isImm() && "Expected immediate operand");
Amount = Sign * MI.getOperand(2).getImm();
diff --git a/llvm/lib/Target/ARC/ARCRegisterInfo.cpp b/llvm/lib/Target/ARC/ARCRegisterInfo.cpp
index 91ddd7fe36e1d..117c7851c1b9d 100644
--- a/llvm/lib/Target/ARC/ARCRegisterInfo.cpp
+++ b/llvm/lib/Target/ARC/ARCRegisterInfo.cpp
@@ -82,11 +82,11 @@ static void replaceFrameIndex(MachineBasicBlock::iterator II,
switch (MI.getOpcode()) {
case ARC::LD_rs9:
assert((Offset % 4 == 0) && "LD needs 4 byte alignment.");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARC::LDH_rs9:
case ARC::LDH_X_rs9:
assert((Offset % 2 == 0) && "LDH needs 2 byte alignment.");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARC::LDB_rs9:
case ARC::LDB_X_rs9:
LLVM_DEBUG(dbgs() << "Building LDFI\n");
@@ -97,10 +97,10 @@ static void replaceFrameIndex(MachineBasicBlock::iterator II,
break;
case ARC::ST_rs9:
assert((Offset % 4 == 0) && "ST needs 4 byte alignment.");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARC::STH_rs9:
assert((Offset % 2 == 0) && "STH needs 2 byte alignment.");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARC::STB_rs9:
LLVM_DEBUG(dbgs() << "Building STFI\n");
BuildMI(MBB, II, DL, TII.get(MI.getOpcode()))
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 57cbd7a3b2b8f..a7b63fcc13702 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -1165,7 +1165,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
case ARM::tPUSH:
// Special case here: no src & dst reg, but two extra imp ops.
StartOp = 2; NumOffset = 2;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::STMDB_UPD:
case ARM::t2STMDB_UPD:
case ARM::VSTMDDB_UPD:
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 183febe756c11..92aa103112136 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -2953,7 +2953,7 @@ static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) {
case ARM::tASRrr:
case ARM::tROR:
IsThumb1 = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::RSBrr:
case ARM::RSBri:
case ARM::RSCrr:
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index d77c3afd05e57..386a74877bc17 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -801,7 +801,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
case ARM::Bcc:
isCond = true;
UOpc = ARM::B;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::B:
Bits = 24;
Scale = 4;
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index a167225e27430..0ca1a61eb70ec 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -1053,7 +1053,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
TII.get(Opc), Res)
.addReg(SrcReg).addImm(1));
SrcReg = Res;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case MVT::i8:
if (isThumb2) {
@@ -1393,7 +1393,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
case MVT::i8:
case MVT::i16:
needsExt = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::i32:
if (isThumb2) {
if (!UseImm)
@@ -1837,7 +1837,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
// For AAPCS ABI targets, just use VFP variant of the calling convention.
return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CallingConv::C:
case CallingConv::CXX_FAST_TLS:
// Use target triple & subtarget features to do actual dispatch.
@@ -1857,7 +1857,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
// Fall through to soft float variant, variadic functions don't
// use hard floating point ABI.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CallingConv::ARM_AAPCS:
return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
case CallingConv::ARM_APCS:
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 48b4d266b41a6..a0a20878a412d 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -812,7 +812,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
GPRCS2Size += 4;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::R0:
case ARM::R1:
case ARM::R2:
@@ -1105,7 +1105,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R12:
if (STI.splitFramePushPop(MF))
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::R0:
case ARM::R1:
case ARM::R2:
@@ -1842,11 +1842,11 @@ skipAlignedDPRCS2Spills(MachineBasicBlock::iterator MI,
case 7:
++MI;
assert(MI->mayStore() && "Expecting spill instruction");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
++MI;
assert(MI->mayStore() && "Expecting spill instruction");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 1:
case 2:
case 4:
@@ -2344,7 +2344,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
switch (Reg) {
case ARM::LR:
LRSpilled = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::R0: case ARM::R1:
case ARM::R2: case ARM::R3:
case ARM::R4: case ARM::R5:
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 05febfd76cf4b..b6b6da5f5f873 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -6739,23 +6739,23 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
if (ST->hasMVEFloatOps()) {
Opc = ARMCC::NE; break;
} else {
- Invert = true; LLVM_FALLTHROUGH;
+ Invert = true; [[fallthrough]];
}
case ISD::SETOEQ:
case ISD::SETEQ: Opc = ARMCC::EQ; break;
case ISD::SETOLT:
- case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETLT: Swap = true; [[fallthrough]];
case ISD::SETOGT:
case ISD::SETGT: Opc = ARMCC::GT; break;
case ISD::SETOLE:
- case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETLE: Swap = true; [[fallthrough]];
case ISD::SETOGE:
case ISD::SETGE: Opc = ARMCC::GE; break;
- case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETUGE: Swap = true; [[fallthrough]];
case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break;
- case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETUGT: Swap = true; [[fallthrough]];
case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break;
- case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
+ case ISD::SETUEQ: Invert = true; [[fallthrough]];
case ISD::SETONE: {
// Expand this to (OLT | OGT).
SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
@@ -6767,7 +6767,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
Result = DAG.getNOT(dl, Result, VT);
return Result;
}
- case ISD::SETUO: Invert = true; LLVM_FALLTHROUGH;
+ case ISD::SETUO: Invert = true; [[fallthrough]];
case ISD::SETO: {
// Expand this to (OLT | OGE).
SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
@@ -6788,16 +6788,16 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
if (ST->hasMVEIntegerOps()) {
Opc = ARMCC::NE; break;
} else {
- Invert = true; LLVM_FALLTHROUGH;
+ Invert = true; [[fallthrough]];
}
case ISD::SETEQ: Opc = ARMCC::EQ; break;
- case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETLT: Swap = true; [[fallthrough]];
case ISD::SETGT: Opc = ARMCC::GT; break;
- case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETLE: Swap = true; [[fallthrough]];
case ISD::SETGE: Opc = ARMCC::GE; break;
- case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETULT: Swap = true; [[fallthrough]];
case ISD::SETUGT: Opc = ARMCC::HI; break;
- case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETULE: Swap = true; [[fallthrough]];
case ISD::SETUGE: Opc = ARMCC::HS; break;
}
@@ -12301,7 +12301,7 @@ static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
// (zext cc) can never be the all ones value.
if (AllOnes)
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SIGN_EXTEND: {
SDLoc dl(N);
EVT VT = N->getValueType(0);
@@ -20979,7 +20979,7 @@ Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
case AtomicOrdering::SequentiallyConsistent:
if (!Inst->hasAtomicStore())
return nullptr; // Nothing to do
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicOrdering::Release:
case AtomicOrdering::AcquireRelease:
if (Subtarget->preferISHSTBarriers())
diff --git a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
index 1c44893581f9f..624db8838dcf4 100644
--- a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -861,7 +861,7 @@ bool ARMInstructionSelector::select(MachineInstr &I) {
switch (I.getOpcode()) {
case G_SEXT:
isSExt = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case G_ZEXT: {
assert(MRI.getType(I.getOperand(0).getReg()).getSizeInBits() <= 32 &&
"Unsupported destination size for extension");
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index b385e0b936a68..6f36ac603a9dc 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -6223,7 +6223,7 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
// Fall though for the Identifier case that is not a register or a
// special name.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
case AsmToken::Integer: // things like 1f and 2b as a branch targets
@@ -6290,7 +6290,7 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
return false;
}
// w/ a ':' after the '#', it's just like a plain ':'.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AsmToken::Colon: {
S = Parser.getTok().getLoc();
diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index f15cbb7c4fe55..8169be4e0b568 100644
--- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -2010,7 +2010,7 @@ static DecodeStatus DecodeCopMemInstruction(MCInst &Inst, unsigned Insn,
case ARM::STC_POST:
case ARM::STCL_POST:
imm |= U << 8;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
// The 'option' variant doesn't encode 'U' in the immediate since
// the immediate is unsigned [0,255].
@@ -3193,7 +3193,7 @@ static DecodeStatus DecodeVLDInstruction(MCInst &Inst, unsigned Insn,
break;
}
// Fall through to handle the register offset variant.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::VLD1d8wb_fixed:
case ARM::VLD1d16wb_fixed:
case ARM::VLD1d32wb_fixed:
@@ -5015,7 +5015,7 @@ static DecodeStatus DecodeMSRMask(MCInst &Inst, unsigned Val, uint64_t Address,
case 0x93: // faultmask_ns
if (!(FeatureBits[ARM::HasV8MMainlineOps]))
return MCDisassembler::Fail;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 10: // msplim
case 11: // psplim
case 0x88: // msp_ns
@@ -6345,7 +6345,7 @@ static DecodeStatus DecodeLOLoop(MCInst &Inst, unsigned Insn, uint64_t Address,
case ARM::MVE_LETP:
Inst.addOperand(MCOperand::createReg(ARM::LR));
Inst.addOperand(MCOperand::createReg(ARM::LR));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::t2LE:
if (!Check(S, DecodeBFLabelOperand<false, true, true, 11>(
Inst, Imm, Address, Decoder)))
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index 23430dfc017a0..e8032c376f60a 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -462,7 +462,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
assert(STI != nullptr);
if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
Value >>= 16;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::fixup_arm_movw_lo16: {
unsigned Hi4 = (Value & 0xF000) >> 12;
unsigned Lo12 = Value & 0x0FFF;
@@ -475,7 +475,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
assert(STI != nullptr);
if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
Value >>= 16;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::fixup_t2_movw_lo16: {
unsigned Hi4 = (Value & 0xF000) >> 12;
unsigned i = (Value & 0x800) >> 11;
@@ -491,11 +491,11 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
case ARM::fixup_arm_ldst_pcrel_12:
// ARM PC-relative values are offset by 8.
Value -= 4;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::fixup_t2_ldst_pcrel_12:
// Offset by 4, adjusted by two due to the half-word ordering of thumb.
Value -= 4;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::fixup_arm_ldst_abs_12: {
bool isAdd = true;
if ((int64_t)Value < 0) {
@@ -742,7 +742,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
case ARM::fixup_arm_pcrel_10:
Value = Value - 4; // ARM fixups offset by an additional word and don't
// need to adjust for the half-word ordering.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::fixup_t2_pcrel_10: {
// Offset by 4, adjusted by two due to the half-word ordering of thumb.
Value = Value - 4;
@@ -769,7 +769,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
case ARM::fixup_arm_pcrel_9:
Value = Value - 4; // ARM fixups offset by an additional word and don't
// need to adjust for the half-word ordering.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::fixup_t2_pcrel_9: {
// Offset by 4, adjusted by two due to the half-word ordering of thumb.
Value = Value - 4;
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index c33bbfcc7114d..bafc0f8537560 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -1680,7 +1680,7 @@ getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
case ARM_AM::lsl: SBits = 0x0; break;
case ARM_AM::lsr: SBits = 0x2; break;
case ARM_AM::asr: SBits = 0x4; break;
- case ARM_AM::rrx: LLVM_FALLTHROUGH;
+ case ARM_AM::rrx: [[fallthrough]];
case ARM_AM::ror: SBits = 0x6; break;
}
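
The hunks above apply the substitution inside switch statements. As a standalone sketch (not taken from the patch; the function name and values are made up for illustration), a C++17 switch that uses the attribute to mark an intentional fall-through looks roughly like this:

    // Illustrative only: a case does its own work, then deliberately shares
    // the handling of the next case via [[fallthrough]];.
    #include <cstdio>

    static const char *describe(int Kind) {
      switch (Kind) {
      case 0:
        std::puts("zero: case-specific work first");
        [[fallthrough]]; // intentional: reuse the handling below
      case 1:
        return "small";
      case 2:
        return "two";
      default:
        return "other";
      }
    }

    int main() { std::printf("%s\n", describe(0)); }

Compilers that diagnose implicit fall-through (for example GCC/Clang with -Wimplicit-fallthrough) accept the standard attribute as the annotation that marks the fall-through as deliberate.
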
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index 0ea51839824b8..b26773099a886 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -218,7 +218,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
if (Asm.isThumbFunc(A))
FixedValue &= 0xfffffffe;
MovtBit = 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::fixup_t2_movw_lo16:
ThumbBit = 1;
break;
diff --git a/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp b/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
index 3e76efb5133ff..34f9ea107e168 100644
--- a/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
+++ b/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
@@ -221,7 +221,7 @@ static bool tryInterleave(Instruction *Start,
default:
return false;
}
- LLVM_FALLTHROUGH; // Fall through to treating these like an operator below.
+ [[fallthrough]]; // Fall through to treating these like an operator below.
}
// Binary/tertiary ops
case Instruction::Add:
@@ -255,7 +255,7 @@ static bool tryInterleave(Instruction *Start,
// A shuffle of a splat is a splat.
if (cast<ShuffleVectorInst>(I)->isZeroEltSplat())
continue;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
LLVM_DEBUG(dbgs() << " Unhandled instruction: " << *I << "\n");
diff --git a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
index df64710712ccc..9855e48b623e4 100644
--- a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -218,7 +218,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
FRSize += 4;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::R8:
case ARM::R9:
case ARM::R10:
@@ -226,13 +226,13 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
GPRCS2Size += 4;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::LR:
if (HasFrameRecordArea) {
FRSize += 4;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::R4:
case ARM::R5:
case ARM::R6:
@@ -368,7 +368,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R12:
if (STI.splitFramePushPop(MF))
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::R0:
case ARM::R1:
case ARM::R2:
diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index 7ae4b19afb606..3dddbb9e35edf 100644
--- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -694,7 +694,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
case ARM::t2ADDSri:
if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARM::t2ADDSrr:
return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
}
diff --git a/llvm/lib/Target/AVR/AVRRegisterInfo.cpp b/llvm/lib/Target/AVR/AVRRegisterInfo.cpp
index 87e6558c12c22..baf336720a91b 100644
--- a/llvm/lib/Target/AVR/AVRRegisterInfo.cpp
+++ b/llvm/lib/Target/AVR/AVRRegisterInfo.cpp
@@ -199,7 +199,7 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
Opcode = AVR::ADIWRdK;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default: {
// This opcode will get expanded into a pair of subi/sbci.
diff --git a/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp b/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
index 9e1c7b781f0f4..c36185a96835f 100644
--- a/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
+++ b/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
@@ -537,7 +537,7 @@ bool AVRAsmParser::parseOperand(OperandVector &Operands) {
if (!tryParseRegisterOperand(Operands)) {
return false;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AsmToken::LParen:
case AsmToken::Integer:
case AsmToken::Dot:
diff --git a/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp b/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
index 4c064d65d9192..9ba5dee6f2500 100644
--- a/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
+++ b/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
@@ -368,7 +368,7 @@ BPFAsmParser::parseOperandAsOperator(OperandVector &Operands) {
case AsmToken::Plus: {
if (getLexer().peekTok().is(AsmToken::Integer))
return MatchOperand_NoMatch;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AsmToken::Equal:
diff --git a/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
index 4553f2fd9228f..86e0b5181cb7f 100644
--- a/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
+++ b/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
@@ -573,7 +573,7 @@ bool HexagonAsmParser::matchOneInstruction(MCInst &MCI, SMLoc IDLoc,
case Match_MnemonicFail:
return Error(IDLoc, "unrecognized instruction");
case Match_InvalidOperand:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Match_InvalidTiedOperand:
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0U) {
@@ -1440,7 +1440,7 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
// Translate a "$Rx = CONST32(#imm)" to "$Rx = memw(gp+#LABEL) "
case Hexagon::CONST32:
is32bit = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// Translate a "$Rx:y = CONST64(#imm)" to "$Rx:y = memd(gp+#LABEL) "
case Hexagon::CONST64:
// FIXME: need better way to detect AsmStreamer (upstream removed getKind())
diff --git a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 673b397ef3c52..d942276ddc23c 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -1975,10 +1975,10 @@ bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
switch (Opc) {
case Hexagon::S2_storeri_io:
Align++;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Hexagon::S2_storerh_io:
Align++;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Hexagon::S2_storerb_io:
break;
default:
diff --git a/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp b/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
index 8e014b395286b..6e661ad4a63e0 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
@@ -997,7 +997,7 @@ bool HexagonEvaluator::evaluate(const MachineInstr &BI,
case Hexagon::J2_jumpfnew:
case Hexagon::J2_jumpfnewpt:
Negated = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Hexagon::J2_jumpt:
case Hexagon::J2_jumptpt:
case Hexagon::J2_jumptnew:
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index 8029dcff80526..88517ee663d1e 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -2269,7 +2269,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &BrI,
case Hexagon::J2_jumpfnew:
case Hexagon::J2_jumpfnewpt:
Negated = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Hexagon::J2_jumpt:
case Hexagon::J2_jumptnew:
case Hexagon::J2_jumptnewpt:
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 01501109f3b14..2c896943d94e6 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -2715,12 +2715,12 @@ bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
case Hexagon::S4_storeirif_io:
case Hexagon::S4_storeiri_io:
++LS;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Hexagon::S4_storeirht_io:
case Hexagon::S4_storeirhf_io:
case Hexagon::S4_storeirh_io:
++LS;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Hexagon::S4_storeirbt_io:
case Hexagon::S4_storeirbf_io:
case Hexagon::S4_storeirb_io:
diff --git a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
index 00615f355146e..92e7432736115 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -335,7 +335,7 @@ bool HexagonGenPredicate::isScalarPred(RegisterSubReg PredReg) {
if (MRI->getRegClass(PR.R) != PredRC)
return false;
// If it is a copy between two predicate registers, fall through.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Hexagon::C2_and:
case Hexagon::C2_andn:
diff --git a/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp b/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
index 78ab44379af18..99aaf1c1b592c 100644
--- a/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
@@ -132,7 +132,7 @@ bool HexagonCP::interpretAsCopy(const MachineInstr *MI, EqualityMap &EM) {
const MachineOperand &A = MI->getOperand(2);
if (!A.isImm() || A.getImm() != 0)
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Hexagon::A2_tfr: {
const MachineOperand &DstOp = MI->getOperand(0);
diff --git a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index f0e56d74fcd11..a833e032eee43 100644
--- a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -255,7 +255,7 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
case Hexagon::PS_vstorerw_ai:
case Hexagon::PS_vstorerw_nt_ai:
IsPair = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Hexagon::PS_vloadrv_ai:
case Hexagon::PS_vloadrv_nt_ai:
case Hexagon::PS_vstorerv_ai:
diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
index ada78ca70559e..2862ad2d2097d 100644
--- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -350,7 +350,7 @@ int32_t HexagonSplitDoubleRegs::profit(const MachineInstr *MI) const {
case Hexagon::A4_combineri:
ImmX++;
// Fall through into A4_combineir.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Hexagon::A4_combineir: {
ImmX++;
const MachineOperand &OpX = MI->getOperand(ImmX);
@@ -360,7 +360,7 @@ int32_t HexagonSplitDoubleRegs::profit(const MachineInstr *MI) const {
return 10;
}
// Fall through into A2_combinew.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Hexagon::A2_combinew:
return 2;
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index 37866a73ed0f9..27a1830f05234 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -445,7 +445,7 @@ class HexagonAsmBackend : public MCAsmBackend {
case fixup_Hexagon_B7_PCREL:
if (!(isIntN(7, sValue)))
HandleFixupError(7, 2, (int64_t)FixupValue, "B7_PCREL");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case fixup_Hexagon_B7_PCREL_X:
InstMask = 0x00001f18; // Word32_B7
Reloc = (((Value >> 2) & 0x1f) << 8) | // Value 6-2 = Target 12-8
@@ -455,7 +455,7 @@ class HexagonAsmBackend : public MCAsmBackend {
case fixup_Hexagon_B9_PCREL:
if (!(isIntN(9, sValue)))
HandleFixupError(9, 2, (int64_t)FixupValue, "B9_PCREL");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case fixup_Hexagon_B9_PCREL_X:
InstMask = 0x003000fe; // Word32_B9
Reloc = (((Value >> 7) & 0x3) << 20) | // Value 8-7 = Target 21-20
@@ -467,7 +467,7 @@ class HexagonAsmBackend : public MCAsmBackend {
case fixup_Hexagon_B13_PCREL:
if (!(isIntN(13, sValue)))
HandleFixupError(13, 2, (int64_t)FixupValue, "B13_PCREL");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case fixup_Hexagon_B13_PCREL_X:
InstMask = 0x00202ffe; // Word32_B13
Reloc = (((Value >> 12) & 0x1) << 21) | // Value 12 = Target 21
@@ -478,7 +478,7 @@ class HexagonAsmBackend : public MCAsmBackend {
case fixup_Hexagon_B15_PCREL:
if (!(isIntN(15, sValue)))
HandleFixupError(15, 2, (int64_t)FixupValue, "B15_PCREL");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case fixup_Hexagon_B15_PCREL_X:
InstMask = 0x00df20fe; // Word32_B15
Reloc = (((Value >> 13) & 0x3) << 22) | // Value 14-13 = Target 23-22
@@ -490,7 +490,7 @@ class HexagonAsmBackend : public MCAsmBackend {
case fixup_Hexagon_B22_PCREL:
if (!(isIntN(22, sValue)))
HandleFixupError(22, 2, (int64_t)FixupValue, "B22_PCREL");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case fixup_Hexagon_B22_PCREL_X:
InstMask = 0x01ff3ffe; // Word32_B22
Reloc = (((Value >> 13) & 0x1ff) << 16) | // Value 21-13 = Target 24-16
@@ -588,7 +588,7 @@ class HexagonAsmBackend : public MCAsmBackend {
switch (Fixup.getTargetKind()) {
case fixup_Hexagon_B22_PCREL:
// GetFixupCount assumes B22 won't relax
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return false;
break;
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index f2d1173cd503f..b2eb968714e63 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -475,22 +475,22 @@ FeatureBitset Hexagon_MC::completeHVXFeatures(const FeatureBitset &S) {
switch (CpuArch) {
case ArchV69:
FB.set(ExtensionHVXV69);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ArchV68:
FB.set(ExtensionHVXV68);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ArchV67:
FB.set(ExtensionHVXV67);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ArchV66:
FB.set(ExtensionHVXV66);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ArchV65:
FB.set(ExtensionHVXV65);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ArchV62:
FB.set(ExtensionHVXV62);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ArchV60:
FB.set(ExtensionHVXV60);
break;
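
The Hexagon hunk above relies on a descending fall-through chain so that each newer architecture level also enables every older HVX extension. A minimal standalone sketch of that cascade, using illustrative names rather than the real Hexagon_MC interface, could look like:

    // Illustrative only: newer arch levels fall through to set all older features.
    #include <bitset>
    #include <cstdio>

    enum Feature { HVXV62, HVXV65, HVXV66, NumFeatures };
    enum Arch { ArchV62, ArchV65, ArchV66 };

    static std::bitset<NumFeatures> featuresFor(Arch A) {
      std::bitset<NumFeatures> FB;
      switch (A) {
      case ArchV66:
        FB.set(HVXV66);
        [[fallthrough]];
      case ArchV65:
        FB.set(HVXV65);
        [[fallthrough]];
      case ArchV62:
        FB.set(HVXV62);
        break;
      }
      return FB;
    }

    int main() { std::printf("%zu features\n", featuresFor(ArchV66).count()); }
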
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
index c8805296017dd..6858885a8a6f5 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
@@ -488,10 +488,10 @@ HexagonShuffler::HexagonPacketSummary HexagonShuffler::GetPacketSummary() {
case HexagonII::TypeCVI_GATHER_DV:
case HexagonII::TypeCVI_GATHER_RST:
++Summary.NonZCVIloads;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case HexagonII::TypeCVI_ZW:
++Summary.AllCVIloads;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case HexagonII::TypeLD:
++Summary.loads;
++Summary.memory;
@@ -510,7 +510,7 @@ HexagonShuffler::HexagonPacketSummary HexagonShuffler::GetPacketSummary() {
case HexagonII::TypeCVI_SCATTER_NEW_RST:
case HexagonII::TypeCVI_SCATTER_NEW_ST:
++Summary.CVIstores;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case HexagonII::TypeST:
++Summary.stores;
++Summary.memory;
diff --git a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
index 33e7068622f19..09eb08badcbb3 100644
--- a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
+++ b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
@@ -812,7 +812,7 @@ std::unique_ptr<LanaiOperand> LanaiAsmParser::parseImmediate() {
case AsmToken::Dot:
if (!Parser.parseExpression(ExprVal))
return LanaiOperand::createImm(ExprVal, Start, End);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return nullptr;
}
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index 250519efd14aa..3cc0c7fdde718 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -1666,7 +1666,7 @@ SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC,
case ISD::SHL: {
if (Op.getNode()->getFlags().hasNoSignedWrap())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default:
NeedOF = true;
@@ -1755,7 +1755,7 @@ SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC,
if (/*!Subtarget.hasBMI() ||*/ !IsAndn || !IsLegalAndnType)
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SUB:
case ISD::OR:
case ISD::XOR:
diff --git a/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp b/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
index 196e492046b90..3b936fffae225 100644
--- a/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
+++ b/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
@@ -461,7 +461,7 @@ bool MSP430AsmParser::ParseOperand(OperandVector &Operands) {
Operands.push_back(MSP430Operand::CreateReg(RegNo, StartLoc, EndLoc));
return false;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AsmToken::Integer:
case AsmToken::Plus:
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index 73ab3b52e907c..1484c6e6e89da 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -1064,7 +1064,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
break;
case ISD::SETULE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETUGE:
// Turn lhs u>= rhs with lhs constant into rhs u< lhs+1, this allows us to
// fold constant into instruction.
@@ -1078,7 +1078,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
break;
case ISD::SETUGT:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETULT:
// Turn lhs u< rhs with lhs constant into rhs u>= lhs+1, this allows us to
// fold constant into instruction.
@@ -1092,7 +1092,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
break;
case ISD::SETLE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETGE:
// Turn lhs >= rhs with lhs constant into rhs < lhs+1, this allows us to
// fold constant into instruction.
@@ -1106,7 +1106,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
break;
case ISD::SETGT:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETLT:
// Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
// fold constant into instruction.
diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index b5817d9ae700d..5ce8ffd954e62 100644
--- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -1890,7 +1890,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
case Mips::BBIT1:
case Mips::BBIT132:
assert(hasCnMips() && "instruction only valid for octeon cpus");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::BEQ:
case Mips::BNE:
@@ -2072,7 +2072,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
case Mips::SDIV_MM:
FirstOp = 0;
SecondOp = 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::SDivMacro:
case Mips::DSDivMacro:
case Mips::UDivMacro:
@@ -5664,7 +5664,7 @@ bool MipsAsmParser::expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
switch (Inst.getOpcode()) {
case Mips::MFTC0:
IsMFTR = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::MTTC0:
u = 0;
rd = getRegisterForMxtrC0(Inst, IsMFTR);
@@ -5672,7 +5672,7 @@ bool MipsAsmParser::expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
break;
case Mips::MFTGPR:
IsMFTR = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::MTTGPR:
rd = Inst.getOperand(IsMFTR ? 1 : 0).getReg();
break;
@@ -5681,7 +5681,7 @@ bool MipsAsmParser::expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
case Mips::MFTACX:
case Mips::MFTDSP:
IsMFTR = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::MTTLO:
case Mips::MTTHI:
case Mips::MTTACX:
@@ -5691,7 +5691,7 @@ bool MipsAsmParser::expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
break;
case Mips::MFTHC1:
h = 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::MFTC1:
IsMFTR = true;
rd = getRegisterForMxtrFP(Inst, IsMFTR);
@@ -5699,14 +5699,14 @@ bool MipsAsmParser::expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
break;
case Mips::MTTHC1:
h = 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::MTTC1:
rd = getRegisterForMxtrFP(Inst, IsMFTR);
sel = 2;
break;
case Mips::CFTC1:
IsMFTR = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::CTTC1:
rd = getRegisterForMxtrFP(Inst, IsMFTR);
sel = 3;
diff --git a/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index 4e40a84ecfd0f..bf5f7b69f7fa8 100644
--- a/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -1813,7 +1813,7 @@ static DecodeStatus DecodeMemMMImm12(MCInst &Inst, unsigned Insn,
break;
case Mips::SC_MM:
Inst.addOperand(MCOperand::createReg(Reg));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
Inst.addOperand(MCOperand::createReg(Reg));
if (Inst.getOpcode() == Mips::LWP_MM || Inst.getOpcode() == Mips::SWP_MM)
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 4990696fcfe0b..84e8c9f071fb1 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -558,7 +558,7 @@ bool MipsELFObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym,
case ELF::R_MIPS_GPREL32:
if (cast<MCSymbolELF>(Sym).getOther() & ELF::STO_MIPS_MICROMIPS)
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ELF::R_MIPS_26:
case ELF::R_MIPS_64:
case ELF::R_MIPS_GPREL16:
diff --git a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
index d242083f958b8..2c2554b5b4bc3 100644
--- a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
+++ b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
@@ -347,50 +347,50 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword(
switch (I->getOpcode()) {
case Mips::ATOMIC_LOAD_NAND_I8_POSTRA:
SEOp = Mips::SEB;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_NAND_I16_POSTRA:
IsNand = true;
break;
case Mips::ATOMIC_SWAP_I8_POSTRA:
SEOp = Mips::SEB;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_SWAP_I16_POSTRA:
IsSwap = true;
break;
case Mips::ATOMIC_LOAD_ADD_I8_POSTRA:
SEOp = Mips::SEB;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_ADD_I16_POSTRA:
Opcode = Mips::ADDu;
break;
case Mips::ATOMIC_LOAD_SUB_I8_POSTRA:
SEOp = Mips::SEB;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_SUB_I16_POSTRA:
Opcode = Mips::SUBu;
break;
case Mips::ATOMIC_LOAD_AND_I8_POSTRA:
SEOp = Mips::SEB;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_AND_I16_POSTRA:
Opcode = Mips::AND;
break;
case Mips::ATOMIC_LOAD_OR_I8_POSTRA:
SEOp = Mips::SEB;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_OR_I16_POSTRA:
Opcode = Mips::OR;
break;
case Mips::ATOMIC_LOAD_XOR_I8_POSTRA:
SEOp = Mips::SEB;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_XOR_I16_POSTRA:
Opcode = Mips::XOR;
break;
case Mips::ATOMIC_LOAD_UMIN_I8_POSTRA:
case Mips::ATOMIC_LOAD_UMIN_I16_POSTRA:
IsUnsigned = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_MIN_I8_POSTRA:
case Mips::ATOMIC_LOAD_MIN_I16_POSTRA:
IsMin = true;
@@ -398,7 +398,7 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword(
case Mips::ATOMIC_LOAD_UMAX_I8_POSTRA:
case Mips::ATOMIC_LOAD_UMAX_I16_POSTRA:
IsUnsigned = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_MAX_I8_POSTRA:
case Mips::ATOMIC_LOAD_MAX_I16_POSTRA:
IsMax = true;
@@ -694,7 +694,7 @@ bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB,
case Mips::ATOMIC_LOAD_UMIN_I32_POSTRA:
case Mips::ATOMIC_LOAD_UMIN_I64_POSTRA:
IsUnsigned = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_MIN_I32_POSTRA:
case Mips::ATOMIC_LOAD_MIN_I64_POSTRA:
IsMin = true;
@@ -702,7 +702,7 @@ bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB,
case Mips::ATOMIC_LOAD_UMAX_I32_POSTRA:
case Mips::ATOMIC_LOAD_UMAX_I64_POSTRA:
IsUnsigned = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::ATOMIC_LOAD_MAX_I32_POSTRA:
case Mips::ATOMIC_LOAD_MAX_I64_POSTRA:
IsMax = true;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 4dfc16526a000..05f411c9e4cd3 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3323,19 +3323,19 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
break;
case CCValAssign::SExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
break;
case CCValAssign::ZExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
break;
case CCValAssign::AExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
break;
@@ -3854,19 +3854,19 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
break;
case CCValAssign::AExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::AExt:
Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
break;
case CCValAssign::ZExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::ZExt:
Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
break;
case CCValAssign::SExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::SExt:
Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
break;
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 1124111c1a6ed..df357506b34f2 100644
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -172,7 +172,7 @@ void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
MI.addOperand(MachineOperand::CreateReg(Mips::SP, false, true));
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Mips::BuildPairF64:
case Mips::ExtractElementF64:
if (Subtarget->isABI_FPXX() && !Subtarget->hasMTHC1())
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 45e82e9357727..7fada302e05ff 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -2017,7 +2017,7 @@ NVPTXAsmPrinter::lowerConstantForGV(const Constant *CV, bool ProcessingGeneric)
     // expression properly.  This is important for differences between
// blockaddress labels. Since the two labels are in the same function, it
// is reasonable to treat their delta as a 32-bit value.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::BitCast:
return lowerConstantForGV(CE->getOperand(0), ProcessingGeneric);
diff --git a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
index 7113fe33b5d7a..915f689ac688c 100644
--- a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
+++ b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
@@ -1439,7 +1439,7 @@ bool PPCAsmParser::ParseOperand(OperandVector &Operands) {
if (!ParseExpression(EVal))
break;
// Fall-through
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return Error(S, "unknown operand");
}
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index bfdbdcb0254e5..e8d54b4c95a01 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -912,7 +912,7 @@ bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
case MVT::i8:
case MVT::i16:
NeedsExt = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::i32:
if (!UseImm)
CmpOpc = IsZExt ? PPC::CMPLW : PPC::CMPW;
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 14c4fd3a9ffad..e1264d949b799 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -2772,7 +2772,7 @@ class IntegerCompareEliminator {
if (CmpInGPR == ICGPR_Sext || CmpInGPR == ICGPR_SextI32 ||
CmpInGPR == ICGPR_SextI64)
return nullptr;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SIGN_EXTEND:
if (CmpInGPR == ICGPR_Zext || CmpInGPR == ICGPR_ZextI32 ||
CmpInGPR == ICGPR_ZextI64)
@@ -3200,7 +3200,7 @@ IntegerCompareEliminator::get32BitZExtCompare(SDValue LHS, SDValue RHS,
std::swap(LHS, RHS);
ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
IsRHSZero = RHSConst && RHSConst->isZero();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::SETLE: {
if (CmpInGPR == ICGPR_NonExtIn)
@@ -3251,7 +3251,7 @@ IntegerCompareEliminator::get32BitZExtCompare(SDValue LHS, SDValue RHS,
ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
IsRHSZero = RHSConst && RHSConst->isZero();
IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::SETLT: {
// (zext (setcc %a, %b, setlt)) -> (lshr (sub %a, %b), 63)
@@ -3286,7 +3286,7 @@ IntegerCompareEliminator::get32BitZExtCompare(SDValue LHS, SDValue RHS,
// (zext (setcc %a, %b, setuge)) -> (xor (lshr (sub %b, %a), 63), 1)
// (zext (setcc %a, %b, setule)) -> (xor (lshr (sub %a, %b), 63), 1)
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETULE: {
if (CmpInGPR == ICGPR_NonExtIn)
return SDValue();
@@ -3306,7 +3306,7 @@ IntegerCompareEliminator::get32BitZExtCompare(SDValue LHS, SDValue RHS,
// (zext (setcc %a, %b, setugt)) -> (lshr (sub %b, %a), 63)
// (zext (setcc %a, %b, setult)) -> (lshr (sub %a, %b), 63)
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETULT: {
if (CmpInGPR == ICGPR_NonExtIn)
return SDValue();
@@ -3384,7 +3384,7 @@ IntegerCompareEliminator::get32BitSExtCompare(SDValue LHS, SDValue RHS,
std::swap(LHS, RHS);
ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
IsRHSZero = RHSConst && RHSConst->isZero();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::SETLE: {
if (CmpInGPR == ICGPR_NonExtIn)
@@ -3430,7 +3430,7 @@ IntegerCompareEliminator::get32BitSExtCompare(SDValue LHS, SDValue RHS,
ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
IsRHSZero = RHSConst && RHSConst->isZero();
IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::SETLT: {
// (sext (setcc %a, %b, setgt)) -> (ashr (sub %a, %b), 63)
@@ -3459,7 +3459,7 @@ IntegerCompareEliminator::get32BitSExtCompare(SDValue LHS, SDValue RHS,
// (sext (setcc %a, %b, setuge)) -> (add (lshr (sub %a, %b), 63), -1)
// (sext (setcc %a, %b, setule)) -> (add (lshr (sub %b, %a), 63), -1)
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETULE: {
if (CmpInGPR == ICGPR_NonExtIn)
return SDValue();
@@ -3479,7 +3479,7 @@ IntegerCompareEliminator::get32BitSExtCompare(SDValue LHS, SDValue RHS,
// (sext (setcc %a, %b, setugt)) -> (ashr (sub %b, %a), 63)
// (sext (setcc %a, %b, setugt)) -> (ashr (sub %a, %b), 63)
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETULT: {
if (CmpInGPR == ICGPR_NonExtIn)
return SDValue();
@@ -3542,7 +3542,7 @@ IntegerCompareEliminator::get64BitZExtCompare(SDValue LHS, SDValue RHS,
std::swap(LHS, RHS);
ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
IsRHSZero = RHSConst && RHSConst->isZero();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::SETLE: {
// {subc.reg, subc.CA} = (subcarry %b, %a)
@@ -3585,7 +3585,7 @@ IntegerCompareEliminator::get64BitZExtCompare(SDValue LHS, SDValue RHS,
ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
IsRHSZero = RHSConst && RHSConst->isZero();
IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::SETLT: {
// {subc.reg, subc.CA} = (subcarry %a, %b)
@@ -3618,7 +3618,7 @@ IntegerCompareEliminator::get64BitZExtCompare(SDValue LHS, SDValue RHS,
// {subc.reg, subc.CA} = (subcarry %a, %b)
// (zext (setcc %a, %b, setuge)) -> (add (sube %b, %b, subc.CA), 1)
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETULE: {
// {subc.reg, subc.CA} = (subcarry %b, %a)
// (zext (setcc %a, %b, setule)) -> (add (sube %a, %a, subc.CA), 1)
@@ -3635,7 +3635,7 @@ IntegerCompareEliminator::get64BitZExtCompare(SDValue LHS, SDValue RHS,
// {subc.reg, subc.CA} = (subcarry %b, %a)
// (zext (setcc %a, %b, setugt)) -> -(sube %b, %b, subc.CA)
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETULT: {
// {subc.reg, subc.CA} = (subcarry %a, %b)
// (zext (setcc %a, %b, setult)) -> -(sube %a, %a, subc.CA)
@@ -3701,7 +3701,7 @@ IntegerCompareEliminator::get64BitSExtCompare(SDValue LHS, SDValue RHS,
std::swap(LHS, RHS);
ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
IsRHSZero = RHSConst && RHSConst->isZero();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::SETLE: {
// {subc.reg, subc.CA} = (subcarry %b, %a)
@@ -3745,7 +3745,7 @@ IntegerCompareEliminator::get64BitSExtCompare(SDValue LHS, SDValue RHS,
ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
IsRHSZero = RHSConst && RHSConst->isZero();
IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::SETLT: {
// {subc.reg, subc.CA} = (subcarry %a, %b)
@@ -3781,7 +3781,7 @@ IntegerCompareEliminator::get64BitSExtCompare(SDValue LHS, SDValue RHS,
// {subc.reg, subc.CA} = (subcarry %a, %b)
// (sext (setcc %a, %b, setuge)) -> ~(sube %b, %b, subc.CA)
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETULE: {
// {subc.reg, subc.CA} = (subcarry %b, %a)
// (sext (setcc %a, %b, setule)) -> ~(sube %a, %a, subc.CA)
@@ -3798,7 +3798,7 @@ IntegerCompareEliminator::get64BitSExtCompare(SDValue LHS, SDValue RHS,
// {subc.reg, subc.CA} = (subcarry %b, %a)
// (sext (setcc %a, %b, setugt)) -> (sube %b, %b, subc.CA)
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETULT: {
// {subc.reg, subc.CA} = (subcarry %a, %b)
// (sext (setcc %a, %b, setult)) -> (sube %a, %a, subc.CA)
@@ -4604,7 +4604,7 @@ static bool mayUseP9Setb(SDNode *N, const ISD::CondCode &CC, SelectionDAG *DAG,
if (!IsUnCmp && InnerCC != ISD::SETNE)
return false;
IsUnCmp = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETLT:
if (InnerCC == ISD::SETNE || (InnerCC == ISD::SETGT && !InnerSwapped) ||
(InnerCC == ISD::SETLT && InnerSwapped))
@@ -4623,7 +4623,7 @@ static bool mayUseP9Setb(SDNode *N, const ISD::CondCode &CC, SelectionDAG *DAG,
if (!IsUnCmp && InnerCC != ISD::SETNE)
return false;
IsUnCmp = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETGT:
if (InnerCC == ISD::SETNE || (InnerCC == ISD::SETLT && !InnerSwapped) ||
(InnerCC == ISD::SETGT && InnerSwapped))
@@ -6488,7 +6488,7 @@ void PPCDAGToDAGISel::PeepholeCROps() {
Op.getOperand(0) == Op.getOperand(1))
Op2Not = true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case PPC::BC:
case PPC::BCn:
@@ -7306,7 +7306,7 @@ void PPCDAGToDAGISel::PeepholePPC64() {
case PPC::DFLOADf64:
case PPC::DFLOADf32:
RequiresMod4Offset = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PPC::LBZ:
case PPC::LBZ8:
case PPC::LFD:
@@ -7324,7 +7324,7 @@ void PPCDAGToDAGISel::PeepholePPC64() {
case PPC::DFSTOREf64:
case PPC::DFSTOREf32:
RequiresMod4Offset = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PPC::STB:
case PPC::STB8:
case PPC::STFD:
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 19e248ec7ef23..8961b0546ce63 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -6646,7 +6646,7 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
case MVT::i64:
// i64 arguments should have been split to i32 for PPC32.
assert(IsPPC64 && "PPC32 should have split i64 values.");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::i1:
case MVT::i32: {
const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
@@ -7945,7 +7945,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
default: break; // SETUO etc aren't handled by fsel.
case ISD::SETNE:
std::swap(TV, FV);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETEQ:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
@@ -7957,7 +7957,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
case ISD::SETULT:
case ISD::SETLT:
std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOGE:
case ISD::SETGE:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
@@ -7966,7 +7966,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
case ISD::SETUGT:
case ISD::SETGT:
std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOLE:
case ISD::SETLE:
if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
@@ -7980,7 +7980,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
default: break; // SETUO etc aren't handled by fsel.
case ISD::SETNE:
std::swap(TV, FV);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETEQ:
Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
@@ -15140,7 +15140,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SETCC:
if (SDValue CSCC = combineSetCC(N, DCI))
return CSCC;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SELECT_CC:
return DAGCombineTruncBoolExt(N, DCI);
case ISD::SINT_TO_FP:
@@ -16957,7 +16957,7 @@ bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
// we can materialize all immediatess via XXSPLTI32DX and XXSPLTIDP.
return true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::ppcf128:
return Imm.isPosZero();
}
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index c85f57f04c7d1..de9cee0c31eb7 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -3022,7 +3022,7 @@ bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
// BUILD_UACC is expanded to 4 copies of the underlying vsx registers.
// So after building the 4 copies, we can replace the BUILD_UACC instruction
// with a NOP.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case PPC::KILL_PAIR: {
MI.setDesc(get(PPC::UNENCODED_NOP));
@@ -4259,7 +4259,7 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
}
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PPC::XFLOADf32:
III.ImmOpcode = PPC::DFLOADf32;
break;
@@ -4273,7 +4273,7 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
}
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PPC::XFLOADf64:
III.ImmOpcode = PPC::DFLOADf64;
break;
@@ -4291,7 +4291,7 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
}
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PPC::XFSTOREf32:
III.ImmOpcode = PPC::DFSTOREf32;
break;
@@ -4305,7 +4305,7 @@ bool PPCInstrInfo::instrHasImmForm(unsigned Opc, bool IsVFReg,
}
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PPC::XFSTOREf64:
III.ImmOpcode = PPC::DFSTOREf64;
break;
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index c8945879d0bc3..6a86cfa50121d 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -256,12 +256,12 @@ InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
return TTI::TCC_Free;
case Instruction::And:
RunFree = true; // (for the rotate-and-mask instructions)
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Add:
case Instruction::Or:
case Instruction::Xor:
ShiftedFree = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Sub:
case Instruction::Mul:
case Instruction::Shl:
@@ -273,7 +273,7 @@ InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
UnsignedFree = true;
ImmIdx = 1;
// Zero comparisons can use record-form instructions.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Select:
ZeroFree = true;
break;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ac713d95d579f..1caf089c2fa00 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7110,7 +7110,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
return;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ISD::ADD:
case ISD::SUB:
@@ -9406,7 +9406,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SRA:
if (SDValue V = performSRACombine(N, DAG, Subtarget))
return V;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SRL:
case ISD::SHL: {
SDValue ShAmt = N->getOperand(1);
diff --git a/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp b/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
index 920729e9ebbf2..319967a8ad75b 100644
--- a/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
@@ -275,7 +275,7 @@ static bool isSignExtendingOpW(MachineInstr &MI, MachineRegisterInfo &MRI,
// SLLIW reads the lowest 5 bits, while SLLI reads lowest 6 bits
if (MI.getOperand(2).getImm() >= 32)
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case RISCV::ADD:
case RISCV::LD:
case RISCV::LWU:
@@ -337,7 +337,7 @@ static bool isSignExtendedW(MachineInstr &OrigMI, MachineRegisterInfo &MRI,
case RISCV::BSETI:
if (MI->getOperand(2).getImm() >= 31)
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case RISCV::REM:
case RISCV::ANDI:
case RISCV::ORI:
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index d6f5175ee8741..cc360483053c1 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -67,7 +67,7 @@ InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
// zext.w
if (Imm == UINT64_C(0xffffffff) && ST->hasStdExtZba())
return TTI::TCC_Free;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Add:
case Instruction::Or:
case Instruction::Xor:
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
index 7b2d8afd3605b..e75acc3441c12 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
@@ -279,7 +279,7 @@ namespace {
case Sparc::fixup_sparc_wplt30:
if (Target.getSymA()->getSymbol().isTemporary())
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Sparc::fixup_sparc_tls_gd_hi22:
case Sparc::fixup_sparc_tls_gd_lo10:
case Sparc::fixup_sparc_tls_gd_add:
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp
index cc73ea7e61205..522a8877a1c60 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp
@@ -227,7 +227,7 @@ void SparcMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
auto ELFSymbol = cast<MCSymbolELF>(Symbol);
if (!ELFSymbol->isBindingSet())
ELFSymbol->setBinding(ELF::STB_GLOBAL);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case VK_Sparc_TLS_GD_HI22:
case VK_Sparc_TLS_GD_LO10:
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index a8a0b2cc9e674..595670735c74b 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -117,19 +117,19 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
case SPCC::CPCC_A: return SPCC::CPCC_N;
case SPCC::CPCC_N: return SPCC::CPCC_A;
- case SPCC::CPCC_3: LLVM_FALLTHROUGH;
- case SPCC::CPCC_2: LLVM_FALLTHROUGH;
- case SPCC::CPCC_23: LLVM_FALLTHROUGH;
- case SPCC::CPCC_1: LLVM_FALLTHROUGH;
- case SPCC::CPCC_13: LLVM_FALLTHROUGH;
- case SPCC::CPCC_12: LLVM_FALLTHROUGH;
- case SPCC::CPCC_123: LLVM_FALLTHROUGH;
- case SPCC::CPCC_0: LLVM_FALLTHROUGH;
- case SPCC::CPCC_03: LLVM_FALLTHROUGH;
- case SPCC::CPCC_02: LLVM_FALLTHROUGH;
- case SPCC::CPCC_023: LLVM_FALLTHROUGH;
- case SPCC::CPCC_01: LLVM_FALLTHROUGH;
- case SPCC::CPCC_013: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_3: [[fallthrough]];
+ case SPCC::CPCC_2: [[fallthrough]];
+ case SPCC::CPCC_23: [[fallthrough]];
+ case SPCC::CPCC_1: [[fallthrough]];
+ case SPCC::CPCC_13: [[fallthrough]];
+ case SPCC::CPCC_12: [[fallthrough]];
+ case SPCC::CPCC_123: [[fallthrough]];
+ case SPCC::CPCC_0: [[fallthrough]];
+ case SPCC::CPCC_03: [[fallthrough]];
+ case SPCC::CPCC_02: [[fallthrough]];
+ case SPCC::CPCC_023: [[fallthrough]];
+ case SPCC::CPCC_01: [[fallthrough]];
+ case SPCC::CPCC_013: [[fallthrough]];
case SPCC::CPCC_012:
// "Opposite" code is not meaningful, as we don't know
// what the CoProc condition means here. The cond-code will
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 9ac7eafd5f34e..d976cd7b3d1b9 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -860,7 +860,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
RxSBG.Input = N.getOperand(0);
return true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SIGN_EXTEND: {
// Check that the extension bits are don't-care (i.e. are masked out
@@ -1349,7 +1349,7 @@ bool SystemZDAGToDAGISel::tryFoldLoadStoreIntoMemOperand(SDNode *Node) {
return false;
case SystemZISD::SSUBO:
NegateOperand = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SystemZISD::SADDO:
if (MemVT == MVT::i32)
NewOpc = SystemZ::ASI;
@@ -1360,7 +1360,7 @@ bool SystemZDAGToDAGISel::tryFoldLoadStoreIntoMemOperand(SDNode *Node) {
break;
case SystemZISD::USUBO:
NegateOperand = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SystemZISD::UADDO:
if (MemVT == MVT::i32)
NewOpc = SystemZ::ALSI;
@@ -1562,7 +1562,7 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
if (Node->getOperand(1).getOpcode() != ISD::Constant)
if (tryRxSBG(Node, SystemZ::RNSBG))
return;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ROTL:
case ISD::SHL:
case ISD::SRL:
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index ac4531262187c..32880d7305ed1 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3037,7 +3037,7 @@ SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
// Handle tests for order using (or (ogt y x) (oge x y)).
case ISD::SETUO:
Invert = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETO: {
assert(IsFP && "Unexpected integer comparison");
SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
@@ -3054,7 +3054,7 @@ SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
// Handle <> tests using (or (ogt y x) (ogt x y)).
case ISD::SETUEQ:
Invert = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETONE: {
assert(IsFP && "Unexpected integer comparison");
SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
@@ -7320,7 +7320,7 @@ SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case Intrinsic::s390_vupllh:
case Intrinsic::s390_vupllf:
IsLogical = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH
case Intrinsic::s390_vuphh:
case Intrinsic::s390_vuphf:
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 1436be1e40523..760a6f6823b06 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -629,7 +629,7 @@ bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
switch (UseOpc) {
case SystemZ::SELRMux:
TieOps = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SystemZ::LOCRMux:
if (!STI.hasLoadStoreOnCond2())
return false;
@@ -643,7 +643,7 @@ bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
break;
case SystemZ::SELGR:
TieOps = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SystemZ::LOCGR:
if (!STI.hasLoadStoreOnCond2())
return false;
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
index 69914049a00c3..8b079e4e70018 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
@@ -1056,7 +1056,7 @@ isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {
case Instruction::ICmp:
if (LoadedBits == 32 && ZExtBits == 64)
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Mul: // SE: 16->32, 32->64, z14:16->64
if (UserI->getOpcode() != Instruction::ICmp) {
if (LoadedBits == 16 &&
@@ -1066,11 +1066,11 @@ isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {
if (LoadOrTruncBits == 16)
return true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::SDiv:// SE: 32->64
if (LoadedBits == 32 && SExtBits == 64)
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::UDiv:
case Instruction::And:
case Instruction::Or:
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index 2eea65033870d..75eb657a57123 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -2710,7 +2710,7 @@ static bool isI32Insn(const SDNode *User, const SDNode *N) {
if (User->getOperand(2).getNode() != N &&
User->getOperand(3).getNode() != N)
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::AND:
case ISD::OR:
case ISD::XOR:
diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
index d31715e367ece..16e877812236f 100644
--- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
+++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
@@ -184,7 +184,7 @@ bool WebAssemblyAsmTypeCheck::getGlobal(SMLoc ErrorLoc, const MCInst &Inst,
default:
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return typeError(ErrorLoc, StringRef("symbol ") + WasmSym->getName() +
" missing .globaltype");
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
index d2eb4b29e9fd6..2814404b0222d 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
@@ -1501,7 +1501,7 @@ void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) {
std::next(WebAssembly::findCatch(EHPad)->getReverseIterator());
if (NextIt != EHPad->rend())
Worklist.push_back(NextIt);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case WebAssembly::END_BLOCK:
case WebAssembly::END_LOOP:
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 8ddd414b043ae..c21eee8d56e2f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -1260,7 +1260,7 @@ bool WebAssemblyFastISel::selectStore(const Instruction *I) {
switch (getSimpleType(Store->getValueOperand()->getType())) {
case MVT::i1:
VTIsi1 = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::i8:
Opc = A64 ? WebAssembly::STORE8_I32_A64 : WebAssembly::STORE8_I32_A32;
break;
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index d0a3a1827435c..2742a7525ce5c 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -1921,7 +1921,7 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
break;
return Error(Tok.getLoc(), "unknown token in expression");
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AsmToken::String: {
if (Parser.isParsingMasm()) {
// MASM parsers handle strings in expressions as constants.
@@ -1937,7 +1937,7 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
return Error(ValueLoc, ErrMsg);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case AsmToken::At:
case AsmToken::Identifier: {
diff --git a/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 1da6bf86397ec..ed5c644d860b9 100644
--- a/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -713,7 +713,7 @@ static int readModRM(struct InternalInstruction *insn) {
break;
case 0x1:
insn->displacementSize = 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 0x2:
insn->eaDisplacement = (mod == 0x1 ? EA_DISP_8 : EA_DISP_32);
switch (rm & 7) {
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
index da90befb23209..c5eb81a79397a 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
@@ -310,7 +310,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA4_PACKED_RR(FMADD)
CASE_FMA4_SCALAR_RR(FMADD)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA4_PACKED_RM(FMADD)
CASE_FMA4_SCALAR_RM(FMADD)
Mul2Name = getRegName(MI->getOperand(2).getReg());
@@ -325,7 +325,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA4_PACKED_RR(FMSUB)
CASE_FMA4_SCALAR_RR(FMSUB)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA4_PACKED_RM(FMSUB)
CASE_FMA4_SCALAR_RM(FMSUB)
Mul2Name = getRegName(MI->getOperand(2).getReg());
@@ -342,7 +342,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA4_PACKED_RR(FNMADD)
CASE_FMA4_SCALAR_RR(FNMADD)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA4_PACKED_RM(FNMADD)
CASE_FMA4_SCALAR_RM(FNMADD)
Mul2Name = getRegName(MI->getOperand(2).getReg());
@@ -359,7 +359,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA4_PACKED_RR(FNMSUB)
CASE_FMA4_SCALAR_RR(FNMSUB)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA4_PACKED_RM(FNMSUB)
CASE_FMA4_SCALAR_RM(FNMSUB)
Mul2Name = getRegName(MI->getOperand(2).getReg());
@@ -377,7 +377,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA4_PACKED_RR(FMADDSUB)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA4_PACKED_RM(FMADDSUB)
Mul2Name = getRegName(MI->getOperand(2).getReg());
Mul1Name = getRegName(MI->getOperand(1).getReg());
@@ -391,7 +391,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA4_PACKED_RR(FMSUBADD)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA4_PACKED_RM(FMSUBADD)
Mul2Name = getRegName(MI->getOperand(2).getReg());
Mul1Name = getRegName(MI->getOperand(1).getReg());
@@ -407,7 +407,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FMADD132)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMADD132)
CASE_FMA_SCALAR_MEM(FMADD132)
AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -418,7 +418,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FMADD213)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMADD213)
CASE_FMA_SCALAR_MEM(FMADD213)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -429,7 +429,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FMADD231)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMADD231)
CASE_FMA_SCALAR_MEM(FMADD231)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -440,7 +440,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FMSUB132)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMSUB132)
CASE_FMA_SCALAR_MEM(FMSUB132)
AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -452,7 +452,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FMSUB213)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMSUB213)
CASE_FMA_SCALAR_MEM(FMSUB213)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -464,7 +464,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FMSUB231)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMSUB231)
CASE_FMA_SCALAR_MEM(FMSUB231)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -476,7 +476,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FNMADD132)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FNMADD132)
CASE_FMA_SCALAR_MEM(FNMADD132)
AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -488,7 +488,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FNMADD213)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FNMADD213)
CASE_FMA_SCALAR_MEM(FNMADD213)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -500,7 +500,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FNMADD231)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FNMADD231)
CASE_FMA_SCALAR_MEM(FNMADD231)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -512,7 +512,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FNMSUB132)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FNMSUB132)
CASE_FMA_SCALAR_MEM(FNMSUB132)
AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -525,7 +525,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FNMSUB213)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FNMSUB213)
CASE_FMA_SCALAR_MEM(FNMSUB213)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -538,7 +538,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_SCALAR_REG(FNMSUB231)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FNMSUB231)
CASE_FMA_SCALAR_MEM(FNMSUB231)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -550,7 +550,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_PACKED_REG(FMADDSUB132)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMADDSUB132)
AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
Mul1Name = getRegName(MI->getOperand(1).getReg());
@@ -560,7 +560,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_PACKED_REG(FMADDSUB213)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMADDSUB213)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
Mul2Name = getRegName(MI->getOperand(1).getReg());
@@ -570,7 +570,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_PACKED_REG(FMADDSUB231)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMADDSUB231)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
AccName = getRegName(MI->getOperand(1).getReg());
@@ -580,7 +580,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_PACKED_REG(FMSUBADD132)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMSUBADD132)
AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
Mul1Name = getRegName(MI->getOperand(1).getReg());
@@ -590,7 +590,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_PACKED_REG(FMSUBADD213)
AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMSUBADD213)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
Mul2Name = getRegName(MI->getOperand(1).getReg());
@@ -600,7 +600,7 @@ static bool printFMAComments(const MCInst *MI, raw_ostream &OS,
CASE_FMA_PACKED_REG(FMSUBADD231)
Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_FMA_PACKED_MEM(FMSUBADD231)
Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
AccName = getRegName(MI->getOperand(1).getReg());
@@ -655,7 +655,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VBLENDPDrri:
case X86::VBLENDPDYrri:
Src2Name = getRegName(MI->getOperand(2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::BLENDPDrmi:
case X86::VBLENDPDrmi:
case X86::VBLENDPDYrmi:
@@ -671,7 +671,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VBLENDPSrri:
case X86::VBLENDPSYrri:
Src2Name = getRegName(MI->getOperand(2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::BLENDPSrmi:
case X86::VBLENDPSrmi:
case X86::VBLENDPSYrmi:
@@ -687,7 +687,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPBLENDWrri:
case X86::VPBLENDWYrri:
Src2Name = getRegName(MI->getOperand(2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::PBLENDWrmi:
case X86::VPBLENDWrmi:
case X86::VPBLENDWYrmi:
@@ -702,7 +702,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPBLENDDrri:
case X86::VPBLENDDYrri:
Src2Name = getRegName(MI->getOperand(2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::VPBLENDDrmi:
case X86::VPBLENDDYrmi:
if (MI->getOperand(NumOperands - 1).isImm())
@@ -717,7 +717,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VINSERTPSrr:
case X86::VINSERTPSZrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::INSERTPSrm:
case X86::VINSERTPSrm:
case X86::VINSERTPSZrm:
@@ -780,7 +780,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_MOVDUP(MOVSLDUP, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_MOVDUP(MOVSLDUP, m)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -789,7 +789,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_MOVDUP(MOVSHDUP, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_MOVDUP(MOVSHDUP, m)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -798,7 +798,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_MOVDUP(MOVDDUP, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_MOVDUP(MOVDDUP, m)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -812,7 +812,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPSLLDQZ256ri:
case X86::VPSLLDQZri:
Src1Name = getRegName(MI->getOperand(1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::VPSLLDQZ128mi:
case X86::VPSLLDQZ256mi:
case X86::VPSLLDQZmi:
@@ -830,7 +830,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPSRLDQZ256ri:
case X86::VPSRLDQZri:
Src1Name = getRegName(MI->getOperand(1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::VPSRLDQZ128mi:
case X86::VPSRLDQZ256mi:
case X86::VPSRLDQZmi:
@@ -844,7 +844,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_SHUF(PALIGNR, rri)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_SHUF(PALIGNR, rmi)
Src2Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
@@ -860,7 +860,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_AVX512_INS_COMMON(ALIGNQ, Z128, rri)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_AVX512_INS_COMMON(ALIGNQ, Z, rmi)
CASE_AVX512_INS_COMMON(ALIGNQ, Z256, rmi)
@@ -878,7 +878,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_AVX512_INS_COMMON(ALIGND, Z128, rri)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_AVX512_INS_COMMON(ALIGND, Z, rmi)
CASE_AVX512_INS_COMMON(ALIGND, Z256, rmi)
@@ -893,7 +893,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_SHUF(PSHUFD, ri)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_SHUF(PSHUFD, mi)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -905,7 +905,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_SHUF(PSHUFHW, ri)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_SHUF(PSHUFHW, mi)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -917,7 +917,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_SHUF(PSHUFLW, ri)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_SHUF(PSHUFLW, mi)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -929,7 +929,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MMX_PSHUFWri:
Src1Name = getRegName(MI->getOperand(1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::MMX_PSHUFWmi:
DestName = getRegName(MI->getOperand(0).getReg());
@@ -940,7 +940,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSWAPDrr:
Src1Name = getRegName(MI->getOperand(1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::PSWAPDrm:
DestName = getRegName(MI->getOperand(0).getReg());
@@ -951,7 +951,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MMX_PUNPCKHBWrr:
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(PUNPCKHBW, m)
case X86::MMX_PUNPCKHBWrm:
@@ -964,7 +964,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MMX_PUNPCKHWDrr:
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(PUNPCKHWD, m)
case X86::MMX_PUNPCKHWDrm:
@@ -977,7 +977,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MMX_PUNPCKHDQrr:
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(PUNPCKHDQ, m)
case X86::MMX_PUNPCKHDQrm:
@@ -989,7 +989,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_UNPCK(PUNPCKHQDQ, r)
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(PUNPCKHQDQ, m)
Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -1001,7 +1001,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MMX_PUNPCKLBWrr:
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(PUNPCKLBW, m)
case X86::MMX_PUNPCKLBWrm:
@@ -1014,7 +1014,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MMX_PUNPCKLWDrr:
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(PUNPCKLWD, m)
case X86::MMX_PUNPCKLWDrm:
@@ -1027,7 +1027,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MMX_PUNPCKLDQrr:
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(PUNPCKLDQ, m)
case X86::MMX_PUNPCKLDQrm:
@@ -1039,7 +1039,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_UNPCK(PUNPCKLQDQ, r)
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(PUNPCKLQDQ, m)
Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
@@ -1050,7 +1050,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_SHUF(SHUFPD, rri)
Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_SHUF(SHUFPD, rmi)
if (MI->getOperand(NumOperands - 1).isImm())
@@ -1063,7 +1063,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_SHUF(SHUFPS, rri)
Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_SHUF(SHUFPS, rmi)
if (MI->getOperand(NumOperands - 1).isImm())
@@ -1077,7 +1077,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_VSHUF(64X2, r)
Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_VSHUF(64X2, m)
decodeVSHUF64x2FamilyMask(getRegOperandNumElts(MI, 64, 0), 64,
@@ -1090,7 +1090,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_VSHUF(32X4, r)
Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_VSHUF(32X4, m)
decodeVSHUF64x2FamilyMask(getRegOperandNumElts(MI, 32, 0), 32,
@@ -1103,7 +1103,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_UNPCK(UNPCKLPD, r)
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(UNPCKLPD, m)
DecodeUNPCKLMask(getRegOperandNumElts(MI, 64, 0), 64, ShuffleMask);
@@ -1114,7 +1114,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_UNPCK(UNPCKLPS, r)
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(UNPCKLPS, m)
DecodeUNPCKLMask(getRegOperandNumElts(MI, 32, 0), 32, ShuffleMask);
@@ -1125,7 +1125,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_UNPCK(UNPCKHPD, r)
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(UNPCKHPD, m)
DecodeUNPCKHMask(getRegOperandNumElts(MI, 64, 0), 64, ShuffleMask);
@@ -1136,7 +1136,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_UNPCK(UNPCKHPS, r)
Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
RegForm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_UNPCK(UNPCKHPS, m)
DecodeUNPCKHMask(getRegOperandNumElts(MI, 32, 0), 32, ShuffleMask);
@@ -1146,7 +1146,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_VPERMILPI(PERMILPS, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_VPERMILPI(PERMILPS, m)
if (MI->getOperand(NumOperands - 1).isImm())
@@ -1158,7 +1158,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_VPERMILPI(PERMILPD, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_VPERMILPI(PERMILPD, m)
if (MI->getOperand(NumOperands - 1).isImm())
@@ -1171,7 +1171,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPERM2F128rr:
case X86::VPERM2I128rr:
Src2Name = getRegName(MI->getOperand(2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::VPERM2F128rm:
case X86::VPERM2I128rm:
@@ -1185,7 +1185,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_VPERM(PERMPD, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_VPERM(PERMPD, m)
if (MI->getOperand(NumOperands - 1).isImm())
@@ -1197,7 +1197,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_VPERM(PERMQ, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_VPERM(PERMQ, m)
if (MI->getOperand(NumOperands - 1).isImm())
@@ -1212,7 +1212,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VMOVSDZrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::MOVSDrm_alt:
case X86::MOVSDrm:
@@ -1229,7 +1229,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VMOVSSZrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::MOVSSrm:
case X86::MOVSSrm_alt:
@@ -1248,7 +1248,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VMOVZPQILo2PQIrr:
case X86::VMOVZPQILo2PQIZrr:
Src1Name = getRegName(MI->getOperand(1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::MOVQI2PQIrm:
case X86::VMOVQI2PQIrm:
@@ -1319,7 +1319,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
break;
CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z128, rr)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z128, rm)
DecodeSubVectorBroadcast(4, 2, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
@@ -1327,7 +1327,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_AVX512_INS_COMMON(BROADCASTF32X2, Z256, rr)
CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z256, rr)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_AVX512_INS_COMMON(BROADCASTF32X2, Z256, rm)
CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z256, rm)
DecodeSubVectorBroadcast(8, 2, ShuffleMask);
@@ -1336,7 +1336,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_AVX512_INS_COMMON(BROADCASTF32X2, Z, rr)
CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z, rr)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_AVX512_INS_COMMON(BROADCASTF32X2, Z, rm)
CASE_AVX512_INS_COMMON(BROADCASTI32X2, Z, rm)
DecodeSubVectorBroadcast(16, 2, ShuffleMask);
@@ -1345,7 +1345,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_PMOVZX(PMOVZXBW, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_PMOVZX(PMOVZXBW, m)
DecodeZeroExtendMask(8, 16, getRegOperandNumElts(MI, 16, 0), false,
ShuffleMask);
@@ -1354,7 +1354,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_PMOVZX(PMOVZXBD, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_PMOVZX(PMOVZXBD, m)
DecodeZeroExtendMask(8, 32, getRegOperandNumElts(MI, 32, 0), false,
ShuffleMask);
@@ -1363,7 +1363,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_PMOVZX(PMOVZXBQ, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_PMOVZX(PMOVZXBQ, m)
DecodeZeroExtendMask(8, 64, getRegOperandNumElts(MI, 64, 0), false,
ShuffleMask);
@@ -1372,7 +1372,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_PMOVZX(PMOVZXWD, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_PMOVZX(PMOVZXWD, m)
DecodeZeroExtendMask(16, 32, getRegOperandNumElts(MI, 32, 0), false,
ShuffleMask);
@@ -1381,7 +1381,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_PMOVZX(PMOVZXWQ, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_PMOVZX(PMOVZXWQ, m)
DecodeZeroExtendMask(16, 64, getRegOperandNumElts(MI, 64, 0), false,
ShuffleMask);
@@ -1390,7 +1390,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_PMOVZX(PMOVZXDQ, r)
Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
CASE_PMOVZX(PMOVZXDQ, m)
DecodeZeroExtendMask(32, 64, getRegOperandNumElts(MI, 64, 0), false,
ShuffleMask);
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index a21bb6da86de0..87ec927dc9c81 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -1373,7 +1373,7 @@ void X86MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
--NumOps; // Drop the operand from the end.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86II::RawFrm:
emitByte(BaseOpcode + OpcodeOffset, OS);
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 3fc0eec8d1ed2..d0db28dc9219e 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -474,7 +474,7 @@ static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO,
break;
case 'V':
EmitPercent = false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'q':
// Print 64-bit register names if 64-bit integer registers are available.
// Otherwise, print 32-bit register names.
diff --git a/llvm/lib/Target/X86/X86DynAllocaExpander.cpp b/llvm/lib/Target/X86/X86DynAllocaExpander.cpp
index c8ceebb8b8e65..357189c5ea8df 100644
--- a/llvm/lib/Target/X86/X86DynAllocaExpander.cpp
+++ b/llvm/lib/Target/X86/X86DynAllocaExpander.cpp
@@ -231,7 +231,7 @@ void X86DynAllocaExpander::lower(MachineInstr *MI, Lowering L) {
break;
// Fall through to make any remaining adjustment.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Sub:
assert(Amount > 0);
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index f2c362eeaa485..7c307300b4ed8 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -188,15 +188,15 @@ getX86SSEConditionCode(CmpInst::Predicate Predicate) {
switch (Predicate) {
default: llvm_unreachable("Unexpected predicate");
case CmpInst::FCMP_OEQ: CC = 0; break;
- case CmpInst::FCMP_OGT: NeedSwap = true; LLVM_FALLTHROUGH;
+ case CmpInst::FCMP_OGT: NeedSwap = true; [[fallthrough]];
case CmpInst::FCMP_OLT: CC = 1; break;
- case CmpInst::FCMP_OGE: NeedSwap = true; LLVM_FALLTHROUGH;
+ case CmpInst::FCMP_OGE: NeedSwap = true; [[fallthrough]];
case CmpInst::FCMP_OLE: CC = 2; break;
case CmpInst::FCMP_UNO: CC = 3; break;
case CmpInst::FCMP_UNE: CC = 4; break;
- case CmpInst::FCMP_ULE: NeedSwap = true; LLVM_FALLTHROUGH;
+ case CmpInst::FCMP_ULE: NeedSwap = true; [[fallthrough]];
case CmpInst::FCMP_UGE: CC = 5; break;
- case CmpInst::FCMP_ULT: NeedSwap = true; LLVM_FALLTHROUGH;
+ case CmpInst::FCMP_ULT: NeedSwap = true; [[fallthrough]];
case CmpInst::FCMP_UGT: CC = 6; break;
case CmpInst::FCMP_ORD: CC = 7; break;
case CmpInst::FCMP_UEQ: CC = 8; break;
@@ -500,7 +500,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
TII.get(X86::AND8ri), AndResult)
.addReg(ValReg).addImm(1);
ValReg = AndResult;
- LLVM_FALLTHROUGH; // handle i1 as i8.
+ [[fallthrough]]; // handle i1 as i8.
}
case MVT::i8: Opc = X86::MOV8mr; break;
case MVT::i16: Opc = X86::MOV16mr; break;
@@ -666,7 +666,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
default: break;
case MVT::i1:
Signed = false;
- LLVM_FALLTHROUGH; // Handle as i8.
+ [[fallthrough]]; // Handle as i8.
case MVT::i8: Opc = X86::MOV8mi; break;
case MVT::i16: Opc = X86::MOV16mi; break;
case MVT::i32: Opc = X86::MOV32mi; break;
@@ -1688,7 +1688,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
default: break;
case CmpInst::FCMP_OEQ:
std::swap(TrueMBB, FalseMBB);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CmpInst::FCMP_UNE:
NeedExtraBranch = true;
Predicate = CmpInst::FCMP_ONE;
@@ -3117,7 +3117,7 @@ bool X86FastISel::fastLowerArguments() {
default: llvm_unreachable("Unexpected value type.");
case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
- case MVT::f32: LLVM_FALLTHROUGH;
+ case MVT::f32: [[fallthrough]];
case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
}
Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
@@ -3697,7 +3697,7 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
default: llvm_unreachable("Unexpected value type");
case MVT::i1:
VT = MVT::i8;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::i8: Opc = X86::MOV8ri; break;
case MVT::i16: Opc = X86::MOV16ri; break;
case MVT::i32: Opc = X86::MOV32ri; break;
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index a4e27a6416e7d..ee1df16d909b8 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1533,7 +1533,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
.addUse(X86::NoRegister);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SwiftAsyncFramePointerMode::Always:
BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8), MachineFramePtr)
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 3927737cb4f8b..c8bc586c18b00 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2313,7 +2313,7 @@ bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
case ISD::UMUL_LOHI:
// A mul_lohi where we need the low part can be folded as a plain multiply.
if (N.getResNo() != 0) break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::MUL:
case X86ISD::MUL_IMM:
// X*[3,5,9] -> X+X*[2,4,8]
@@ -3310,7 +3310,7 @@ bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
MVT::Other, Ops);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86ISD::ADD:
// Try to match inc/dec.
if (!Subtarget->slowIncDec() || CurDAG->shouldOptForSize()) {
@@ -3328,7 +3328,7 @@ bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
break;
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86ISD::ADC:
case X86ISD::SBB:
case X86ISD::AND:
@@ -4962,7 +4962,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
case ISD::SRL:
if (matchBitExtract(Node))
return;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SRA:
case ISD::SHL:
if (tryShiftAmountMod(Node))
@@ -5005,7 +5005,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
if (AndImmShrink && shrinkAndImmediate(Node))
return;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::OR:
case ISD::XOR:
if (tryShrinkShlLogicImm(Node))
@@ -5015,7 +5015,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
if (tryVPTERNLOG(Node))
return;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ADD:
case ISD::SUB: {
// Try to avoid folding immediates with multiple uses for optsize.
@@ -5126,7 +5126,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
// i16/i32/i64 are handled with isel patterns.
if (NVT != MVT::i8)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86ISD::UMUL: {
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 78279cbf5e6a4..b6729d50d7df7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -12803,11 +12803,11 @@ static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
case MVT::v4i64:
case MVT::v8i32:
assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::v4f64:
case MVT::v8f32:
assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::v2f64:
case MVT::v2i64:
case MVT::v4f32:
@@ -12843,11 +12843,11 @@ static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
MVT::v16i16, DL, Lo, Hi,
{0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case MVT::v32i8:
assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::v16i8: {
assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
@@ -23615,7 +23615,7 @@ static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
case ISD::SHL:
if (Op.getNode()->getFlags().hasNoSignedWrap())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
NeedOF = true;
break;
@@ -23646,7 +23646,7 @@ static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
if (!hasNonFlagsUse(Op))
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::ADD:
case ISD::SUB:
case ISD::OR:
@@ -24057,19 +24057,19 @@ static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
case ISD::SETOEQ:
case ISD::SETEQ: SSECC = 0; break;
case ISD::SETOGT:
- case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETGT: Swap = true; [[fallthrough]];
case ISD::SETLT:
case ISD::SETOLT: SSECC = 1; break;
case ISD::SETOGE:
- case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETGE: Swap = true; [[fallthrough]];
case ISD::SETLE:
case ISD::SETOLE: SSECC = 2; break;
case ISD::SETUO: SSECC = 3; break;
case ISD::SETUNE:
case ISD::SETNE: SSECC = 4; break;
- case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETULE: Swap = true; [[fallthrough]];
case ISD::SETUGE: SSECC = 5; break;
- case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
+ case ISD::SETULT: Swap = true; [[fallthrough]];
case ISD::SETUGT: SSECC = 6; break;
case ISD::SETO: SSECC = 7; break;
case ISD::SETUEQ: SSECC = 8; break;
@@ -24521,9 +24521,9 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
unsigned Opc;
switch (Cond) {
default: llvm_unreachable("Unexpected condition code");
- case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
+ case ISD::SETUGT: Invert = true; [[fallthrough]];
case ISD::SETULE: Opc = ISD::UMIN; break;
- case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
+ case ISD::SETULT: Invert = true; [[fallthrough]];
case ISD::SETUGE: Opc = ISD::UMAX; break;
}
@@ -27164,7 +27164,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::x86_avx_vtestz_ps_256:
case Intrinsic::x86_avx_vtestz_pd_256:
TestOpc = X86ISD::TESTP;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::x86_sse41_ptestz:
case Intrinsic::x86_avx_ptestz_256:
// ZF = 1
@@ -27175,7 +27175,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::x86_avx_vtestc_ps_256:
case Intrinsic::x86_avx_vtestc_pd_256:
TestOpc = X86ISD::TESTP;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::x86_sse41_ptestc:
case Intrinsic::x86_avx_ptestc_256:
// CF = 1
@@ -27186,7 +27186,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::x86_avx_vtestnzc_ps_256:
case Intrinsic::x86_avx_vtestnzc_pd_256:
TestOpc = X86ISD::TESTP;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::x86_sse41_ptestnzc:
case Intrinsic::x86_avx_ptestnzc_256:
// ZF and CF = 0
@@ -39518,7 +39518,7 @@ combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
Chain.push_back(V);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::BITCAST:
V = V.getOperand(0);
continue;
@@ -39663,7 +39663,7 @@ static SDValue canonicalizeShuffleWithBinOps(SDValue N, SelectionDAG &DAG,
if (!getTargetShuffleMask(N.getNode(), ShuffleVT.getSimpleVT(), false, Ops,
Mask))
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case X86ISD::VBROADCAST:
case X86ISD::MOVDDUP:
@@ -39708,7 +39708,7 @@ static SDValue canonicalizeShuffleWithBinOps(SDValue N, SelectionDAG &DAG,
unsigned ZeroMask = InsertPSMask & 0xF;
if (ZeroMask != 0)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case X86ISD::MOVSD:
case X86ISD::MOVSS:
@@ -39793,7 +39793,7 @@ static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
if ((Mask & 0x3) != ((Mask >> 2) & 0x3))
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86ISD::VSHLI:
case X86ISD::VSRLI:
case X86ISD::VSRAI:
@@ -40968,7 +40968,7 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
Depth + 1, AssumeSingleUse))
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case X86ISD::VSHLI:
case X86ISD::VSRLI:
@@ -42157,7 +42157,7 @@ static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size,
case ISD::TRUNCATE:
if (!AllowTruncate)
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETCC:
return Src.getOperand(0).getValueSizeInBits() == Size;
case ISD::AND:
@@ -44609,7 +44609,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
// Converting this to a min would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOLT:
case ISD::SETLT:
case ISD::SETLE:
@@ -44641,7 +44641,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
// Converting this to a max would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOGT:
case ISD::SETGT:
case ISD::SETGE:
@@ -44676,7 +44676,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
// Converting this to a min would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOGT:
case ISD::SETGT:
case ISD::SETGE:
@@ -44706,7 +44706,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
// Converting this to a max would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SETOLT:
case ISD::SETLT:
case ISD::SETLE:
@@ -45218,7 +45218,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
break;
assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
"Invalid use of SETCC_CARRY!");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86ISD::SETCC:
// Set the condition code or opposite one if necessary.
CC = X86::CondCode(SetCC.getConstantOperandVal(0));
@@ -45291,7 +45291,7 @@ static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
case ISD::AND:
case X86ISD::AND:
isAnd = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::OR:
case X86ISD::OR:
SetCC0 = Cond->getOperand(0);
@@ -50114,7 +50114,7 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
TLI.isOperationLegal(SrcOpcode, VT) &&
!TLI.isOperationLegal(SrcOpcode, SrcVT))
return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::AND:
case ISD::XOR:
case ISD::OR:
@@ -53967,7 +53967,7 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
return DAG.getNode(Op0.getOpcode(), DL, VT,
ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86ISD::VPERMILPI:
if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
Op0.getOperand(1) == Ops[1].getOperand(1)) {
@@ -54043,7 +54043,7 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
}
return DAG.getBitcast(VT, Res);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86ISD::VSRAI:
case X86ISD::VSHL:
case X86ISD::VSRL:
@@ -55328,7 +55328,7 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
case ISD::OR:
case ISD::XOR:
Commute = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ISD::SUB: {
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
@@ -55581,7 +55581,7 @@ TargetLowering::ConstraintWeight
switch (*constraint) {
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'R':
case 'q':
case 'Q':
@@ -55640,7 +55640,7 @@ TargetLowering::ConstraintWeight
case 'v':
if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
weight = CW_Register;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'x':
if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
@@ -55978,7 +55978,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &X86::GR64RegClass);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// 32-bit fallthrough
case 'Q': // Q_REGS
if (VT == MVT::i8 || VT == MVT::i1)
@@ -56060,7 +56060,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
case MVT::v8f16:
if (!Subtarget.hasFP16())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::f128:
case MVT::v16i8:
case MVT::v8i16:
@@ -56075,7 +56075,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
case MVT::v16f16:
if (!Subtarget.hasFP16())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::v32i8:
case MVT::v16i16:
case MVT::v8i32:
@@ -56090,7 +56090,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
case MVT::v32f16:
if (!Subtarget.hasFP16())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::v64i8:
case MVT::v32i16:
case MVT::v8f64:
@@ -56133,7 +56133,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
case MVT::v8f16:
if (!Subtarget.hasFP16())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::f128:
case MVT::v16i8:
case MVT::v8i16:
@@ -56146,7 +56146,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
case MVT::v16f16:
if (!Subtarget.hasFP16())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::v32i8:
case MVT::v16i16:
case MVT::v8i32:
@@ -56159,7 +56159,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
case MVT::v32f16:
if (!Subtarget.hasFP16())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::v64i8:
case MVT::v32i16:
case MVT::v8f64:
diff --git a/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp b/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
index 7e751a4c8811e..6f8b87da1c571 100644
--- a/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
+++ b/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp
@@ -118,7 +118,7 @@ static bool needsPrologueENDBR(MachineFunction &MF, const Module *M) {
return F.hasAddressTaken();
}
// if !IBTSeal, fall into default case.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// Address taken or externally linked functions may be reachable.
default:
return (F.hasAddressTaken() || !F.hasLocalLinkage());
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index c098122685bef..49aac529b7df8 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -135,7 +135,7 @@ static Value *simplifyX86immShift(const IntrinsicInst &II,
case Intrinsic::x86_avx512_psrai_q_512:
case Intrinsic::x86_avx512_psrai_w_512:
IsImm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::x86_sse2_psra_d:
case Intrinsic::x86_sse2_psra_w:
case Intrinsic::x86_avx2_psra_d:
@@ -158,7 +158,7 @@ static Value *simplifyX86immShift(const IntrinsicInst &II,
case Intrinsic::x86_avx512_psrli_q_512:
case Intrinsic::x86_avx512_psrli_w_512:
IsImm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::x86_sse2_psrl_d:
case Intrinsic::x86_sse2_psrl_q:
case Intrinsic::x86_sse2_psrl_w:
@@ -181,7 +181,7 @@ static Value *simplifyX86immShift(const IntrinsicInst &II,
case Intrinsic::x86_avx512_pslli_q_512:
case Intrinsic::x86_avx512_pslli_w_512:
IsImm = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Intrinsic::x86_sse2_psll_d:
case Intrinsic::x86_sse2_psll_q:
case Intrinsic::x86_sse2_psll_w:
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 81db055305987..f0c8a49b17627 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -105,7 +105,7 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
// It's not always legal to reference the low 8-bit of the larger
// register in 32-bit mode.
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::MOVSX32rr16:
case X86::MOVZX32rr16:
case X86::MOVSX64rr16:
@@ -1395,7 +1395,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
}
case X86::SHL8ri:
Is8BitOp = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::SHL16ri: {
assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
@@ -1456,7 +1456,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
case X86::DEC8r:
case X86::INC8r:
Is8BitOp = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::DEC16r:
case X86::INC16r:
return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
@@ -1512,7 +1512,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
case X86::ADD8rr:
case X86::ADD8rr_DB:
Is8BitOp = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::ADD16rr:
case X86::ADD16rr_DB:
return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
@@ -1554,7 +1554,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
case X86::ADD8ri:
case X86::ADD8ri_DB:
Is8BitOp = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::ADD16ri:
case X86::ADD16ri8:
case X86::ADD16ri_DB:
@@ -2047,7 +2047,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
OpIdx1, OpIdx2);
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::PBLENDWrri:
case X86::VBLENDPDYrri:
case X86::VBLENDPSYrri:
@@ -2798,18 +2798,18 @@ X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
default: break;
// Floating-point Predicates
case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
- case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH;
+ case CmpInst::FCMP_OLT: NeedSwap = true; [[fallthrough]];
case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
- case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH;
+ case CmpInst::FCMP_OLE: NeedSwap = true; [[fallthrough]];
case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
- case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH;
+ case CmpInst::FCMP_UGT: NeedSwap = true; [[fallthrough]];
case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
- case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH;
+ case CmpInst::FCMP_UGE: NeedSwap = true; [[fallthrough]];
case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
- case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH;
+ case CmpInst::FCMP_OEQ: [[fallthrough]];
case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
// Integer Predicates
@@ -4494,7 +4494,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
// can't do the optimization.
if (NoSignFlag)
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::COND_O: case X86::COND_NO:
// If OF is used, the instruction needs to clear it like CmpZero does.
if (!ClearsOverflowFlag)
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 1c08cf300ad3e..8488dc6ed2a1e 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -2500,7 +2500,7 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
case X86::TAILJMPd64:
if (IndCSPrefix && MI->hasRegisterImplicitUseOperand(X86::R11))
EmitAndCountInstruction(MCInstBuilder(X86::CS_PREFIX));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::TAILJMPr:
case X86::TAILJMPm:
case X86::TAILJMPd:
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index aa993166a2366..14cd86cb9c945 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -109,7 +109,7 @@ llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
// - Kabylake
switch (Level) {
case TargetTransformInfo::CacheLevel::L1D:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TargetTransformInfo::CacheLevel::L2D:
return 8;
}
diff --git a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
index 691fdf16bc0f0..1c6e25607136d 100644
--- a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -134,7 +134,7 @@ void XCoreAsmPrinter::emitGlobalVariable(const GlobalVariable *GV) {
if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
GV->hasCommonLinkage())
OutStreamer->emitSymbolAttribute(GVSym, MCSA_Weak);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case GlobalValue::InternalLinkage:
case GlobalValue::PrivateLinkage:
break;
diff --git a/llvm/lib/TextAPI/TextStubCommon.cpp b/llvm/lib/TextAPI/TextStubCommon.cpp
index 29b74f981a917..0a03f42a25188 100644
--- a/llvm/lib/TextAPI/TextStubCommon.cpp
+++ b/llvm/lib/TextAPI/TextStubCommon.cpp
@@ -64,17 +64,17 @@ void ScalarTraits<PlatformSet>::output(const PlatformSet &Values, void *IO,
OS << "macosx";
break;
case PLATFORM_IOSSIMULATOR:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PLATFORM_IOS:
OS << "ios";
break;
case PLATFORM_WATCHOSSIMULATOR:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PLATFORM_WATCHOS:
OS << "watchos";
break;
case PLATFORM_TVOSSIMULATOR:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case PLATFORM_TVOS:
OS << "tvos";
break;
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index c82001c39d96e..0d9cbe10b29b9 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -2803,7 +2803,7 @@ void InformationCache::initializeInformationCache(const Function &CF,
if (const Function *Callee = cast<CallInst>(I).getCalledFunction())
getFunctionInfo(*Callee).CalledViaMustTail = true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::CallBr:
case Instruction::Invoke:
case Instruction::CleanupRet:
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 276fcc2c61d8b..05e074832beda 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -6346,7 +6346,7 @@ ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
if (UsesCheck(AI))
break;
AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AllocationInfo::STACK_DUE_TO_FREE:
if (FreeCheck(AI))
break;
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 50710eaa1b57d..399dc00a2a279 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -1146,7 +1146,7 @@ static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
break;
if (CB.getCalledFunction() && SCCNodes.count(CB.getCalledFunction()))
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default:
return false; // Did not come from an allocation.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index bc01d2ef7fe20..7a361b42a619a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1301,7 +1301,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
Value *Cmp = Builder.CreateICmpNE(I0, Zero);
return CastInst::Create(Instruction::ZExt, Cmp, II->getType());
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::umax: {
Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
@@ -1322,7 +1322,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
// If both operands of unsigned min/max are sign-extended, it is still ok
// to narrow the operation.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::smax:
case Intrinsic::smin: {
@@ -1850,7 +1850,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
return FAdd;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::fma: {
// fma fneg(x), fneg(y), z -> fma x, y, z
@@ -1940,7 +1940,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
return replaceOperand(*II, 0, TVal);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::ceil:
case Intrinsic::floor:
@@ -2504,7 +2504,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
return replaceInstUsesWith(CI, Res);
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::vector_reduce_add: {
if (IID == Intrinsic::vector_reduce_add) {
@@ -2531,7 +2531,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::vector_reduce_xor: {
if (IID == Intrinsic::vector_reduce_xor) {
@@ -2555,7 +2555,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::vector_reduce_mul: {
if (IID == Intrinsic::vector_reduce_mul) {
@@ -2577,7 +2577,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::vector_reduce_umin:
case Intrinsic::vector_reduce_umax: {
@@ -2604,7 +2604,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::vector_reduce_smin:
case Intrinsic::vector_reduce_smax: {
@@ -2642,7 +2642,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::vector_reduce_fmax:
case Intrinsic::vector_reduce_fmin:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 1712f391c0c8f..39f526c95fb68 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -3479,7 +3479,7 @@ Instruction *InstCombinerImpl::foldICmpBinOpWithConstant(ICmpInst &Cmp,
case Instruction::UDiv:
if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
return I;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::SDiv:
if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
return I;
@@ -5947,7 +5947,7 @@ static Instruction *canonicalizeICmpBool(ICmpInst &I,
case ICmpInst::ICMP_UGT:
// icmp ugt -> icmp ult
std::swap(A, B);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_ULT:
// icmp ult i1 A, B -> ~A & B
return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
@@ -5955,7 +5955,7 @@ static Instruction *canonicalizeICmpBool(ICmpInst &I,
case ICmpInst::ICMP_SGT:
// icmp sgt -> icmp slt
std::swap(A, B);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SLT:
// icmp slt i1 A, B -> A & ~B
return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
@@ -5963,7 +5963,7 @@ static Instruction *canonicalizeICmpBool(ICmpInst &I,
case ICmpInst::ICMP_UGE:
// icmp uge -> icmp ule
std::swap(A, B);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_ULE:
// icmp ule i1 A, B -> ~A | B
return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
@@ -5971,7 +5971,7 @@ static Instruction *canonicalizeICmpBool(ICmpInst &I,
case ICmpInst::ICMP_SGE:
// icmp sge -> icmp sle
std::swap(A, B);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SLE:
// icmp sle i1 A, B -> A | ~B
return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
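For readers less familiar with the swap-then-fall-through idiom that the canonicalizeICmpBool hunks above preserve, here is a minimal standalone sketch. The enum and function names are invented for illustration and are not taken from the tree; only the [[fallthrough]] spelling mirrors the patch.

#include <utility>

// Hypothetical sketch of the i1 comparison lowering shape: the "greater"
// predicate swaps its operands and falls through to the "less" handler.
enum class Pred { UGT, ULT };

static bool evalBoolCmp(Pred P, bool A, bool B) {
  switch (P) {
  case Pred::UGT:
    // icmp ugt A, B  ==  icmp ult B, A; reuse the ULT lowering below.
    std::swap(A, B);
    [[fallthrough]];
  case Pred::ULT:
    return !A && B; // icmp ult i1 A, B -> ~A & B
  }
  return false; // not reached for valid predicates
}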
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
index a3e0c45269ed6..a0a5926d2771e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
@@ -399,7 +399,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
if (match(Ops[1], m_One()))
return Builder.CreateNot(Ops[0], I->getName() + ".neg");
// Else, just defer to Instruction::Add handling.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Instruction::Add: {
// `add` is negatible if both of its operands are negatible.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index ad96a5f475f15..f6f8f1141df43 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -1371,7 +1371,7 @@ static Value *canonicalizeClampLike(SelectInst &Sel0, ICmpInst &Cmp0,
C2->getType()->getScalarSizeInBits()))))
return nullptr; // Can't do, have signed max element[s].
C2 = InstCombiner::AddOne(C2);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::Predicate::ICMP_SGE:
// Also non-canonical, but here we don't need to change C2,
// so we don't have any restrictions on C2, so we can just handle it.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index febd0f51d25fa..3f816b0a80ced 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -397,7 +397,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::ZExt: {
unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
@@ -498,7 +498,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return Builder.CreateSExt(Or, VTy);
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Sub: {
APInt DemandedFromOps;
if (simplifyOperandsBasedOnUnusedHighBits(DemandedFromOps))
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index b80c58183dd52..d435a7d76d0de 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -1653,7 +1653,7 @@ static bool canEvaluateShuffled(Value *V, ArrayRef<int> Mask,
// from an undefined element in an operand.
if (llvm::is_contained(Mask, -1))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 71c763de43b4c..57d12e8050307 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2778,7 +2778,7 @@ static bool isAllocSiteRemovable(Instruction *AI,
MemIntrinsic *MI = cast<MemIntrinsic>(II);
if (MI->isVolatile() || MI->getRawDest() != PI)
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Intrinsic::assume:
case Intrinsic::invariant_start:
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 378dd96e10301..863b085fc15b8 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -676,7 +676,7 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
switch (ord) {
case AtomicOrdering::NotAtomic:
llvm_unreachable("unexpected atomic ordering!");
- case AtomicOrdering::Unordered: LLVM_FALLTHROUGH;
+ case AtomicOrdering::Unordered: [[fallthrough]];
case AtomicOrdering::Monotonic: v = 0; break;
// Not specified yet:
// case AtomicOrdering::Consume: v = 1; break;
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index f64c26ef2bed6..cb6f6e1661aba 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -428,7 +428,7 @@ bool ObjCARCContract::tryToPeepholeInstruction(
if (!optimizeRetainCall(F, Inst))
return false;
// If we succeed in our optimization, fall through.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ARCInstKind::RetainRV:
case ARCInstKind::UnsafeClaimRV: {
// Return true if this is a bundled retainRV/claimRV call, which is always
diff --git a/llvm/lib/Transforms/ObjCARC/PtrState.cpp b/llvm/lib/Transforms/ObjCARC/PtrState.cpp
index d10d5851d5ea0..e9b2dbeb62e60 100644
--- a/llvm/lib/Transforms/ObjCARC/PtrState.cpp
+++ b/llvm/lib/Transforms/ObjCARC/PtrState.cpp
@@ -212,7 +212,7 @@ bool BottomUpPtrState::MatchWithRetain() {
// imprecise release, clear our reverse insertion points.
if (OldSeq != S_Use || IsTrackingImpreciseReleases())
ClearReverseInsertPts();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case S_CanRelease:
return true;
case S_None:
@@ -360,7 +360,7 @@ bool TopDownPtrState::MatchWithRelease(ARCMDKindCache &Cache,
case S_CanRelease:
if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
ClearReverseInsertPts();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case S_Use:
SetReleaseMetadata(ReleaseMetadata);
SetTailCallRelease(cast<CallInst>(Release)->isTailCall());
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 9698ed97379e4..42a3ae22dc6dc 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -675,7 +675,7 @@ static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L) {
// An IV counter must preserve its type.
if (IncI->getNumOperands() == 2)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return nullptr;
}
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 328615011ceb5..096ce9477c3e9 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -307,7 +307,7 @@ InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
case ICmpInst::ICMP_SLE:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SGE:
IsSigned = true;
if (match(RHS, m_ConstantInt<0>())) {
@@ -318,7 +318,7 @@ InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
case ICmpInst::ICMP_SLT:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_SGT:
IsSigned = true;
if (match(RHS, m_ConstantInt<-1>())) {
@@ -335,7 +335,7 @@ InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
case ICmpInst::ICMP_ULT:
std::swap(LHS, RHS);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ICmpInst::ICMP_UGT:
IsSigned = false;
if (IsLoopInvariant(LHS)) {
diff --git a/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index e25ec74a05723..048f132f174b0 100644
--- a/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -316,7 +316,7 @@ bool llvm::inferNonMandatoryLibFuncAttrs(Function &F,
case LibFunc_strcpy:
case LibFunc_strncpy:
Changed |= setReturnedArg(F, 0);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_stpcpy:
case LibFunc_stpncpy:
Changed |= setOnlyAccessesArgMemory(F);
@@ -386,7 +386,7 @@ bool llvm::inferNonMandatoryLibFuncAttrs(Function &F,
break;
case LibFunc_strndup:
Changed |= setArgNoUndef(F, 1);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_strdup:
Changed |= setAllocFamily(F, "malloc");
Changed |= setOnlyAccessesInaccessibleMemOrArgMem(F);
@@ -448,7 +448,7 @@ bool llvm::inferNonMandatoryLibFuncAttrs(Function &F,
Changed |= setAlignedAllocParam(F, 0);
Changed |= setAllocSize(F, 1, None);
Changed |= setAllocKind(F, AllocFnKind::Alloc | AllocFnKind::Uninitialized | AllocFnKind::Aligned);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_valloc:
case LibFunc_malloc:
case LibFunc_vec_malloc:
@@ -507,7 +507,7 @@ bool llvm::inferNonMandatoryLibFuncAttrs(Function &F,
case LibFunc_mempcpy:
case LibFunc_memccpy:
Changed |= setWillReturn(F);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_memcpy_chk:
Changed |= setDoesNotThrow(F);
Changed |= setOnlyAccessesArgMemory(F);
@@ -985,7 +985,7 @@ bool llvm::inferNonMandatoryLibFuncAttrs(Function &F,
break;
case LibFunc_dunder_strndup:
Changed |= setArgNoUndef(F, 1);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_dunder_strdup:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
@@ -1078,10 +1078,10 @@ bool llvm::inferNonMandatoryLibFuncAttrs(Function &F,
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_memset:
Changed |= setWillReturn(F);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LibFunc_memset_chk:
Changed |= setOnlyAccessesArgMemory(F);
Changed |= setOnlyWritesMemory(F, 0);
diff --git a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
index 597c88ad13df1..1d5529940df3a 100644
--- a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
@@ -713,7 +713,7 @@ static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
if (!cast<GEPOperator>(I)->hasAllConstantIndices())
return false;
// fall-thru to increment case
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Add:
case Instruction::Sub:
case Instruction::And:
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index af956aa56d480..368ab0078c0b9 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5007,7 +5007,7 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
case CM_ScalarEpilogueAllowed:
return computeFeasibleMaxVF(TC, UserVF, false);
case CM_ScalarEpilogueNotAllowedUsePredicate:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CM_ScalarEpilogueNotNeededUsePredicate:
LLVM_DEBUG(
dbgs() << "LV: vector predicate hint/switch found.\n"
@@ -7072,7 +7072,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
// likely.
return Cost / getReciprocalPredBlockProb();
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
@@ -7178,7 +7178,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
case Instruction::BitCast:
if (I->getType()->isPointerTy())
return 0;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
@@ -7287,7 +7287,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
// the result would need to be a vector of pointers.
if (VF.isScalable())
return InstructionCost::getInvalid();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
// This opcode is unknown. Assume that it is the same as 'mul'.
return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 1d37674aa48ff..999b396034b35 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -644,7 +644,7 @@ static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
return (CI->getArgOperand(i) == Scalar);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default:
return false;
diff --git a/llvm/tools/llvm-ar/llvm-ar.cpp b/llvm/tools/llvm-ar/llvm-ar.cpp
index 4ffc5cf337a24..b11ee5182f302 100644
--- a/llvm/tools/llvm-ar/llvm-ar.cpp
+++ b/llvm/tools/llvm-ar/llvm-ar.cpp
@@ -1218,7 +1218,7 @@ static void runMRIScript() {
break;
case MRICommand::CreateThin:
Thin = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MRICommand::Create:
Create = true;
if (!ArchiveName.empty())
diff --git a/llvm/tools/llvm-config/llvm-config.cpp b/llvm/tools/llvm-config/llvm-config.cpp
index 2c6c55f89d38c..18958e8be45f7 100644
--- a/llvm/tools/llvm-config/llvm-config.cpp
+++ b/llvm/tools/llvm-config/llvm-config.cpp
@@ -660,7 +660,7 @@ int main(int argc, char **argv) {
}
WithColor::error(errs(), "llvm-config")
<< "component libraries and shared library\n\n";
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LinkModeStatic:
for (auto &Lib : MissingLibs)
WithColor::error(errs(), "llvm-config") << "missing: " << Lib << "\n";
diff --git a/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp b/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp
index 962136a1f87e5..b07a44d2cdde5 100644
--- a/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp
+++ b/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp
@@ -96,7 +96,7 @@ static void appendCodeTemplates(const LLVMState &State,
switch (ExecutionModeBit) {
case ExecutionMode::ALWAYS_SERIAL_IMPLICIT_REGS_ALIAS:
// Nothing to do, the instruction is always serial.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ExecutionMode::ALWAYS_SERIAL_TIED_REGS_ALIAS: {
// Picking whatever value for the tied variable will make the instruction
// serial.
diff --git a/llvm/tools/llvm-mc/Disassembler.cpp b/llvm/tools/llvm-mc/Disassembler.cpp
index ac55d05db192a..2d18334297188 100644
--- a/llvm/tools/llvm-mc/Disassembler.cpp
+++ b/llvm/tools/llvm-mc/Disassembler.cpp
@@ -65,7 +65,7 @@ static bool PrintInsts(const MCDisassembler &DisAsm,
SM.PrintMessage(SMLoc::getFromPointer(Bytes.second[Index]),
SourceMgr::DK_Warning,
"potentially undefined instruction encoding");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MCDisassembler::Success:
Streamer.emitInstruction(Inst, STI);
diff --git a/llvm/tools/llvm-ml/Disassembler.cpp b/llvm/tools/llvm-ml/Disassembler.cpp
index 6a96c88184210..72d88d98ed080 100644
--- a/llvm/tools/llvm-ml/Disassembler.cpp
+++ b/llvm/tools/llvm-ml/Disassembler.cpp
@@ -61,7 +61,7 @@ static bool PrintInsts(const MCDisassembler &DisAsm, const ByteArrayTy &Bytes,
SM.PrintMessage(SMLoc::getFromPointer(Bytes.second[Index]),
SourceMgr::DK_Warning,
"potentially undefined instruction encoding");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MCDisassembler::Success:
Streamer.emitInstruction(Inst, STI);
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index ba7bae96ade36..b26868a3b2628 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -2329,7 +2329,7 @@ std::string ELFDumper<ELFT>::getDynamicEntry(uint64_t Type,
return "REL";
if (Value == DT_RELA)
return "RELA";
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DT_PLTGOT:
case DT_HASH:
case DT_STRTAB:
@@ -6499,7 +6499,7 @@ template <class ELFT> void LLVMELFDumper<ELFT>::printFileHeaders() {
break;
case 0:
// ELFOSABI_AMDGPU_PAL, ELFOSABI_AMDGPU_MESA3D support *_V3 flags.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
W.printFlags("Flags", E.e_flags,
makeArrayRef(ElfHeaderAMDGPUFlagsABIVersion3),
diff --git a/llvm/tools/llvm-remark-size-diff/RemarkSizeDiff.cpp b/llvm/tools/llvm-remark-size-diff/RemarkSizeDiff.cpp
index ab59820f38dbc..c3bdb2c1ae27e 100644
--- a/llvm/tools/llvm-remark-size-diff/RemarkSizeDiff.cpp
+++ b/llvm/tools/llvm-remark-size-diff/RemarkSizeDiff.cpp
@@ -404,13 +404,13 @@ getFunctionDiffListAsJSON(const SmallVector<FunctionDiff> &FunctionDiffs,
InstCountA = InstCountB = StackSizeA = StackSizeB = 0;
switch (WhichFiles) {
case BOTH:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case A:
InstCountA = Diff.getInstCountA();
StackSizeA = Diff.getStackSizeA();
if (WhichFiles != BOTH)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case B:
InstCountB = Diff.getInstCountB();
StackSizeB = Diff.getStackSizeB();
diff --git a/llvm/tools/obj2yaml/macho2yaml.cpp b/llvm/tools/obj2yaml/macho2yaml.cpp
index 6b121a74d12f3..e4259a220856a 100644
--- a/llvm/tools/obj2yaml/macho2yaml.cpp
+++ b/llvm/tools/obj2yaml/macho2yaml.cpp
@@ -386,7 +386,7 @@ void MachODumper::dumpRebaseOpcodes(std::unique_ptr<MachOYAML::Object> &Y) {
ULEB = decodeULEB128(OpCode + 1, &Count);
RebaseOp.ExtraData.push_back(ULEB);
OpCode += Count;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// Intentionally no break here -- This opcode has two ULEB values
case MachO::REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
case MachO::REBASE_OPCODE_ADD_ADDR_ULEB:
@@ -434,7 +434,7 @@ void MachODumper::dumpBindOpcodes(
ULEB = decodeULEB128(OpCode + 1, &Count);
BindOp.ULEBExtraData.push_back(ULEB);
OpCode += Count;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// Intentionally no break here -- this opcode has two ULEB values
case MachO::BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
diff --git a/llvm/utils/TableGen/IntrinsicEmitter.cpp b/llvm/utils/TableGen/IntrinsicEmitter.cpp
index fca2bc34e09a7..099f99e8e97c5 100644
--- a/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -361,10 +361,10 @@ static void EncodeFixedType(Record *R, std::vector<unsigned char> &ArgCodes,
unsigned Tmp = 0;
switch (VT) {
default: break;
- case MVT::iPTRAny: ++Tmp; LLVM_FALLTHROUGH;
- case MVT::vAny: ++Tmp; LLVM_FALLTHROUGH;
- case MVT::fAny: ++Tmp; LLVM_FALLTHROUGH;
- case MVT::iAny: ++Tmp; LLVM_FALLTHROUGH;
+ case MVT::iPTRAny: ++Tmp; [[fallthrough]];
+ case MVT::vAny: ++Tmp; [[fallthrough]];
+ case MVT::fAny: ++Tmp; [[fallthrough]];
+ case MVT::iAny: ++Tmp; [[fallthrough]];
case MVT::Any: {
// If this is an "any" valuetype, then the type is the type of the next
// type in the list specified to getIntrinsic().
@@ -444,16 +444,16 @@ static void UpdateArgCodes(Record *R, std::vector<unsigned char> &ArgCodes,
break;
case MVT::iPTRAny:
++Tmp;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::vAny:
++Tmp;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::fAny:
++Tmp;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::iAny:
++Tmp;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MVT::Any:
unsigned OriginalIdx = ArgCodes.size() - NumInserted;
assert(OriginalIdx >= Mapping.size());
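As a closing reference, every hunk above applies the same mechanical substitution. The snippet below is a self-contained sketch of the resulting code shape, with invented names loosely modeled on the getCacheAssociativity hunk; it is illustrative only and not part of the patch.

#include <cstdio>

// Invented enum and function names, for illustration only.
enum class CacheLevel { L1D, L2D };

static unsigned associativityFor(CacheLevel Level) {
  switch (Level) {
  case CacheLevel::L1D:
    // Both levels share the same associativity here, so fall through.
    [[fallthrough]]; // previously spelled LLVM_FALLTHROUGH
  case CacheLevel::L2D:
    return 8;
  }
  return 0; // not reached for valid inputs
}

int main() {
  std::printf("L1D associativity: %u\n", associativityFor(CacheLevel::L1D));
  return 0;
}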