[llvm] 63fae3e - [AMDGPU] clang-tidy: no else after return etc. NFC. (#99298)

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jul 17 13:11:03 PDT 2024


Author: Jay Foad
Date: 2024-07-17T21:11:00+01:00
New Revision: 63fae3ed656241a1d6a19c3e773ecc9bfff3e182

URL: https://github.com/llvm/llvm-project/commit/63fae3ed656241a1d6a19c3e773ecc9bfff3e182
DIFF: https://github.com/llvm/llvm-project/commit/63fae3ed656241a1d6a19c3e773ecc9bfff3e182.diff

LOG: [AMDGPU] clang-tidy: no else after return etc. NFC. (#99298)
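
The change applies the clang-tidy readability-else-after-return style: when a branch of an if/else chain ends in an unconditional return (or break/continue), the trailing else is redundant and can be dropped, flattening the control flow without changing behavior. A minimal sketch of the transformation, using a made-up function rather than code from this patch:

  // Before: each return is followed by an unnecessary else.
  int sign(int X) {
    if (X < 0) {
      return -1;
    } else if (X > 0) {
      return 1;
    } else {
      return 0;
    }
  }

  // After: early returns, no else, one level less nesting.
  int sign(int X) {
    if (X < 0)
      return -1;
    if (X > 0)
      return 1;
    return 0;
  }

The hunks below are NFC rewrites of exactly this shape, plus the re-indentation that falls out of removing the braces.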

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
    llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
    llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
    llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
    llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
    llvm/lib/Target/AMDGPU/GCNILPSched.cpp
    llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
    llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
    llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
    llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
    llvm/lib/Target/AMDGPU/R600ISelDAGToDAG.cpp
    llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
    llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
    llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
    llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
    llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
    llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
    llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
    llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
    llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 632657589bdd2..3154dc6fe433d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -1450,7 +1450,8 @@ bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
     AMDGPUInstPrinter::printRegOperand(MO.getReg(), O,
                                        *MF->getSubtarget().getRegisterInfo());
     return false;
-  } else if (MO.isImm()) {
+  }
+  if (MO.isImm()) {
     int64_t Val = MO.getImm();
     if (AMDGPU::isInlinableIntLiteral(Val)) {
       O << Val;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index 5874a6f1f3992..07b2ecc2fed0e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -568,16 +568,14 @@ class RegionMRT : public MRT {
   bool contains(MachineBasicBlock *MBB) {
     for (auto *CI : Children) {
       if (CI->isMBB()) {
-        if (MBB == CI->getMBBMRT()->getMBB()) {
+        if (MBB == CI->getMBBMRT()->getMBB())
           return true;
-        }
       } else {
-        if (CI->getRegionMRT()->contains(MBB)) {
+        if (CI->getRegionMRT()->contains(MBB))
           return true;
-        } else if (CI->getRegionMRT()->getLinearizedRegion() != nullptr &&
-                   CI->getRegionMRT()->getLinearizedRegion()->contains(MBB)) {
+        if (CI->getRegionMRT()->getLinearizedRegion() != nullptr &&
+            CI->getRegionMRT()->getLinearizedRegion()->contains(MBB))
           return true;
-        }
       }
     }
     return false;
@@ -2259,63 +2257,60 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
     CodeBB->addSuccessor(MergeBB);
     CurrentRegion->addMBB(CodeBB);
     return nullptr;
-  } else {
-    // Handle internal block.
-    const TargetRegisterClass *RegClass = MRI->getRegClass(BBSelectRegIn);
-    Register CodeBBSelectReg = MRI->createVirtualRegister(RegClass);
-    rewriteCodeBBTerminator(CodeBB, MergeBB, CodeBBSelectReg);
-    bool IsRegionEntryBB = CurrentRegion->getEntry() == CodeBB;
-    MachineBasicBlock *IfBB = createIfBlock(MergeBB, CodeBB, CodeBB, CodeBB,
-                                            BBSelectRegIn, IsRegionEntryBB);
-    CurrentRegion->addMBB(IfBB);
-    // If this is the entry block we need to make the If block the new
-    // linearized region entry.
-    if (IsRegionEntryBB) {
-      CurrentRegion->setEntry(IfBB);
-
-      if (CurrentRegion->getHasLoop()) {
-        MachineBasicBlock *RegionExit = CurrentRegion->getExit();
-        MachineBasicBlock *ETrueBB = nullptr;
-        MachineBasicBlock *EFalseBB = nullptr;
-        SmallVector<MachineOperand, 1> ECond;
-
-        const DebugLoc &DL = DebugLoc();
-        TII->analyzeBranch(*RegionExit, ETrueBB, EFalseBB, ECond);
-        TII->removeBranch(*RegionExit);
-
-        // We need to create a backedge if there is a loop
-        Register Reg = TII->insertNE(
-            RegionExit, RegionExit->instr_end(), DL,
-            CurrentRegion->getRegionMRT()->getInnerOutputRegister(),
-            CurrentRegion->getRegionMRT()->getEntry()->getNumber());
-        MachineOperand RegOp =
-            MachineOperand::CreateReg(Reg, false, false, true);
-        ArrayRef<MachineOperand> Cond(RegOp);
-        LLVM_DEBUG(dbgs() << "RegionExitReg: ");
-        LLVM_DEBUG(RegOp.print(dbgs(), TRI));
-        LLVM_DEBUG(dbgs() << "\n");
-        TII->insertBranch(*RegionExit, CurrentRegion->getEntry(), RegionExit,
-                          Cond, DebugLoc());
-        RegionExit->addSuccessor(CurrentRegion->getEntry());
-      }
-    }
-    CurrentRegion->addMBB(CodeBB);
-    LinearizedRegion InnerRegion(CodeBB, MRI, TRI, PHIInfo);
+  }
+  // Handle internal block.
+  const TargetRegisterClass *RegClass = MRI->getRegClass(BBSelectRegIn);
+  Register CodeBBSelectReg = MRI->createVirtualRegister(RegClass);
+  rewriteCodeBBTerminator(CodeBB, MergeBB, CodeBBSelectReg);
+  bool IsRegionEntryBB = CurrentRegion->getEntry() == CodeBB;
+  MachineBasicBlock *IfBB = createIfBlock(MergeBB, CodeBB, CodeBB, CodeBB,
+                                          BBSelectRegIn, IsRegionEntryBB);
+  CurrentRegion->addMBB(IfBB);
+  // If this is the entry block we need to make the If block the new
+  // linearized region entry.
+  if (IsRegionEntryBB) {
+    CurrentRegion->setEntry(IfBB);
+
+    if (CurrentRegion->getHasLoop()) {
+      MachineBasicBlock *RegionExit = CurrentRegion->getExit();
+      MachineBasicBlock *ETrueBB = nullptr;
+      MachineBasicBlock *EFalseBB = nullptr;
+      SmallVector<MachineOperand, 1> ECond;
 
-    InnerRegion.setParent(CurrentRegion);
-    LLVM_DEBUG(dbgs() << "Insert BB Select PHI (BB)\n");
-    insertMergePHI(IfBB, CodeBB, MergeBB, BBSelectRegOut, BBSelectRegIn,
-                   CodeBBSelectReg);
-    InnerRegion.addMBB(MergeBB);
+      const DebugLoc &DL = DebugLoc();
+      TII->analyzeBranch(*RegionExit, ETrueBB, EFalseBB, ECond);
+      TII->removeBranch(*RegionExit);
 
-    LLVM_DEBUG(InnerRegion.print(dbgs(), TRI));
-    rewriteLiveOutRegs(IfBB, CodeBB, MergeBB, &InnerRegion, CurrentRegion);
-    extractKilledPHIs(CodeBB);
-    if (IsRegionEntryBB) {
-      createEntryPHIs(CurrentRegion);
+      // We need to create a backedge if there is a loop
+      Register Reg =
+          TII->insertNE(RegionExit, RegionExit->instr_end(), DL,
+                        CurrentRegion->getRegionMRT()->getInnerOutputRegister(),
+                        CurrentRegion->getRegionMRT()->getEntry()->getNumber());
+      MachineOperand RegOp = MachineOperand::CreateReg(Reg, false, false, true);
+      ArrayRef<MachineOperand> Cond(RegOp);
+      LLVM_DEBUG(dbgs() << "RegionExitReg: ");
+      LLVM_DEBUG(RegOp.print(dbgs(), TRI));
+      LLVM_DEBUG(dbgs() << "\n");
+      TII->insertBranch(*RegionExit, CurrentRegion->getEntry(), RegionExit,
+                        Cond, DebugLoc());
+      RegionExit->addSuccessor(CurrentRegion->getEntry());
     }
-    return IfBB;
   }
+  CurrentRegion->addMBB(CodeBB);
+  LinearizedRegion InnerRegion(CodeBB, MRI, TRI, PHIInfo);
+
+  InnerRegion.setParent(CurrentRegion);
+  LLVM_DEBUG(dbgs() << "Insert BB Select PHI (BB)\n");
+  insertMergePHI(IfBB, CodeBB, MergeBB, BBSelectRegOut, BBSelectRegIn,
+                 CodeBBSelectReg);
+  InnerRegion.addMBB(MergeBB);
+
+  LLVM_DEBUG(InnerRegion.print(dbgs(), TRI));
+  rewriteLiveOutRegs(IfBB, CodeBB, MergeBB, &InnerRegion, CurrentRegion);
+  extractKilledPHIs(CodeBB);
+  if (IsRegionEntryBB)
+    createEntryPHIs(CurrentRegion);
+  return IfBB;
 }
 
 MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
@@ -2712,12 +2707,11 @@ bool AMDGPUMachineCFGStructurizer::structurizeRegion(RegionMRT *Region) {
   if (false && regionIsSimpleIf(Region)) {
     transformSimpleIfRegion(Region);
     return true;
-  } else if (regionIsSequence(Region)) {
+  }
+  if (regionIsSequence(Region))
     fixupRegionExits(Region);
-    return false;
-  } else {
+  else
     structurizeComplexRegion(Region);
-  }
   return false;
 }
 
@@ -2784,12 +2778,11 @@ AMDGPUMachineCFGStructurizer::initializeSelectRegisters(MRT *MRT, unsigned Selec
       InnerSelectOut = initializeSelectRegisters(CI, InnerSelectOut, MRI, TII);
     MRT->setBBSelectRegIn(InnerSelectOut);
     return InnerSelectOut;
-  } else {
-    MRT->setBBSelectRegOut(SelectOut);
-    unsigned NewSelectIn = createBBSelectReg(TII, MRI);
-    MRT->setBBSelectRegIn(NewSelectIn);
-    return NewSelectIn;
   }
+  MRT->setBBSelectRegOut(SelectOut);
+  unsigned NewSelectIn = createBBSelectReg(TII, MRI);
+  MRT->setBBSelectRegIn(NewSelectIn);
+  return NewSelectIn;
 }
 
 static void checkRegOnlyPHIInputs(MachineFunction &MF) {

diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 17413ab55536d..73796edb5d3e3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1116,15 +1116,14 @@ bool AMDGPURegisterBankInfo::applyMappingLoad(
             LegalizerHelper::Legalized)
           return false;
         return true;
+      }
+      LLT WiderTy = widen96To128(LoadTy);
+      auto WideLoad = B.buildLoadFromOffset(WiderTy, PtrReg, *MMO, 0);
+      if (WiderTy.isScalar()) {
+        B.buildTrunc(MI.getOperand(0), WideLoad);
       } else {
-        LLT WiderTy = widen96To128(LoadTy);
-        auto WideLoad = B.buildLoadFromOffset(WiderTy, PtrReg, *MMO, 0);
-        if (WiderTy.isScalar())
-          B.buildTrunc(MI.getOperand(0), WideLoad);
-        else {
-          B.buildDeleteTrailingVectorElements(MI.getOperand(0).getReg(),
-                                              WideLoad);
-        }
+        B.buildDeleteTrailingVectorElements(MI.getOperand(0).getReg(),
+                                            WideLoad);
       }
     }
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index 55218afb9a8e8..2e1bdf4692478 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -1038,15 +1038,14 @@ unsigned GCNSubtarget::getNSAThreshold(const MachineFunction &MF) const {
 const AMDGPUSubtarget &AMDGPUSubtarget::get(const MachineFunction &MF) {
   if (MF.getTarget().getTargetTriple().getArch() == Triple::amdgcn)
     return static_cast<const AMDGPUSubtarget&>(MF.getSubtarget<GCNSubtarget>());
-  else
-    return static_cast<const AMDGPUSubtarget&>(MF.getSubtarget<R600Subtarget>());
+  return static_cast<const AMDGPUSubtarget &>(MF.getSubtarget<R600Subtarget>());
 }
 
 const AMDGPUSubtarget &AMDGPUSubtarget::get(const TargetMachine &TM, const Function &F) {
   if (TM.getTargetTriple().getArch() == Triple::amdgcn)
     return static_cast<const AMDGPUSubtarget&>(TM.getSubtarget<GCNSubtarget>(F));
-  else
-    return static_cast<const AMDGPUSubtarget&>(TM.getSubtarget<R600Subtarget>(F));
+  return static_cast<const AMDGPUSubtarget &>(
+      TM.getSubtarget<R600Subtarget>(F));
 }
 
 GCNUserSGPRUsageInfo::GCNUserSGPRUsageInfo(const Function &F,

diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 1d43043308ed9..217487b2cc7e6 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -99,13 +99,11 @@ class AMDGPUOperand : public MCParsedAsmOperand {
     int64_t getModifiersOperand() const {
       assert(!(hasFPModifiers() && hasIntModifiers())
            && "fp and int modifiers should not be used simultaneously");
-      if (hasFPModifiers()) {
+      if (hasFPModifiers())
         return getFPModifiersOperand();
-      } else if (hasIntModifiers()) {
+      if (hasIntModifiers())
         return getIntModifiersOperand();
-      } else {
-        return 0;
-      }
+      return 0;
     }
 
     friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
@@ -2162,10 +2160,9 @@ template <bool IsFake16> bool AMDGPUOperand::isT16VRegWithInputMods() const {
 bool AMDGPUOperand::isSDWAOperand(MVT type) const {
   if (AsmParser->isVI())
     return isVReg32();
-  else if (AsmParser->isGFX9Plus())
+  if (AsmParser->isGFX9Plus())
     return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(type);
-  else
-    return false;
+  return false;
 }
 
 bool AMDGPUOperand::isSDWAFP16Operand() const {
@@ -3680,19 +3677,17 @@ static OperandIndices getSrcOperandIndices(unsigned Opcode,
 
 bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
   const MCOperand &MO = Inst.getOperand(OpIdx);
-  if (MO.isImm()) {
+  if (MO.isImm())
     return !isInlineConstant(Inst, OpIdx);
-  } else if (MO.isReg()) {
+  if (MO.isReg()) {
     auto Reg = MO.getReg();
-    if (!Reg) {
+    if (!Reg)
       return false;
-    }
     const MCRegisterInfo *TRI = getContext().getRegisterInfo();
     auto PReg = mc2PseudoReg(Reg);
     return isSGPR(PReg, TRI) && PReg != SGPR_NULL;
-  } else {
-    return true;
   }
+  return true;
 }
 
 // Based on the comment for `AMDGPUInstructionSelector::selectWritelane`:
@@ -6338,16 +6333,20 @@ StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
     setForcedDPP(true);
     setForcedEncodingSize(64);
     return Name.substr(0, Name.size() - 8);
-  } else if (Name.ends_with("_e64")) {
+  }
+  if (Name.ends_with("_e64")) {
     setForcedEncodingSize(64);
     return Name.substr(0, Name.size() - 4);
-  } else if (Name.ends_with("_e32")) {
+  }
+  if (Name.ends_with("_e32")) {
     setForcedEncodingSize(32);
     return Name.substr(0, Name.size() - 4);
-  } else if (Name.ends_with("_dpp")) {
+  }
+  if (Name.ends_with("_dpp")) {
     setForcedDPP(true);
     return Name.substr(0, Name.size() - 4);
-  } else if (Name.ends_with("_sdwa")) {
+  }
+  if (Name.ends_with("_sdwa")) {
     setForcedSDWA(true);
     return Name.substr(0, Name.size() - 5);
   }
@@ -7754,10 +7753,9 @@ AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
     Val = getToken().getStringContents();
     lex();
     return true;
-  } else {
-    Error(getLoc(), ErrMsg);
-    return false;
   }
+  Error(getLoc(), ErrMsg);
+  return false;
 }
 
 bool
@@ -7766,11 +7764,10 @@ AMDGPUAsmParser::parseId(StringRef &Val, const StringRef ErrMsg) {
     Val = getTokenStr();
     lex();
     return true;
-  } else {
-    if (!ErrMsg.empty())
-      Error(getLoc(), ErrMsg);
-    return false;
   }
+  if (!ErrMsg.empty())
+    Error(getLoc(), ErrMsg);
+  return false;
 }
 
 AsmToken
@@ -9475,8 +9472,8 @@ void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
            (SkipSrcVcc && Inst.getNumOperands() == 5))) {
         SkippedVcc = true;
         continue;
-      } else if (BasicInstType == SIInstrFlags::VOPC &&
-                 Inst.getNumOperands() == 0) {
+      }
+      if (BasicInstType == SIInstrFlags::VOPC && Inst.getNumOperands() == 0) {
         SkippedVcc = true;
         continue;
       }

diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index 3e7b6ab19dd0c..1a0dc7098347a 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -1566,8 +1566,7 @@ AMDGPUDisassembler::decodeNonVGPRSrcOp(const OpWidthTy Width, unsigned Val,
     if (MandatoryLiteral)
       // Keep a sentinel value for deferred setting
       return MCOperand::createImm(LITERAL_CONST);
-    else
-      return decodeLiteralConstant(Sema == AMDGPU::OperandSemantics::FP64);
+    return decodeLiteralConstant(Sema == AMDGPU::OperandSemantics::FP64);
   }
 
   switch (Width) {
@@ -1701,9 +1700,9 @@ AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width, const unsigned Val,
       return decodeFPImmed(ImmWidth, SVal, Sema);
 
     return decodeSpecialReg32(SVal);
-  } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
-    return createRegOperand(getVgprClassId(Width), Val);
   }
+  if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands))
+    return createRegOperand(getVgprClassId(Width), Val);
   llvm_unreachable("unsupported target");
 }
 
@@ -1731,15 +1730,13 @@ MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
     if (TTmpIdx >= 0) {
       auto TTmpClsId = getTtmpClassId(IsWave64 ? OPW64 : OPW32);
       return createSRegOperand(TTmpClsId, TTmpIdx);
-    } else if (Val > SGPR_MAX) {
-      return IsWave64 ? decodeSpecialReg64(Val)
-                      : decodeSpecialReg32(Val);
-    } else {
-      return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val);
     }
-  } else {
-    return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO);
+    if (Val > SGPR_MAX) {
+      return IsWave64 ? decodeSpecialReg64(Val) : decodeSpecialReg32(Val);
+    }
+    return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val);
   }
+  return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO);
 }
 
 MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
@@ -2265,7 +2262,8 @@ Expected<bool> AMDGPUDisassembler::decodeKernelDescriptorDirective(
       return createReservedKDBitsError(
           KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
           amdhsa::KERNEL_CODE_PROPERTIES_OFFSET, "must be zero on gfx9");
-    } else if (isGFX10Plus()) {
+    }
+    if (isGFX10Plus()) {
       PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
                       KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
     }

diff --git a/llvm/lib/Target/AMDGPU/GCNILPSched.cpp b/llvm/lib/Target/AMDGPU/GCNILPSched.cpp
index 5926abca12449..8f15cc1b2b537 100644
--- a/llvm/lib/Target/AMDGPU/GCNILPSched.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNILPSched.cpp
@@ -224,13 +224,11 @@ const SUnit *GCNILPScheduler::pickBest(const SUnit *left, const SUnit *right)
       return result > 0 ? right : left;
     return left;
   }
-  else {
-    if (left->getHeight() != right->getHeight())
-      return (left->getHeight() > right->getHeight()) ? right : left;
+  if (left->getHeight() != right->getHeight())
+    return (left->getHeight() > right->getHeight()) ? right : left;
 
-    if (left->getDepth() != right->getDepth())
-      return (left->getDepth() < right->getDepth()) ? right : left;
-  }
+  if (left->getDepth() != right->getDepth())
+    return (left->getDepth() < right->getDepth()) ? right : left;
 
   assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");

diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index bb5de368810d5..37bb9675d8c1d 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -1071,7 +1071,8 @@ void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo,
   if (!AMDGPU::isLegalDPALU_DPPControl(Imm) && AMDGPU::isDPALU_DPP(Desc)) {
     O << " /* DP ALU dpp only supports row_newbcast */";
     return;
-  } else if (Imm <= DppCtrl::QUAD_PERM_LAST) {
+  }
+  if (Imm <= DppCtrl::QUAD_PERM_LAST) {
     O << "quad_perm:[";
     O << formatDec(Imm & 0x3)         << ',';
     O << formatDec((Imm & 0xc)  >> 2) << ',';

diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
index 30dd384051b94..d2ac5a7ebb2fb 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
@@ -88,8 +88,7 @@ static MCInstPrinter *createAMDGPUMCInstPrinter(const Triple &T,
                                                 const MCRegisterInfo &MRI) {
   if (T.getArch() == Triple::r600)
     return new R600InstPrinter(MAI, MII, MRI);
-  else
-    return new AMDGPUInstPrinter(MAI, MII, MRI);
+  return new AMDGPUInstPrinter(MAI, MII, MRI);
 }
 
 static MCTargetStreamer *createAMDGPUAsmTargetStreamer(MCStreamer &S,

diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
index 6c539df7677ee..fa040d548f64c 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
@@ -94,7 +94,8 @@ void R600MCCodeEmitter::encodeInstruction(const MCInst &MI,
     MI.getOpcode() == R600::BUNDLE ||
     MI.getOpcode() == R600::KILL) {
     return;
-  } else if (IS_VTX(Desc)) {
+  }
+  if (IS_VTX(Desc)) {
     uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups, STI);
     uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
     if (!(STI.hasFeature(R600::FeatureCaymanISA))) {
@@ -105,29 +106,24 @@ void R600MCCodeEmitter::encodeInstruction(const MCInst &MI,
     emit(InstWord2, CB);
     emit((uint32_t)0, CB);
   } else if (IS_TEX(Desc)) {
-      int64_t Sampler = MI.getOperand(14).getImm();
-
-      int64_t SrcSelect[4] = {
-        MI.getOperand(2).getImm(),
-        MI.getOperand(3).getImm(),
-        MI.getOperand(4).getImm(),
-        MI.getOperand(5).getImm()
-      };
-      int64_t Offsets[3] = {
-        MI.getOperand(6).getImm() & 0x1F,
-        MI.getOperand(7).getImm() & 0x1F,
-        MI.getOperand(8).getImm() & 0x1F
-      };
-
-      uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups, STI);
-      uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
-          SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
-          SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |
-          Offsets[2] << 10;
-
-      emit(Word01, CB);
-      emit(Word2, CB);
-      emit((uint32_t)0, CB);
+    int64_t Sampler = MI.getOperand(14).getImm();
+
+    int64_t SrcSelect[4] = {
+        MI.getOperand(2).getImm(), MI.getOperand(3).getImm(),
+        MI.getOperand(4).getImm(), MI.getOperand(5).getImm()};
+    int64_t Offsets[3] = {MI.getOperand(6).getImm() & 0x1F,
+                          MI.getOperand(7).getImm() & 0x1F,
+                          MI.getOperand(8).getImm() & 0x1F};
+
+    uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups, STI);
+    uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
+                     SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
+                     SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 |
+                     Offsets[1] << 5 | Offsets[2] << 10;
+
+    emit(Word01, CB);
+    emit(Word2, CB);
+    emit((uint32_t)0, CB);
   } else {
     uint64_t Inst = getBinaryCodeForInstr(MI, Fixups, STI);
     if ((STI.hasFeature(R600::FeatureR600ALUInst)) &&

diff --git a/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
index 4e26bc8a4b52c..81b142e4e7b9e 100644
--- a/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
@@ -89,15 +89,14 @@ bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
       // work-around when CurrentSubEntries > 3 allows us to over-allocate stack
       // resources without any problems.
       return CurrentSubEntries > 3;
-    } else {
-      assert(ST->getWavefrontSize() == 32);
-      // We are being conservative here.  We only require the work-around if
-      // CurrentSubEntries > 7 &&
-      // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
-      // See the comment on the wavefront size == 64 case for why we are
-      // being conservative.
-      return CurrentSubEntries > 7;
     }
+    assert(ST->getWavefrontSize() == 32);
+    // We are being conservative here.  We only require the work-around if
+    // CurrentSubEntries > 7 &&
+    // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
+    // See the comment on the wavefront size == 64 case for why we are
+    // being conservative.
+    return CurrentSubEntries > 7;
   }
 }
 
@@ -106,19 +105,18 @@ unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
   default:
     return 0;
   case CFStack::FIRST_NON_WQM_PUSH:
-  assert(!ST->hasCaymanISA());
-  if (ST->getGeneration() <= AMDGPUSubtarget::R700) {
-    // +1 For the push operation.
-    // +2 Extra space required.
-    return 3;
-  } else {
+    assert(!ST->hasCaymanISA());
+    if (ST->getGeneration() <= AMDGPUSubtarget::R700) {
+      // +1 For the push operation.
+      // +2 Extra space required.
+      return 3;
+    }
     // Some documentation says that this is not necessary on Evergreen,
     // but experimentation has show that we need to allocate 1 extra
     // sub-entry for the first non-WQM push.
     // +1 For the push operation.
     // +1 Extra space required.
     return 2;
-  }
   case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
     assert(ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
     // +1 For the push operation.
@@ -294,8 +292,8 @@ class R600ControlFlowFinalizer : public MachineFunctionPass {
     if ((DstRegs.find(SrcMI) == DstRegs.end())) {
       DstRegs.insert(DstMI);
       return true;
-    } else
-      return false;
+    }
+    return false;
   }
 
   ClauseFile

diff --git a/llvm/lib/Target/AMDGPU/R600ISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/R600ISelDAGToDAG.cpp
index 28bcf72b3b091..6b4f5a88c6476 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelDAGToDAG.cpp
@@ -175,8 +175,9 @@ bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                        MVT::i32);
     return true;
     // If the pointer address is constant, we can move it to the offset field.
-  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr)) &&
-             isInt<16>(IMMOffset->getZExtValue())) {
+  }
+  if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr)) &&
+      isInt<16>(IMMOffset->getZExtValue())) {
     Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                   SDLoc(CurDAG->getEntryNode()), R600::ZERO,
                                   MVT::i32);

diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 159b2d440b31a..7e0d96622f3c5 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -775,13 +775,11 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
 }
 
 bool R600TargetLowering::isZero(SDValue Op) const {
-  if(ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
+  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op))
     return Cst->isZero();
-  } else if(ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op)){
+  if (ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op))
     return CstFP->isZero();
-  } else {
-    return false;
-  }
+  return false;
 }
 
 bool R600TargetLowering::isHWTrueValue(SDValue Op) const {
@@ -1187,7 +1185,8 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
       return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
                                      Op->getVTList(), Args, MemVT,
                                      StoreNode->getMemOperand());
-    } else if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR && VT.bitsGE(MVT::i32)) {
+    }
+    if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR && VT.bitsGE(MVT::i32)) {
       // Convert pointer from byte address to dword address.
       Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, PtrVT, DWordAddr);
 
@@ -1348,16 +1347,15 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
     if (isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
         isa<ConstantSDNode>(Ptr)) {
       return constBufferLoad(LoadNode, LoadNode->getAddressSpace(), DAG);
-    } else {
-      //TODO: Does this even work?
-      // non-constant ptr can't be folded, keeps it as a v4f32 load
-      Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
-          DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
-                      DAG.getConstant(4, DL, MVT::i32)),
-                      DAG.getConstant(LoadNode->getAddressSpace() -
-                                      AMDGPUAS::CONSTANT_BUFFER_0, DL, MVT::i32)
-          );
     }
+    // TODO: Does this even work?
+    //  non-constant ptr can't be folded, keeps it as a v4f32 load
+    Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
+                         DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
+                                     DAG.getConstant(4, DL, MVT::i32)),
+                         DAG.getConstant(LoadNode->getAddressSpace() -
+                                             AMDGPUAS::CONSTANT_BUFFER_0,
+                                         DL, MVT::i32));
 
     if (!VT.isVector()) {
       Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,

diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
index 29a43bf4dc52f..a3159944a2add 100644
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -679,7 +679,8 @@ bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
     if (LastOpc == R600::JUMP) {
       TBB = LastInst.getOperand(0).getMBB();
       return false;
-    } else if (LastOpc == R600::JUMP_COND) {
+    }
+    if (LastOpc == R600::JUMP_COND) {
       auto predSet = I;
       while (!isPredicateSetter(predSet->getOpcode())) {
         predSet = --I;
@@ -739,38 +740,36 @@ unsigned R600InstrInfo::insertBranch(MachineBasicBlock &MBB,
     if (Cond.empty()) {
       BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(TBB);
       return 1;
-    } else {
-      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
-      assert(PredSet && "No previous predicate !");
-      addFlag(*PredSet, 0, MO_FLAG_PUSH);
-      PredSet->getOperand(2).setImm(Cond[1].getImm());
-
-      BuildMI(&MBB, DL, get(R600::JUMP_COND))
-             .addMBB(TBB)
-             .addReg(R600::PREDICATE_BIT, RegState::Kill);
-      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
-      if (CfAlu == MBB.end())
-        return 1;
-      assert (CfAlu->getOpcode() == R600::CF_ALU);
-      CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
-      return 1;
     }
-  } else {
     MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
     assert(PredSet && "No previous predicate !");
     addFlag(*PredSet, 0, MO_FLAG_PUSH);
     PredSet->getOperand(2).setImm(Cond[1].getImm());
+
     BuildMI(&MBB, DL, get(R600::JUMP_COND))
-            .addMBB(TBB)
-            .addReg(R600::PREDICATE_BIT, RegState::Kill);
-    BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(FBB);
+        .addMBB(TBB)
+        .addReg(R600::PREDICATE_BIT, RegState::Kill);
     MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
     if (CfAlu == MBB.end())
-      return 2;
+      return 1;
     assert (CfAlu->getOpcode() == R600::CF_ALU);
     CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
-    return 2;
+    return 1;
   }
+  MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
+  assert(PredSet && "No previous predicate !");
+  addFlag(*PredSet, 0, MO_FLAG_PUSH);
+  PredSet->getOperand(2).setImm(Cond[1].getImm());
+  BuildMI(&MBB, DL, get(R600::JUMP_COND))
+      .addMBB(TBB)
+      .addReg(R600::PREDICATE_BIT, RegState::Kill);
+  BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(FBB);
+  MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+  if (CfAlu == MBB.end())
+    return 2;
+  assert(CfAlu->getOpcode() == R600::CF_ALU);
+  CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
+  return 2;
 }
 
 unsigned R600InstrInfo::removeBranch(MachineBasicBlock &MBB,
@@ -853,20 +852,19 @@ bool R600InstrInfo::isPredicable(const MachineInstr &MI) const {
   // be predicated.  Until we have proper support for instruction clauses in the
   // backend, we will mark KILL* instructions as unpredicable.
 
-  if (MI.getOpcode() == R600::KILLGT) {
+  if (MI.getOpcode() == R600::KILLGT)
     return false;
-  } else if (MI.getOpcode() == R600::CF_ALU) {
+  if (MI.getOpcode() == R600::CF_ALU) {
     // If the clause start in the middle of MBB then the MBB has more
     // than a single clause, unable to predicate several clauses.
     if (MI.getParent()->begin() != MachineBasicBlock::const_iterator(MI))
       return false;
     // TODO: We don't support KC merging atm
     return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
-  } else if (isVector(MI)) {
-    return false;
-  } else {
-    return TargetInstrInfo::isPredicable(MI);
   }
+  if (isVector(MI))
+    return false;
+  return TargetInstrInfo::isPredicable(MI);
 }
 
 bool

diff --git a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
index abcccc492c671..4db5808c93f50 100644
--- a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
@@ -598,7 +598,7 @@ MachineInstr *R600MachineCFGStructurizer::getLoopendBlockBranchInstr(
     if (MI) {
       if (isCondBranch(MI) || isUncondBranch(MI))
         return MI;
-      else if (!TII->isMov(MI->getOpcode()))
+      if (!TII->isMov(MI->getOpcode()))
         break;
     }
   }

diff --git a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
index d26879ed8d608..eded8063feaaa 100644
--- a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
@@ -202,11 +202,9 @@ void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
 
 bool R600SchedStrategy::regBelongsToClass(Register Reg,
                                           const TargetRegisterClass *RC) const {
-  if (!Reg.isVirtual()) {
+  if (!Reg.isVirtual())
     return RC->contains(Reg);
-  } else {
-    return MRI->getRegClass(Reg) == RC;
-  }
+  return MRI->getRegClass(Reg) == RC;
 }
 
 R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
@@ -319,9 +317,8 @@ SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
       InstructionsGroupCandidate.pop_back();
       Q.erase((It + 1).base());
       return SU;
-    } else {
-      InstructionsGroupCandidate.pop_back();
     }
+    InstructionsGroupCandidate.pop_back();
   }
   return nullptr;
 }

diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 3491558a3e8e7..6dfd0bb3964e9 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -456,7 +456,8 @@ static bool hoistAndMergeSGPRInits(unsigned Reg,
           (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
         Imm = nullptr;
         break;
-      } else if (MO.isImm())
+      }
+      if (MO.isImm())
         Imm = &MO;
     }
     if (Imm)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index df5a334f83082..b68962e0541ce 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1663,14 +1663,14 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
 
 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                         const MachineFunction &MF) const {
-  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
+  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
     return (MemVT.getSizeInBits() <= 4 * 32);
-  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
+  if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
     unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
     return (MemVT.getSizeInBits() <= MaxPrivateBits);
-  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
-    return (MemVT.getSizeInBits() <= 2 * 32);
   }
+  if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
+    return (MemVT.getSizeInBits() <= 2 * 32);
   return true;
 }
 
@@ -3031,7 +3031,8 @@ SDValue SITargetLowering::LowerFormalArguments(
 
       InVals.push_back(NewArg);
       continue;
-    } else if (!IsEntryFunc && VA.isMemLoc()) {
+    }
+    if (!IsEntryFunc && VA.isMemLoc()) {
       SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
       InVals.push_back(Val);
       if (!Arg.Flags.isByVal())
@@ -10921,7 +10922,8 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
       return expandUnalignedStore(Store, DAG);
 
     return SDValue();
-  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
+  }
+  if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
     switch (Subtarget->getMaxPrivateElementSize()) {
     case 4:
       return scalarizeVectorStore(Store, DAG);
@@ -12516,11 +12518,12 @@ SITargetLowering::performSignExtendInRegCombine(SDNode *N,
         Opc, DL, ResList, Ops, M->getMemoryVT(), M->getMemOperand());
     SDValue LoadVal = DCI.DAG.getNode(ISD::TRUNCATE, DL, VT, BufferLoad);
     return LoadVal;
-  } else if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
-               VTSign->getVT() == MVT::i8) ||
-              (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
-               VTSign->getVT() == MVT::i16)) &&
-             Src.hasOneUse()) {
+  }
+  if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
+        VTSign->getVT() == MVT::i8) ||
+       (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
+        VTSign->getVT() == MVT::i16)) &&
+      Src.hasOneUse()) {
     auto *M = cast<MemSDNode>(Src);
     SDValue Ops[] = {
       Src.getOperand(0), // Chain
@@ -16343,7 +16346,7 @@ SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
                                                : &AMDGPU::SReg_32RegClass;
   if (!TRI->isSGPRClass(RC) && !isDivergent)
     return TRI->getEquivalentSGPRClass(RC);
-  else if (TRI->isSGPRClass(RC) && isDivergent)
+  if (TRI->isSGPRClass(RC) && isDivergent)
     return TRI->getEquivalentVGPRClass(RC);
 
   return RC;

diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 6d12e8c6f2de2..7f7b7c4472042 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1381,13 +1381,13 @@ unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
     // Assume hi bits are unneeded. Only _e64 true16 instructions are legal
     // before RA.
     return RI.isSGPRClass(DstRC) ? AMDGPU::COPY : AMDGPU::V_MOV_B16_t16_e64;
-  } else if (RI.getRegSizeInBits(*DstRC) == 32) {
+  }
+  if (RI.getRegSizeInBits(*DstRC) == 32)
     return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
-  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
+  if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC))
     return AMDGPU::S_MOV_B64;
-  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
-    return  AMDGPU::V_MOV_B64_PSEUDO;
-  }
+  if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC))
+    return AMDGPU::V_MOV_B64_PSEUDO;
   return AMDGPU::COPY;
 }
 
@@ -4546,13 +4546,11 @@ bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
 
   // SGPRs use the constant bus
   if (MO.isImplicit()) {
-    return MO.getReg() == AMDGPU::M0 ||
-           MO.getReg() == AMDGPU::VCC ||
+    return MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
            MO.getReg() == AMDGPU::VCC_LO;
-  } else {
-    return AMDGPU::SReg_32RegClass.contains(MO.getReg()) ||
-           AMDGPU::SReg_64RegClass.contains(MO.getReg());
   }
+  return AMDGPU::SReg_32RegClass.contains(MO.getReg()) ||
+         AMDGPU::SReg_64RegClass.contains(MO.getReg());
 }
 
 static Register findImplicitSGPRRead(const MachineInstr &MI) {
@@ -4859,8 +4857,8 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
         ErrInfo =
             "Dst register should be tied to implicit use of preserved register";
         return false;
-      } else if (TiedMO.getReg().isPhysical() &&
-                 Dst.getReg() != TiedMO.getReg()) {
+      }
+      if (TiedMO.getReg().isPhysical() && Dst.getReg() != TiedMO.getReg()) {
         ErrInfo = "Dst register should use same physical register as preserved";
         return false;
       }
@@ -5232,7 +5230,8 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
                   "row_newbroadcast/row_share is not supported before "
                   "GFX90A/GFX10";
         return false;
-      } else if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) {
+      }
+      if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) {
         ErrInfo = "Invalid dpp_ctrl value: "
                   "row_share and row_xmask are not supported before GFX10";
         return false;
@@ -9513,7 +9512,8 @@ MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
       if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
         MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
         return nullptr;
-      } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
+      }
+      if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
         MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
         return nullptr;
       }

diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 2186c1ede468c..c5251826b117c 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -778,7 +778,8 @@ bool SIMachineFunctionInfo::usesAGPRs(const MachineFunction &MF) const {
     if (RC && SIRegisterInfo::isAGPRClass(RC)) {
       UsesAGPRs = true;
       return true;
-    } else if (!RC && !MRI.use_empty(Reg) && MRI.getType(Reg).isValid()) {
+    }
+    if (!RC && !MRI.use_empty(Reg) && MRI.getType(Reg).isValid()) {
       // Defer caching UsesAGPRs, function might not yet been regbank selected.
       return true;
     }

diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
index fb4f5ea4aa760..7c7e0204b1764 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -617,9 +617,8 @@ SIScheduleBlockCreator::getBlocks(SISchedulerBlockCreatorVariant BlockVariant) {
     Res.TopDownBlock2Index = TopDownBlock2Index;
     Blocks[BlockVariant] = Res;
     return Res;
-  } else {
-    return B->second;
   }
+  return B->second;
 }
 
 bool SIScheduleBlockCreator::isSUInBlock(SUnit *SU, unsigned ID) {
@@ -705,45 +704,42 @@ void SIScheduleBlockCreator::colorHighLatenciesGroups() {
                                                HasSubGraph);
         if (!HasSubGraph)
           continue; // No dependencies between each other
-        else if (SubGraph.size() > 5) {
+        if (SubGraph.size() > 5) {
           // Too many elements would be required to be added to the block.
           CompatibleGroup = false;
           break;
         }
-        else {
-          // Check the type of dependency
-          for (unsigned k : SubGraph) {
-            // If in the path to join the two instructions,
-            // there is another high latency instruction,
-            // or instructions colored for another block
-            // abort the merge.
-            if (DAG->IsHighLatencySU[k] ||
-                (CurrentColoring[k] != ProposedColor &&
-                 CurrentColoring[k] != 0)) {
-              CompatibleGroup = false;
-              break;
-            }
-            // If one of the SU in the subgraph depends on the result of SU j,
-            // there'll be a data dependency.
-            if (hasDataDependencyPred(DAG->SUnits[k], DAG->SUnits[j])) {
-              CompatibleGroup = false;
-              break;
-            }
-          }
-          if (!CompatibleGroup)
+        // Check the type of dependency
+        for (unsigned k : SubGraph) {
+          // If in the path to join the two instructions,
+          // there is another high latency instruction,
+          // or instructions colored for another block
+          // abort the merge.
+          if (DAG->IsHighLatencySU[k] || (CurrentColoring[k] != ProposedColor &&
+                                          CurrentColoring[k] != 0)) {
+            CompatibleGroup = false;
             break;
-          // Same check for the SU
-          if (hasDataDependencyPred(SU, DAG->SUnits[j])) {
+          }
+          // If one of the SU in the subgraph depends on the result of SU j,
+          // there'll be a data dependency.
+          if (hasDataDependencyPred(DAG->SUnits[k], DAG->SUnits[j])) {
             CompatibleGroup = false;
             break;
           }
-          // Add all the required instructions to the block
-          // These cannot live in another block (because they
-          // depend (order dependency) on one of the
-          // instruction in the block, and are required for the
-          // high latency instruction we add.
-          llvm::append_range(AdditionalElements, SubGraph);
         }
+        if (!CompatibleGroup)
+          break;
+        // Same check for the SU
+        if (hasDataDependencyPred(SU, DAG->SUnits[j])) {
+          CompatibleGroup = false;
+          break;
+        }
+        // Add all the required instructions to the block
+        // These cannot live in another block (because they
+        // depend (order dependency) on one of the
+        // instruction in the block, and are required for the
+        // high latency instruction we add.
+        llvm::append_range(AdditionalElements, SubGraph);
       }
       if (CompatibleGroup) {
         FormingGroup.insert(SU.NodeNum);

diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
index 1f6f45e9630ce..93b70fa4ba974 100644
--- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
+++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
@@ -503,12 +503,12 @@ bool SIOptimizeExecMasking::optimizeExecSequence() {
           SaveExecInst = &*J;
           LLVM_DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
           continue;
-        } else {
-          LLVM_DEBUG(dbgs()
-                     << "Instruction does not read exec copy: " << *J << '\n');
-          break;
         }
-      } else if (ReadsCopyFromExec && !SaveExecInst) {
+        LLVM_DEBUG(dbgs() << "Instruction does not read exec copy: " << *J
+                          << '\n');
+        break;
+      }
+      if (ReadsCopyFromExec && !SaveExecInst) {
         // Make sure no other instruction is trying to use this copy, before it
         // will be rewritten by the saveexec, i.e. hasOneUse. There may have
         // been another use, such as an inserted spill. For example:

diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index d428864c9dd59..d80e1277b2a8a 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -597,12 +597,11 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
         Opcode == AMDGPU::V_LSHLREV_B32_e64) {
       return std::make_unique<SDWADstOperand>(
           Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
-    } else {
-      return std::make_unique<SDWASrcOperand>(
-          Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
-          Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
-          Opcode != AMDGPU::V_LSHRREV_B32_e64);
     }
+    return std::make_unique<SDWASrcOperand>(
+        Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
+        Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
+            Opcode != AMDGPU::V_LSHRREV_B32_e64);
     break;
   }
 
@@ -633,14 +632,12 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
       break;
 
     if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
-        Opcode == AMDGPU::V_LSHLREV_B16_e64) {
+        Opcode == AMDGPU::V_LSHLREV_B16_e64)
       return std::make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
-    } else {
-      return std::make_unique<SDWASrcOperand>(
-            Src1, Dst, BYTE_1, false, false,
-            Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
+    return std::make_unique<SDWASrcOperand>(
+        Src1, Dst, BYTE_1, false, false,
+        Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
             Opcode != AMDGPU::V_LSHRREV_B16_e64);
-    }
     break;
   }
 

diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index bb5f2328129f9..96d4863e94014 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -537,8 +537,7 @@ CanBeVOPD getCanBeVOPD(unsigned Opc) {
   const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
   if (Info)
     return {Info->CanBeVOPDX, true};
-  else
-    return {false, false};
+  return {false, false};
 }
 
 unsigned getVOPDOpcode(unsigned Opc) {
@@ -1479,11 +1478,10 @@ static unsigned getCombinedCountBitMask(const IsaVersion &Version,
     unsigned Storecnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
                                    getStorecntBitWidth(Version.Major));
     return Dscnt | Storecnt;
-  } else {
-    unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
-                                  getLoadcntBitWidth(Version.Major));
-    return Dscnt | Loadcnt;
   }
+  unsigned Loadcnt = getBitMask(getLoadcntStorecntBitShift(Version.Major),
+                                getLoadcntBitWidth(Version.Major));
+  return Dscnt | Loadcnt;
 }
 
 Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt) {


        

