[clang] 9727c77 - [NFC] Rename Instrinsic to Intrinsic

David Green via cfe-commits cfe-commits at lists.llvm.org
Mon Apr 25 10:13:30 PDT 2022


Author: David Green
Date: 2022-04-25T18:13:23+01:00
New Revision: 9727c77d58ac920a4158d08c15659470e52ddda4

URL: https://github.com/llvm/llvm-project/commit/9727c77d58ac920a4158d08c15659470e52ddda4
DIFF: https://github.com/llvm/llvm-project/commit/9727c77d58ac920a4158d08c15659470e52ddda4.diff

LOG: [NFC] Rename Instrinsic to Intrinsic

Added: 
    

Modified: 
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/test/CodeGen/builtins-nvptx-mma.py
    clang/test/CodeGenCUDA/fp-contract.cu
    clang/test/Profile/c-avoid-direct-call.c
    clang/test/Profile/c-indirect-call.c
    clang/test/Profile/cxx-indirect-call.cpp
    llvm/include/llvm/Analysis/VectorUtils.h
    llvm/include/llvm/CodeGen/MachineInstr.h
    llvm/include/llvm/CodeGen/ReplaceWithVeclib.h
    llvm/include/llvm/IR/InstVisitor.h
    llvm/include/llvm/IR/IntrinsicsARM.td
    llvm/include/llvm/IR/Metadata.h
    llvm/include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h
    llvm/include/llvm/Transforms/Utils/Local.h
    llvm/lib/Analysis/CallGraphSCCPass.cpp
    llvm/lib/Analysis/ConstantFolding.cpp
    llvm/lib/Analysis/IVDescriptors.cpp
    llvm/lib/Analysis/VectorUtils.cpp
    llvm/lib/CodeGen/ReplaceWithVeclib.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/lib/CodeGen/SjLjEHPrepare.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/lib/Target/AArch64/AArch64StackTagging.cpp
    llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
    llvm/lib/Target/Mips/MipsISelLowering.h
    llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
    llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
    llvm/lib/Target/PowerPC/README_P9.txt
    llvm/lib/Target/X86/X86LowerAMXType.cpp
    llvm/lib/Transforms/IPO/GlobalOpt.cpp
    llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
    llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
    llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
    llvm/lib/Transforms/Scalar/Scalarizer.cpp
    llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp
    llvm/lib/Transforms/Utils/Local.cpp
    llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
    llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
    llvm/lib/Transforms/Utils/SimplifyCFG.cpp
    llvm/lib/Transforms/Utils/StripGCRelocates.cpp
    llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
    llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
    llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
    llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll
    llvm/test/DebugInfo/WebAssembly/dbg-declare.ll
    llvm/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
    llvm/test/Transforms/FunctionSpecialization/function-specialization-nodup2.ll
    llvm/test/Transforms/Inline/inline_constprop.ll
    llvm/test/Transforms/InstCombine/stacksave-debuginfo.ll
    llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll
    llvm/test/Transforms/SROA/basictest.ll
    llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp

Removed: 
    


################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index fc2d32f3e26fe..f9966c1fd777c 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -18200,7 +18200,7 @@ RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
 
 /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
 /// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
-/// llvm.ptrmask instrinsic (with a GEP before in the align_up case).
+/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
 /// TODO: actually use ptrmask once most optimization passes know about it.
 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
   BuiltinAlignArgs Args(E, *this);

diff --git a/clang/test/CodeGen/builtins-nvptx-mma.py b/clang/test/CodeGen/builtins-nvptx-mma.py
index dc40f04c11ce6..6c09910020278 100644
--- a/clang/test/CodeGen/builtins-nvptx-mma.py
+++ b/clang/test/CodeGen/builtins-nvptx-mma.py
@@ -1,5 +1,5 @@
 # This script generates all variants of wmma builtins, verifies that clang calls
-# correct LLVM instrinsics, and checks that availability of specific builtins is
+# correct LLVM intrinsics, and checks that availability of specific builtins is
 # constrained by the correct PTX version and the target GPU variant.
 
 # Dummy test run to avoid lit warnings.

diff --git a/clang/test/CodeGenCUDA/fp-contract.cu b/clang/test/CodeGenCUDA/fp-contract.cu
index d466affded132..60824ba59ddfb 100644
--- a/clang/test/CodeGenCUDA/fp-contract.cu
+++ b/clang/test/CodeGenCUDA/fp-contract.cu
@@ -105,7 +105,7 @@
 
 // Explicit -ffp-contract=on -- fusing by front-end.
 // In IR,
-//    mult/add in the same statement - llvm.fmuladd instrinsic emitted
+//    mult/add in the same statement - llvm.fmuladd intrinsic emitted
 //    mult/add in different statement -  fmul/fadd instructions without
 //                                       contract flag are emitted.
 // In backend

diff --git a/clang/test/Profile/c-avoid-direct-call.c b/clang/test/Profile/c-avoid-direct-call.c
index cd02e714dbe17..ee75886327f1f 100644
--- a/clang/test/Profile/c-avoid-direct-call.c
+++ b/clang/test/Profile/c-avoid-direct-call.c
@@ -1,4 +1,4 @@
-// Check the value profiling instrinsics emitted by instrumentation.
+// Check the value profiling intrinsics emitted by instrumentation.
 
 // RUN: %clang_cc1 -triple x86_64-apple-macosx10.9 -main-file-name c-avoid-direct-call.c %s -o - -emit-llvm -fprofile-instrument=clang -mllvm -enable-value-profiling | FileCheck %s
 

diff --git a/clang/test/Profile/c-indirect-call.c b/clang/test/Profile/c-indirect-call.c
index 963beb6905815..731f571e8c0c1 100644
--- a/clang/test/Profile/c-indirect-call.c
+++ b/clang/test/Profile/c-indirect-call.c
@@ -1,4 +1,4 @@
-// Check the value profiling instrinsics emitted by instrumentation.
+// Check the value profiling intrinsics emitted by instrumentation.
 
 // RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-macosx10.9 -main-file-name c-indirect-call.c %s -o - -emit-llvm -fprofile-instrument=clang -mllvm -enable-value-profiling | FileCheck --check-prefix=NOEXT %s
 // RUN: %clang_cc1 -no-opaque-pointers -triple s390x-ibm-linux -main-file-name c-indirect-call.c %s -o - -emit-llvm -fprofile-instrument=clang -mllvm -enable-value-profiling | FileCheck --check-prefix=EXT %s

diff --git a/clang/test/Profile/cxx-indirect-call.cpp b/clang/test/Profile/cxx-indirect-call.cpp
index 6cecc64943d0e..74a5160b1d2b0 100644
--- a/clang/test/Profile/cxx-indirect-call.cpp
+++ b/clang/test/Profile/cxx-indirect-call.cpp
@@ -1,4 +1,4 @@
-// Check the value profiling instrinsics emitted by instrumentation.
+// Check the value profiling intrinsics emitted by instrumentation.
 
 // RUN: %clang_cc1 -no-opaque-pointers %s -o - -emit-llvm -fprofile-instrument=clang -mllvm -enable-value-profiling -fexceptions -fcxx-exceptions -triple x86_64-apple-macosx10.9 | FileCheck %s
 

diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index f90c68a88578c..868781197eda0 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -309,16 +309,16 @@ inline Type *ToVectorTy(Type *Scalar, unsigned VF) {
 /// Identify if the intrinsic is trivially vectorizable.
 /// This method returns true if the intrinsic's argument types are all scalars
 /// for the scalar form of the intrinsic and all vectors (or scalars handled by
-/// hasVectorInstrinsicScalarOpd) for the vector form of the intrinsic.
+/// hasVectorIntrinsicScalarOpd) for the vector form of the intrinsic.
 bool isTriviallyVectorizable(Intrinsic::ID ID);
 
 /// Identifies if the vector form of the intrinsic has a scalar operand.
-bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
+bool hasVectorIntrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
 
 /// Identifies if the vector form of the intrinsic has a scalar operand that has
 /// an overloaded type.
-bool hasVectorInstrinsicOverloadedScalarOpd(Intrinsic::ID ID,
-                                            unsigned ScalarOpdIdx);
+bool hasVectorIntrinsicOverloadedScalarOpd(Intrinsic::ID ID,
+                                           unsigned ScalarOpdIdx);
 
 /// Returns intrinsic ID for call.
 /// For the input call instruction it finds mapping intrinsic and returns

diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index e0ca1645b35be..cb6698c12d8ee 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -98,7 +98,7 @@ class MachineInstr
     FmContract   = 1 << 8,              // Instruction supports Fast math
                                         // contraction operations like fma.
     FmAfn        = 1 << 9,              // Instruction may map to Fast math
-                                        // instrinsic approximation.
+                                        // intrinsic approximation.
     FmReassoc    = 1 << 10,             // Instruction supports Fast math
                                         // reassociation of operand order.
     NoUWrap      = 1 << 11,             // Instruction supports binary operator

diff --git a/llvm/include/llvm/CodeGen/ReplaceWithVeclib.h b/llvm/include/llvm/CodeGen/ReplaceWithVeclib.h
index ea4d0c83f4e33..c71aca0c992b3 100644
--- a/llvm/include/llvm/CodeGen/ReplaceWithVeclib.h
+++ b/llvm/include/llvm/CodeGen/ReplaceWithVeclib.h
@@ -1,4 +1,4 @@
-//===- ReplaceWithVeclib.h - Replace vector instrinsics with veclib calls -===//
+//===- ReplaceWithVeclib.h - Replace vector intrinsics with veclib calls --===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.

diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index 89bf234c9de7c..7fec081d81555 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -199,7 +199,7 @@ class InstVisitor {
   RetTy visitCatchPadInst(CatchPadInst &I)     { DELEGATE(FuncletPadInst); }
   RetTy visitFreezeInst(FreezeInst &I)         { DELEGATE(Instruction); }
 
-  // Handle the special instrinsic instruction classes.
+  // Handle the special intrinsic instruction classes.
   RetTy visitDbgDeclareInst(DbgDeclareInst &I)    { DELEGATE(DbgVariableIntrinsic);}
   RetTy visitDbgValueInst(DbgValueInst &I)        { DELEGATE(DbgVariableIntrinsic);}
   RetTy visitDbgVariableIntrinsic(DbgVariableIntrinsic &I)

diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index a424847575927..ab5f6023faf4c 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -1158,7 +1158,7 @@ defm int_arm_mve_vabav: MVEPredicated<
   [llvm_i32_ty],
   [llvm_i32_ty, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>], llvm_anyvector_ty>;
 
-// The following 3 instrinsics are MVE vector reductions with two vector
+// The following 3 intrinsics are MVE vector reductions with two vector
 // operands.
 // The first 3 operands are boolean flags (must be compile-time constants):
 // * unsigned - the instruction operates on vectors of unsigned values and

diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
index 7965884990e55..eeeeb905506a7 100644
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -169,7 +169,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const Metadata &MD) {
 /// Metadata wrapper in the Value hierarchy.
 ///
 /// A member of the \a Value hierarchy to represent a reference to metadata.
-/// This allows, e.g., instrinsics to have metadata as operands.
+/// This allows, e.g., intrinsics to have metadata as operands.
 ///
 /// Notably, this is the only thing in either hierarchy that is allowed to
 /// reference \a LocalAsMetadata.

diff --git a/llvm/include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h b/llvm/include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h
index e4002159edbd0..5e876fc82ac17 100644
--- a/llvm/include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h
+++ b/llvm/include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h
@@ -1,5 +1,5 @@
 //===- ScalarizeMaskedMemIntrin.h - Scalarize unsupported masked mem ----===//
-//                                    instrinsics
+//                                    intrinsics
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.

diff --git a/llvm/include/llvm/Transforms/Utils/Local.h b/llvm/include/llvm/Transforms/Utils/Local.h
index d8abca426a7ae..946fc84b9a2cc 100644
--- a/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/llvm/include/llvm/Transforms/Utils/Local.h
@@ -335,7 +335,7 @@ bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint,
 
 /// Remove all instructions from a basic block other than its terminator
 /// and any present EH pad instructions. Returns a pair where the first element
-/// is the number of instructions (excluding debug info instrinsics) that have
+/// is the number of instructions (excluding debug info intrinsics) that have
 /// been removed, and the second element is the number of debug info intrinsics
 /// that have been removed.
 std::pair<unsigned, unsigned>

diff --git a/llvm/lib/Analysis/CallGraphSCCPass.cpp b/llvm/lib/Analysis/CallGraphSCCPass.cpp
index b6e54991591a6..8438f33f47127 100644
--- a/llvm/lib/Analysis/CallGraphSCCPass.cpp
+++ b/llvm/lib/Analysis/CallGraphSCCPass.cpp
@@ -270,7 +270,7 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
           Calls.count(Call) ||
 
           // If the call edge is not from a call or invoke, or it is a
-          // instrinsic call, then the function pass RAUW'd a call with
+          // intrinsic call, then the function pass RAUW'd a call with
           // another value. This can happen when constant folding happens
           // of well known functions etc.
           (Call->getCalledFunction() &&

diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 359d5bbf9ac14..299ea335e50a8 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -3038,7 +3038,7 @@ static Constant *ConstantFoldFixedVectorCall(
     // Gather a column of constants.
     for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
       // Some intrinsics use a scalar type for certain arguments.
-      if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
+      if (hasVectorIntrinsicScalarOpd(IntrinsicID, J)) {
         Lane[J] = Operands[J];
         continue;
       }

diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index 8c4511d6b9e39..e03cf6c48a2a3 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -466,7 +466,7 @@ bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurKind Kind,
 
   // This means we have seen one but not the other instruction of the
   // pattern or more than just a select and cmp. Zero implies that we saw a
-  // llvm.min/max instrinsic, which is always OK.
+  // llvm.min/max intrinsic, which is always OK.
   if (isMinMaxRecurrenceKind(Kind) && NumCmpSelectPatternInst != 2 &&
       NumCmpSelectPatternInst != 0)
     return false;

diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index ac0a3571e049f..6dace5abe29cd 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -40,7 +40,7 @@ static cl::opt<unsigned> MaxInterleaveGroupFactor(
 /// Return true if all of the intrinsic's arguments and return type are scalars
 /// for the scalar form of the intrinsic, and vectors for the vector form of the
 /// intrinsic (except operands that are marked as always being scalar by
-/// hasVectorInstrinsicScalarOpd).
+/// hasVectorIntrinsicScalarOpd).
 bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
   switch (ID) {
   case Intrinsic::abs:   // Begin integer bit-manipulation.
@@ -96,8 +96,8 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
 }
 
 /// Identifies if the vector form of the intrinsic has a scalar operand.
-bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
-                                        unsigned ScalarOpdIdx) {
+bool llvm::hasVectorIntrinsicScalarOpd(Intrinsic::ID ID,
+                                       unsigned ScalarOpdIdx) {
   switch (ID) {
   case Intrinsic::abs:
   case Intrinsic::ctlz:
@@ -114,8 +114,8 @@ bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
   }
 }
 
-bool llvm::hasVectorInstrinsicOverloadedScalarOpd(Intrinsic::ID ID,
-                                                  unsigned ScalarOpdIdx) {
+bool llvm::hasVectorIntrinsicOverloadedScalarOpd(Intrinsic::ID ID,
+                                                 unsigned ScalarOpdIdx) {
   switch (ID) {
   case Intrinsic::powi:
     return (ScalarOpdIdx == 1);

diff --git a/llvm/lib/CodeGen/ReplaceWithVeclib.cpp b/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
index b34ad6833dbf9..688734763612e 100644
--- a/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
+++ b/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
@@ -1,4 +1,4 @@
-//=== ReplaceWithVeclib.cpp - Replace vector instrinsics with veclib calls ===//
+//=== ReplaceWithVeclib.cpp - Replace vector intrinsics with veclib calls -===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -109,7 +109,7 @@ static bool replaceWithCallToVeclib(const TargetLibraryInfo &TLI,
     auto *ArgType = Arg.value()->getType();
     // Vector calls to intrinsics can still have
     // scalar operands for specific arguments.
-    if (hasVectorInstrinsicScalarOpd(IntrinsicID, Arg.index())) {
+    if (hasVectorIntrinsicScalarOpd(IntrinsicID, Arg.index())) {
       ScalarTypes.push_back(ArgType);
     } else {
       // The argument in this place should be a vector if

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 7ef919642c2cb..49431af877703 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4770,7 +4770,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
     }
   }
 
-  // Info is set by getTgtMemInstrinsic
+  // Info is set by getTgtMemIntrinsic
   TargetLowering::IntrinsicInfo Info;
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,

diff --git a/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 8211e3d6a9dd5..1fcee02184a96 100644
--- a/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -413,7 +413,7 @@ bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) {
   Val = Builder.CreateCall(StackAddrFn, {}, "sp");
   Builder.CreateStore(Val, StackPtr, /*isVolatile=*/true);
 
-  // Call the setup_dispatch instrinsic. It fills in the rest of the jmpbuf.
+  // Call the setup_dispatch intrinsic. It fills in the rest of the jmpbuf.
   Builder.CreateCall(BuiltinSetupDispatchFn, {});
 
   // Store a pointer to the function context so that the back-end will know

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2a53809284850..d5316cdea0f59 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3593,7 +3593,7 @@ AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op,
          "Saturation width cannot exceed result width");
 
   // TODO: Consider lowering to SVE operations, as in LowerVectorFP_TO_INT.
-  // Currently, the `llvm.fpto[su]i.sat.*` instrinsics don't accept scalable
+  // Currently, the `llvm.fpto[su]i.sat.*` intrinsics don't accept scalable
   // types, so this is hard to reach.
   if (DstVT.isScalableVector())
     return SDValue();

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index baf6b5e9b246f..10c5ae447a456 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6464,7 +6464,7 @@ def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
 def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
           (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
 
-// Patterns for FP16 Instrinsics - requires reg copy to/from as i16s not supported.
+// Patterns for FP16 Intrinsics - requires reg copy to/from as i16s not supported.
 
 def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
           (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;

diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
index c7a84c154cee8..fb28ce0a745a2 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
@@ -593,7 +593,7 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
   }
 
   // If we have instrumented at least one alloca, all unrecognized lifetime
-  // instrinsics have to go.
+  // intrinsics have to go.
   for (auto &I : SInfo.UnrecognizedLifetimes)
     I->eraseFromParent();
 

diff --git a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
index de1b66b3c01be..293b4cd09c77e 100644
--- a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
+++ b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
@@ -143,9 +143,9 @@ class BPFAbstractMemberAccess final {
   Module *M = nullptr;
 
   static std::map<std::string, GlobalVariable *> GEPGlobals;
-  // A map to link preserve_*_access_index instrinsic calls.
+  // A map to link preserve_*_access_index intrinsic calls.
   std::map<CallInst *, std::pair<CallInst *, CallInfo>> AIChain;
-  // A map to hold all the base preserve_*_access_index instrinsic calls.
+  // A map to hold all the base preserve_*_access_index intrinsic calls.
   // The base call is not an input of any other preserve_*
   // intrinsics.
   std::map<CallInst *, CallInfo> BaseAICalls;

diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index 3905a18895ded..97116ffc57fe0 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -157,7 +157,7 @@ class TargetRegisterClass;
       Ins,
       CIns,
 
-      // EXTR.W instrinsic nodes.
+      // EXTR.W intrinsic nodes.
       EXTP,
       EXTPDP,
       EXTR_S_H,

diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index f4d6171f87dfb..d30c3d13f7ce0 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -1974,7 +1974,7 @@ multiclass ATOM3P_impl<string AsmStr,  Intrinsic Intr,
                     (Intr Int64Regs:$src, (ImmTy Imm:$b), (ImmTy Imm:$c))>;
 }
 
-// Constructs instrinsic name and instruction asm strings.
+// Constructs intrinsic name and instruction asm strings.
 multiclass ATOM2N_impl<string OpStr, string IntTypeStr, string TypeStr,
                        string ScopeStr, string SpaceStr,
                        NVPTXRegClass regclass, Operand ImmType, SDNode Imm,

diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index 936aa95c5b314..fc4bc6b3cbf77 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -96,7 +96,7 @@ bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) {
       // Instructions that read threadIdx are obviously divergent.
       if (readsThreadIndex(II) || readsLaneId(II))
         return true;
-      // Handle the NVPTX atomic instrinsics that cannot be represented as an
+      // Handle the NVPTX atomic intrinsics that cannot be represented as an
       // atomic IR instruction.
       if (isNVVMAtomic(II))
         return true;

diff --git a/llvm/lib/Target/PowerPC/README_P9.txt b/llvm/lib/Target/PowerPC/README_P9.txt
index a85c4b97c5fb6..ee1ea735acad9 100644
--- a/llvm/lib/Target/PowerPC/README_P9.txt
+++ b/llvm/lib/Target/PowerPC/README_P9.txt
@@ -310,7 +310,7 @@ VSX:
   . I checked existing instruction "XSCMPUDP". They are different in target
     register. "XSCMPUDP" write to CR field, xscmp*dp write to VSX register
 
-  . Use instrinsic:
+  . Use intrinsic:
     (set i128:$XT, (int_ppc_vsx_xscmpeqdp f64:$XA, f64:$XB))
     (set i128:$XT, (int_ppc_vsx_xscmpgedp f64:$XA, f64:$XB))
     (set i128:$XT, (int_ppc_vsx_xscmpgtdp f64:$XA, f64:$XB))
@@ -322,7 +322,7 @@ VSX:
                                  "xvcmpeqdp", "$XT, $XA, $XB", IIC_VecFPCompare,
                                  int_ppc_vsx_xvcmpeqdp, v2i64, v2f64>;
 
-  . So we should use "XX3Form_Rcr" to implement instrinsic
+  . So we should use "XX3Form_Rcr" to implement intrinsic
 
 - Convert DP -> QP: xscvdpqp
   . Similar to XSCVDPSP:

diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp
index 1596f3cb0bb5e..95cb783634b2b 100644
--- a/llvm/lib/Target/X86/X86LowerAMXType.cpp
+++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp
@@ -74,7 +74,7 @@ static bool isAMXCast(Instruction *II) {
          match(II, m_Intrinsic<Intrinsic::x86_cast_tile_to_vector>(m_Value()));
 }
 
-static bool isAMXInstrinsic(User *I) {
+static bool isAMXIntrinsic(User *I) {
   auto *II = dyn_cast<IntrinsicInst>(I);
   if (!II)
     return false;
@@ -195,7 +195,7 @@ static std::pair<Value *, Value *> getShape(PHINode *Phi) {
       Use &U = *(V->use_begin());
       OpNo = U.getOperandNo();
       V = U.getUser();
-    } else if (isAMXInstrinsic(V)) {
+    } else if (isAMXIntrinsic(V)) {
       return getShape(cast<IntrinsicInst>(V), OpNo);
     } else if (isa<PHINode>(V)) {
       if (V->use_empty())

diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 6ed6c61957625..72b94cda01bf4 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1781,7 +1781,7 @@ hasOnlyColdCalls(Function &F,
           return false;
         if (!CalledFn->hasLocalLinkage())
           return false;
-        // Skip over instrinsics since they won't remain as function calls.
+        // Skip over intrinsics since they won't remain as function calls.
         if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
           continue;
         // Check if it's valid to use coldcc calling convention.

diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index 1bfa19600bd66..7889b829490c4 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -2206,9 +2206,9 @@ bool DevirtModule::run() {
 
     removeRedundantTypeTests();
 
-    // We have lowered or deleted the type instrinsics, so we will no
-    // longer have enough information to reason about the liveness of virtual
-    // function pointers in GlobalDCE.
+    // We have lowered or deleted the type intrinsics, so we will no longer have
+    // enough information to reason about the liveness of virtual function
+    // pointers in GlobalDCE.
     for (GlobalVariable &GV : M.globals())
       GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
 
@@ -2341,9 +2341,9 @@ bool DevirtModule::run() {
     for (VTableBits &B : Bits)
       rebuildGlobal(B);
 
-  // We have lowered or deleted the type instrinsics, so we will no
-  // longer have enough information to reason about the liveness of virtual
-  // function pointers in GlobalDCE.
+  // We have lowered or deleted the type intrinsics, so we will no longer have
+  // enough information to reason about the liveness of virtual function
+  // pointers in GlobalDCE.
   for (GlobalVariable &GV : M.globals())
     GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
 

diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 7d5ff6b38604c..11c756ca8c559 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -797,7 +797,7 @@ bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
 }
 
 /// processLoopMemIntrinsic - Template function for calling different processor
-/// functions based on mem instrinsic type.
+/// functions based on mem intrinsic type.
 template <typename MemInst>
 bool LoopIdiomRecognize::processLoopMemIntrinsic(
     BasicBlock *BB,

diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index b849bd0b1758f..38a69ad2f1825 100644
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -1,5 +1,5 @@
 //===- ScalarizeMaskedMemIntrin.cpp - Scalarize unsupported masked mem ----===//
-//                                    instrinsics
+//                                    intrinsics
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.

diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index 28135ffb331ed..cff8f513404de 100644
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -577,7 +577,7 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) {
       assert(Scattered[I].size() == NumElems && "mismatched call operands");
     } else {
       ScalarOperands[I] = OpI;
-      if (hasVectorInstrinsicOverloadedScalarOpd(ID, I))
+      if (hasVectorIntrinsicOverloadedScalarOpd(ID, I))
         Tys.push_back(OpI->getType());
     }
   }
@@ -593,7 +593,7 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) {
     ScalarCallOps.clear();
 
     for (unsigned J = 0; J != NumArgs; ++J) {
-      if (hasVectorInstrinsicScalarOpd(ID, J))
+      if (hasVectorIntrinsicScalarOpd(ID, J))
         ScalarCallOps.push_back(ScalarOperands[J]);
       else
         ScalarCallOps.push_back(Scattered[J][Elem]);

diff --git a/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp b/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp
index bbf2f694c421f..9ac4608134c28 100644
--- a/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp
+++ b/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp
@@ -275,7 +275,7 @@ bool SpeculativeExecutionPass::considerHoistingFromTo(
       });
     }
 
-    // Usially debug label instrinsic corresponds to label in LLVM IR. In these
+    // Usially debug label intrinsic corresponds to label in LLVM IR. In these
     // cases we should not move it here.
     // TODO: Possible special processing needed to detect it is related to a
     // hoisted instruction.
@@ -301,7 +301,7 @@ bool SpeculativeExecutionPass::considerHoistingFromTo(
       if (TotalSpeculationCost > SpecExecMaxSpeculationCost)
         return false;  // too much to hoist
     } else {
-      // Debug info instrinsics should not be counted for threshold.
+      // Debug info intrinsics should not be counted for threshold.
       if (!isa<DbgInfoIntrinsic>(I))
         NotHoistedInstCount++;
       if (NotHoistedInstCount > SpecExecMaxNotHoisted)

diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index ed1fe6bdf8893..7a9a272691b3d 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1379,7 +1379,7 @@ Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
 static bool PhiHasDebugValue(DILocalVariable *DIVar,
                              DIExpression *DIExpr,
                              PHINode *APN) {
-  // Since we can't guarantee that the original dbg.declare instrinsic
+  // Since we can't guarantee that the original dbg.declare intrinsic
   // is removed by LowerDbgDeclare(), we need to make sure that we are
   // not inserting the same dbg.value intrinsic over and over.
   SmallVector<DbgValueInst *, 1> DbgValues;
@@ -1458,7 +1458,7 @@ void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
                       << *DII << '\n');
     // For now, when there is a store to parts of the variable (but we do not
-    // know which part) we insert an dbg.value instrinsic to indicate that we
+    // know which part) we insert an dbg.value intrinsic to indicate that we
     // know nothing about the variable's content.
     DV = UndefValue::get(DV->getType());
     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);

diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 7b53a4ce5a35d..83fbe635cc238 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -676,7 +676,7 @@ void PromoteMem2Reg::run() {
     A->eraseFromParent();
   }
 
-  // Remove alloca's dbg.declare instrinsics from the function.
+  // Remove alloca's dbg.declare intrinsics from the function.
   for (auto &DbgUsers : AllocaDbgUsers) {
     for (auto *DII : DbgUsers)
       if (DII->isAddressOfVariable() || DII->getExpression()->startsWithDeref())

diff --git a/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp b/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
index 037f42f5b0509..9264274506825 100644
--- a/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
+++ b/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
@@ -143,7 +143,7 @@ static void convertToRelLookupTable(GlobalVariable &LookupTable) {
   Value *Offset =
       Builder.CreateShl(Index, ConstantInt::get(IntTy, 2), "reltable.shift");
 
-  // Insert the call to load.relative instrinsic before LOAD.
+  // Insert the call to load.relative intrinsic before LOAD.
   // GEP might not be immediately followed by a LOAD, like it can be hoisted
   // outside the loop or another instruction might be inserted them in between.
   Builder.SetInsertPoint(Load);

diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index f1c5f2d9699a5..d2b7b71e611d5 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1494,7 +1494,7 @@ bool SimplifyCFGOpt::HoistThenElseCodeToIf(BranchInst *BI,
       return false;
     if (!I1NonDbg->isTerminator())
       return false;
-    // Now we know that we only need to hoist debug instrinsics and the
+    // Now we know that we only need to hoist debug intrinsics and the
     // terminator. Let the loop below handle those 2 cases.
   }
 

diff --git a/llvm/lib/Transforms/Utils/StripGCRelocates.cpp b/llvm/lib/Transforms/Utils/StripGCRelocates.cpp
index 2c38447fe28e1..0ff88e8b4612a 100644
--- a/llvm/lib/Transforms/Utils/StripGCRelocates.cpp
+++ b/llvm/lib/Transforms/Utils/StripGCRelocates.cpp
@@ -9,7 +9,7 @@
 // This is a little utility pass that removes the gc.relocates inserted by
 // RewriteStatepointsForGC. Note that the generated IR is incorrect,
 // but this is useful as a single pass in itself, for analysis of IR, without
-// the GC.relocates. The statepoint and gc.result instrinsics would still be
+// the GC.relocates. The statepoint and gc.result intrinsics would still be
 // present.
 //===----------------------------------------------------------------------===//
 

diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index 9ce5229d94f53..5ecee44090a75 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -772,7 +772,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
         auto *SE = PSE.getSE();
         Intrinsic::ID IntrinID = getVectorIntrinsicIDForCall(CI, TLI);
         for (unsigned i = 0, e = CI->arg_size(); i != e; ++i)
-          if (hasVectorInstrinsicScalarOpd(IntrinID, i)) {
+          if (hasVectorIntrinsicScalarOpd(IntrinID, i)) {
             if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(i)), TheLoop)) {
               reportVectorizationFailure("Found unvectorizable intrinsic",
                   "intrinsic instruction cannot be vectorized",

diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index bc17b3c2ac25f..783100a150f0b 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4244,11 +4244,11 @@ void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
       // Some intrinsics have a scalar argument - don't replace it with a
       // vector.
       Value *Arg;
-      if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
+      if (!UseVectorIntrinsic || !hasVectorIntrinsicScalarOpd(ID, I.index()))
         Arg = State.get(I.value(), Part);
       else {
         Arg = State.get(I.value(), VPIteration(0, 0));
-        if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
+        if (hasVectorIntrinsicOverloadedScalarOpd(ID, I.index()))
           TysForDecl.push_back(Arg->getType());
       }
       Args.push_back(Arg);
@@ -8791,7 +8791,7 @@ VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
     Builder.setInsertPoint(VPBB);
 
     // Introduce each ingredient into VPlan.
-    // TODO: Model and preserve debug instrinsics in VPlan.
+    // TODO: Model and preserve debug intrinsics in VPlan.
     for (Instruction &I : BB->instructionsWithoutDebug()) {
       Instruction *Instr = &I;
 

diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index ea914a08d75a7..ce088827b5eda 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -632,7 +632,7 @@ static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
     CallInst *CI = cast<CallInst>(UserInst);
     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
     for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
-      if (hasVectorInstrinsicScalarOpd(ID, i))
+      if (hasVectorIntrinsicScalarOpd(ID, i))
         return (CI->getArgOperand(i) == Scalar);
     }
     LLVM_FALLTHROUGH;
@@ -4710,7 +4710,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       unsigned NumArgs = CI->arg_size();
       SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
       for (unsigned j = 0; j != NumArgs; ++j)
-        if (hasVectorInstrinsicScalarOpd(ID, j))
+        if (hasVectorIntrinsicScalarOpd(ID, j))
           ScalarArgs[j] = CI->getArgOperand(j);
       for (Value *V : VL) {
         CallInst *CI2 = dyn_cast<CallInst>(V);
@@ -4729,7 +4729,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         // Some intrinsics have scalar arguments and should be same in order for
         // them to be vectorized.
         for (unsigned j = 0; j != NumArgs; ++j) {
-          if (hasVectorInstrinsicScalarOpd(ID, j)) {
+          if (hasVectorIntrinsicScalarOpd(ID, j)) {
             Value *A1J = CI2->getArgOperand(j);
             if (ScalarArgs[j] != A1J) {
               BS.cancelScheduling(VL, VL0);
@@ -4762,7 +4762,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
         // For scalar operands no need to to create an entry since no need to
         // vectorize it.
-        if (hasVectorInstrinsicScalarOpd(ID, i))
+        if (hasVectorIntrinsicScalarOpd(ID, i))
           continue;
         ValueList Operands;
         // Prepare the operand vector.
@@ -7315,11 +7315,11 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
         ValueList OpVL;
         // Some intrinsics have scalar arguments. This argument should not be
         // vectorized.
-        if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) {
+        if (UseIntrinsic && hasVectorIntrinsicScalarOpd(IID, j)) {
           CallInst *CEI = cast<CallInst>(VL0);
           ScalarArg = CEI->getArgOperand(j);
           OpVecs.push_back(CEI->getArgOperand(j));
-          if (hasVectorInstrinsicOverloadedScalarOpd(IID, j))
+          if (hasVectorIntrinsicOverloadedScalarOpd(IID, j))
             TysForDecl.push_back(ScalarArg->getType());
           continue;
         }

diff --git a/llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll b/llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll
index e54ca1ea34358..07ac087c1bdbf 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-byte-store-double.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -march=hexagon -mattr=+hvxv60,+hvx-length128b < %s | FileCheck %s
 
-; Test that we generate code for the vector byte enable store instrinsics.
+; Test that we generate code for the vector byte enable store intrinsics.
 
 ; CHECK-LABEL: f0:
 ; CHECK: if (q{{[0-3]}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}

diff --git a/llvm/test/DebugInfo/WebAssembly/dbg-declare.ll b/llvm/test/DebugInfo/WebAssembly/dbg-declare.ll
index d6337092b2811..309a05bf1c168 100644
--- a/llvm/test/DebugInfo/WebAssembly/dbg-declare.ll
+++ b/llvm/test/DebugInfo/WebAssembly/dbg-declare.ll
@@ -4,7 +4,7 @@
 ; CHECK: DW_TAG_variable
 ; CHECK-FAST: DW_TAG_variable
 
-; Test that llvm.dbg.declare() instrinsics do not crash the backend
+; Test that llvm.dbg.declare() intrinsics do not crash the backend
 
 source_filename = "test/DebugInfo/WebAssembly/dbg-declare.ll"
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"

diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll b/llvm/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
index 2fc229ddbfd4b..ab8283a4d127b 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
@@ -3,7 +3,7 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-;; Placeholder tests that will fail once element atomic @llvm.mem[move|set] instrinsics have
+;; Placeholder tests that will fail once element atomic @llvm.mem[move|set] intrinsics have
 ;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
 ;; verify that dfsan handles these intrinsics properly once they have been
 ;; added to that class hierarchy.

diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
index 616bff68672e1..875d642b2aed3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -248,7 +248,7 @@ declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
 ; CHECK: ret void
 
 ;; ------------
-;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] instrinsics have
+;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] intrinsics have
 ;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
 ;; verify that MSAN handles these intrinsics properly once they have been
 ;; added to that class hierarchy.

diff --git a/llvm/test/Transforms/FunctionSpecialization/function-specialization-nodup2.ll b/llvm/test/Transforms/FunctionSpecialization/function-specialization-nodup2.ll
index bb2d3a151d547..84d5dd81bf3c2 100644
--- a/llvm/test/Transforms/FunctionSpecialization/function-specialization-nodup2.ll
+++ b/llvm/test/Transforms/FunctionSpecialization/function-specialization-nodup2.ll
@@ -1,6 +1,6 @@
 ; RUN: opt -function-specialization -force-function-specialization -S < %s | FileCheck %s
 
-; Check that function foo does not gets specialised as it contains an instrinsic
+; Check that function foo does not gets specialised as it contains an intrinsic
 ; that is marked as NoDuplicate.
 ; Please note that the use of the hardwareloop intrinsic is arbitrary; it's
 ; just an easy to use intrinsic that has NoDuplicate.

diff --git a/llvm/test/Transforms/Inline/inline_constprop.ll b/llvm/test/Transforms/Inline/inline_constprop.ll
index 276c72cd3bd23..f5c82eeceb202 100644
--- a/llvm/test/Transforms/Inline/inline_constprop.ll
+++ b/llvm/test/Transforms/Inline/inline_constprop.ll
@@ -116,7 +116,7 @@ declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
 
 define i8 @caller4(i8 %z) {
 ; Check that we can constant fold through intrinsics such as the
-; overflow-detecting arithmetic instrinsics. These are particularly important
+; overflow-detecting arithmetic intrinsics. These are particularly important
 ; as they are used heavily in standard library code and generic C++ code where
 ; the arguments are oftent constant but complete generality is required.
 ;

diff --git a/llvm/test/Transforms/InstCombine/stacksave-debuginfo.ll b/llvm/test/Transforms/InstCombine/stacksave-debuginfo.ll
index be98cc73c61ab..0a76a53e3ede9 100644
--- a/llvm/test/Transforms/InstCombine/stacksave-debuginfo.ll
+++ b/llvm/test/Transforms/InstCombine/stacksave-debuginfo.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; dbg.value instrinsics should not affect peephole combining of stacksave/stackrestore.
+; dbg.value intrinsics should not affect peephole combining of stacksave/stackrestore.
 ; PR37713
 ; RUN: opt -passes=instcombine %s -S | FileCheck %s
 

diff --git a/llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll b/llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll
index 8be24472e341f..339149ba345f4 100644
--- a/llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll
+++ b/llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll
@@ -821,7 +821,7 @@ entry:
 }
 
 define void @test18(i8* %src, i8* %dst, i32 %size) {
-; Preserve transfer instrinsics with a variable size, even if they overlap with
+; Preserve transfer intrinsics with a variable size, even if they overlap with
 ; fixed size operations. Further, continue to split and promote allocas preceding
 ; the variable sized intrinsic.
 ; CHECK-LABEL: @test18(

diff --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll
index a80cdc9e6cdbd..66443c1715332 100644
--- a/llvm/test/Transforms/SROA/basictest.ll
+++ b/llvm/test/Transforms/SROA/basictest.ll
@@ -888,7 +888,7 @@ entry:
 }
 
 define void @test18(i8* %src, i8* %dst, i32 %size) {
-; Preserve transfer instrinsics with a variable size, even if they overlap with
+; Preserve transfer intrinsics with a variable size, even if they overlap with
 ; fixed size operations. Further, continue to split and promote allocas preceding
 ; the variable sized intrinsic.
 ; CHECK-LABEL: @test18(

diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index 00fa937d8ad9d..abc45172fa558 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -2621,7 +2621,7 @@ TEST(IRSimilarityIdentifier, CommutativeSimilarity) {
 
 // This test makes sure that intrinsic functions that are marked commutative
 // are still treated as non-commutative since they are function calls.
-TEST(IRSimilarityIdentifier, InstrinsicCommutative) {
+TEST(IRSimilarityIdentifier, IntrinsicCommutative) {
   // If treated as commutative, we will fail to find a valid mapping, causing
   // an assertion error.
   StringRef ModuleString = R"(
