[llvm] d5a614d - [CodeGen][NFC] Fix documentation comments (#100092)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 23 04:21:04 PDT 2024
Author: Piotr Fusik
Date: 2024-07-23T13:21:01+02:00
New Revision: d5a614d44d4ef1f7354b89d3c163dde4497a2053
URL: https://github.com/llvm/llvm-project/commit/d5a614d44d4ef1f7354b89d3c163dde4497a2053
DIFF: https://github.com/llvm/llvm-project/commit/d5a614d44d4ef1f7354b89d3c163dde4497a2053.diff
LOG: [CodeGen][NFC] Fix documentation comments (#100092)
Added:
Modified:
llvm/include/llvm/CodeGen/ISDOpcodes.h
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index daceaf98583bd..5b657fb171296 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -217,9 +217,9 @@ enum NodeType {
/// UNDEF - An undefined node.
UNDEF,
- // FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
- // is evaluated to UNDEF), or returns VAL otherwise. Note that each
- // read of UNDEF can yield different value, but FREEZE(UNDEF) cannot.
+ /// FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
+ /// is evaluated to UNDEF), or returns VAL otherwise. Note that each
+ /// read of UNDEF can yield different value, but FREEZE(UNDEF) cannot.
FREEZE,
/// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
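For readers skimming the hunk above: a scalar C++ model of the UNDEF-vs-FREEZE distinction being documented (readUndef is a made-up stand-in for reading an UNDEF value, not part of the patch). Each read of the model UNDEF may differ, while a frozen value is picked once and then stays fixed.

    #include <cstdint>
    #include <random>

    // Model of UNDEF: every read may produce a different arbitrary value.
    uint32_t readUndef() {
      static std::mt19937 Gen;
      return Gen();
    }

    void demo() {
      uint32_t Frozen = readUndef();          // FREEZE(UNDEF): fixed once
      uint32_t A = Frozen - Frozen;           // always 0
      uint32_t B = readUndef() - readUndef(); // may be any value
      (void)A; (void)B;
    }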
@@ -300,7 +300,7 @@ enum NodeType {
/// it to the add/sub hardware instruction, and then inverting the outgoing
/// carry/borrow.
///
- /// The use of these opcodes is preferable to adde/sube if the target supports
+ /// The use of these opcodes is preferable to ADDE/SUBE if the target supports
/// it, as the carry is a regular value rather than a glue, which allows
/// further optimisation.
///
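A plain C++ sketch of the carry-in/carry-out shape the hunk above refers to, assuming 32-bit operands (the helper name is invented; the actual nodes produce the sum and the outgoing carry as two separate values):

    #include <cstdint>

    // Sum = A + B + CarryIn; CarryOut reports unsigned overflow of that sum.
    uint32_t addWithCarry32(uint32_t A, uint32_t B, bool CarryIn, bool &CarryOut) {
      uint64_t Wide = (uint64_t)A + B + CarryIn;
      CarryOut = (Wide >> 32) != 0;
      return (uint32_t)Wide;
    }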
@@ -490,7 +490,7 @@ enum NodeType {
STRICT_FSETCC,
STRICT_FSETCCS,
- // FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
+ /// FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
FPTRUNC_ROUND,
/// FMA - Perform a * b + c with no intermediate rounding step.
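The "no intermediate rounding step" in the FMA comment is the same contract as std::fma in standard C++, which rounds once at the end (a sketch of the semantics, not of any target lowering):

    #include <cmath>

    double fusedMulAdd(double A, double B, double C) {
      return std::fma(A, B, C); // A * B + C with a single rounding
    }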
@@ -684,10 +684,10 @@ enum NodeType {
AVGCEILS,
AVGCEILU,
- // ABDS/ABDU - Absolute difference - Return the absolute difference between
- // two numbers interpreted as signed/unsigned.
- // i.e trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1)
- // or trunc(abs(zext(Op0) - zext(Op1))) becomes abdu(Op0, Op1)
+ /// ABDS/ABDU - Absolute difference - Return the absolute difference between
+ /// two numbers interpreted as signed/unsigned.
+ /// i.e trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1)
+ /// or trunc(abs(zext(Op0) - zext(Op1))) becomes abdu(Op0, Op1)
ABDS,
ABDU,
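The two formulas in the hunk above, transcribed to scalar C++ for 32-bit elements (helper names are hypothetical):

    #include <cstdint>

    // abdu: trunc(abs(zext(A) - zext(B)))
    uint32_t abdu32(uint32_t A, uint32_t B) {
      return A > B ? A - B : B - A;
    }

    // abds: trunc(abs(sext(A) - sext(B))); widen first to avoid signed overflow.
    int32_t abds32(int32_t A, int32_t B) {
      int64_t D = (int64_t)A - (int64_t)B;
      return (int32_t)(D < 0 ? -D : D);
    }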
@@ -728,8 +728,9 @@ enum NodeType {
/// amount modulo the element size of the first operand.
///
/// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
- /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
- /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
+ ///
+ /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
+ /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
SHL,
SRA,
SRL,
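A direct C++ transcription of the two funnel-shift formulas for BW = 32 (names invented). The zero-shift case is handled separately because a C++ shift by BW would be undefined, while the formulas degenerate to returning X (fshl) or Y (fshr):

    #include <cstdint>

    enum { BW = 32 };

    uint32_t fshl32(uint32_t X, uint32_t Y, uint32_t Z) {
      uint32_t S = Z % BW;
      return S ? (X << S) | (Y >> (BW - S)) : X;
    }

    uint32_t fshr32(uint32_t X, uint32_t Y, uint32_t Z) {
      uint32_t S = Z % BW;
      return S ? (X << (BW - S)) | (Y >> S) : Y;
    }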
@@ -787,7 +788,8 @@ enum NodeType {
/// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
/// integer shift operations. The operation ordering is:
- /// [Lo,Hi] = op [LoLHS,HiLHS], Amt
+ ///
+ /// [Lo,Hi] = op [LoLHS,HiLHS], Amt
SHL_PARTS,
SRA_PARTS,
SRL_PARTS,
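For the SHL case, the [Lo,Hi] = op [LoLHS,HiLHS], Amt ordering can be modeled on 32-bit parts like this (assumes Amt < 64; names invented):

    #include <cstdint>

    void shlParts32(uint32_t LoLHS, uint32_t HiLHS, unsigned Amt,
                    uint32_t &Lo, uint32_t &Hi) {
      uint64_t V = ((uint64_t)HiLHS << 32) | LoLHS; // reassemble the wide value
      V <<= Amt;                                    // Amt < 64 assumed
      Lo = (uint32_t)V;
      Hi = (uint32_t)(V >> 32);
    }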
@@ -998,7 +1000,7 @@ enum NodeType {
/// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
/// values.
- //
+ ///
/// In the case where a single input is a NaN (either signaling or quiet),
/// the non-NaN input is returned.
///
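The single-NaN behavior described here matches std::fmin/std::fmax from <cmath>, which likewise return the non-NaN operand (a sketch of the semantics only):

    #include <cassert>
    #include <cmath>

    void demoMinNum() {
      double R = std::fmin(std::nan(""), 1.0);
      assert(R == 1.0); // the non-NaN input is returned
    }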
@@ -1196,11 +1198,11 @@ enum NodeType {
VAEND,
VASTART,
- // PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE
- // with the preallocated call Value.
+ /// PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE
+ /// with the preallocated call Value.
PREALLOCATED_SETUP,
- // PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE
- // with the preallocated call Value, and a constant int.
+ /// PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE
+ /// with the preallocated call Value, and a constant int.
PREALLOCATED_ARG,
/// SRCVALUE - This is a node type that holds a Value* that is used to
@@ -1319,24 +1321,24 @@ enum NodeType {
ATOMIC_LOAD_UINC_WRAP,
ATOMIC_LOAD_UDEC_WRAP,
- // Masked load and store - consecutive vector load and store operations
- // with additional mask operand that prevents memory accesses to the
- // masked-off lanes.
- //
- // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
- // OutChain = MSTORE(Value, BasePtr, Mask)
+ /// Masked load and store - consecutive vector load and store operations
+ /// with additional mask operand that prevents memory accesses to the
+ /// masked-off lanes.
+ ///
+ /// Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
+ /// OutChain = MSTORE(Value, BasePtr, Mask)
MLOAD,
MSTORE,
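A scalar model of the MLOAD/MSTORE pseudo-signatures above, using a 4-lane int vector (lane count and element type are illustrative assumptions):

    // Val = MLOAD(BasePtr, Mask, PassThru): masked-off lanes take PassThru
    // and the corresponding memory is never read.
    void mload4(const int *BasePtr, const bool Mask[4],
                const int PassThru[4], int Val[4]) {
      for (int I = 0; I < 4; ++I)
        Val[I] = Mask[I] ? BasePtr[I] : PassThru[I];
    }

    // MSTORE(Value, BasePtr, Mask): masked-off lanes leave memory untouched.
    void mstore4(const int Value[4], int *BasePtr, const bool Mask[4]) {
      for (int I = 0; I < 4; ++I)
        if (Mask[I])
          BasePtr[I] = Value[I];
    }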
- // Masked gather and scatter - load and store operations for a vector of
- // random addresses with additional mask operand that prevents memory
- // accesses to the masked-off lanes.
- //
- // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
- // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
- //
- // The Index operand can have more vector elements than the other operands
- // due to type legalization. The extra elements are ignored.
+ /// Masked gather and scatter - load and store operations for a vector of
+ /// random addresses with additional mask operand that prevents memory
+ /// accesses to the masked-off lanes.
+ ///
+ /// Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
+ /// OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
+ ///
+ /// The Index operand can have more vector elements than the other operands
+ /// due to type legalization. The extra elements are ignored.
MGATHER,
MSCATTER,
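And the matching scalar model for the GATHER/SCATTER forms above, where each lane addresses memory at BasePtr + Index[i] * Scale (again a 4-lane sketch with assumed element types):

    #include <cstdint>

    void gather4(const char *BasePtr, const int64_t Index[4], int64_t Scale,
                 const bool Mask[4], const int PassThru[4], int Val[4]) {
      for (int I = 0; I < 4; ++I)
        Val[I] = Mask[I] ? *(const int *)(BasePtr + Index[I] * Scale)
                         : PassThru[I];
    }

    void scatter4(const int Value[4], char *BasePtr, const int64_t Index[4],
                  int64_t Scale, const bool Mask[4]) {
      for (int I = 0; I < 4; ++I)
        if (Mask[I])
          *(int *)(BasePtr + Index[I] * Scale) = Value[I];
    }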
@@ -1385,9 +1387,11 @@ enum NodeType {
/// pow-of-2 vectors, one valid legalizer expansion is to use a tree
/// reduction, i.e.:
/// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
- /// PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
- /// PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
- /// RES = FADD PART_RDX2[0], PART_RDX2[1]
+ ///
+ /// PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
+ /// PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
+ /// RES = FADD PART_RDX2[0], PART_RDX2[1]
+ ///
/// For non-pow-2 vectors, this can be computed by extracting each element
/// and performing the operation as if it were scalarized.
VECREDUCE_FADD,
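The tree-reduction expansion spelled out above, written as scalar C++ over an 8-element float array (one legal expansion among others, per the comment):

    float vecreduceFAdd8(const float Src[8]) {
      float PartRdx[4], PartRdx2[2];
      for (int I = 0; I < 4; ++I)
        PartRdx[I] = Src[I] + Src[I + 4];          // SRC_VEC[0:3] + SRC_VEC[4:7]
      for (int I = 0; I < 2; ++I)
        PartRdx2[I] = PartRdx[I] + PartRdx[I + 2]; // PART_RDX[0:1] + PART_RDX[2:3]
      return PartRdx2[0] + PartRdx2[1];
    }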