[llvm] r258867 - [x86] make the subtarget member a const reference, not a pointer ; NFCI

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 26 15:04:04 PST 2016


It's a pipe dream, but maybe we can make x86 suck less. :)
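
For anyone skimming the quoted diff: the change is mechanical. The Subtarget member
goes from "const X86Subtarget *" to "const X86Subtarget &", the constructor drops the
address-of, every "Subtarget->" becomes "Subtarget.", and the two static argument
helpers now take the subtarget by reference as well. A minimal standalone sketch of
the same pointer-member-to-reference-member pattern (SubtargetInfo and the Lowering
classes here are hypothetical stand-ins, not the real X86 types):

  // Hypothetical stand-in for X86Subtarget; only here so the sketch compiles.
  struct SubtargetInfo {
    bool is64Bit() const { return true; }
  };

  // Before: the member is a pointer even though the constructor receives a
  // reference that can never be null, so every use reads as if it could be.
  class LoweringBefore {
    const SubtargetInfo *Subtarget;
  public:
    explicit LoweringBefore(const SubtargetInfo &STI) : Subtarget(&STI) {}
    bool is64Bit() const { return Subtarget->is64Bit(); }
  };

  // After: store the reference itself; the "could it be null?" question goes
  // away and every use switches from '->' to '.', which is all the diff does.
  class LoweringAfter {
    const SubtargetInfo &Subtarget;
  public:
    explicit LoweringAfter(const SubtargetInfo &STI) : Subtarget(STI) {}
    bool is64Bit() const { return Subtarget.is64Bit(); }
  };

  int main() {
    SubtargetInfo STI;
    // Both behave identically; only the member type and call syntax differ.
    return LoweringBefore(STI).is64Bit() == LoweringAfter(STI).is64Bit() ? 0 : 1;
  }

One side effect of the reference member is that the class is no longer
copy-assignable, which is fine for a lowering object that is constructed once and
never reassigned.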

On Tue, Jan 26, 2016 at 3:16 PM, Eric Christopher <echristo at gmail.com>
wrote:

> Thanks for the cleanup.
>
> -eric
>
> On Tue, Jan 26, 2016 at 2:12 PM Sanjay Patel via llvm-commits <
> llvm-commits at lists.llvm.org> wrote:
>
>> Author: spatel
>> Date: Tue Jan 26 16:08:58 2016
>> New Revision: 258867
>>
>> URL: http://llvm.org/viewvc/llvm-project?rev=258867&view=rev
>> Log:
>> [x86] make the subtarget member a const reference, not a pointer ; NFCI
>>
>> It's passed in as a reference; it's not optional; it's not a pointer.
>>
>>
>> Modified:
>>     llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
>>     llvm/trunk/lib/Target/X86/X86ISelLowering.h
>>
>> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=258867&r1=258866&r2=258867&view=diff
>>
>> ==============================================================================
>> --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
>> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Jan 26 16:08:58 2016
>> @@ -71,9 +71,9 @@ static cl::opt<bool> ExperimentalVectorW
>>
>>  X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
>>                                       const X86Subtarget &STI)
>> -    : TargetLowering(TM), Subtarget(&STI) {
>> -  X86ScalarSSEf64 = Subtarget->hasSSE2();
>> -  X86ScalarSSEf32 = Subtarget->hasSSE1();
>> +    : TargetLowering(TM), Subtarget(STI) {
>> +  X86ScalarSSEf64 = Subtarget.hasSSE2();
>> +  X86ScalarSSEf32 = Subtarget.hasSSE1();
>>    MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
>>
>>    // Set up the TargetLowering object.
>> @@ -86,24 +86,24 @@ X86TargetLowering::X86TargetLowering(con
>>    // For 64-bit, since we have so many registers, use the ILP scheduler.
>>    // For 32-bit, use the register pressure specific scheduling.
>>    // For Atom, always use ILP scheduling.
>> -  if (Subtarget->isAtom())
>> +  if (Subtarget.isAtom())
>>      setSchedulingPreference(Sched::ILP);
>> -  else if (Subtarget->is64Bit())
>> +  else if (Subtarget.is64Bit())
>>      setSchedulingPreference(Sched::ILP);
>>    else
>>      setSchedulingPreference(Sched::RegPressure);
>> -  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
>> +  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
>>    setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
>>
>>    // Bypass expensive divides on Atom when compiling with O2.
>>    if (TM.getOptLevel() >= CodeGenOpt::Default) {
>> -    if (Subtarget->hasSlowDivide32())
>> +    if (Subtarget.hasSlowDivide32())
>>        addBypassSlowDiv(32, 8);
>> -    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
>> +    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
>>        addBypassSlowDiv(64, 16);
>>    }
>>
>> -  if (Subtarget->isTargetKnownWindowsMSVC()) {
>> +  if (Subtarget.isTargetKnownWindowsMSVC()) {
>>      // Setup Windows compiler runtime calls.
>>      setLibcallName(RTLIB::SDIV_I64, "_alldiv");
>>      setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
>> @@ -117,11 +117,11 @@ X86TargetLowering::X86TargetLowering(con
>>      setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
>>    }
>>
>> -  if (Subtarget->isTargetDarwin()) {
>> +  if (Subtarget.isTargetDarwin()) {
>>      // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
>>      setUseUnderscoreSetJmp(false);
>>      setUseUnderscoreLongJmp(false);
>> -  } else if (Subtarget->isTargetWindowsGNU()) {
>> +  } else if (Subtarget.isTargetWindowsGNU()) {
>>      // MS runtime is weird: it exports _setjmp, but longjmp!
>>      setUseUnderscoreSetJmp(true);
>>      setUseUnderscoreLongJmp(false);
>> @@ -134,7 +134,7 @@ X86TargetLowering::X86TargetLowering(con
>>    addRegisterClass(MVT::i8, &X86::GR8RegClass);
>>    addRegisterClass(MVT::i16, &X86::GR16RegClass);
>>    addRegisterClass(MVT::i32, &X86::GR32RegClass);
>> -  if (Subtarget->is64Bit())
>> +  if (Subtarget.is64Bit())
>>      addRegisterClass(MVT::i64, &X86::GR64RegClass);
>>
>>    for (MVT VT : MVT::integer_valuetypes())
>> @@ -164,14 +164,14 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
>>    setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);
>>
>> -  if (Subtarget->is64Bit()) {
>> -    if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512())
>> +  if (Subtarget.is64Bit()) {
>> +    if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
>>        // f32/f64 are legal, f80 is custom.
>>        setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Custom);
>>      else
>>        setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
>>      setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Custom);
>> -  } else if (!Subtarget->useSoftFloat()) {
>> +  } else if (!Subtarget.useSoftFloat()) {
>>      // We have an algorithm for SSE2->double, and we turn this into a
>>      // 64-bit FILD followed by conditional FADD for other targets.
>>      setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Custom);
>> @@ -185,7 +185,7 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
>>    setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
>>
>> -  if (!Subtarget->useSoftFloat()) {
>> +  if (!Subtarget.useSoftFloat()) {
>>      // SSE has no i16 to fp conversion, only i32
>>      if (X86ScalarSSEf32) {
>>        setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
>> @@ -205,7 +205,7 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
>>    setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);
>>
>> -  if (!Subtarget->useSoftFloat()) {
>> +  if (!Subtarget.useSoftFloat()) {
>>      // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
>>      // are Legal, f80 is custom lowered.
>>      setOperationAction(ISD::FP_TO_SINT     , MVT::i64  , Custom);
>> @@ -231,8 +231,8 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
>>    setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);
>>
>> -  if (Subtarget->is64Bit()) {
>> -    if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512()) {
>> +  if (Subtarget.is64Bit()) {
>> +    if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
>>        // FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
>>        setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Custom);
>>        setOperationAction(ISD::FP_TO_UINT   , MVT::i64  , Custom);
>> @@ -240,9 +240,9 @@ X86TargetLowering::X86TargetLowering(con
>>        setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
>>        setOperationAction(ISD::FP_TO_UINT   , MVT::i64  , Expand);
>>      }
>> -  } else if (!Subtarget->useSoftFloat()) {
>> +  } else if (!Subtarget.useSoftFloat()) {
>>      // Since AVX is a superset of SSE3, only check for SSE here.
>> -    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
>> +    if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
>>        // Expand FP_TO_UINT into a select.
>>        // FIXME: We would like to use a Custom expander here eventually to do
>>        // the optimal thing for SSE vs. the default expansion in the legalizer.
>> @@ -260,12 +260,12 @@ X86TargetLowering::X86TargetLowering(con
>>    if (!X86ScalarSSEf64) {
>>      setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
>>      setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
>> -    if (Subtarget->is64Bit()) {
>> +    if (Subtarget.is64Bit()) {
>>        setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
>>        // Without SSE, i64->f64 goes through memory.
>>        setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
>>      }
>> -  } else if (!Subtarget->is64Bit())
>> +  } else if (!Subtarget.is64Bit())
>>      setOperationAction(ISD::BITCAST      , MVT::i64  , Custom);
>>    // Scalar integer divide and remainder are lowered to use operations that
>> that
>> @@ -311,14 +311,14 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::SELECT_CC        , MVT::i16,   Expand);
>>    setOperationAction(ISD::SELECT_CC        , MVT::i32,   Expand);
>>    setOperationAction(ISD::SELECT_CC        , MVT::i64,   Expand);
>> -  if (Subtarget->is64Bit())
>> +  if (Subtarget.is64Bit())
>>      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
>>    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
>>    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
>>    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
>>    setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
>>
>> -  if (Subtarget->is32Bit() && Subtarget->isTargetKnownWindowsMSVC()) {
>> +  if (Subtarget.is32Bit() && Subtarget.isTargetKnownWindowsMSVC()) {
>>      // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
>>      // is. We should promote the value to 64-bits to solve this.
>>      // This is what the CRT headers do - `fmodf` is an inline header
>> @@ -338,19 +338,19 @@ X86TargetLowering::X86TargetLowering(con
>>    AddPromotedToType (ISD::CTTZ             , MVT::i8   , MVT::i32);
>>    setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i8   , Promote);
>>    AddPromotedToType (ISD::CTTZ_ZERO_UNDEF  , MVT::i8   , MVT::i32);
>> -  if (Subtarget->hasBMI()) {
>> +  if (Subtarget.hasBMI()) {
>>      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , Expand);
>>      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Expand);
>> -    if (Subtarget->is64Bit())
>> +    if (Subtarget.is64Bit())
>>        setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
>>    } else {
>>      setOperationAction(ISD::CTTZ           , MVT::i16  , Custom);
>>      setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
>> -    if (Subtarget->is64Bit())
>> +    if (Subtarget.is64Bit())
>>        setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
>>    }
>>
>> -  if (Subtarget->hasLZCNT()) {
>> +  if (Subtarget.hasLZCNT()) {
>>      // When promoting the i8 variants, force them to i32 for a shorter
>>      // encoding.
>>      setOperationAction(ISD::CTLZ           , MVT::i8   , Promote);
>> @@ -359,7 +359,7 @@ X86TargetLowering::X86TargetLowering(con
>>      AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
>>      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16  , Expand);
>>      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32  , Expand);
>> -    if (Subtarget->is64Bit())
>> +    if (Subtarget.is64Bit())
>>        setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
>>    } else {
>>      setOperationAction(ISD::CTLZ           , MVT::i8   , Custom);
>> @@ -368,7 +368,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , Custom);
>>      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16  , Custom);
>>      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32  , Custom);
>> -    if (Subtarget->is64Bit()) {
>> +    if (Subtarget.is64Bit()) {
>>        setOperationAction(ISD::CTLZ         , MVT::i64  , Custom);
>>        setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
>>      }
>> @@ -377,7 +377,7 @@ X86TargetLowering::X86TargetLowering(con
>>    // Special handling for half-precision floating point conversions.
>>    // If we don't have F16C support, then lower half float conversions
>>    // into library calls.
>> -  if (Subtarget->useSoftFloat() || !Subtarget->hasF16C()) {
>> +  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
>>      setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
>>      setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
>>    }
>> @@ -395,19 +395,19 @@ X86TargetLowering::X86TargetLowering(con
>>    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
>>    setTruncStoreAction(MVT::f80, MVT::f16, Expand);
>>
>> -  if (Subtarget->hasPOPCNT()) {
>> +  if (Subtarget.hasPOPCNT()) {
>>      setOperationAction(ISD::CTPOP          , MVT::i8   , Promote);
>>    } else {
>>      setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
>>      setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
>>      setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
>> -    if (Subtarget->is64Bit())
>> +    if (Subtarget.is64Bit())
>>        setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
>>    }
>>
>>    setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
>>
>> -  if (!Subtarget->hasMOVBE())
>> +  if (!Subtarget.hasMOVBE())
>>      setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);
>>
>>    // These should be promoted to a larger select which is supported.
>> @@ -430,7 +430,7 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::SETCCE          , MVT::i8   , Custom);
>>    setOperationAction(ISD::SETCCE          , MVT::i16  , Custom);
>>    setOperationAction(ISD::SETCCE          , MVT::i32  , Custom);
>> -  if (Subtarget->is64Bit()) {
>> +  if (Subtarget.is64Bit()) {
>>      setOperationAction(ISD::SELECT        , MVT::i64  , Custom);
>>      setOperationAction(ISD::SETCC         , MVT::i64  , Custom);
>>      setOperationAction(ISD::SETCCE        , MVT::i64  , Custom);
>> @@ -450,11 +450,11 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::JumpTable       , MVT::i32  , Custom);
>>    setOperationAction(ISD::GlobalAddress   , MVT::i32  , Custom);
>>    setOperationAction(ISD::GlobalTLSAddress, MVT::i32  , Custom);
>> -  if (Subtarget->is64Bit())
>> +  if (Subtarget.is64Bit())
>>      setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
>>    setOperationAction(ISD::ExternalSymbol  , MVT::i32  , Custom);
>>    setOperationAction(ISD::BlockAddress    , MVT::i32  , Custom);
>> -  if (Subtarget->is64Bit()) {
>> +  if (Subtarget.is64Bit()) {
>>      setOperationAction(ISD::ConstantPool  , MVT::i64  , Custom);
>>      setOperationAction(ISD::JumpTable     , MVT::i64  , Custom);
>>      setOperationAction(ISD::GlobalAddress , MVT::i64  , Custom);
>> @@ -465,13 +465,13 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::SHL_PARTS       , MVT::i32  , Custom);
>>    setOperationAction(ISD::SRA_PARTS       , MVT::i32  , Custom);
>>    setOperationAction(ISD::SRL_PARTS       , MVT::i32  , Custom);
>> -  if (Subtarget->is64Bit()) {
>> +  if (Subtarget.is64Bit()) {
>>      setOperationAction(ISD::SHL_PARTS     , MVT::i64  , Custom);
>>      setOperationAction(ISD::SRA_PARTS     , MVT::i64  , Custom);
>>      setOperationAction(ISD::SRL_PARTS     , MVT::i64  , Custom);
>>    }
>>
>> -  if (Subtarget->hasSSE1())
>> +  if (Subtarget.hasSSE1())
>>      setOperationAction(ISD::PREFETCH      , MVT::Other, Legal);
>>
>>    setOperationAction(ISD::ATOMIC_FENCE  , MVT::Other, Custom);
>> @@ -483,13 +483,13 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
>>    }
>>
>> -  if (Subtarget->hasCmpxchg16b()) {
>> +  if (Subtarget.hasCmpxchg16b()) {
>>      setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
>>    }
>>
>>    // FIXME - use subtarget debug flags
>> -  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
>> -      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
>> +  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
>> +      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64()) {
>>      setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
>>    }
>>
>> @@ -505,7 +505,7 @@ X86TargetLowering::X86TargetLowering(con
>>    // VASTART needs to be custom lowered to use the VarArgsFrameIndex
>>    setOperationAction(ISD::VASTART           , MVT::Other, Custom);
>>    setOperationAction(ISD::VAEND             , MVT::Other, Expand);
>> -  if (Subtarget->is64Bit()) {
>> +  if (Subtarget.is64Bit()) {
>>      setOperationAction(ISD::VAARG           , MVT::Other, Custom);
>>      setOperationAction(ISD::VACOPY          , MVT::Other, Custom);
>>    } else {
>> @@ -523,7 +523,7 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
>>    setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
>>
>> -  if (!Subtarget->useSoftFloat() && X86ScalarSSEf64) {
>> +  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
>>      // f32 and f64 use SSE.
>>      // Set up the FP register classes.
>>      addRegisterClass(MVT::f32, &X86::FR32RegClass);
>> @@ -557,7 +557,7 @@ X86TargetLowering::X86TargetLowering(con
>>      // cases we handle.
>>      addLegalFPImmediate(APFloat(+0.0)); // xorpd
>>      addLegalFPImmediate(APFloat(+0.0f)); // xorps
>> -  } else if (!Subtarget->useSoftFloat() && X86ScalarSSEf32) {
>> +  } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32) {
>>      // Use SSE for f32, x87 for f64.
>>      // Set up the FP register classes.
>>      addRegisterClass(MVT::f32, &X86::FR32RegClass);
>> @@ -592,7 +592,7 @@ X86TargetLowering::X86TargetLowering(con
>>        setOperationAction(ISD::FCOS   , MVT::f64, Expand);
>>        setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
>>      }
>> -  } else if (!Subtarget->useSoftFloat()) {
>> +  } else if (!Subtarget.useSoftFloat()) {
>>      // f32 and f64 in x87.
>>      // Set up the FP register classes.
>>      addRegisterClass(MVT::f64, &X86::RFP64RegClass);
>> @@ -626,8 +626,8 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::FMA, MVT::f32, Expand);
>>
>>    // Long double always uses X87, except f128 in MMX.
>> -  if (!Subtarget->useSoftFloat()) {
>> -    if (Subtarget->is64Bit() && Subtarget->hasMMX()) {
>> +  if (!Subtarget.useSoftFloat()) {
>> +    if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
>>        addRegisterClass(MVT::f128, &X86::FR128RegClass);
>>        ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
>>        setOperationAction(ISD::FABS , MVT::f128, Custom);
>> @@ -774,7 +774,7 @@ X86TargetLowering::X86TargetLowering(con
>>
>>    // FIXME: In order to prevent SSE instructions being expanded to MMX ones
>>    // with -msoft-float, disable use of MMX as well.
>> -  if (!Subtarget->useSoftFloat() && Subtarget->hasMMX()) {
>> +  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
>>      addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
>>      // No operations on x86mmx supported, everything uses intrinsics.
>>    }
>> @@ -792,7 +792,7 @@ X86TargetLowering::X86TargetLowering(con
>>    }
>>    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v1i64, Expand);
>>
>> -  if (!Subtarget->useSoftFloat() && Subtarget->hasSSE1()) {
>> +  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
>>      addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
>>
>>      setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
>> @@ -811,7 +811,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);
>>    }
>>
>> -  if (!Subtarget->useSoftFloat() && Subtarget->hasSSE2()) {
>> +  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
>>      addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
>>
>>      // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
>> @@ -908,7 +908,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
>>      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
>>
>> -    if (Subtarget->is64Bit()) {
>> +    if (Subtarget.is64Bit()) {
>>        setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
>>        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
>>      }
>> @@ -942,7 +942,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::UINT_TO_FP,         MVT::v4i16, Custom);
>>      // As there is no 64-bit GPR available, we need build a special custom
>>      // sequence to convert from v2i32 to v2f32.
>> -    if (!Subtarget->is64Bit())
>> +    if (!Subtarget.is64Bit())
>>        setOperationAction(ISD::UINT_TO_FP,       MVT::v2f32, Custom);
>>
>>      setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
>> @@ -956,7 +956,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
>>    }
>>
>> -  if (!Subtarget->useSoftFloat() && Subtarget->hasSSE41()) {
>> +  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
>>      for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
>>        setOperationAction(ISD::FFLOOR,           RoundedTy,  Legal);
>>        setOperationAction(ISD::FCEIL,            RoundedTy,  Legal);
>> @@ -1020,13 +1020,13 @@ X86TargetLowering::X86TargetLowering(con
>>
>>      // FIXME: these should be Legal, but that's only for the case where
>>      // the index is constant.  For now custom expand to deal with that.
>> -    if (Subtarget->is64Bit()) {
>> +    if (Subtarget.is64Bit()) {
>>        setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
>>        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
>>      }
>>    }
>>
>> -  if (Subtarget->hasSSE2()) {
>> +  if (Subtarget.hasSSE2()) {
>>      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
>>      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
>>      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
>> @@ -1052,7 +1052,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::SRA,               MVT::v4i32, Custom);
>>    }
>>
>> -  if (Subtarget->hasXOP()) {
>> +  if (Subtarget.hasXOP()) {
>>      setOperationAction(ISD::ROTL,              MVT::v16i8, Custom);
>>      setOperationAction(ISD::ROTL,              MVT::v8i16, Custom);
>>      setOperationAction(ISD::ROTL,              MVT::v4i32, Custom);
>> @@ -1063,7 +1063,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::ROTL,              MVT::v4i64, Custom);
>>    }
>>
>> -  if (!Subtarget->useSoftFloat() && Subtarget->hasFp256()) {
>> +  if (!Subtarget.useSoftFloat() && Subtarget.hasFp256()) {
>>      addRegisterClass(MVT::v32i8,  &X86::VR256RegClass);
>>      addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
>>      addRegisterClass(MVT::v8i32,  &X86::VR256RegClass);
>> @@ -1162,7 +1162,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::CTTZ_ZERO_UNDEF,   MVT::v8i32, Custom);
>>      setOperationAction(ISD::CTTZ_ZERO_UNDEF,   MVT::v4i64, Custom);
>>
>> -    if (Subtarget->hasAnyFMA()) {
>> +    if (Subtarget.hasAnyFMA()) {
>>        setOperationAction(ISD::FMA,             MVT::v8f32, Legal);
>>        setOperationAction(ISD::FMA,             MVT::v4f64, Legal);
>>        setOperationAction(ISD::FMA,             MVT::v4f32, Legal);
>> @@ -1171,7 +1171,7 @@ X86TargetLowering::X86TargetLowering(con
>>        setOperationAction(ISD::FMA,             MVT::f64, Legal);
>>      }
>>
>> -    if (Subtarget->hasInt256()) {
>> +    if (Subtarget.hasInt256()) {
>>        setOperationAction(ISD::ADD,             MVT::v4i64, Legal);
>>        setOperationAction(ISD::ADD,             MVT::v8i32, Legal);
>>        setOperationAction(ISD::ADD,             MVT::v16i16, Legal);
>> @@ -1289,7 +1289,7 @@ X86TargetLowering::X86TargetLowering(con
>>        setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
>>      }
>>
>> -    if (Subtarget->hasInt256())
>> +    if (Subtarget.hasInt256())
>>        setOperationAction(ISD::VSELECT,         MVT::v32i8, Legal);
>>
>>      // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
>> @@ -1307,7 +1307,7 @@ X86TargetLowering::X86TargetLowering(con
>>      }
>>    }
>>
>> -  if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512()) {
>> +  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
>>      addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
>>      addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
>>      addRegisterClass(MVT::v8i64,  &X86::VR512RegClass);
>> @@ -1388,7 +1388,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setTruncStoreAction(MVT::v8i64,   MVT::v8i32,  Legal);
>>      setTruncStoreAction(MVT::v16i32,  MVT::v16i8,  Legal);
>>      setTruncStoreAction(MVT::v16i32,  MVT::v16i16, Legal);
>> -    if (Subtarget->hasVLX()){
>> +    if (Subtarget.hasVLX()){
>>        setTruncStoreAction(MVT::v4i64, MVT::v4i8,  Legal);
>>        setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
>>        setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
>> @@ -1411,7 +1411,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::TRUNCATE,           MVT::v8i32, Custom);
>>      setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v8i1,  Custom);
>>      setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v16i1, Custom);
>> -    if (Subtarget->hasDQI()) {
>> +    if (Subtarget.hasDQI()) {
>>        setOperationAction(ISD::TRUNCATE,         MVT::v2i1, Custom);
>>        setOperationAction(ISD::TRUNCATE,         MVT::v4i1, Custom);
>>
>> @@ -1419,7 +1419,7 @@ X86TargetLowering::X86TargetLowering(con
>>        setOperationAction(ISD::UINT_TO_FP,       MVT::v8i64, Legal);
>>        setOperationAction(ISD::FP_TO_SINT,       MVT::v8i64, Legal);
>>        setOperationAction(ISD::FP_TO_UINT,       MVT::v8i64, Legal);
>> -      if (Subtarget->hasVLX()) {
>> +      if (Subtarget.hasVLX()) {
>>          setOperationAction(ISD::SINT_TO_FP,    MVT::v4i64, Legal);
>>          setOperationAction(ISD::SINT_TO_FP,    MVT::v2i64, Legal);
>>          setOperationAction(ISD::UINT_TO_FP,    MVT::v4i64, Legal);
>> @@ -1430,7 +1430,7 @@ X86TargetLowering::X86TargetLowering(con
>>          setOperationAction(ISD::FP_TO_UINT,    MVT::v2i64, Legal);
>>        }
>>      }
>> -    if (Subtarget->hasVLX()) {
>> +    if (Subtarget.hasVLX()) {
>>        setOperationAction(ISD::SINT_TO_FP,       MVT::v8i32, Legal);
>>        setOperationAction(ISD::UINT_TO_FP,       MVT::v8i32, Legal);
>>        setOperationAction(ISD::FP_TO_SINT,       MVT::v8i32, Legal);
>> @@ -1452,7 +1452,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::SIGN_EXTEND,        MVT::v16i8, Custom);
>>      setOperationAction(ISD::SIGN_EXTEND,        MVT::v8i16, Custom);
>>      setOperationAction(ISD::SIGN_EXTEND,        MVT::v16i16, Custom);
>> -    if (Subtarget->hasDQI()) {
>> +    if (Subtarget.hasDQI()) {
>>        setOperationAction(ISD::SIGN_EXTEND,        MVT::v4i32, Custom);
>>        setOperationAction(ISD::SIGN_EXTEND,        MVT::v2i64, Custom);
>>      }
>> @@ -1524,7 +1524,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::OR,                 MVT::v16i32, Legal);
>>      setOperationAction(ISD::XOR,                MVT::v16i32, Legal);
>>
>> -    if (Subtarget->hasCDI()) {
>> +    if (Subtarget.hasCDI()) {
>>        setOperationAction(ISD::CTLZ,             MVT::v8i64,  Legal);
>>        setOperationAction(ISD::CTLZ,             MVT::v16i32, Legal);
>>        setOperationAction(ISD::CTLZ_ZERO_UNDEF,  MVT::v8i64,  Expand);
>> @@ -1542,7 +1542,7 @@ X86TargetLowering::X86TargetLowering(con
>>        setOperationAction(ISD::CTTZ_ZERO_UNDEF,  MVT::v8i64,  Custom);
>>        setOperationAction(ISD::CTTZ_ZERO_UNDEF,  MVT::v16i32, Custom);
>>
>> -      if (Subtarget->hasVLX()) {
>> +      if (Subtarget.hasVLX()) {
>>          setOperationAction(ISD::CTLZ,             MVT::v4i64, Legal);
>>          setOperationAction(ISD::CTLZ,             MVT::v8i32, Legal);
>>          setOperationAction(ISD::CTLZ,             MVT::v2i64, Legal);
>> @@ -1566,9 +1566,9 @@ X86TargetLowering::X86TargetLowering(con
>>          setOperationAction(ISD::CTLZ_ZERO_UNDEF,  MVT::v2i64, Expand);
>>          setOperationAction(ISD::CTLZ_ZERO_UNDEF,  MVT::v4i32, Expand);
>>        }
>> -    } // Subtarget->hasCDI()
>> +    } // Subtarget.hasCDI()
>>
>> -    if (Subtarget->hasDQI()) {
>> +    if (Subtarget.hasDQI()) {
>>        setOperationAction(ISD::MUL,             MVT::v2i64, Legal);
>>        setOperationAction(ISD::MUL,             MVT::v4i64, Legal);
>>        setOperationAction(ISD::MUL,             MVT::v8i64, Legal);
>> @@ -1617,7 +1617,7 @@ X86TargetLowering::X86TargetLowering(con
>>      }
>>    }// has  AVX-512
>>
>> -  if (!Subtarget->useSoftFloat() && Subtarget->hasBWI()) {
>> +  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
>>      addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
>>      addRegisterClass(MVT::v64i8,  &X86::VR512RegClass);
>>
>> @@ -1678,10 +1678,10 @@ X86TargetLowering::X86TargetLowering(con
>>
>>      setTruncStoreAction(MVT::v32i16,  MVT::v32i8, Legal);
>>      setTruncStoreAction(MVT::v16i16,  MVT::v16i8, Legal);
>> -    if (Subtarget->hasVLX())
>> +    if (Subtarget.hasVLX())
>>        setTruncStoreAction(MVT::v8i16,   MVT::v8i8,  Legal);
>>
>> -    if (Subtarget->hasCDI()) {
>> +    if (Subtarget.hasCDI()) {
>>        setOperationAction(ISD::CTLZ,            MVT::v32i16, Custom);
>>        setOperationAction(ISD::CTLZ,            MVT::v64i8,  Custom);
>>        setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i16, Expand);
>> @@ -1704,7 +1704,7 @@ X86TargetLowering::X86TargetLowering(con
>>      }
>>    }
>>
>> -  if (!Subtarget->useSoftFloat() && Subtarget->hasVLX()) {
>> +  if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
>>      addRegisterClass(MVT::v4i1,   &X86::VK4RegClass);
>>      addRegisterClass(MVT::v2i1,   &X86::VK2RegClass);
>>
>> @@ -1744,7 +1744,7 @@ X86TargetLowering::X86TargetLowering(con
>>    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
>>    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
>>    setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
>> -  if (!Subtarget->is64Bit()) {
>> +  if (!Subtarget.is64Bit()) {
>>      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
>>      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
>>    }
>> @@ -1756,7 +1756,7 @@ X86TargetLowering::X86TargetLowering(con
>>    // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
>>    // than generic legalization for 64-bit multiplication-with-overflow, though.
>>    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
>> -    if (VT == MVT::i64 && !Subtarget->is64Bit())
>> +    if (VT == MVT::i64 && !Subtarget.is64Bit())
>>        continue;
>>      // Add/Sub/Mul with overflow operations are custom lowered.
>>      setOperationAction(ISD::SADDO, VT, Custom);
>> @@ -1767,7 +1767,7 @@ X86TargetLowering::X86TargetLowering(con
>>      setOperationAction(ISD::UMULO, VT, Custom);
>>    }
>>
>> -  if (!Subtarget->is64Bit()) {
>> +  if (!Subtarget.is64Bit()) {
>>      // These libcalls are not available in 32-bit.
>>      setLibcallName(RTLIB::SHL_I128, nullptr);
>>      setLibcallName(RTLIB::SRL_I128, nullptr);
>> @@ -1775,10 +1775,10 @@ X86TargetLowering::X86TargetLowering(con
>>    }
>>
>>    // Combine sin / cos into one node or libcall if possible.
>> -  if (Subtarget->hasSinCos()) {
>> +  if (Subtarget.hasSinCos()) {
>>      setLibcallName(RTLIB::SINCOS_F32, "sincosf");
>>      setLibcallName(RTLIB::SINCOS_F64, "sincos");
>> -    if (Subtarget->isTargetDarwin()) {
>> +    if (Subtarget.isTargetDarwin()) {
>>        // For MacOSX, we don't want the normal expansion of a libcall to sincos.
>>        // We want to issue a libcall to __sincos_stret to avoid memory traffic.
>>        setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
>> @@ -1786,7 +1786,7 @@ X86TargetLowering::X86TargetLowering(con
>>      }
>>    }
>>
>> -  if (Subtarget->isTargetWin64()) {
>> +  if (Subtarget.isTargetWin64()) {
>>      setOperationAction(ISD::SDIV, MVT::i128, Custom);
>>      setOperationAction(ISD::UDIV, MVT::i128, Custom);
>>      setOperationAction(ISD::SREM, MVT::i128, Custom);
>> @@ -1832,7 +1832,7 @@ X86TargetLowering::X86TargetLowering(con
>>    setTargetDAGCombine(ISD::MSCATTER);
>>    setTargetDAGCombine(ISD::MGATHER);
>>
>> -  computeRegisterProperties(Subtarget->getRegisterInfo());
>> +  computeRegisterProperties(Subtarget.getRegisterInfo());
>>
>>    MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
>>    MaxStoresPerMemsetOptSize = 8;
>> @@ -1844,7 +1844,7 @@ X86TargetLowering::X86TargetLowering(con
>>
>>    // A predictable cmov does not hurt on an in-order CPU.
>>    // FIXME: Use a CPU attribute to trigger this, not a CPU model.
>> -  PredictableSelectIsExpensive = !Subtarget->isAtom();
>> +  PredictableSelectIsExpensive = !Subtarget.isAtom();
>>    EnableExtLdPromotion = true;
>>    setPrefFunctionAlignment(4); // 2^4 bytes.
>>
>> @@ -1853,7 +1853,7 @@ X86TargetLowering::X86TargetLowering(con
>>
>>  // This has so far only been implemented for 64-bit MachO.
>>  bool X86TargetLowering::useLoadStackGuardNode() const {
>> -  return Subtarget->isTargetMachO() && Subtarget->is64Bit();
>> +  return Subtarget.isTargetMachO() && Subtarget.is64Bit();
>>  }
>>
>>  TargetLoweringBase::LegalizeTypeAction
>> @@ -1869,21 +1869,21 @@ X86TargetLowering::getPreferredVectorAct
>>  EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
>>                                            EVT VT) const {
>>    if (!VT.isVector())
>> -    return Subtarget->hasAVX512() ? MVT::i1: MVT::i8;
>> +    return Subtarget.hasAVX512() ? MVT::i1: MVT::i8;
>>
>>    if (VT.isSimple()) {
>>      MVT VVT = VT.getSimpleVT();
>>      const unsigned NumElts = VVT.getVectorNumElements();
>>      const MVT EltVT = VVT.getVectorElementType();
>>      if (VVT.is512BitVector()) {
>> -      if (Subtarget->hasAVX512())
>> +      if (Subtarget.hasAVX512())
>>          if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
>>              EltVT == MVT::f32 || EltVT == MVT::f64)
>>            switch(NumElts) {
>>            case  8: return MVT::v8i1;
>>            case 16: return MVT::v16i1;
>>          }
>> -      if (Subtarget->hasBWI())
>> +      if (Subtarget.hasBWI())
>>          if (EltVT == MVT::i8 || EltVT == MVT::i16)
>>            switch(NumElts) {
>>            case 32: return MVT::v32i1;
>> @@ -1892,7 +1892,7 @@ EVT X86TargetLowering::getSetCCResultTyp
>>      }
>>
>>      if (VVT.is256BitVector() || VVT.is128BitVector()) {
>> -      if (Subtarget->hasVLX())
>> +      if (Subtarget.hasVLX())
>>          if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
>>              EltVT == MVT::f32 || EltVT == MVT::f64)
>>            switch(NumElts) {
>> @@ -1900,7 +1900,7 @@ EVT X86TargetLowering::getSetCCResultTyp
>>            case 4: return MVT::v4i1;
>>            case 8: return MVT::v8i1;
>>          }
>> -      if (Subtarget->hasBWI() && Subtarget->hasVLX())
>> +      if (Subtarget.hasBWI() && Subtarget.hasVLX())
>>          if (EltVT == MVT::i8 || EltVT == MVT::i16)
>>            switch(NumElts) {
>>            case  8: return MVT::v8i1;
>> @@ -1944,7 +1944,7 @@ static void getMaxByValAlign(Type *Ty, u
>>  /// are at 4-byte boundaries.
>>  unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
>>                                                    const DataLayout &DL) const {
>> -  if (Subtarget->is64Bit()) {
>> +  if (Subtarget.is64Bit()) {
>>      // Max of 8 and alignment of type.
>>      unsigned TyAlign = DL.getABITypeAlignment(Ty);
>>      if (TyAlign > 8)
>> @@ -1953,7 +1953,7 @@ unsigned X86TargetLowering::getByValType
>>    }
>>
>>    unsigned Align = 4;
>> -  if (Subtarget->hasSSE1())
>> +  if (Subtarget.hasSSE1())
>>      getMaxByValAlign(Ty, Align);
>>    return Align;
>>  }
>> @@ -1979,23 +1979,23 @@ X86TargetLowering::getOptimalMemOpType(u
>>    if ((!IsMemset || ZeroMemset) &&
>>        !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
>>      if (Size >= 16 &&
>> -        (!Subtarget->isUnalignedMem16Slow() ||
>> +        (!Subtarget.isUnalignedMem16Slow() ||
>>           ((DstAlign == 0 || DstAlign >= 16) &&
>>            (SrcAlign == 0 || SrcAlign >= 16)))) {
>>        if (Size >= 32) {
>>          // FIXME: Check if unaligned 32-byte accesses are slow.
>> -        if (Subtarget->hasInt256())
>> +        if (Subtarget.hasInt256())
>>            return MVT::v8i32;
>> -        if (Subtarget->hasFp256())
>> +        if (Subtarget.hasFp256())
>>            return MVT::v8f32;
>>        }
>> -      if (Subtarget->hasSSE2())
>> +      if (Subtarget.hasSSE2())
>>          return MVT::v4i32;
>> -      if (Subtarget->hasSSE1())
>> +      if (Subtarget.hasSSE1())
>>          return MVT::v4f32;
>>      } else if (!MemcpyStrSrc && Size >= 8 &&
>> -               !Subtarget->is64Bit() &&
>> -               Subtarget->hasSSE2()) {
>> +               !Subtarget.is64Bit() &&
>> +               Subtarget.hasSSE2()) {
>>        // Do not use f64 to lower memcpy if source is string constant. It's
>>        // better to use i32 to avoid the loads.
>>        return MVT::f64;
>> @@ -2004,7 +2004,7 @@ X86TargetLowering::getOptimalMemOpType(u
>>    // This is a compromise. If we reach here, unaligned accesses may be slow on
>>    // this target. However, creating smaller, aligned accesses could be even
>>    // slower and would certainly be a lot more code.
>> -  if (Subtarget->is64Bit() && Size >= 8)
>> +  if (Subtarget.is64Bit() && Size >= 8)
>>      return MVT::i64;
>>    return MVT::i32;
>>  }
>> @@ -2029,10 +2029,10 @@ X86TargetLowering::allowsMisalignedMemor
>>        *Fast = true;
>>        break;
>>      case 128:
>> -      *Fast = !Subtarget->isUnalignedMem16Slow();
>> +      *Fast = !Subtarget.isUnalignedMem16Slow();
>>        break;
>>      case 256:
>> -      *Fast = !Subtarget->isUnalignedMem32Slow();
>> +      *Fast = !Subtarget.isUnalignedMem32Slow();
>>        break;
>>      // TODO: What about AVX-512 (512-bit) accesses?
>>      }
>> @@ -2048,7 +2048,7 @@ unsigned X86TargetLowering::getJumpTable
>>    // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
>>    // symbol.
>>    if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
>> -      Subtarget->isPICStyleGOT())
>> +      Subtarget.isPICStyleGOT())
>>      return MachineJumpTableInfo::EK_Custom32;
>>
>>    // Otherwise, use the normal jump table encoding heuristics.
>> @@ -2056,7 +2056,7 @@ unsigned X86TargetLowering::getJumpTable
>>  }
>>
>>  bool X86TargetLowering::useSoftFloat() const {
>> -  return Subtarget->useSoftFloat();
>> +  return Subtarget.useSoftFloat();
>>  }
>>
>>  const MCExpr *
>> @@ -2064,7 +2064,7 @@ X86TargetLowering::LowerCustomJumpTableE
>>                                               const MachineBasicBlock *MBB,
>>                                               unsigned uid,MCContext &Ctx) const{
>>    assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
>> -         Subtarget->isPICStyleGOT());
>> +         Subtarget.isPICStyleGOT());
>>    // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
>>    // entries.
>>    return MCSymbolRefExpr::create(MBB->getSymbol(),
>> @@ -2074,7 +2074,7 @@ X86TargetLowering::LowerCustomJumpTableE
>>  /// Returns relocation base for the given PIC jumptable.
>>  SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
>>                                                      SelectionDAG &DAG) const {
>> -  if (!Subtarget->is64Bit())
>> +  if (!Subtarget.is64Bit())
>>      // This doesn't have SDLoc associated with it, but is not really the
>>      // same as a Register.
>>      return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
>> @@ -2088,7 +2088,7 @@ const MCExpr *X86TargetLowering::
>>  getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
>>                               MCContext &Ctx) const {
>>    // X86-64 uses RIP relative addressing based on the jump table label.
>> -  if (Subtarget->isPICStyleRIPRel())
>> +  if (Subtarget.isPICStyleRIPRel())
>>      return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
>>
>>    // Otherwise, the reference is relative to the PIC base.
>> @@ -2104,7 +2104,7 @@ X86TargetLowering::findRepresentativeCla
>>    default:
>>      return TargetLowering::findRepresentativeClass(TRI, VT);
>>    case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
>> -    RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
>> +    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
>>      break;
>>    case MVT::x86mmx:
>>      RRC = &X86::VR64RegClass;
>> @@ -2122,10 +2122,10 @@ X86TargetLowering::findRepresentativeCla
>>
>>  bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
>>                                                 unsigned &Offset) const {
>> -  if (!Subtarget->isTargetLinux())
>> +  if (!Subtarget.isTargetLinux())
>>      return false;
>>
>> -  if (Subtarget->is64Bit()) {
>> +  if (Subtarget.is64Bit()) {
>>      // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
>>      Offset = 0x28;
>>      if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
>> @@ -2141,14 +2141,14 @@ bool X86TargetLowering::getStackCookieLo
>>  }
>>
>>  Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
>> -  if (!Subtarget->isTargetAndroid())
>> +  if (!Subtarget.isTargetAndroid())
>>      return TargetLowering::getSafeStackPointerLocation(IRB);
>>
>>    // Android provides a fixed TLS slot for the SafeStack pointer. See the
>>    // definition of TLS_SLOT_SAFESTACK in
>>    // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
>>    unsigned AddressSpace, Offset;
>> -  if (Subtarget->is64Bit()) {
>> +  if (Subtarget.is64Bit()) {
>>      // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
>>      Offset = 0x48;
>>      if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
>> @@ -2243,14 +2243,14 @@ X86TargetLowering::LowerReturn(SDValue C
>>      // or SSE or MMX vectors.
>>      if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
>>           VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
>> -          (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
>> +          (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
>>        report_fatal_error("SSE register return with SSE disabled");
>>      }
>>      // Likewise we can't return F64 values with SSE1 only.  gcc does so, but
>>      // llvm-gcc has never done it right and no one has noticed, so this
>>      // should be OK for now.
>>      if (ValVT == MVT::f64 &&
>> -        (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
>> +        (Subtarget.is64Bit() && !Subtarget.hasSSE2()))
>>        report_fatal_error("SSE2 register return with SSE2 disabled");
>>
>>      // Returns in ST0/ST1 are handled specially: these are pushed as operands to
>> @@ -2268,7 +2268,7 @@ X86TargetLowering::LowerReturn(SDValue C
>>
>>      // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
>>      // which is returned in RAX / RDX.
>> -    if (Subtarget->is64Bit()) {
>> +    if (Subtarget.is64Bit()) {
>>        if (ValVT == MVT::x86mmx) {
>>          if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
>>            ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
>> @@ -2276,7 +2276,7 @@ X86TargetLowering::LowerReturn(SDValue C
>>                                    ValToCopy);
>>            // If we don't have SSE2 available, convert to v4f32 so the generated
>>            // register is legal.
>> -          if (!Subtarget->hasSSE2())
>> +          if (!Subtarget.hasSSE2())
>>              ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
>>          }
>>        }
>> @@ -2301,7 +2301,7 @@ X86TargetLowering::LowerReturn(SDValue C
>>                                       getPointerTy(MF.getDataLayout()));
>>
>>      unsigned RetValReg
>> -        = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
>> +        = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
>>            X86::RAX : X86::EAX;
>>      Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
>>      Flag = Chain.getValue(1);
>> @@ -2311,7 +2311,7 @@ X86TargetLowering::LowerReturn(SDValue C
>>          DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
>>    }
>>
>> -  const X86RegisterInfo *TRI = Subtarget->getRegisterInfo();
>> +  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
>>    const MCPhysReg *I =
>>        TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
>>    if (I) {
>> @@ -2379,7 +2379,7 @@ X86TargetLowering::getTypeForExtArgOrRet
>>                                              ISD::NodeType ExtendKind) const {
>>    MVT ReturnMVT;
>>    // TODO: Is this also valid on 32-bit?
>> -  if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
>> +  if (Subtarget.is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
>>      ReturnMVT = MVT::i8;
>>    else
>>      ReturnMVT = MVT::i32;
>> @@ -2400,7 +2400,7 @@ X86TargetLowering::LowerCallResult(SDVal
>>
>>    // Assign locations to each value returned by this call.
>>    SmallVector<CCValAssign, 16> RVLocs;
>> -  bool Is64Bit = Subtarget->is64Bit();
>> +  bool Is64Bit = Subtarget.is64Bit();
>>    CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
>>                   *DAG.getContext());
>>    CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
>> @@ -2412,7 +2412,7 @@ X86TargetLowering::LowerCallResult(SDVal
>>
>>      // If this is x86-64, and we disabled SSE, we can't return FP values
>>      if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
>> -        ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
>> +        ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget.hasSSE1())) {
>>        report_fatal_error("SSE register return with SSE disabled");
>>      }
>>
>> @@ -2618,10 +2618,10 @@ X86TargetLowering::LowerMemArgument(SDVa
>>
>>  // FIXME: Get this from tablegen.
>>  static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
>> -                                                const X86Subtarget *Subtarget) {
>> -  assert(Subtarget->is64Bit());
>> +                                                const X86Subtarget &Subtarget) {
>> +  assert(Subtarget.is64Bit());
>>
>> -  if (Subtarget->isCallingConvWin64(CallConv)) {
>> +  if (Subtarget.isCallingConvWin64(CallConv)) {
>>      static const MCPhysReg GPR64ArgRegsWin64[] = {
>>        X86::RCX, X86::RDX, X86::R8,  X86::R9
>>      };
>> @@ -2637,9 +2637,9 @@ static ArrayRef<MCPhysReg> get64BitArgum
>>  // FIXME: Get this from tablegen.
>>  static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
>>                                                  CallingConv::ID CallConv,
>> -                                                const X86Subtarget *Subtarget) {
>> -  assert(Subtarget->is64Bit());
>> -  if (Subtarget->isCallingConvWin64(CallConv)) {
>> +                                                const X86Subtarget &Subtarget) {
>> +  assert(Subtarget.is64Bit());
>> +  if (Subtarget.isCallingConvWin64(CallConv)) {
>>      // The XMM registers which might contain var arg parameters are shadowed
>>      // in their paired GPR.  So we only need to save the GPR to their home
>>      // slots.
>> @@ -2649,10 +2649,10 @@ static ArrayRef<MCPhysReg> get64BitArgum
>>
>>    const Function *Fn = MF.getFunction();
>>    bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
>> -  bool isSoftFloat = Subtarget->useSoftFloat();
>> +  bool isSoftFloat = Subtarget.useSoftFloat();
>>    assert(!(isSoftFloat && NoImplicitFloatOps) &&
>>           "SSE register cannot be used when SSE is disabled!");
>> -  if (isSoftFloat || NoImplicitFloatOps || !Subtarget->hasSSE1())
>> +  if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
>>      // Kernel mode asks for SSE to be disabled, so there are no XMM argument
>>      // registers.
>>      return None;
>> @@ -2670,17 +2670,17 @@ SDValue X86TargetLowering::LowerFormalAr
>>      SmallVectorImpl<SDValue> &InVals) const {
>>    MachineFunction &MF = DAG.getMachineFunction();
>>    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
>> -  const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
>> +  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
>>
>>    const Function* Fn = MF.getFunction();
>>    if (Fn->hasExternalLinkage() &&
>> -      Subtarget->isTargetCygMing() &&
>> +      Subtarget.isTargetCygMing() &&
>>        Fn->getName() == "main")
>>      FuncInfo->setForceFramePointer(true);
>>
>>    MachineFrameInfo *MFI = MF.getFrameInfo();
>> -  bool Is64Bit = Subtarget->is64Bit();
>> -  bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
>> +  bool Is64Bit = Subtarget.is64Bit();
>> +  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
>>
>>    assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
>>           "Var args not supported with calling convention fastcc, ghc or hipe");
>> @@ -2818,7 +2818,7 @@ SDValue X86TargetLowering::LowerFormalAr
>>    }
>>
>>    // Figure out if XMM registers are in use.
>> -  assert(!(Subtarget->useSoftFloat() &&
>> +  assert(!(Subtarget.useSoftFloat() &&
>>             Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
>>           "SSE register cannot be used when SSE is disabled!");
>>
>> @@ -2830,7 +2830,7 @@ SDValue X86TargetLowering::LowerFormalAr
>>      ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
>>      unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
>>      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
>> -    assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
>> +    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
>>             "SSE register cannot be used when SSE is disabled!");
>>
>>      // Gather all the live in physical registers.
>> @@ -2912,13 +2912,13 @@ SDValue X86TargetLowering::LowerFormalAr
>>      // Find the largest legal vector type.
>>      MVT VecVT = MVT::Other;
>>      // FIXME: Only some x86_32 calling conventions support AVX512.
>> -    if (Subtarget->hasAVX512() &&
>> +    if (Subtarget.hasAVX512() &&
>>          (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
>>                       CallConv == CallingConv::Intel_OCL_BI)))
>>        VecVT = MVT::v16f32;
>> -    else if (Subtarget->hasAVX())
>> +    else if (Subtarget.hasAVX())
>>        VecVT = MVT::v8f32;
>> -    else if (Subtarget->hasSSE2())
>> +    else if (Subtarget.hasSSE2())
>>        VecVT = MVT::v4f32;
>>
>>      // We forward some GPRs and some vector types.
>> @@ -2959,8 +2959,8 @@ SDValue X86TargetLowering::LowerFormalAr
>>      FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
>>      // If this is an sret function, the return should pop the hidden pointer.
>>      if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
>> -        !Subtarget->getTargetTriple().isOSMSVCRT() &&
>> -        argsAreStructReturn(Ins, Subtarget->isTargetMCU()) == StackStructReturn)
>> +        !Subtarget.getTargetTriple().isOSMSVCRT() &&
>> +        argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
>>        FuncInfo->setBytesToPopOnReturn(4);
>>    }
>>
>> @@ -3078,9 +3078,9 @@ X86TargetLowering::LowerCall(TargetLower
>>    bool isVarArg                         = CLI.IsVarArg;
>>
>>    MachineFunction &MF = DAG.getMachineFunction();
>> -  bool Is64Bit        = Subtarget->is64Bit();
>> -  bool IsWin64        = Subtarget->isCallingConvWin64(CallConv);
>> -  StructReturnType SR = callIsStructReturn(Outs, Subtarget->isTargetMCU());
>> +  bool Is64Bit        = Subtarget.is64Bit();
>> +  bool IsWin64        = Subtarget.isCallingConvWin64(CallConv);
>> +  StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
>>    bool IsSibcall      = false;
>>    X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
>>    auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
>> @@ -3091,7 +3091,7 @@ X86TargetLowering::LowerCall(TargetLower
>>    if (Attr.getValueAsString() == "true")
>>      isTailCall = false;
>>
>> -  if (Subtarget->isPICStyleGOT() &&
>> +  if (Subtarget.isPICStyleGOT() &&
>>        !MF.getTarget().Options.GuaranteedTailCallOpt) {
>>      // If we are using a GOT, disable tail calls to external symbols with
>>      // default visibility. Tail calling such a symbol requires using a GOT
>> @@ -3194,7 +3194,7 @@ X86TargetLowering::LowerCall(TargetLower
>>
>>    // Walk the register/memloc assignments, inserting copies/loads.  In the case
>>    // of tail call optimization arguments are handle later.
>> -  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
>> +  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
>>    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
>>      // Skip inalloca arguments, they have already been written.
>>      ISD::ArgFlagsTy Flags = Outs[i].Flags;
>> @@ -3272,7 +3272,7 @@ X86TargetLowering::LowerCall(TargetLower
>>    if (!MemOpChains.empty())
>>      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
>>
>> -  if (Subtarget->isPICStyleGOT()) {
>> +  if (Subtarget.isPICStyleGOT()) {
>>      // ELF / PIC requires GOT in the EBX register before function calls via PLT
>>      // GOT pointer.
>>      if (!isTailCall) {
>> @@ -3313,7 +3313,7 @@ X86TargetLowering::LowerCall(TargetLower
>>        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
>>      };
>>      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
>> -    assert((Subtarget->hasSSE1() || !NumXMMRegs)
>> +    assert((Subtarget.hasSSE1() || !NumXMMRegs)
>>             && "SSE registers cannot be used when SSE is disabled");
>>
>>      RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
>> @@ -3423,19 +3423,19 @@ X86TargetLowering::LowerCall(TargetLower
>>        // external symbols most go through the PLT in PIC mode.  If the symbol
>>        // has hidden or protected visibility, or if it is static or local, then
>>        // we don't need to use the PLT - we can directly call it.
>> -      if (Subtarget->isTargetELF() &&
>> +      if (Subtarget.isTargetELF() &&
>>            DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
>>            GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
>>          OpFlags = X86II::MO_PLT;
>> -      } else if (Subtarget->isPICStyleStubAny() &&
>> +      } else if (Subtarget.isPICStyleStubAny() &&
>>                   !GV->isStrongDefinitionForLinker() &&
>> -                 (!Subtarget->getTargetTriple().isMacOSX() ||
>> -                  Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
>> +                 (!Subtarget.getTargetTriple().isMacOSX() ||
>> +                  Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) {
>>          // PC-relative references to external symbols should go through $stub,
>>          // unless we're building with the leopard linker or later, which
>>          // automatically synthesizes these stubs.
>>          OpFlags = X86II::MO_DARWIN_STUB;
>> -      } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
>> +      } else if (Subtarget.isPICStyleRIPRel() && isa<Function>(GV) &&
>>                  cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
>>          // If the function is marked as non-lazy, generate an indirect call
>>          // which loads from the GOT directly. This avoids runtime overhead
>> @@ -3464,12 +3464,12 @@ X86TargetLowering::LowerCall(TargetLower
>>
>>      // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
>>      // external symbols should go through the PLT.
>> -    if (Subtarget->isTargetELF() &&
>> +    if (Subtarget.isTargetELF() &&
>>          DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
>>        OpFlags = X86II::MO_PLT;
>> -    } else if (Subtarget->isPICStyleStubAny() &&
>> -               (!Subtarget->getTargetTriple().isMacOSX() ||
>> -                Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
>> +    } else if (Subtarget.isPICStyleStubAny() &&
>> +               (!Subtarget.getTargetTriple().isMacOSX() ||
>> +                Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) {
>>        // PC-relative references to external symbols should go through $stub,
>>        // unless we're building with the leopard linker or later, which
>>        // automatically synthesizes these stubs.
>> @@ -3478,7 +3478,7 @@ X86TargetLowering::LowerCall(TargetLower
>>
>>      Callee = DAG.getTargetExternalSymbol(
>>          S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
>> -  } else if (Subtarget->isTarget64BitILP32() &&
>> +  } else if (Subtarget.isTarget64BitILP32() &&
>>               Callee->getValueType(0) == MVT::i32) {
>>      // Zero-extend the 32-bit Callee address into a 64-bit according to x32 ABI
>>      Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
>> @@ -3551,7 +3551,7 @@ X86TargetLowering::LowerCall(TargetLower
>>                         DAG.getTarget().Options.GuaranteedTailCallOpt))
>>      NumBytesForCalleeToPop = NumBytes;    // Callee pops everything
>>    else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
>> -           !Subtarget->getTargetTriple().isOSMSVCRT() &&
>> +           !Subtarget.getTargetTriple().isOSMSVCRT() &&
>>             SR == StackStructReturn)
>>      // If this is a call to a struct-return function, the callee
>>      // pops the hidden struct pointer, so we have to push it back.
>> @@ -3613,8 +3613,8 @@ X86TargetLowering::LowerCall(TargetLower
>>  unsigned
>>  X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
>>                                                 SelectionDAG& DAG) const {
>> -  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
>> -  const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
>> +  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
>> +  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
>>    unsigned StackAlignment = TFI.getStackAlignment();
>>    uint64_t AlignMask = StackAlignment - 1;
>>    int64_t Offset = StackSize;
>> @@ -3707,8 +3707,8 @@ bool X86TargetLowering::IsEligibleForTai
>>
>>    CallingConv::ID CallerCC = CallerF->getCallingConv();
>>    bool CCMatch = CallerCC == CalleeCC;
>> -  bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
>> -  bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
>> +  bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
>> +  bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
>>
>>    // Win64 functions have extra shadow space for argument homing. Don't do the
>>    // sibcall if the caller and callee have mismatched expectations for this
>> @@ -3727,7 +3727,7 @@ bool X86TargetLowering::IsEligibleForTai
>>
>>    // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
>>    // emit a special epilogue.
>> -  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
>> +  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
>>    if (RegInfo->needsStackRealignment(MF))
>>      return false;
>>
>> @@ -3829,7 +3829,7 @@ bool X86TargetLowering::IsEligibleForTai
>>        // the caller's fixed stack objects.
>>        MachineFrameInfo *MFI = MF.getFrameInfo();
>>        const MachineRegisterInfo *MRI = &MF.getRegInfo();
>> -      const X86InstrInfo *TII = Subtarget->getInstrInfo();
>> +      const X86InstrInfo *TII = Subtarget.getInstrInfo();
>>        for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
>>          CCValAssign &VA = ArgLocs[i];
>>          SDValue Arg = OutVals[i];
>> @@ -3849,7 +3849,7 @@ bool X86TargetLowering::IsEligibleForTai
>>      // only target EAX, EDX, or ECX since the tail call must be scheduled after
>>      // callee-saved registers are restored. These happen to be the same
>>      // registers used to pass 'inreg' arguments so watch out for those.
>> -    if (!Subtarget->is64Bit() &&
>> +    if (!Subtarget.is64Bit() &&
>>          ((!isa<GlobalAddressSDNode>(Callee) &&
>>            !isa<ExternalSymbolSDNode>(Callee)) ||
>>           DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
>> @@ -3876,7 +3876,7 @@ bool X86TargetLowering::IsEligibleForTai
>>    }
>>
>>    bool CalleeWillPop =
>> -      X86::isCalleePop(CalleeCC, Subtarget->is64Bit(), isVarArg,
>> +      X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
>>                         MF.getTarget().Options.GuaranteedTailCallOpt);
>>
>>    if (unsigned BytesToPop =
>> @@ -3978,7 +3978,7 @@ static SDValue getTargetShuffleNode(unsi
>>
>>  SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
>>    MachineFunction &MF = DAG.getMachineFunction();
>> -  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
>> +  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
>>    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
>>    int ReturnAddrIndex = FuncInfo->getRAIndex();
>>
>> @@ -4289,12 +4289,12 @@ bool X86TargetLowering::isExtractSubvect
>>
>>  bool X86TargetLowering::isCheapToSpeculateCttz() const {
>>    // Speculate cttz only if we can directly use TZCNT.
>> -  return Subtarget->hasBMI();
>> +  return Subtarget.hasBMI();
>>  }
>>
>>  bool X86TargetLowering::isCheapToSpeculateCtlz() const {
>>    // Speculate ctlz only if we can directly use LZCNT.
>> -  return Subtarget->hasLZCNT();
>> +  return Subtarget.hasLZCNT();
>>  }
>>
>>  /// Return true if every element in Mask, beginning
>> @@ -4474,7 +4474,7 @@ static SDValue getConstVector(ArrayRef<i
>>  }
>>
>>  /// Returns a vector of specified type with all zero elements.
>> -static SDValue getZeroVector(MVT VT, const X86Subtarget *Subtarget,
>> +static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
>>                               SelectionDAG &DAG, SDLoc dl) {
>>    assert(VT.isVector() && "Expected a vector type");
>>
>> @@ -4482,7 +4482,7 @@ static SDValue getZeroVector(MVT VT, con
>>    // to their dest type. This ensures they get CSE'd.
>>    SDValue Vec;
>>    if (VT.is128BitVector()) {  // SSE
>> -    if (Subtarget->hasSSE2()) {  // SSE2
>> +    if (Subtarget.hasSSE2()) {  // SSE2
>>        SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
>>        Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
>>      } else { // SSE1
>> @@ -4490,7 +4490,7 @@ static SDValue getZeroVector(MVT VT, con
>>        Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
>>      }
>>    } else if (VT.is256BitVector()) { // AVX
>> -    if (Subtarget->hasInt256()) { // AVX2
>> +    if (Subtarget.hasInt256()) { // AVX2
>>        SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
>>        SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
>>        Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
>> @@ -4508,9 +4508,9 @@ static SDValue getZeroVector(MVT VT, con
>>        Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
>>    } else if (VT.getVectorElementType() == MVT::i1) {
>>
>> -    assert((Subtarget->hasBWI() || VT.getVectorNumElements() <= 16)
>> +    assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16)
>>              && "Unexpected vector type");
>> -    assert((Subtarget->hasVLX() || VT.getVectorNumElements() >= 8)
>> +    assert((Subtarget.hasVLX() || VT.getVectorNumElements() >= 8)
>>              && "Unexpected vector type");
>>      SDValue Cst = DAG.getConstant(0, dl, MVT::i1);
>>      SmallVector<SDValue, 64> Ops(VT.getVectorNumElements(), Cst);
>> @@ -4756,7 +4756,7 @@ static SDValue Concat256BitVectors(SDVal
>>  /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
>>  /// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
>>  /// Then bitcast to their original type, ensuring they get CSE'd.
>> -static SDValue getOnesVector(EVT VT, const X86Subtarget *Subtarget,
>> +static SDValue getOnesVector(EVT VT, const X86Subtarget &Subtarget,
>>                               SelectionDAG &DAG, SDLoc dl) {
>>    assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
>>           "Expected a 128/256/512-bit vector type");
>> @@ -4764,7 +4764,7 @@ static SDValue getOnesVector(EVT VT, con
>>    APInt Ones = APInt::getAllOnesValue(32);
>>    unsigned NumElts = VT.getSizeInBits() / 32;
>>    SDValue Vec;
>> -  if (!Subtarget->hasInt256() && NumElts == 8) {
>> +  if (!Subtarget.hasInt256() && NumElts == 8) {
>>      Vec = DAG.getConstant(Ones, dl, MVT::v4i32);
>>      Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
>>    } else {
>> @@ -4803,7 +4803,7 @@ static SDValue getUnpackh(SelectionDAG &
>>  /// This produces a shuffle mask like 4,1,2,3 (idx=0) or  0,1,2,4
>> (idx=3).
>>  static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
>>                                             bool IsZero,
>> -                                           const X86Subtarget *Subtarget,
>> +                                           const X86Subtarget &Subtarget,
>>                                             SelectionDAG &DAG) {
>>    MVT VT = V2.getSimpleValueType();
>>    SDValue V1 = IsZero
>> @@ -5180,7 +5180,7 @@ static SDValue getShuffleScalarElt(SDNod
>>  static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
>>                                         unsigned NumNonZero, unsigned NumZero,
>>                                         SelectionDAG &DAG,
>> -                                       const X86Subtarget* Subtarget,
>> +                                       const X86Subtarget &Subtarget,
>>                                         const TargetLowering &TLI) {
>>    if (NumNonZero > 8)
>>      return SDValue();
>> @@ -5190,7 +5190,7 @@ static SDValue LowerBuildVectorv16i8(SDV
>>    bool First = true;
>>
>>    // SSE4.1 - use PINSRB to insert each byte directly.
>> -  if (Subtarget->hasSSE41()) {
>> +  if (Subtarget.hasSSE41()) {
>>      for (unsigned i = 0; i < 16; ++i) {
>>        bool isNonZero = (NonZeros & (1 << i)) != 0;
>>        if (isNonZero) {
>> @@ -5250,7 +5250,7 @@ static SDValue LowerBuildVectorv16i8(SDV
>>  static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
>>                                       unsigned NumNonZero, unsigned NumZero,
>>                                       SelectionDAG &DAG,
>> -                                     const X86Subtarget* Subtarget,
>> +                                     const X86Subtarget &Subtarget,
>>                                       const TargetLowering &TLI) {
>>    if (NumNonZero > 4)
>>      return SDValue();
>> @@ -5279,7 +5279,7 @@ static SDValue LowerBuildVectorv8i16(SDV
>>
>>  /// Custom lower build_vector of v4i32 or v4f32.
>>  static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
>> -                                     const X86Subtarget *Subtarget,
>> +                                     const X86Subtarget &Subtarget,
>>                                       const TargetLowering &TLI) {
>>    // Find all zeroable elements.
>>    std::bitset<4> Zeroable;
>> @@ -5343,7 +5343,7 @@ static SDValue LowerBuildVectorv4x32(SDV
>>    }
>>
>>    // See if we can lower this build_vector to a INSERTPS.
>> -  if (!Subtarget->hasSSE41())
>> +  if (!Subtarget.hasSSE41())
>>      return SDValue();
>>
>>    SDValue V2 = Elt.getOperand(0);
>> @@ -5624,12 +5624,12 @@ static SDValue EltsFromConsecutiveLoads(
>>  /// a scalar load, or a constant.
>>  /// The VBROADCAST node is returned when a pattern is found,
>>  /// or SDValue() otherwise.
>> -static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
>> +static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget &Subtarget,
>>                                      SelectionDAG &DAG) {
>>    // VBROADCAST requires AVX.
>>    // TODO: Splats could be generated for non-AVX CPUs using SSE
>>    // instructions, but there's less potential gain for only 128-bit vectors.
>> -  if (!Subtarget->hasAVX())
>> +  if (!Subtarget.hasAVX())
>>      return SDValue();
>>
>>    MVT VT = Op.getSimpleValueType();
>> @@ -5679,7 +5679,7 @@ static SDValue LowerVectorBroadcast(SDVa
>>        if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
>>            Sc.getOpcode() != ISD::BUILD_VECTOR) {
>>
>> -        if (!Subtarget->hasInt256())
>> +        if (!Subtarget.hasInt256())
>>            return SDValue();
>>
>>          // Use the register form of the broadcast instruction available on AVX2.
>> @@ -5697,7 +5697,7 @@ static SDValue LowerVectorBroadcast(SDVa
>>        // Constants may have multiple users.
>>
>>        // AVX-512 has register version of the broadcast
>> -      bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
>> +      bool hasRegVer = Subtarget.hasAVX512() && VT.is512BitVector() &&
>>          Ld.getValueType().getSizeInBits() >= 32;
>>        if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
>>            !hasRegVer))
>> @@ -5722,7 +5722,7 @@ static SDValue LowerVectorBroadcast(SDVa
>>    // from the constant pool and not to broadcast it from a scalar.
>>    // But override that restriction when optimizing for size.
>>    // TODO: Check if splatting is recommended for other AVX-capable CPUs.
>> -  if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
>> +  if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
>>      EVT CVT = Ld.getValueType();
>>      assert(!CVT.isVector() && "Must not broadcast a vector type");
>>
>> @@ -5731,7 +5731,7 @@ static SDValue LowerVectorBroadcast(SDVa
>>      // with AVX2, also splat i8 and i16.
>>      // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
>>      if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
>> -        (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
>> +        (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
>>        const Constant *C = nullptr;
>>        if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
>>          C = CI->getConstantIntValue();
>> @@ -5756,7 +5756,7 @@ static SDValue LowerVectorBroadcast(SDVa
>>    bool IsLoad = ISD::isNormalLoad(Ld.getNode());
>>
>>    // Handle AVX2 in-regist
>
>
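
The hunks above repeatedly swap `Subtarget->` for `Subtarget.` and change helper signatures from `const X86Subtarget *` to `const X86Subtarget &`. For readers outside the X86 backend, here is a minimal standalone sketch of that member-pointer-to-reference pattern; the class and member names are invented for illustration and are not code from this patch.

#include <iostream>

// Stand-in for the real subtarget class; the names here are illustrative only.
struct Subtarget {
  bool hasSSE2() const { return true; }
};

// Pointer member: every use goes through '->' and the type suggests the
// dependency might be null, even though the constructor always gets a valid one.
class LoweringWithPointer {
  const Subtarget *STI;
public:
  explicit LoweringWithPointer(const Subtarget &S) : STI(&S) {}
  bool sse2() const { return STI->hasSSE2(); }
};

// Const reference member: documents that the dependency is mandatory and
// non-reseatable, and call sites use '.' instead of '->'.
class LoweringWithReference {
  const Subtarget &STI;
public:
  explicit LoweringWithReference(const Subtarget &S) : STI(S) {}
  bool sse2() const { return STI.hasSSE2(); }
};

int main() {
  Subtarget S;
  std::cout << LoweringWithPointer(S).sse2() << ' '
            << LoweringWithReference(S).sse2() << '\n';  // prints "1 1"
}

One side effect of a reference member is that the class loses its implicit copy assignment, which is usually acceptable for an object that is constructed once and never reseated.
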
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20160126/95c0fd58/attachment-0001.html>

