[llvm] r302179 - [ms-inline-asm] Use the frontend size only for ambiguous instructions
Reid Kleckner via llvm-commits
llvm-commits at lists.llvm.org
Fri May 5 08:51:46 PDT 2017
Sorry about that. Daniel Jasper initialized FrontendSize in r302214.
On Thu, May 4, 2017 at 4:31 PM, Evgenii Stepanov <eugeni.stepanov at gmail.com>
wrote:
> Hi,
>
> MSan bot is not happy with this change:
> http://lab.llvm.org:8011/builders/sanitizer-x86_64-linux-fast/builds/4693
>
>
> On Thu, May 4, 2017 at 11:19 AM, Reid Kleckner via llvm-commits
> <llvm-commits at lists.llvm.org> wrote:
> > Author: rnk
> > Date: Thu May 4 13:19:52 2017
> > New Revision: 302179
> >
> > URL: http://llvm.org/viewvc/llvm-project?rev=302179&view=rev
> > Log:
> > [ms-inline-asm] Use the frontend size only for ambiguous instructions
> >
> > This avoids problems on code like this:
> > char buf[16];
> > __asm {
> > movups xmm0, [buf]
> > mov [buf], eax
> > }
> >
> > The frontend size in this case (1) is wrong, and the register makes the
> > instruction matching unambiguous. There are also enough bytes available
> > that we shouldn't complain to the user that they are potentially using
> > an incorrectly sized instruction to access the variable.
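> >
> > For contrast, a sketch of an ambiguous case (illustrative only, not taken
> > from the committed test) where the frontend size is still consulted:
> > char c;
> > __asm {
> > movzx eax, [c]
> > }
> >
> > Here movzx accepts both an m8 and an m16 source, so the frontend size of
> > 'c' (1) is used to retry the match, and an AOK_SizeDirective rewrite
> > records the size that was borrowed from the frontend.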
> >
> > Supersedes D32636 and D26586 and fixes PR28266
> >
> > Added:
> > llvm/trunk/test/CodeGen/X86/ms-inline-asm-avx512.ll
> > Modified:
> > llvm/trunk/lib/Target/X86/AsmParser/X86AsmParser.cpp
> > llvm/trunk/lib/Target/X86/AsmParser/X86Operand.h
> >
> > Modified: llvm/trunk/lib/Target/X86/AsmParser/X86AsmParser.cpp
> > URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/AsmParser/X86AsmParser.cpp?rev=302179&r1=302178&r2=302179&view=diff
> > ==============================================================================
> > --- llvm/trunk/lib/Target/X86/AsmParser/X86AsmParser.cpp (original)
> > +++ llvm/trunk/lib/Target/X86/AsmParser/X86AsmParser.cpp Thu May 4 13:19:52 2017
> > @@ -776,11 +776,6 @@ private:
> >
> > bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
> >
> > - /// MS-compatibility:
> > - /// Obtain an appropriate size qualifier, when facing its absence,
> > - /// upon AVX512 vector/broadcast memory operand
> > - unsigned AdjustAVX512Mem(unsigned Size, X86Operand* UnsizedMemOpNext);
> > -
> > bool is64BitMode() const {
> > // FIXME: Can tablegen auto-generate this?
> > return getSTI().getFeatureBits()[X86::Mode64Bit];
> > @@ -1206,27 +1201,16 @@ std::unique_ptr<X86Operand> X86AsmParser
> > Identifier, Info.OpDecl);
> > }
> >
> > +
> > // We either have a direct symbol reference, or an offset from a symbol. The
> > // parser always puts the symbol on the LHS, so look there for size
> > // calculation purposes.
> > + unsigned FrontendSize = 0;
> > const MCBinaryExpr *BinOp = dyn_cast<MCBinaryExpr>(Disp);
> > bool IsSymRef =
> > isa<MCSymbolRefExpr>(BinOp ? BinOp->getLHS() : Disp);
> > - if (IsSymRef) {
> > - if (!Size) {
> > - Size = Info.Type * 8; // Size is in terms of bits in this context.
> > - if (Size)
> > - InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
> > - /*Len=*/0, Size);
> > - if (AllowBetterSizeMatch)
> > - // Handle cases where size qualifier is absent, upon an indirect symbol
> > - // reference - e.g. "vaddps zmm1, zmm2, [var]"
> > - // set Size to zero to allow matching mechansim to try and find a better
> > - // size qualifier than our initial guess, based on available variants of
> > - // the given instruction
> > - Size = 0;
> > - }
> > - }
> > + if (IsSymRef && !Size && Info.Type)
> > + FrontendSize = Info.Type * 8; // Size is in terms of bits in this context.
> >
> > // When parsing inline assembly we set the base register to a non-zero value
> > // if we don't know the actual value at this time. This is necessary to
> > @@ -1234,7 +1218,7 @@ std::unique_ptr<X86Operand> X86AsmParser
> > BaseReg = BaseReg ? BaseReg : 1;
> > return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
> > IndexReg, Scale, Start, End, Size, Identifier,
> > - Info.OpDecl);
> > + Info.OpDecl, FrontendSize);
> > }
> >
> > static void
> > @@ -2884,23 +2868,6 @@ bool X86AsmParser::MatchAndEmitATTInstru
> > return true;
> > }
> >
> > -unsigned X86AsmParser::AdjustAVX512Mem(unsigned Size,
> > - X86Operand* UnsizedMemOpNext) {
> > - // Check for the existence of an AVX512 platform
> > - if (!getSTI().getFeatureBits()[X86::FeatureAVX512])
> > - return 0;
> > - // Allow adjusting upon a (x|y|z)mm
> > - if (Size == 512 || Size == 256 || Size == 128)
> > - return Size;
> > - // This is an allegadly broadcasting mem op adjustment,
> > - // allow some more inquiring to validate it
> > - if (Size == 64 || Size == 32)
> > - return UnsizedMemOpNext && UnsizedMemOpNext->isToken() &&
> > - UnsizedMemOpNext->getToken().substr(0, 4).equals("{1to") ? Size : 0;
> > - // Do not allow any other type of adjustments
> > - return 0;
> > -}
> > -
> > bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
> > OperandVector &Operands,
> > MCStreamer &Out,
> > @@ -2920,19 +2887,14 @@ bool X86AsmParser::MatchAndEmitIntelInst
> >
> > // Find one unsized memory operand, if present.
> > X86Operand *UnsizedMemOp = nullptr;
> > - // If unsized memory operand was found - obtain following operand.
> > - // For use in AdjustAVX512Mem
> > - X86Operand *UnsizedMemOpNext = nullptr;
> > for (const auto &Op : Operands) {
> > X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
> > - if (UnsizedMemOp) {
> > - UnsizedMemOpNext = X86Op;
> > + if (X86Op->isMemUnsized()) {
> > + UnsizedMemOp = X86Op;
> > // Have we found an unqualified memory operand,
> > // break. IA allows only one memory operand.
> > break;
> > }
> > - if (X86Op->isMemUnsized())
> > - UnsizedMemOp = X86Op;
> > }
> >
> > // Allow some instructions to have implicitly pointer-sized operands. This is
> > @@ -2978,7 +2940,6 @@ bool X86AsmParser::MatchAndEmitIntelInst
> > // If an unsized memory operand is present, try to match with each memory
> > // operand size. In Intel assembly, the size is not part of the instruction
> > // mnemonic.
> > - unsigned MatchedSize = 0;
> > if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
> > static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
> > for (unsigned Size : MopSizes) {
> > @@ -2993,17 +2954,10 @@ bool X86AsmParser::MatchAndEmitIntelInst
> > // If this returned as a missing feature failure, remember that.
> > if (Match.back() == Match_MissingFeature)
> > ErrorInfoMissingFeature = ErrorInfoIgnore;
> > - if (M == Match_Success)
> > - // MS-compatability:
> > - // Adjust AVX512 vector/broadcast memory operand,
> > - // when facing the absence of a size qualifier.
> > - // Match GCC behavior on respective cases.
> > - MatchedSize = AdjustAVX512Mem(Size, UnsizedMemOpNext);
> > }
> >
> > // Restore the size of the unsized memory operand if we modified it.
> > - if (UnsizedMemOp)
> > - UnsizedMemOp->Mem.Size = 0;
> > + UnsizedMemOp->Mem.Size = 0;
> > }
> >
> > // If we haven't matched anything yet, this is not a basic integer or FPU
> > @@ -3027,20 +2981,30 @@ bool X86AsmParser::MatchAndEmitIntelInst
> > Op.getLocRange(), MatchingInlineAsm);
> > }
> >
> > + unsigned NumSuccessfulMatches =
> > + std::count(std::begin(Match), std::end(Match), Match_Success);
> > +
> > + // If matching was ambiguous and we had size information from the frontend,
> > + // try again with that. This handles cases like "movxz eax, m8/m16".
> > + if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
> > + UnsizedMemOp->getMemFrontendSize()) {
> > + UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
> > + unsigned M = MatchInstruction(
> > + Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax());
> > + if (M == Match_Success)
> > + NumSuccessfulMatches = 1;
> > +
> > + // Add a rewrite that encodes the size information we used from the
> > + // frontend.
> > + InstInfo->AsmRewrites->emplace_back(
> > + AOK_SizeDirective, UnsizedMemOp->getStartLoc(),
> > + /*Len=*/0, UnsizedMemOp->getMemFrontendSize());
> > + }
> > +
> > // If exactly one matched, then we treat that as a successful match (and the
> > // instruction will already have been filled in correctly, since the failing
> > // matches won't have modified it).
> > - unsigned NumSuccessfulMatches =
> > - std::count(std::begin(Match), std::end(Match), Match_Success);
> > if (NumSuccessfulMatches == 1) {
> > - if (MatchedSize && isParsingInlineAsm() && isParsingIntelSyntax())
> > - // MS compatibility -
> > - // Fix the rewrite according to the matched memory size
> > - // MS inline assembly only
> > - for (AsmRewrite &AR : *InstInfo->AsmRewrites)
> > - if ((AR.Loc.getPointer() == UnsizedMemOp->StartLoc.getPointer()) &&
> > - (AR.Kind == AOK_SizeDirective))
> > - AR.Val = MatchedSize;
> > // Some instructions need post-processing to, for example, tweak which
> > // encoding is selected. Loop on it while changes happen so the individual
> > // transformations can chain off each other.
> > @@ -3057,7 +3021,7 @@ bool X86AsmParser::MatchAndEmitIntelInst
> > "multiple matches only possible with unsized memory
> operands");
> > return Error(UnsizedMemOp->getStartLoc(),
> > "ambiguous operand size for instruction '" + Mnemonic
> + "\'",
> > - UnsizedMemOp->getLocRange(), MatchingInlineAsm);
> > + UnsizedMemOp->getLocRange());
> > }
> >
> > // If one instruction matched with a missing feature, report this as a
> >
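> > As an aside, the disambiguation strategy in this file can be illustrated
> > with a small standalone toy (a sketch for illustration only, not LLVM
> > code): try every memory operand size the mnemonic accepts, and consult the
> > frontend's size hint only when more than one size matches.
> >
> > // toy_match.cpp -- hypothetical, self-contained illustration.
> > #include <cstdio>
> > #include <cstring>
> > #include <vector>
> >
> > // Hypothetical instruction table: mnemonic plus the memory operand size
> > // (in bits) each variant accepts.
> > struct Variant { const char *Mnemonic; unsigned MemSize; };
> > static const std::vector<Variant> Table = {
> >     {"movzx", 8}, {"movzx", 16}, // ambiguous without an explicit size
> >     {"movups", 128},             // unambiguous: only one memory form
> > };
> >
> > // Returns the matched size, or 0 if no unique match could be chosen.
> > static unsigned matchSize(const char *Mnemonic, unsigned FrontendSize) {
> >   unsigned LastMatch = 0, NumMatches = 0;
> >   for (const Variant &V : Table)
> >     if (std::strcmp(V.Mnemonic, Mnemonic) == 0) {
> >       LastMatch = V.MemSize;
> >       ++NumMatches;
> >     }
> >   if (NumMatches == 1)
> >     return LastMatch; // unique match: the frontend hint is ignored
> >   if (NumMatches > 1 && FrontendSize != 0)
> >     for (const Variant &V : Table) // retry with the frontend's size
> >       if (std::strcmp(V.Mnemonic, Mnemonic) == 0 && V.MemSize == FrontendSize)
> >         return FrontendSize;
> >   return 0; // still ambiguous (or no match at all)
> > }
> >
> > int main() {
> >   std::printf("movups: %u\n", matchSize("movups", 8)); // 128, hint ignored
> >   std::printf("movzx:  %u\n", matchSize("movzx", 8));  // 8, hint breaks tie
> >   std::printf("movzx:  %u\n", matchSize("movzx", 0));  // 0, ambiguous
> > }
> >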
> > Modified: llvm/trunk/lib/Target/X86/AsmParser/X86Operand.h
> > URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/AsmParser/X86Operand.h?rev=302179&r1=302178&r2=302179&view=diff
> > ==============================================================================
> > --- llvm/trunk/lib/Target/X86/AsmParser/X86Operand.h (original)
> > +++ llvm/trunk/lib/Target/X86/AsmParser/X86Operand.h Thu May 4 13:19:52 2017
> > @@ -62,6 +62,10 @@ struct X86Operand : public MCParsedAsmOp
> > unsigned Scale;
> > unsigned Size;
> > unsigned ModeSize;
> > +
> > + /// If the memory operand is unsized and there are multiple instruction
> > + /// matches, prefer the one with this size.
> > + unsigned FrontendSize;
> > };
> >
> > union {
> > @@ -136,6 +140,10 @@ struct X86Operand : public MCParsedAsmOp
> > assert(Kind == Memory && "Invalid access!");
> > return Mem.ModeSize;
> > }
> > + unsigned getMemFrontendSize() const {
> > + assert(Kind == Memory && "Invalid access!");
> > + return Mem.FrontendSize;
> > + }
> >
> > bool isToken() const override {return Kind == Token; }
> >
> > @@ -532,7 +540,7 @@ struct X86Operand : public MCParsedAsmOp
> > CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
> > unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
> > SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
> > - void *OpDecl = nullptr) {
> > + void *OpDecl = nullptr, unsigned FrontendSize = 0) {
> > // We should never just have a displacement, that should be parsed as an
> > // absolute memory operand.
> > assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
> > @@ -548,6 +556,7 @@ struct X86Operand : public MCParsedAsmOp
> > Res->Mem.Scale = Scale;
> > Res->Mem.Size = Size;
> > Res->Mem.ModeSize = ModeSize;
> > + Res->Mem.FrontendSize = FrontendSize;
> > Res->SymName = SymName;
> > Res->OpDecl = OpDecl;
> > Res->AddressOf = false;
> >
> > Added: llvm/trunk/test/CodeGen/X86/ms-inline-asm-avx512.ll
> > URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ms-inline-asm-avx512.ll?rev=302179&view=auto
> > ==============================================================================
> > --- llvm/trunk/test/CodeGen/X86/ms-inline-asm-avx512.ll (added)
> > +++ llvm/trunk/test/CodeGen/X86/ms-inline-asm-avx512.ll Thu May 4 13:19:52 2017
> > @@ -0,0 +1,24 @@
> > +; RUN: llc < %s | FileCheck %s
> > +
> > +; Generated from clang/test/CodeGen/ms-inline-asm-avx512.c
> > +
> > +target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
> > +target triple = "x86_64-pc-windows-msvc"
> > +
> > +; Function Attrs: noinline nounwind
> > +define void @ignore_fe_size() #0 {
> > +entry:
> > + %c = alloca i8, align 1
> > + call void asm sideeffect inteldialect "vaddps xmm1, xmm2, $1{1to4}\0A\09vaddps xmm1, xmm2, $2\0A\09mov eax, $3\0A\09mov $0, rax", "=*m,*m,*m,*m,~{eax},~{xmm1},~{dirflag},~{fpsr},~{flags}"(i8* %c, i8* %c, i8* %c, i8* %c) #1
> > + ret void
> > +}
> > +
> > +; CHECK-LABEL: ignore_fe_size:
> > +; CHECK: vaddps 7(%rsp){1to4}, %xmm2, %xmm1
> > +; CHECK: vaddps 7(%rsp), %xmm2, %xmm1
> > +; CHECK: movl 7(%rsp), %eax
> > +; CHECK: movq %rax, 7(%rsp)
> > +; CHECK: retq
> > +
> > +attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
> > +attributes #1 = { nounwind }
> >
> >