[llvm] f430c1e - [Tests] Add elementtype attribute to indirect inline asm operands (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 6 05:24:02 PST 2022


Author: Nikita Popov
Date: 2022-01-06T14:23:51+01:00
New Revision: f430c1eb6443282481e72e4fd209c9ada61e7cf1

URL: https://github.com/llvm/llvm-project/commit/f430c1eb6443282481e72e4fd209c9ada61e7cf1
DIFF: https://github.com/llvm/llvm-project/commit/f430c1eb6443282481e72e4fd209c9ada61e7cf1.diff

LOG: [Tests] Add elementtype attribute to indirect inline asm operands (NFC)

This updates LLVM tests for D116531 by adding elementtype attributes
to operands that correspond to indirect asm constraints.

Added: 
    

Modified: 
    llvm/test/Analysis/BasicAA/pr52735.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
    llvm/test/CodeGen/AArch64/arm64-inline-asm.ll
    llvm/test/CodeGen/AArch64/arm64_32.ll
    llvm/test/CodeGen/AArch64/inlineasm-X-constraint.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
    llvm/test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll
    llvm/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
    llvm/test/CodeGen/ARM/2013-11-08-inline-asm-neon-array.ll
    llvm/test/CodeGen/ARM/arm-modifier.ll
    llvm/test/CodeGen/ARM/indirect-reg-input.ll
    llvm/test/CodeGen/ARM/inline-diagnostics.ll
    llvm/test/CodeGen/ARM/inlineasm-64bit.ll
    llvm/test/CodeGen/ARM/inlineasm-X-constraint.ll
    llvm/test/CodeGen/ARM/inlineasm3.ll
    llvm/test/CodeGen/ARM/mult-alt-generic-arm.ll
    llvm/test/CodeGen/ARM/pr25317.ll
    llvm/test/CodeGen/AVR/inline-asm/inline-asm-invalid.ll
    llvm/test/CodeGen/BPF/inline_asm.ll
    llvm/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll
    llvm/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll
    llvm/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
    llvm/test/CodeGen/Generic/2008-02-20-MatchingMem.ll
    llvm/test/CodeGen/Hexagon/inline-asm-error.ll
    llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll
    llvm/test/CodeGen/Hexagon/jump-prob.ll
    llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
    llvm/test/CodeGen/Hexagon/regp-underflow.ll
    llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll
    llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
    llvm/test/CodeGen/Mips/constraint-empty.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll
    llvm/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
    llvm/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll
    llvm/test/CodeGen/PowerPC/2007-09-11-RegCoalescerAssert.ll
    llvm/test/CodeGen/PowerPC/2007-10-16-InlineAsmFrameOffset.ll
    llvm/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
    llvm/test/CodeGen/PowerPC/PR3488.ll
    llvm/test/CodeGen/PowerPC/asm-constraints.ll
    llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond-64bit-only.ll
    llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond.ll
    llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-check-ldarx-opt.ll
    llvm/test/CodeGen/PowerPC/ia-mem-r0.ll
    llvm/test/CodeGen/PowerPC/ia-neg-const.ll
    llvm/test/CodeGen/PowerPC/inlineasm-output-template.ll
    llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
    llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
    llvm/test/CodeGen/PowerPC/subreg-postra-2.ll
    llvm/test/CodeGen/PowerPC/subreg-postra.ll
    llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll
    llvm/test/CodeGen/RISCV/inline-asm.ll
    llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
    llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
    llvm/test/CodeGen/SPARC/inlineasm.ll
    llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
    llvm/test/CodeGen/SystemZ/asm-01.ll
    llvm/test/CodeGen/SystemZ/asm-02.ll
    llvm/test/CodeGen/SystemZ/asm-03.ll
    llvm/test/CodeGen/SystemZ/asm-04.ll
    llvm/test/CodeGen/SystemZ/asm-05.ll
    llvm/test/CodeGen/SystemZ/frame-25.ll
    llvm/test/CodeGen/WebAssembly/inline-asm.ll
    llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll
    llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
    llvm/test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll
    llvm/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll
    llvm/test/CodeGen/X86/2007-11-03-x86-64-q-constraint.ll
    llvm/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll
    llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
    llvm/test/CodeGen/X86/2008-02-25-InlineAsmBug.ll
    llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
    llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
    llvm/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll
    llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
    llvm/test/CodeGen/X86/2009-07-19-AsmExtraOperands.ll
    llvm/test/CodeGen/X86/2009-10-14-LiveVariablesBug.ll
    llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
    llvm/test/CodeGen/X86/2010-07-13-indirectXconstraint.ll
    llvm/test/CodeGen/X86/2010-09-16-asmcrash.ll
    llvm/test/CodeGen/X86/9601.ll
    llvm/test/CodeGen/X86/asm-indirect-mem.ll
    llvm/test/CodeGen/X86/asm-reg-type-mismatch-avx512.ll
    llvm/test/CodeGen/X86/asm-reg-type-mismatch.ll
    llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll
    llvm/test/CodeGen/X86/callbr-asm-kill.mir
    llvm/test/CodeGen/X86/callbr-asm-phi-placement.ll
    llvm/test/CodeGen/X86/callbr-asm-sink.ll
    llvm/test/CodeGen/X86/cas.ll
    llvm/test/CodeGen/X86/complex-asm.ll
    llvm/test/CodeGen/X86/crash.ll
    llvm/test/CodeGen/X86/inline-asm-A-constraint.ll
    llvm/test/CodeGen/X86/inline-asm-R-constraint.ll
    llvm/test/CodeGen/X86/inline-asm-duplicated-constraint.ll
    llvm/test/CodeGen/X86/inline-asm-flag-output.ll
    llvm/test/CodeGen/X86/inline-asm-fpstack.ll
    llvm/test/CodeGen/X86/inline-asm-h.ll
    llvm/test/CodeGen/X86/inline-asm-pic.ll
    llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll
    llvm/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll
    llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll
    llvm/test/CodeGen/X86/inline-asm.ll
    llvm/test/CodeGen/X86/leaf-fp-elim.ll
    llvm/test/CodeGen/X86/ms-inline-asm-PR44272.ll
    llvm/test/CodeGen/X86/ms-inline-asm-array.ll
    llvm/test/CodeGen/X86/ms-inline-asm-avx512.ll
    llvm/test/CodeGen/X86/ms-inline-asm-redundant-clobber.ll
    llvm/test/CodeGen/X86/ms-inline-asm.ll
    llvm/test/CodeGen/X86/mult-alt-generic-i686.ll
    llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll
    llvm/test/CodeGen/X86/mult-alt-x86.ll
    llvm/test/CodeGen/X86/multiple-loop-post-inc.ll
    llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll
    llvm/test/CodeGen/X86/pr3154.ll
    llvm/test/CodeGen/X86/regalloc-advanced-split-cost.ll
    llvm/test/CodeGen/X86/semantic-interposition-asm.ll
    llvm/test/CodeGen/X86/speculative-execution-side-effect-suppression.ll
    llvm/test/CodeGen/X86/win64_regcall.ll
    llvm/test/CodeGen/XCore/inline-asm.ll
    llvm/test/Instrumentation/AddressSanitizer/X86/asm_cpuid.ll
    llvm/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
    llvm/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll
    llvm/test/Instrumentation/AddressSanitizer/localescape.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
    llvm/test/Instrumentation/SanitizerCoverage/seh.ll
    llvm/test/Linker/inlineasm.ll
    llvm/test/MC/AsmParser/pr28805.ll
    llvm/test/Transforms/FunctionImport/Inputs/inlineasm.ll
    llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll
    llvm/test/Transforms/Inline/devirtualize-4.ll
    llvm/test/Transforms/InstCombine/getelementptr.ll
    llvm/test/Verifier/inline-asm-indirect-operand.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/BasicAA/pr52735.ll b/llvm/test/Analysis/BasicAA/pr52735.ll
index 5b78ab595c50b..ba0c8bf97d0b4 100644
--- a/llvm/test/Analysis/BasicAA/pr52735.ll
+++ b/llvm/test/Analysis/BasicAA/pr52735.ll
@@ -17,7 +17,7 @@ define dso_local i32 @foo() {
 entry:
   %v = alloca i32, align 4
   %0 = bitcast i32* %v to i8*
-  callbr void asm "movl $$1, $0", "=*m,X,~{dirflag},~{fpsr},~{flags}"(i32* nonnull %v, i8* blockaddress(@foo, %out))
+  callbr void asm "movl $$1, $0", "=*m,X,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) nonnull %v, i8* blockaddress(@foo, %out))
           to label %asm.fallthrough [label %out]
 
 asm.fallthrough:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index 13a6fe72c2f0c..501ea11dd0542 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -100,7 +100,7 @@ define void @asm_indirect_output() {
 entry:
   %ap = alloca i8*, align 8
   %0 = load i8*, i8** %ap, align 8
-  call void asm sideeffect "", "=*r|m,0,~{memory}"(i8** %ap, i8* %0)
+  call void asm sideeffect "", "=*r|m,0,~{memory}"(i8** elementtype(i8*) %ap, i8* %0)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
index b44a6e5dbd67d..8aedbad871951 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
@@ -208,7 +208,7 @@ define i32 @test_memory_constraint(i32* %a) nounwind {
   ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
   ; CHECK:   $w0 = COPY [[COPY1]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %1 = tail call i32 asm "ldr $0, $1", "=r,*m"(i32* %a)
+  %1 = tail call i32 asm "ldr $0, $1", "=r,*m"(i32* elementtype(i32) %a)
   ret i32 %1
 }
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-inline-asm.ll b/llvm/test/CodeGen/AArch64/arm64-inline-asm.ll
index e556ec7a3e0ef..86e2d2eb607d4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-inline-asm.ll
@@ -97,7 +97,7 @@ define void @t7(i8* %f, i32 %g) nounwind {
 entry:
   %f.addr = alloca i8*, align 8
   store i8* %f, i8** %f.addr, align 8
-  call void asm "str ${1:w}, $0", "=*Q,r"(i8** %f.addr, i32 %g) nounwind
+  call void asm "str ${1:w}, $0", "=*Q,r"(i8** elementtype(i8*) %f.addr, i32 %g) nounwind
   ret void
 }
 
@@ -464,7 +464,7 @@ define void @test_zero_address() {
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ret
 entry:
-  tail call i32 asm sideeffect "ldr $0, $1 \0A", "=r,*Q"(i32* null)
+  tail call i32 asm sideeffect "ldr $0, $1 \0A", "=r,*Q"(i32* elementtype(i32) null)
   ret void
 }
 
@@ -499,7 +499,7 @@ entry:
   %m.addr = alloca <9 x float>, align 16
   %m = load <9 x float>, <9 x float>* %0, align 16
   store <9 x float> %m, <9 x float>* %m.addr, align 16
-  call void asm sideeffect "", "=*r|m,0,~{memory}"(<9 x float>* nonnull %m.addr, <9 x float> %m)
+  call void asm sideeffect "", "=*r|m,0,~{memory}"(<9 x float>* elementtype(<9 x float>) nonnull %m.addr, <9 x float> %m)
   ret void
 }
 
@@ -515,6 +515,6 @@ define void @test_o_output_constraint() {
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %b = alloca i8, align 1
-  call void asm "mov $0, 7", "=*o"(i8* %b)
+  call void asm "mov $0, 7", "=*o"(i8* elementtype(i8) %b)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64_32.ll b/llvm/test/CodeGen/AArch64/arm64_32.ll
index b5c2c6ebb81d7..b452a9ee419f3 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32.ll
@@ -596,7 +596,7 @@ define void @test_asm_memory(i32* %base.addr) {
 ; CHECK: add w[[ADDR:[0-9]+]], w0, #4
 ; CHECK: str wzr, [x[[ADDR]]
   %addr = getelementptr i32, i32* %base.addr, i32 1
-  call void asm sideeffect "str wzr, $0", "*m"(i32* %addr)
+  call void asm sideeffect "str wzr, $0", "*m"(i32* elementtype(i32) %addr)
   ret void
 }
 
@@ -606,7 +606,7 @@ define void @test_unsafe_asm_memory(i64 %val) {
 ; CHECK: str wzr, [x[[ADDR]]]
   %addr_int = trunc i64 %val to i32
   %addr = inttoptr i32 %addr_int to i32*
-  call void asm sideeffect "str wzr, $0", "*m"(i32* %addr)
+  call void asm sideeffect "str wzr, $0", "*m"(i32* elementtype(i32) %addr)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/inlineasm-X-constraint.ll b/llvm/test/CodeGen/AArch64/inlineasm-X-constraint.ll
index 77652cc071ef3..4a226e6d5828c 100644
--- a/llvm/test/CodeGen/AArch64/inlineasm-X-constraint.ll
+++ b/llvm/test/CodeGen/AArch64/inlineasm-X-constraint.ll
@@ -19,7 +19,7 @@ define  double @f1(double %f, i32 %pscr_value) {
 entry:
   %f.addr = alloca double, align 8
   store double %f, double* %f.addr, align 8
-  call void asm sideeffect "msr fpsr,$1", "=*X,r"(double* nonnull %f.addr, i32 %pscr_value) nounwind
+  call void asm sideeffect "msr fpsr,$1", "=*X,r"(double* elementtype(double) nonnull %f.addr, i32 %pscr_value) nounwind
   %0 = load double, double* %f.addr, align 8
   %add = fadd double %0, %0
   ret double %add
@@ -37,7 +37,7 @@ define  i32 @f2(i32 %f, i32 %pscr_value) {
 entry:
   %f.addr = alloca i32, align 4
   store i32 %f, i32* %f.addr, align 4
-  call void asm sideeffect "msr fpsr,$1", "=*X,r"(i32* nonnull %f.addr, i32 %pscr_value) nounwind
+  call void asm sideeffect "msr fpsr,$1", "=*X,r"(i32* elementtype(i32) nonnull %f.addr, i32 %pscr_value) nounwind
   %0 = load i32, i32* %f.addr, align 4
   %mul = mul i32 %0, %0
   ret i32 %mul
@@ -60,7 +60,7 @@ define  <8 x i8> @f3() {
 entry:
   %vector_res_int8x8 = alloca <8 x i8>, align 8
   %0 = getelementptr inbounds <8 x i8>, <8 x i8>* %vector_res_int8x8, i32 0, i32 0
-  call void asm sideeffect "msr fpsr,$1", "=*X,r"(<8 x i8>* nonnull %vector_res_int8x8, i32 undef) nounwind
+  call void asm sideeffect "msr fpsr,$1", "=*X,r"(<8 x i8>* elementtype(<8 x i8>) nonnull %vector_res_int8x8, i32 undef) nounwind
   %1 = load <8 x i8>, <8 x i8>* %vector_res_int8x8, align 8
   %mul = mul <8 x i8> %1, %1
   ret <8 x i8> %mul
@@ -147,6 +147,6 @@ bb:
 ; CHECK: str	[[Dest]], [x0]
 define void @f8(i64 *%x) {
 entry:
-  tail call void asm sideeffect "add $0, x0, x0", "=*X"(i64 *%x)
+  tail call void asm sideeffect "add $0, x0, x0", "=*X"(i64* elementtype(i64) %x)
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
index d31e73e91b607..038b0082c2b17 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
@@ -239,7 +239,7 @@ define i32 @test_memory_constraint(i32 addrspace(3)* %a) nounwind {
   ; CHECK-NEXT:   $vgpr0 = COPY [[COPY2]](s32)
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
   ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
-  %1 = tail call i32 asm "ds_read_b32 $0, $1", "=v,*m"(i32 addrspace(3)* %a)
+  %1 = tail call i32 asm "ds_read_b32 $0, $1", "=v,*m"(i32 addrspace(3)* elementtype(i32) %a)
   ret i32 %1
 }
 

diff --git a/llvm/test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll b/llvm/test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll
index 5ace8992102a3..78e132e1ecce4 100644
--- a/llvm/test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll
+++ b/llvm/test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll
@@ -21,8 +21,8 @@ bb:		; preds = %bb, %entry
 bb59:		; preds = %bb
 	%tmp68 = sdiv i64 0, 0		; <i64> [#uses=1]
 	%tmp6869 = trunc i64 %tmp68 to i32		; <i32> [#uses=2]
-	%tmp81 = call i32 asm "smull $0, $1, $2, $3     \0A\09mov   $0, $0,     lsr $4\0A\09add   $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* null, i32 %tmp6869, i32 13316085, i32 23, i32 9 )		; <i32> [#uses=0]
-	%tmp90 = call i32 asm "smull $0, $1, $2, $3     \0A\09mov   $0, $0,     lsr $4\0A\09add   $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* null, i32 %tmp6869, i32 10568984, i32 23, i32 9 )		; <i32> [#uses=0]
+	%tmp81 = call i32 asm "smull $0, $1, $2, $3     \0A\09mov   $0, $0,     lsr $4\0A\09add   $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* elementtype( i32) null, i32 %tmp6869, i32 13316085, i32 23, i32 9 )		; <i32> [#uses=0]
+	%tmp90 = call i32 asm "smull $0, $1, $2, $3     \0A\09mov   $0, $0,     lsr $4\0A\09add   $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* elementtype( i32) null, i32 %tmp6869, i32 10568984, i32 23, i32 9 )		; <i32> [#uses=0]
 	unreachable
 
 cond_next789:		; preds = %entry

diff --git a/llvm/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll b/llvm/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
index e5c2fb4d67a1b..2672aa317ba1a 100644
--- a/llvm/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
+++ b/llvm/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
@@ -3,6 +3,6 @@
 
 define void @foo(i32 %i, i32* %p) nounwind {
 ;CHECK: swp r2, r0, [r1]
-	%asmtmp = call i32 asm sideeffect "swp $0, $2, $3", "=&r,=*m,r,*m,~{memory}"(i32* %p, i32 %i, i32* %p) nounwind
+	%asmtmp = call i32 asm sideeffect "swp $0, $2, $3", "=&r,=*m,r,*m,~{memory}"(i32* elementtype(i32) %p, i32 %i, i32* elementtype(i32) %p) nounwind
 	ret void
 }

diff --git a/llvm/test/CodeGen/ARM/2013-11-08-inline-asm-neon-array.ll b/llvm/test/CodeGen/ARM/2013-11-08-inline-asm-neon-array.ll
index 5a864772faef5..2c5f00e15ba0f 100644
--- a/llvm/test/CodeGen/ARM/2013-11-08-inline-asm-neon-array.ll
+++ b/llvm/test/CodeGen/ARM/2013-11-08-inline-asm-neon-array.ll
@@ -9,7 +9,7 @@ target triple = "armv7--"
 define void @foo() #0 {
   %vsrc = alloca %struct.uint8x8x4_t, align 8
   %ptr = alloca i8;
-  %1 = call i8* asm sideeffect "vld4.u8 ${0:h}, [$1], $2", "=*w,=r,r,1"(%struct.uint8x8x4_t* %vsrc, i32 0, i8* %ptr)
+  %1 = call i8* asm sideeffect "vld4.u8 ${0:h}, [$1], $2", "=*w,=r,r,1"(%struct.uint8x8x4_t* elementtype(%struct.uint8x8x4_t) %vsrc, i32 0, i8* %ptr)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/ARM/arm-modifier.ll b/llvm/test/CodeGen/ARM/arm-modifier.ll
index 67d468e8abd25..93a16fa26249a 100644
--- a/llvm/test/CodeGen/ARM/arm-modifier.ll
+++ b/llvm/test/CodeGen/ARM/arm-modifier.ll
@@ -35,7 +35,7 @@ define void @f2() nounwind {
 entry:
 ; CHECK: f2
 ; CHECK: ldr r0, [r{{[0-9]+}}]
-call void asm sideeffect "ldr r0, [${0:m}]\0A\09", "*m,~{r0}"(i32** @f2_ptr) nounwind
+call void asm sideeffect "ldr r0, [${0:m}]\0A\09", "*m,~{r0}"(i32** elementtype(i32*) @f2_ptr) nounwind
 ret void
 }
 
@@ -51,9 +51,9 @@ entry:
 ; CHECK: ldm {{lr|r[0-9]+}}, {r{{[0-9]+}}, r{{[0-9]+}}}
 %tmp = load i64, i64* @f3_var, align 4
 %tmp1 = load i64, i64* @f3_var2, align 4
-%0 = call i64 asm sideeffect "stm ${0:m}, ${1:M}\0A\09adds $3, $1\0A\09", "=*m,=r,1,r"(i64** @f3_ptr, i64 %tmp, i64 %tmp1) nounwind
+%0 = call i64 asm sideeffect "stm ${0:m}, ${1:M}\0A\09adds $3, $1\0A\09", "=*m,=r,1,r"(i64** elementtype(i64*) @f3_ptr, i64 %tmp, i64 %tmp1) nounwind
 store i64 %0, i64* @f3_var, align 4
-%1 = call i64 asm sideeffect "ldm ${1:m}, ${0:M}\0A\09", "=r,*m"(i64** @f3_ptr) nounwind
+%1 = call i64 asm sideeffect "ldm ${1:m}, ${0:M}\0A\09", "=r,*m"(i64** elementtype(i64*) @f3_ptr) nounwind
 store i64 %1, i64* @f3_var, align 4
 ret void
 }
@@ -62,7 +62,7 @@ define i64 @f4(i64* %val) nounwind {
 entry:
   ;CHECK-LABEL: f4:
   ;CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
-  %0 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [$1]", "=&r,r,*Qo"(i64* %val, i64* %val) nounwind
+  %0 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [$1]", "=&r,r,*Qo"(i64* %val, i64* elementtype(i64) %val) nounwind
   ret i64 %0
 }
 

diff --git a/llvm/test/CodeGen/ARM/indirect-reg-input.ll b/llvm/test/CodeGen/ARM/indirect-reg-input.ll
index e82e1dee94285..4c2c77d6bcf40 100644
--- a/llvm/test/CodeGen/ARM/indirect-reg-input.ll
+++ b/llvm/test/CodeGen/ARM/indirect-reg-input.ll
@@ -9,6 +9,6 @@
 define void @switch_to_stack(%struct.my_stack* %stack) nounwind {
 entry:
   %regs = getelementptr inbounds %struct.my_stack, %struct.my_stack* %stack, i32 0, i32 0
-  tail call void asm "\0A", "=*r,*0"(%struct.myjmp_buf* %regs, %struct.myjmp_buf* %regs)
+  tail call void asm "\0A", "=*r,*0"(%struct.myjmp_buf* elementtype(%struct.myjmp_buf) %regs, %struct.myjmp_buf* elementtype(%struct.myjmp_buf) %regs)
   ret void
 }

diff --git a/llvm/test/CodeGen/ARM/inline-diagnostics.ll b/llvm/test/CodeGen/ARM/inline-diagnostics.ll
index 3f5b73c5a211c..036053c580eee 100644
--- a/llvm/test/CodeGen/ARM/inline-diagnostics.ll
+++ b/llvm/test/CodeGen/ARM/inline-diagnostics.ll
@@ -7,7 +7,7 @@ define float @inline_func(float %f1, float %f2) #0 {
   %c1 = alloca %struct.float4, align 4
   %c2 = alloca %struct.float4, align 4
   %c3 = alloca %struct.float4, align 4
-  call void asm sideeffect "vmul.f32 ${2:q}, ${0:q}, ${1:q}", "=*r,=*r,*w"(%struct.float4* %c1, %struct.float4* %c2, %struct.float4* %c3) #1, !srcloc !1
+  call void asm sideeffect "vmul.f32 ${2:q}, ${0:q}, ${1:q}", "=*r,=*r,*w"(%struct.float4* elementtype(%struct.float4) %c1, %struct.float4* elementtype(%struct.float4) %c2, %struct.float4* elementtype(%struct.float4) %c3) #1, !srcloc !1
   %x = getelementptr inbounds %struct.float4, %struct.float4* %c3, i32 0, i32 0
   %1 = load float, float* %x, align 4
   ret float %1

diff --git a/llvm/test/CodeGen/ARM/inlineasm-64bit.ll b/llvm/test/CodeGen/ARM/inlineasm-64bit.ll
index 62c71ab375c88..8b68cad328471 100644
--- a/llvm/test/CodeGen/ARM/inlineasm-64bit.ll
+++ b/llvm/test/CodeGen/ARM/inlineasm-64bit.ll
@@ -5,7 +5,7 @@ define void @i64_write(i64* %p, i64 %val) nounwind {
 ; CHECK-LABEL: i64_write:
 ; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
 ; CHECK: strexd [[REG1]], {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
-  %1 = tail call i64 asm sideeffect "1: ldrexd $0, ${0:H}, [$2]\0A strexd $0, $3, ${3:H}, [$2]\0A teq $0, #0\0A bne 1b", "=&r,=*Qo,r,r,~{cc}"(i64* %p, i64* %p, i64 %val) nounwind
+  %1 = tail call i64 asm sideeffect "1: ldrexd $0, ${0:H}, [$2]\0A strexd $0, $3, ${3:H}, [$2]\0A teq $0, #0\0A bne 1b", "=&r,=*Qo,r,r,~{cc}"(i64* elementtype(i64) %p, i64* %p, i64 %val) nounwind
   ret void
 }
 
@@ -49,7 +49,7 @@ define void @foo(i64* %p, i64 %i) nounwind {
 ; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
 ; CHECK: strexd [[REG1]], {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
 ; CHECK: {{pop|pop.w}} {{{r[4-9]|r10|r11}}
-  %1 = tail call { i64, i64 } asm sideeffect "@ atomic64_set\0A1: ldrexd $0, ${0:H}, [$3]\0Aldrexd $1, ${1:H}, [$3]\0A strexd $0, $4, ${4:H}, [$3]\0A teq $0, #0\0A bne 1b", "=&r,=&r,=*Qo,r,r,~{cc}"(i64* %p, i64* %p, i64 %i) nounwind
+  %1 = tail call { i64, i64 } asm sideeffect "@ atomic64_set\0A1: ldrexd $0, ${0:H}, [$3]\0Aldrexd $1, ${1:H}, [$3]\0A strexd $0, $4, ${4:H}, [$3]\0A teq $0, #0\0A bne 1b", "=&r,=&r,=*Qo,r,r,~{cc}"(i64* elementtype(i64) %p, i64* %p, i64 %i) nounwind
   ret void
 }
 
@@ -91,7 +91,7 @@ define i64 @tied_64bit_test(i64 %in) nounwind {
 ; CHECK-LABEL: tied_64bit_test:
 ; CHECK: OUT([[OUTREG:r[0-9]+]]), IN([[OUTREG]])
   %addr = alloca i64
-  call void asm "OUT($0), IN($1)", "=*rm,0"(i64* %addr, i64 %in)
+  call void asm "OUT($0), IN($1)", "=*rm,0"(i64* elementtype(i64) %addr, i64 %in)
   ret i64 %in
 }
 

diff --git a/llvm/test/CodeGen/ARM/inlineasm-X-constraint.ll b/llvm/test/CodeGen/ARM/inlineasm-X-constraint.ll
index d3d53df11b561..40d8062f2124b 100644
--- a/llvm/test/CodeGen/ARM/inlineasm-X-constraint.ll
+++ b/llvm/test/CodeGen/ARM/inlineasm-X-constraint.ll
@@ -19,7 +19,7 @@ define arm_aapcs_vfpcc double @f1(double %f, i32 %pscr_value) {
 entry:
   %f.addr = alloca double, align 8
   store double %f, double* %f.addr, align 8
-  call void asm sideeffect "vmsr fpscr,$1", "=*X,r"(double* nonnull %f.addr, i32 %pscr_value) nounwind
+  call void asm sideeffect "vmsr fpscr,$1", "=*X,r"(double* elementtype(double) nonnull %f.addr, i32 %pscr_value) nounwind
   %0 = load double, double* %f.addr, align 8
   %add = fadd double %0, %0
   ret double %add
@@ -37,7 +37,7 @@ define arm_aapcs_vfpcc i32 @f2(i32 %f, i32 %pscr_value) {
 entry:
   %f.addr = alloca i32, align 4
   store i32 %f, i32* %f.addr, align 4
-  call void asm sideeffect "vmsr fpscr,$1", "=*X,r"(i32* nonnull %f.addr, i32 %pscr_value) nounwind
+  call void asm sideeffect "vmsr fpscr,$1", "=*X,r"(i32* elementtype(i32) nonnull %f.addr, i32 %pscr_value) nounwind
   %0 = load i32, i32* %f.addr, align 4
   %mul = mul i32 %0, %0
   ret i32 %mul
@@ -66,7 +66,7 @@ define arm_aapcs_vfpcc <8 x i8> @f3() {
 entry:
   %vector_res_int8x8 = alloca <8 x i8>, align 8
   %0 = getelementptr inbounds <8 x i8>, <8 x i8>* %vector_res_int8x8, i32 0, i32 0
-  call void asm sideeffect "vmsr fpscr,$1", "=*X,r"(<8 x i8>* nonnull %vector_res_int8x8, i32 undef) nounwind
+  call void asm sideeffect "vmsr fpscr,$1", "=*X,r"(<8 x i8>* elementtype(<8 x i8>) nonnull %vector_res_int8x8, i32 undef) nounwind
   %1 = load <8 x i8>, <8 x i8>* %vector_res_int8x8, align 8
   %mul = mul <8 x i8> %1, %1
   ret <8 x i8> %mul
@@ -152,6 +152,6 @@ bb:
 ; CHECK: str	r{{.*}}, [r0]
 define void @f8(i32 *%x) {
 entry:
-  tail call void asm sideeffect "add $0, r0, r0", "=*X"(i32 *%x)
+  tail call void asm sideeffect "add $0, r0, r0", "=*X"(i32* elementtype(i32) %x)
   ret void
 }

diff --git a/llvm/test/CodeGen/ARM/inlineasm3.ll b/llvm/test/CodeGen/ARM/inlineasm3.ll
index 59706c4e4180e..c318cdfca36ff 100644
--- a/llvm/test/CodeGen/ARM/inlineasm3.ll
+++ b/llvm/test/CodeGen/ARM/inlineasm3.ll
@@ -10,7 +10,7 @@ entry:
 ; CHECK: vmov.32 d30[0],
 ; CHECK: vmov q8, q15
   %tmp = alloca %struct.int32x4_t, align 16
-  call void asm sideeffect "vmov.I64 q15, #0\0Avmov.32 d30[0], $1\0Avmov ${0:q}, q15\0A", "=*w,r,~{d31},~{d30}"(%struct.int32x4_t* %tmp, i32 8192) nounwind
+  call void asm sideeffect "vmov.I64 q15, #0\0Avmov.32 d30[0], $1\0Avmov ${0:q}, q15\0A", "=*w,r,~{d31},~{d30}"(%struct.int32x4_t* elementtype(%struct.int32x4_t) %tmp, i32 8192) nounwind
   ret void
 }
 
@@ -48,7 +48,7 @@ ret i32 0
 @k.2126 = internal unnamed_addr global float 1.000000e+00
 define i32 @t4() nounwind {
 entry:
-call void asm sideeffect "flds s15, $0 \0A", "*^Uv,~{s15}"(float* @k.2126) nounwind
+call void asm sideeffect "flds s15, $0 \0A", "*^Uv,~{s15}"(float* elementtype(float) @k.2126) nounwind
 ret i32 0
 }
 
@@ -56,7 +56,7 @@ ret i32 0
 
 define i32 @t5() nounwind {
 entry:
-call void asm sideeffect "flds s15, $0 \0A", "*^Uvm,~{s15}"(float* @k.2126) nounwind
+call void asm sideeffect "flds s15, $0 \0A", "*^Uvm,~{s15}"(float* elementtype(float) @k.2126) nounwind
 ret i32 0
 }
 
@@ -108,7 +108,7 @@ entry:
 ; CHECK: str r1, [r0]
   %f.addr = alloca i8*, align 4
   store i8* %f, i8** %f.addr, align 4
-  call void asm "str $1, $0", "=*Q,r"(i8** %f.addr, i32 %g) nounwind
+  call void asm "str $1, $0", "=*Q,r"(i8** elementtype(i8*) %f.addr, i32 %g) nounwind
   ret void
 }
 
@@ -129,6 +129,6 @@ define i32 @fn1() local_unnamed_addr nounwind {
 entry:
 ; CHECK: mov [[addr:r[0-9]+]], #5
 ; CHECK: ldrh {{.*}}[[addr]]
-  %0 = tail call i32 asm "ldrh  $0, $1", "=r,*Q"(i8* inttoptr (i32 5 to i8*)) nounwind
+  %0 = tail call i32 asm "ldrh  $0, $1", "=r,*Q"(i8* elementtype(i8) inttoptr (i32 5 to i8*)) nounwind
   ret i32 %0
 }

diff --git a/llvm/test/CodeGen/ARM/mult-alt-generic-arm.ll b/llvm/test/CodeGen/ARM/mult-alt-generic-arm.ll
index 6ee114d4d4a15..2ac2b8eefff98 100644
--- a/llvm/test/CodeGen/ARM/mult-alt-generic-arm.ll
+++ b/llvm/test/CodeGen/ARM/mult-alt-generic-arm.ll
@@ -9,7 +9,7 @@ target triple = "arm--"
 
 define arm_aapcscc void @single_m() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,*m"(i32* @mout0, i32* @min1) nounwind
+  call void asm "foo $1,$0", "=*m,*m"(i32* elementtype(i32) @mout0, i32* elementtype(i32) @min1) nounwind
   ret void
 }
 
@@ -167,7 +167,7 @@ entry:
 define arm_aapcscc void @multi_m() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*m|r,m|r"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 

diff --git a/llvm/test/CodeGen/ARM/pr25317.ll b/llvm/test/CodeGen/ARM/pr25317.ll
index 679b5a0299af7..ca29185672bf0 100644
--- a/llvm/test/CodeGen/ARM/pr25317.ll
+++ b/llvm/test/CodeGen/ARM/pr25317.ll
@@ -6,6 +6,6 @@ target triple = "armv7--linux-gnueabihf"
 ; CHECK-LABEL: f:
 ; CHECK: str lr, [r0]
 define void @f(i32* %p) {
-  call void asm sideeffect "str lr, $0", "=*o"(i32* %p)
+  call void asm sideeffect "str lr, $0", "=*o"(i32* elementtype(i32) %p)
   ret void
 }

diff --git a/llvm/test/CodeGen/AVR/inline-asm/inline-asm-invalid.ll b/llvm/test/CodeGen/AVR/inline-asm/inline-asm-invalid.ll
index 81b0f6e9b2832..8a3dcfc90ab84 100644
--- a/llvm/test/CodeGen/AVR/inline-asm/inline-asm-invalid.ll
+++ b/llvm/test/CodeGen/AVR/inline-asm/inline-asm-invalid.ll
@@ -3,7 +3,7 @@
 define void @foo(i16 %a) {
   ; CHECK: error: invalid operand in inline asm: 'jl ${0:l}'
   %i.addr = alloca i32, align 4
-  call void asm sideeffect "jl ${0:l}", "*m"(i32* %i.addr)
+  call void asm sideeffect "jl ${0:l}", "*m"(i32* elementtype(i32) %i.addr)
 
   ret void
 }

diff --git a/llvm/test/CodeGen/BPF/inline_asm.ll b/llvm/test/CodeGen/BPF/inline_asm.ll
index 7822ac49ed896..138e02db62350 100644
--- a/llvm/test/CodeGen/BPF/inline_asm.ll
+++ b/llvm/test/CodeGen/BPF/inline_asm.ll
@@ -34,9 +34,9 @@ entry:
 ; CHECK: r1 = 4
   %2 = tail call i32 asm sideeffect "$0 = $1 ll", "=r,i"(i64 333333333333) #2
 ; CHECK: r1 = 333333333333 ll
-  %3 = call i32 asm sideeffect "$0 = *(u16 *) $1", "=r,*m"(i32* nonnull %a) #2
+  %3 = call i32 asm sideeffect "$0 = *(u16 *) $1", "=r,*m"(i32* elementtype(i32) nonnull %a) #2
 ; CHECK: r1 = *(u16 *) (r10 - 4)
-  %4 = call i32 asm sideeffect "$0 = *(u32 *) $1", "=r,*m"(i32* getelementptr inbounds ([2 x i32], [2 x i32]* @g, i64 0, i64 1)) #2
+  %4 = call i32 asm sideeffect "$0 = *(u32 *) $1", "=r,*m"(i32* elementtype(i32) getelementptr inbounds ([2 x i32], [2 x i32]* @g, i64 0, i64 1)) #2
 ; CHECK: r1 = g ll
 ; CHECK: r0 = *(u32 *) (r1 + 4)
   call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0) #2

diff --git a/llvm/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll b/llvm/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll
index 0f82ba61b2882..5c0770f1f3245 100644
--- a/llvm/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll
+++ b/llvm/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll
@@ -3,6 +3,6 @@
 ; Test that we can have an "X" output constraint.
 
 define void @test(i16 * %t) {
-        call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"( i16* %t )
+        call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"( i16* elementtype( i16) %t )
         ret void
 }

diff --git a/llvm/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll b/llvm/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll
index 05989a0836cf5..c8cce5c1894e7 100644
--- a/llvm/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll
+++ b/llvm/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll
@@ -6,8 +6,8 @@ define void @test() {
 entry:
         %currfpu = alloca %struct..0anon, align 16              ; <%struct..0anon*> [#uses=2]
         %mxcsr = alloca %struct..0anon, align 16                ; <%struct..0anon*> [#uses=1]
-        call void asm sideeffect "fnstenv $0", "=*m,~{dirflag},~{fpsr},~{flags}"( %struct..0anon* %currfpu )
-        call void asm sideeffect "$0  $1", "=*m,*m,~{dirflag},~{fpsr},~{flags}"( %struct..0anon* %mxcsr, %struct..0anon* %currfpu )
+        call void asm sideeffect "fnstenv $0", "=*m,~{dirflag},~{fpsr},~{flags}"( %struct..0anon* elementtype( %struct..0anon) %currfpu )
+        call void asm sideeffect "$0  $1", "=*m,*m,~{dirflag},~{fpsr},~{flags}"( %struct..0anon* elementtype( %struct..0anon) %mxcsr, %struct..0anon* elementtype(%struct..0anon) %currfpu )
         ret void
 }
 

diff --git a/llvm/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll b/llvm/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
index a9d68ae5444ce..3b13077b37d85 100644
--- a/llvm/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
+++ b/llvm/test/CodeGen/Generic/2007-12-17-InvokeAsm.ll
@@ -2,7 +2,7 @@
 
 define fastcc void @bc__support__high_resolution_time__initialize_clock_rate() personality i32 (...)* @__gxx_personality_v0 {
 entry:
-  invoke void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( i32* null, i32* null )
+  invoke void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( i32* elementtype( i32) null, i32* elementtype(i32) null )
       to label %.noexc unwind label %cleanup144
 
 .noexc:		; preds = %entry

diff --git a/llvm/test/CodeGen/Generic/2008-02-20-MatchingMem.ll b/llvm/test/CodeGen/Generic/2008-02-20-MatchingMem.ll
index 20f3dcc2971d4..12362f1a1ea4b 100644
--- a/llvm/test/CodeGen/Generic/2008-02-20-MatchingMem.ll
+++ b/llvm/test/CodeGen/Generic/2008-02-20-MatchingMem.ll
@@ -3,7 +3,7 @@
 define void @test(i32* %X) nounwind  {
 entry:
 	%tmp1 = getelementptr i32, i32* %X, i32 10		; <i32*> [#uses=2]
-	tail call void asm sideeffect " $0 $1 ", "=*im,*im,~{memory}"( i32* %tmp1, i32* %tmp1 ) nounwind 
+	tail call void asm sideeffect " $0 $1 ", "=*im,*im,~{memory}"( i32* elementtype( i32) %tmp1, i32* elementtype(i32) %tmp1 ) nounwind 
 	ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/inline-asm-error.ll b/llvm/test/CodeGen/Hexagon/inline-asm-error.ll
index 0a1e70830f586..0254836127b70 100644
--- a/llvm/test/CodeGen/Hexagon/inline-asm-error.ll
+++ b/llvm/test/CodeGen/Hexagon/inline-asm-error.ll
@@ -8,7 +8,7 @@
 
 define void @f0(%s.0* byval(%s.0) align 8 %a0) {
 b0:
-  call void asm sideeffect ".weak OFFSET_0;jump ##(OFFSET_0 + 0x14c15f0)", "*r"(%s.0* nonnull %a0), !srcloc !0
+  call void asm sideeffect ".weak OFFSET_0;jump ##(OFFSET_0 + 0x14c15f0)", "*r"(%s.0* elementtype(%s.0) nonnull %a0), !srcloc !0
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll b/llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll
index 302096d49b3e1..76d7ae6ce52ec 100644
--- a/llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll
+++ b/llvm/test/CodeGen/Hexagon/inline-asm-hexagon.ll
@@ -10,7 +10,7 @@ entry:
   %free_list_ptr.addr = alloca i64*, align 4
   store i64* %free_list_ptr, i64** %free_list_ptr.addr, align 4
   %0 = load i32*, i32** %item_ptr, align 4
-  %1 = call { i64, i32 } asm sideeffect "1:     $0 = memd_locked($5)\0A\09       $1 = HIGH(${0:H}) \0A\09       $1 = add($1,#1) \0A\09       memw($6) = LOW(${0:L}) \0A\09       $0 = combine($7,$1) \0A\09       memd_locked($5,p0) = $0 \0A\09       if !p0 jump 1b\0A\09", "=&r,=&r,=*m,=*m,r,r,r,r,*m,*m,~{p0}"(i64** %free_list_ptr.addr, i8** %free_item_ptr, i64 0, i64* %free_list_ptr, i8** %free_item_ptr, i32* %0, i64** %free_list_ptr.addr, i8** %free_item_ptr) nounwind
+  %1 = call { i64, i32 } asm sideeffect "1:     $0 = memd_locked($5)\0A\09       $1 = HIGH(${0:H}) \0A\09       $1 = add($1,#1) \0A\09       memw($6) = LOW(${0:L}) \0A\09       $0 = combine($7,$1) \0A\09       memd_locked($5,p0) = $0 \0A\09       if !p0 jump 1b\0A\09", "=&r,=&r,=*m,=*m,r,r,r,r,*m,*m,~{p0}"(i64** elementtype(i64*) %free_list_ptr.addr, i8** elementtype(i8*) %free_item_ptr, i64 0, i64* %free_list_ptr, i8** %free_item_ptr, i32* %0, i64** elementtype(i64*) %free_list_ptr.addr, i8** elementtype(i8*) %free_item_ptr) nounwind
   %asmresult1 = extractvalue { i64, i32 } %1, 1
   ret i32 %asmresult1
 }

diff --git a/llvm/test/CodeGen/Hexagon/jump-prob.ll b/llvm/test/CodeGen/Hexagon/jump-prob.ll
index a5f420df0df5e..a5805e723cad2 100644
--- a/llvm/test/CodeGen/Hexagon/jump-prob.ll
+++ b/llvm/test/CodeGen/Hexagon/jump-prob.ll
@@ -70,7 +70,7 @@ b4:                                               ; preds = %b2
 b5:                                               ; preds = %b4
   store i8 0, i8* %a2, align 1, !tbaa !0
   %v17 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 0, i32 3, i32 %v2
-  %v18 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* %v17, i32* %v17, i32 1, i32* %v17) #0, !srcloc !5
+  %v18 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) %v17, i32* %v17, i32 1, i32* elementtype(i32) %v17) #0, !srcloc !5
   %v19 = load i32, i32* %v17, align 4, !tbaa !3
   %v20 = icmp eq i32 %v19, 255
   br i1 %v20, label %b6, label %b7
@@ -114,7 +114,7 @@ b8:                                               ; preds = %b4
 b9:                                               ; preds = %b8
   store i8 1, i8* %a2, align 1, !tbaa !0
   %v42 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 1, i32 3, i32 %v2
-  %v43 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* %v42, i32* %v42, i32 1, i32* %v42) #0, !srcloc !5
+  %v43 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) %v42, i32* %v42, i32 1, i32* elementtype(i32) %v42) #0, !srcloc !5
   %v44 = load i32, i32* %v42, align 4, !tbaa !3
   %v45 = icmp eq i32 %v44, 255
   br i1 %v45, label %b10, label %b11

diff --git a/llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll b/llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
index bbd05ae0b5008..33e148d3244fc 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-inline-asm-fixed.ll
@@ -15,7 +15,7 @@ entry:
   %0 = bitcast i32* %arg1 to i8*
   call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #2
   store i32 %status, i32* %arg1, align 4, !tbaa !1
-  %1 = call i32 asm sideeffect "r0 = #$1\0Ar1 = $2\0Ar2 = $4\0Atrap0 (#0)\0A$0 = r0", "=r,i,r,*m,r,~{r0},~{r1},~{r2}"(i32 24, i32* nonnull %arg1, i32* nonnull %arg1, i32 %status) #2, !srcloc !5
+  %1 = call i32 asm sideeffect "r0 = #$1\0Ar1 = $2\0Ar2 = $4\0Atrap0 (#0)\0A$0 = r0", "=r,i,r,*m,r,~{r0},~{r1},~{r2}"(i32 24, i32* nonnull %arg1, i32* elementtype(i32) nonnull %arg1, i32 %status) #2, !srcloc !5
   call void @llvm.lifetime.end.p0i8(i64 4, i8* %0) #2
   ret i32 %1
 }

diff --git a/llvm/test/CodeGen/Hexagon/regp-underflow.ll b/llvm/test/CodeGen/Hexagon/regp-underflow.ll
index 748f98d744f21..a880eab948e19 100644
--- a/llvm/test/CodeGen/Hexagon/regp-underflow.ll
+++ b/llvm/test/CodeGen/Hexagon/regp-underflow.ll
@@ -47,34 +47,34 @@ b4:                                               ; preds = %b3
 b5:                                               ; preds = %b5, %b4
   %v6 = phi i32* [ %v5, %b4 ], [ %v29, %b5 ]
   %v7 = phi i32 [ 0, %b4 ], [ %v27, %b5 ]
-  %v8 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v8 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v8, i32* %v6, align 4, !tbaa !0
   %v9 = getelementptr i32, i32* %v6, i32 1
-  %v10 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v10 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v10, i32* %v9, align 4, !tbaa !0
   %v11 = getelementptr i32, i32* %v6, i32 2
-  %v12 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v12 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v12, i32* %v11, align 4, !tbaa !0
   %v13 = getelementptr i32, i32* %v6, i32 3
-  %v14 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v14 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v14, i32* %v13, align 4, !tbaa !0
   %v15 = getelementptr i32, i32* %v6, i32 4
-  %v16 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v16 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v16, i32* %v15, align 4, !tbaa !0
   %v17 = getelementptr i32, i32* %v6, i32 5
-  %v18 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v18 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v18, i32* %v17, align 4, !tbaa !0
   %v19 = getelementptr i32, i32* %v6, i32 6
-  %v20 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v20 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v20, i32* %v19, align 4, !tbaa !0
   %v21 = getelementptr i32, i32* %v6, i32 7
-  %v22 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v22 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v22, i32* %v21, align 4, !tbaa !0
   %v23 = getelementptr i32, i32* %v6, i32 8
-  %v24 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v24 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v24, i32* %v23, align 4, !tbaa !0
   %v25 = getelementptr i32, i32* %v6, i32 9
-  %v26 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  %v26 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* elementtype(i32) @g3, i32* @g3, i32 1, i32* elementtype(i32) @g3), !srcloc !4
   store i32 %v26, i32* %v25, align 4, !tbaa !0
   %v27 = add nsw i32 %v7, 10
   %v28 = icmp eq i32 %v27, 100

diff --git a/llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll b/llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll
index 91505dca48ddf..ee730a128b085 100644
--- a/llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll
+++ b/llvm/test/CodeGen/MSP430/inline-asm-absolute-addressing.ll
@@ -10,6 +10,6 @@ target triple = "msp430-elf"
 define void @f() {
 entry:
 ; CHECK: mov r1, &256
-  call void asm sideeffect "mov r1, $0", "*m"(i8* inttoptr (i16 256 to i8*))
+  call void asm sideeffect "mov r1, $0", "*m"(i8* elementtype(i8) inttoptr (i16 256 to i8*))
   ret void
 }

diff --git a/llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll b/llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
index f8ae49e197b7b..86809c5bd3d02 100644
--- a/llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
+++ b/llvm/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
@@ -9,7 +9,7 @@ target triple = "msp430"
 
 define void @single_m() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,*m"(i16* @mout0, i16* @min1) nounwind
+  call void asm "foo $1,$0", "=*m,*m"(i16* elementtype(i16) @mout0, i16* elementtype(i16) @min1) nounwind
   ret void
 }
 
@@ -167,7 +167,7 @@ entry:
 define void @multi_m() nounwind {
 entry:
   %tmp = load i16, i16* @min1, align 2
-  call void asm "foo $1,$0", "=*m|r,m|r"(i16* @mout0, i16 %tmp) nounwind
+  call void asm "foo $1,$0", "=*m|r,m|r"(i16* elementtype(i16) @mout0, i16 %tmp) nounwind
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/constraint-empty.ll b/llvm/test/CodeGen/Mips/constraint-empty.ll
index 65b5d436457b9..849320f61a15b 100644
--- a/llvm/test/CodeGen/Mips/constraint-empty.ll
+++ b/llvm/test/CodeGen/Mips/constraint-empty.ll
@@ -5,7 +5,7 @@ define void @foo() {
 entry:
   %s = alloca i32, align 4
   %x = alloca i32, align 4
-  call void asm "", "=*imr,=*m,0,*m,~{$1}"(i32* %x, i32* %s, i32* %x, i32* %s)
+  call void asm "", "=*imr,=*m,0,*m,~{$1}"(i32* elementtype(i32) %x, i32* elementtype(i32) %s, i32* %x, i32* elementtype(i32) %s)
 
 ; CHECK: #APP
 ; CHECK: #NO_APP

diff --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll
index 9c7611ba81d5b..2cd2be128db13 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll
@@ -6,7 +6,7 @@ define void @R(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: R:
 
-  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -20,7 +20,7 @@ define void @R_offset_4(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: R_offset_4:
 
-  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -34,7 +34,7 @@ define void @R_offset_254(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: R_offset_254:
 
-  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 63))
+  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 63))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -48,7 +48,7 @@ define void @R_offset_256(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: R_offset_256:
 
-  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 64))
+  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 64))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: addiu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], 256

diff --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll
index 59778df3b4230..956f3c5288b94 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll
@@ -8,7 +8,7 @@ define void @ZC(i32 *%p) nounwind {
 entry:
   ; ALL-LABEL: ZC:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL: #APP
@@ -22,7 +22,7 @@ define void @ZC_offset_n4(i32 *%p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_n4:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 -1))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 -1))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL: #APP
@@ -36,7 +36,7 @@ define void @ZC_offset_4(i32 *%p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_4:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL: #APP
@@ -50,7 +50,7 @@ define void @ZC_offset_252(i32 *%p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_252:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 63))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 63))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL: #APP
@@ -64,7 +64,7 @@ define void @ZC_offset_256(i32 *%p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_256:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 64))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 64))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
 
@@ -85,7 +85,7 @@ define void @ZC_offset_2044(i32 *%p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_2044:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 511))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 511))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
 
@@ -106,7 +106,7 @@ define void @ZC_offset_2048(i32 *%p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_2048:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 512))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 512))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
 
@@ -128,7 +128,7 @@ define void @ZC_offset_32764(i32 *%p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_32764:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
 
   ; ALL-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
 
@@ -150,7 +150,7 @@ define void @ZC_offset_32768(i32 *%p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_32768:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
 
   ; ALL-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL-DAG: ori $[[T0:[0-9]+]], $zero, 32768

diff --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll
index 2a0904c54c9a0..c9c94deec6e4f 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll
@@ -27,7 +27,7 @@ entry:
 
 ; ALL: #NO_APP
 
-  %1 = call { i32, i32 } asm sideeffect ".set push\0A.set noreorder\0A1:\0All $0, $2\0Aaddu $1, $0, $3\0Asc $1, $2\0Abeqz $1, 1b\0Aaddu $1, $0, $3\0A.set pop\0A", "=&r,=&r,=*^ZC,Ir,*^ZC,~{memory},~{$1}"(i32* %count, i32 10, i32* %count)
+  %1 = call { i32, i32 } asm sideeffect ".set push\0A.set noreorder\0A1:\0All $0, $2\0Aaddu $1, $0, $3\0Asc $1, $2\0Abeqz $1, 1b\0Aaddu $1, $0, $3\0A.set pop\0A", "=&r,=&r,=*^ZC,Ir,*^ZC,~{memory},~{$1}"(i32* elementtype(i32) %count, i32 10, i32* elementtype(i32) %count)
   %asmresult1.i = extractvalue { i32, i32 } %1, 1
   %cmp = icmp ne i32 %asmresult1.i, 10
   %conv = zext i1 %cmp to i32

diff --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll
index 11ef8341cbdb5..d48caaabdbc0c 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll
@@ -6,7 +6,7 @@ define void @m(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: m:
 
-  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -20,7 +20,7 @@ define void @m_offset_4(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: m_offset_4:
 
-  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -34,7 +34,7 @@ define void @m_offset_32764(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: m_offset_32764:
 
-  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -48,7 +48,7 @@ define void @m_offset_32768(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: m_offset_32768:
 
-  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK-DAG: ori $[[T0:[0-9]+]], $zero, 32768

diff --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll
index caf17f9dbf7da..0a6994a715bf8 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll
@@ -19,8 +19,8 @@ entry:
 ; CHECK: sw  $[[T3]], 0($[[T1]])
 
   %l1 = alloca i32, align 4
-  call void asm "sw $1, $0", "=*m,r"(i32* %l1, i32 %x) nounwind
-  %0 = call i32 asm "lw $0, $1", "=r,*m"(i32* %l1) nounwind
+  call void asm "sw $1, $0", "=*m,r"(i32* elementtype(i32) %l1, i32 %x) nounwind
+  %0 = call i32 asm "lw $0, $1", "=r,*m"(i32* elementtype(i32) %l1) nounwind
   store i32 %0, i32* @g1, align 4
   ret i32 %0
 }
@@ -55,13 +55,13 @@ entry:
 define void @main() {
 entry:
 ; Second word:
-  tail call void asm sideeffect "    lw    $0, ${1:D}", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+  tail call void asm sideeffect "    lw    $0, ${1:D}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
 ; First word. Notice, no 'D':
-  tail call void asm sideeffect "    lw    $0, ${1}", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+  tail call void asm sideeffect "    lw    $0, ${1}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
 
 ; High-order part.
-  tail call void asm sideeffect "    lw    $0, ${1:M}", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+  tail call void asm sideeffect "    lw    $0, ${1:M}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
 ; Low-order part.
-  tail call void asm sideeffect "    lw    $0, ${1:L}", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+  tail call void asm sideeffect "    lw    $0, ${1:L}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll
index de677cbcc6813..157bf6875a73a 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll
@@ -6,7 +6,7 @@ define void @o(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: o:
 
-  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -20,7 +20,7 @@ define void @o_offset_4(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: o_offset_4:
 
-  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -34,7 +34,7 @@ define void @o_offset_32764(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: o_offset_32764:
 
-  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -48,7 +48,7 @@ define void @o_offset_32768(i32 *%p) nounwind {
 entry:
   ; CHECK-LABEL: o_offset_32768:
 
-  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK-DAG: ori $[[T0:[0-9]+]], $zero, 32768

diff --git a/llvm/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll b/llvm/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
index 937a64d310726..f80ca2b963169 100644
--- a/llvm/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
+++ b/llvm/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
@@ -22,7 +22,7 @@ target triple = "powerpc-unknown-linux-gnu"
 define i64 @test(i32 %A, i32 %B, i32 %C) nounwind {
 entry:
 	%Y = alloca i32, align 4		; <i32*> [#uses=2]
-	%tmp4 = call i32 asm "subf${3:I}c $1,$4,$3\0A\09subfze $0,$2", "=r,=*&r,r,rI,r"( i32* %Y, i32 %A, i32 %B, i32 %C )		; <i32> [#uses=1]
+	%tmp4 = call i32 asm "subf${3:I}c $1,$4,$3\0A\09subfze $0,$2", "=r,=*&r,r,rI,r"( i32* elementtype( i32) %Y, i32 %A, i32 %B, i32 %C )		; <i32> [#uses=1]
 	%tmp5 = load i32, i32* %Y		; <i32> [#uses=1]
 	%tmp56 = zext i32 %tmp5 to i64		; <i64> [#uses=1]
 	%tmp7 = shl i64 %tmp56, 32		; <i64> [#uses=1]

diff --git a/llvm/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll b/llvm/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll
index c5da2baab643f..d86fc8528cb9f 100644
--- a/llvm/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll
+++ b/llvm/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll
@@ -15,7 +15,7 @@ bb:		; preds = %bb, %entry
 	%tmp8 = getelementptr float, float* %tmp56, i32 %i.035.0		; <float*> [#uses=2]
 	%tmp101112 = bitcast float* %tmp8 to i8*		; <i8*> [#uses=1]
 	%tmp1617 = bitcast float* %tmp8 to i32*		; <i32*> [#uses=1]
-	%tmp21 = tail call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"( i8* %tmp101112, i32 0, i32* %tmp1617 )		; <i32> [#uses=0]
+	%tmp21 = tail call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"(i8* %tmp101112, i32 0, i32* elementtype(i32) %tmp1617 )		; <i32> [#uses=0]
 	%indvar.next = add i32 %i.035.0, 1		; <i32> [#uses=2]
 	%exitcond = icmp eq i32 %indvar.next, 4		; <i1> [#uses=1]
 	br i1 %exitcond, label %return, label %bb

diff --git a/llvm/test/CodeGen/PowerPC/2007-09-11-RegCoalescerAssert.ll b/llvm/test/CodeGen/PowerPC/2007-09-11-RegCoalescerAssert.ll
index 53552323b72ca..b3369ca1b75bc 100644
--- a/llvm/test/CodeGen/PowerPC/2007-09-11-RegCoalescerAssert.ll
+++ b/llvm/test/CodeGen/PowerPC/2007-09-11-RegCoalescerAssert.ll
@@ -4,6 +4,6 @@
 
 define void @_ZN17TCMalloc_SpinLock4LockEv(%struct.TCMalloc_SpinLock* %this) {
 entry:
-        %tmp3 = call i32 asm sideeffect "1: lwarx $0, 0, $1\0A\09stwcx. $2, 0, $1\0A\09bne- 1b\0A\09isync", "=&r,=*r,r,1,~{dirflag},~{fpsr},~{flags},~{memory}"( i32** null, i32 1, i32* null )         ; <i32> [#uses=0]
+        %tmp3 = call i32 asm sideeffect "1: lwarx $0, 0, $1\0A\09stwcx. $2, 0, $1\0A\09bne- 1b\0A\09isync", "=&r,=*r,r,1,~{dirflag},~{fpsr},~{flags},~{memory}"(i32** elementtype(i32*) null, i32 1, i32* null)         ; <i32> [#uses=0]
         unreachable
 }

diff --git a/llvm/test/CodeGen/PowerPC/2007-10-16-InlineAsmFrameOffset.ll b/llvm/test/CodeGen/PowerPC/2007-10-16-InlineAsmFrameOffset.ll
index a69e145f3adf0..b2d65314d395a 100644
--- a/llvm/test/CodeGen/PowerPC/2007-10-16-InlineAsmFrameOffset.ll
+++ b/llvm/test/CodeGen/PowerPC/2007-10-16-InlineAsmFrameOffset.ll
@@ -8,7 +8,7 @@ define i32 @test() {
 entry:
         %data = alloca i32              ; <i32*> [#uses=1]
         %compressedPage = alloca %struct._StorePageMax          ; <%struct._StorePageMax*> [#uses=0]
-        %tmp107 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"( i8* null, i32 0, i32* %data )          ; <i32> [#uses=0]
+        %tmp107 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"( i8* null, i32 0, i32* elementtype(i32) %data )          ; <i32> [#uses=0]
         unreachable
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll b/llvm/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
index 0209808f2f190..3c5ca4000bc66 100644
--- a/llvm/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
+++ b/llvm/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
@@ -17,7 +17,7 @@ entry:
   store i32 %y, i32* %y_addr
   %0 = load i32, i32* %y_addr, align 4                 ; <i32> [#uses=1]
   %1 = getelementptr inbounds [0 x i32], [0 x i32]* @x, i32 0, i32 %0 ; <i32*> [#uses=1]
-  call void asm sideeffect "isync\0A\09eieio\0A\09stw $1, $0", "=*o,r,~{memory}"(i32* %1, i32 0) nounwind
+  call void asm sideeffect "isync\0A\09eieio\0A\09stw $1, $0", "=*o,r,~{memory}"(i32* elementtype(i32) %1, i32 0) nounwind
   br label %return
 
 return:                                           ; preds = %entry

diff  --git a/llvm/test/CodeGen/PowerPC/PR3488.ll b/llvm/test/CodeGen/PowerPC/PR3488.ll
index 69c375a149f36..068b43fd3b0ed 100644
--- a/llvm/test/CodeGen/PowerPC/PR3488.ll
+++ b/llvm/test/CodeGen/PowerPC/PR3488.ll
@@ -98,7 +98,7 @@ module asm "\09.previous\09\09\09\09\09"
 ; Function Attrs: nounwind
 define void @__alloc_pages_nodemask() #0 {
 entry:
-  %0 = call i64 asm sideeffect "ld${1:U}${1:X} $0,$1", "=r,*m"(i64* undef)
+  %0 = call i64 asm sideeffect "ld${1:U}${1:X} $0,$1", "=r,*m"(i64* elementtype(i64) undef)
   br i1 undef, label %do.body.lr.ph.i.i.i, label %zone_page_state_snapshot.exit.i.i
 ; CHECK: ld 3, 0(3)
 

diff  --git a/llvm/test/CodeGen/PowerPC/asm-constraints.ll b/llvm/test/CodeGen/PowerPC/asm-constraints.ll
index da77d1a169792..014b5bc2711d7 100644
--- a/llvm/test/CodeGen/PowerPC/asm-constraints.ll
+++ b/llvm/test/CodeGen/PowerPC/asm-constraints.ll
@@ -32,7 +32,7 @@ entry:
   store i32 %result, i32* %result.addr, align 4
   store i8* %addr, i8** %addr.addr, align 8
   %0 = load i8*, i8** %addr.addr, align 8
-  %1 = call i32 asm sideeffect "ld${1:U}${1:X} $0,$1\0Acmpw $0,$0\0Abne- 1f\0A1: isync\0A", "=r,*m,~{memory},~{cr0}"(i8* %0) #1, !srcloc !0
+  %1 = call i32 asm sideeffect "ld${1:U}${1:X} $0,$1\0Acmpw $0,$0\0Abne- 1f\0A1: isync\0A", "=r,*m,~{memory},~{cr0}"(i8* elementtype(i8) %0) #1, !srcloc !0
   store i32 %1, i32* %result.addr, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond-64bit-only.ll b/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond-64bit-only.ll
index d00901f3ace24..a07cf46ce7522 100644
--- a/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond-64bit-only.ll
+++ b/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond-64bit-only.ll
@@ -15,7 +15,7 @@ define dso_local i64 @test_ldarx(i64* readnone %a) {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    blr
 entry:
-  %0 = call i64 asm sideeffect "ldarx $0, ${1:y}", "=r,*Z,~{memory}"(i64* %a)
+  %0 = call i64 asm sideeffect "ldarx $0, ${1:y}", "=r,*Z,~{memory}"(i64* elementtype(i64) %a)
   ret i64 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond.ll b/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond.ll
index 4db143b17269b..a04ef4b73ccfe 100644
--- a/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond.ll
+++ b/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-LoadReserve-StoreCond.ll
@@ -25,7 +25,7 @@ define dso_local signext i32 @test_lwarx(i32* readnone %a) {
 ; CHECK-32-NEXT:    #NO_APP
 ; CHECK-32-NEXT:    blr
 entry:
-  %0 = call i32 asm sideeffect "lwarx $0, ${1:y}", "=r,*Z,~{memory}"(i32* %a)
+  %0 = call i32 asm sideeffect "lwarx $0, ${1:y}", "=r,*Z,~{memory}"(i32* elementtype(i32) %a)
   ret i32 %0
 }
 
@@ -113,7 +113,7 @@ define dso_local signext i16 @test_lharx(i16* %a) {
 ; CHECK-32-NEXT:    extsh 3, 3
 ; CHECK-32-NEXT:    blr
 entry:
-  %0 = tail call i16 asm sideeffect "lharx $0, ${1:y}", "=r,*Z,~{memory}"(i16* %a)
+  %0 = tail call i16 asm sideeffect "lharx $0, ${1:y}", "=r,*Z,~{memory}"(i16* elementtype(i16) %a)
   ret i16 %0
 }
 
@@ -135,6 +135,6 @@ define dso_local zeroext i8 @test_lbarx(i8* %a) {
 ; CHECK-32-NEXT:    clrlwi 3, 3, 24
 ; CHECK-32-NEXT:    blr
 entry:
-  %0 = tail call i8 asm sideeffect "lbarx $0, ${1:y}", "=r,*Z,~{memory}"(i8* %a)
+  %0 = tail call i8 asm sideeffect "lbarx $0, ${1:y}", "=r,*Z,~{memory}"(i8* elementtype(i8) %a)
   ret i8 %0
 }

diff  --git a/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-check-ldarx-opt.ll b/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-check-ldarx-opt.ll
index ed9bee2003b79..7aeac798f53c2 100644
--- a/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-check-ldarx-opt.ll
+++ b/llvm/test/CodeGen/PowerPC/builtins-ppc-xlcompat-check-ldarx-opt.ll
@@ -65,7 +65,7 @@ entry:
   br label %do.body
 
 do.body:                                          ; preds = %do.body, %entry
-  %1 = call i64 asm sideeffect "ldarx $0, ${1:y}", "=r,*Z,~{memory}"(i64* nonnull %x64)
+  %1 = call i64 asm sideeffect "ldarx $0, ${1:y}", "=r,*Z,~{memory}"(i64* elementtype(i64) nonnull %x64)
   %2 = call i32 @llvm.ppc.stdcx(i8* nonnull %0, i64 0)
   %tobool.not = icmp eq i32 %2, 0
   br i1 %tobool.not, label %do.body, label %do.end

diff  --git a/llvm/test/CodeGen/PowerPC/ia-mem-r0.ll b/llvm/test/CodeGen/PowerPC/ia-mem-r0.ll
index 6928667c6db79..ca981f3f986c7 100644
--- a/llvm/test/CodeGen/PowerPC/ia-mem-r0.ll
+++ b/llvm/test/CodeGen/PowerPC/ia-mem-r0.ll
@@ -14,75 +14,75 @@ define void @test1({ i8*, void (i8*, i8*)* } %fn_arg) {
   %regs = alloca [18 x i64], align 8
   store { i8*, void (i8*, i8*)* } %fn_arg, { i8*, void (i8*, i8*)* }* %fn
   %1 = bitcast [18 x i64]* %regs to i64*
-  call void asm sideeffect "std  14, $0", "=*m"(i64* %1)
+  call void asm sideeffect "std  14, $0", "=*m"(i64* elementtype(i64) %1)
   %2 = bitcast [18 x i64]* %regs to i8*
   %3 = getelementptr i8, i8* %2, i32 8
   %4 = bitcast i8* %3 to i64*
-  call void asm sideeffect "std  15, $0", "=*m"(i64* %4)
+  call void asm sideeffect "std  15, $0", "=*m"(i64* elementtype(i64) %4)
   %5 = bitcast [18 x i64]* %regs to i8*
   %6 = getelementptr i8, i8* %5, i32 16
   %7 = bitcast i8* %6 to i64*
-  call void asm sideeffect "std  16, $0", "=*m"(i64* %7)
+  call void asm sideeffect "std  16, $0", "=*m"(i64* elementtype(i64) %7)
   %8 = bitcast [18 x i64]* %regs to i8*
   %9 = getelementptr i8, i8* %8, i32 24
   %10 = bitcast i8* %9 to i64*
-  call void asm sideeffect "std  17, $0", "=*m"(i64* %10)
+  call void asm sideeffect "std  17, $0", "=*m"(i64* elementtype(i64) %10)
   %11 = bitcast [18 x i64]* %regs to i8*
   %12 = getelementptr i8, i8* %11, i32 32
   %13 = bitcast i8* %12 to i64*
-  call void asm sideeffect "std  18, $0", "=*m"(i64* %13)
+  call void asm sideeffect "std  18, $0", "=*m"(i64* elementtype(i64) %13)
   %14 = bitcast [18 x i64]* %regs to i8*
   %15 = getelementptr i8, i8* %14, i32 40
   %16 = bitcast i8* %15 to i64*
-  call void asm sideeffect "std  19, $0", "=*m"(i64* %16)
+  call void asm sideeffect "std  19, $0", "=*m"(i64* elementtype(i64) %16)
   %17 = bitcast [18 x i64]* %regs to i8*
   %18 = getelementptr i8, i8* %17, i32 48
   %19 = bitcast i8* %18 to i64*
-  call void asm sideeffect "std  20, $0", "=*m"(i64* %19)
+  call void asm sideeffect "std  20, $0", "=*m"(i64* elementtype(i64) %19)
   %20 = bitcast [18 x i64]* %regs to i8*
   %21 = getelementptr i8, i8* %20, i32 56
   %22 = bitcast i8* %21 to i64*
-  call void asm sideeffect "std  21, $0", "=*m"(i64* %22)
+  call void asm sideeffect "std  21, $0", "=*m"(i64* elementtype(i64) %22)
   %23 = bitcast [18 x i64]* %regs to i8*
   %24 = getelementptr i8, i8* %23, i32 64
   %25 = bitcast i8* %24 to i64*
-  call void asm sideeffect "std  22, $0", "=*m"(i64* %25)
+  call void asm sideeffect "std  22, $0", "=*m"(i64* elementtype(i64) %25)
   %26 = bitcast [18 x i64]* %regs to i8*
   %27 = getelementptr i8, i8* %26, i32 72
   %28 = bitcast i8* %27 to i64*
-  call void asm sideeffect "std  23, $0", "=*m"(i64* %28)
+  call void asm sideeffect "std  23, $0", "=*m"(i64* elementtype(i64) %28)
   %29 = bitcast [18 x i64]* %regs to i8*
   %30 = getelementptr i8, i8* %29, i32 80
   %31 = bitcast i8* %30 to i64*
-  call void asm sideeffect "std  24, $0", "=*m"(i64* %31)
+  call void asm sideeffect "std  24, $0", "=*m"(i64* elementtype(i64) %31)
   %32 = bitcast [18 x i64]* %regs to i8*
   %33 = getelementptr i8, i8* %32, i32 88
   %34 = bitcast i8* %33 to i64*
-  call void asm sideeffect "std  25, $0", "=*m"(i64* %34)
+  call void asm sideeffect "std  25, $0", "=*m"(i64* elementtype(i64) %34)
   %35 = bitcast [18 x i64]* %regs to i8*
   %36 = getelementptr i8, i8* %35, i32 96
   %37 = bitcast i8* %36 to i64*
-  call void asm sideeffect "std  26, $0", "=*m"(i64* %37)
+  call void asm sideeffect "std  26, $0", "=*m"(i64* elementtype(i64) %37)
   %38 = bitcast [18 x i64]* %regs to i8*
   %39 = getelementptr i8, i8* %38, i32 104
   %40 = bitcast i8* %39 to i64*
-  call void asm sideeffect "std  27, $0", "=*m"(i64* %40)
+  call void asm sideeffect "std  27, $0", "=*m"(i64* elementtype(i64) %40)
   %41 = bitcast [18 x i64]* %regs to i8*
   %42 = getelementptr i8, i8* %41, i32 112
   %43 = bitcast i8* %42 to i64*
-  call void asm sideeffect "std  28, $0", "=*m"(i64* %43)
+  call void asm sideeffect "std  28, $0", "=*m"(i64* elementtype(i64) %43)
   %44 = bitcast [18 x i64]* %regs to i8*
   %45 = getelementptr i8, i8* %44, i32 120
   %46 = bitcast i8* %45 to i64*
-  call void asm sideeffect "std  29, $0", "=*m"(i64* %46)
+  call void asm sideeffect "std  29, $0", "=*m"(i64* elementtype(i64) %46)
   %47 = bitcast [18 x i64]* %regs to i8*
   %48 = getelementptr i8, i8* %47, i32 128
   %49 = bitcast i8* %48 to i64*
-  call void asm sideeffect "std  30, $0", "=*m"(i64* %49)
+  call void asm sideeffect "std  30, $0", "=*m"(i64* elementtype(i64) %49)
   %50 = bitcast [18 x i64]* %regs to i8*
   %51 = getelementptr i8, i8* %50, i32 136
   %52 = bitcast i8* %51 to i64*
-  call void asm sideeffect "std  31, $0", "=*m"(i64* %52)
+  call void asm sideeffect "std  31, $0", "=*m"(i64* elementtype(i64) %52)
   %53 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 1
   %.funcptr = load void (i8*, i8*)*, void (i8*, i8*)** %53
   %54 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 0

diff  --git a/llvm/test/CodeGen/PowerPC/ia-neg-const.ll b/llvm/test/CodeGen/PowerPC/ia-neg-const.ll
index cbb605965e3d3..5f788016bea45 100644
--- a/llvm/test/CodeGen/PowerPC/ia-neg-const.ll
+++ b/llvm/test/CodeGen/PowerPC/ia-neg-const.ll
@@ -9,7 +9,7 @@ define i64 @main() #0 {
 entry:
   %x = alloca i64, align 8
   store i64 0, i64* %x, align 8
-  %0 = call i64 asm sideeffect "ld       $0,$1\0A\09add${2:I}   $0,$0,$2", "=&r,*m,Ir"(i64* %x, i64 -1) #0
+  %0 = call i64 asm sideeffect "ld       $0,$1\0A\09add${2:I}   $0,$0,$2", "=&r,*m,Ir"(i64* elementtype(i64) %x, i64 -1) #0
   ret i64 %0
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/inlineasm-output-template.ll b/llvm/test/CodeGen/PowerPC/inlineasm-output-template.ll
index d56d77f265856..fbfaf16385ffb 100644
--- a/llvm/test/CodeGen/PowerPC/inlineasm-output-template.ll
+++ b/llvm/test/CodeGen/PowerPC/inlineasm-output-template.ll
@@ -32,6 +32,6 @@ define dso_local i32 @test_inlineasm_c_output_template2() {
 ; PPC64-LABEL: test_inlineasm_L_output_template
 ; PPC64: # 8(4)
 define dso_local void @test_inlineasm_L_output_template(i64 %0, i64* %1) {
-  tail call void asm sideeffect "# ${0:L}", "*m"(i64* %1)
+  tail call void asm sideeffect "# ${0:L}", "*m"(i64* elementtype(i64) %1)
   ret void
 }

diff  --git a/llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll b/llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
index 9bbec5b241883..658376c4a3b25 100644
--- a/llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
+++ b/llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
@@ -9,7 +9,7 @@ target triple = "powerpc--"
 
 define void @single_m() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,*m"(i32* @mout0, i32* @min1) nounwind
+  call void asm "foo $1,$0", "=*m,*m"(i32* elementtype(i32) @mout0, i32* elementtype(i32) @min1) nounwind
   ret void
 }
 
@@ -166,7 +166,7 @@ entry:
 define void @multi_m() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*m|r,m|r"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll b/llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
index 1fa9c0d3c1302..d834b91750917 100644
--- a/llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
+++ b/llvm/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
@@ -9,7 +9,7 @@ target triple = "powerpc64--"
 
 define void @single_m() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,*m"(i32* @mout0, i32* @min1) nounwind
+  call void asm "foo $1,$0", "=*m,*m"(i32* elementtype(i32) @mout0, i32* elementtype(i32) @min1) nounwind
   ret void
 }
 
@@ -166,7 +166,7 @@ entry:
 define void @multi_m() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*m|r,m|r"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/subreg-postra-2.ll b/llvm/test/CodeGen/PowerPC/subreg-postra-2.ll
index 794c9c190d1c6..cfef020c2d351 100644
--- a/llvm/test/CodeGen/PowerPC/subreg-postra-2.ll
+++ b/llvm/test/CodeGen/PowerPC/subreg-postra-2.ll
@@ -21,7 +21,7 @@ wait_on_buffer.exit1319:                          ; preds = %while.body392
   %conv.i.i1322 = and i64 %1, 1
   %lnot404 = icmp eq i64 %conv.i.i1322, 0
   %.err.4 = select i1 %lnot404, i32 -5, i32 %input1
-  %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* %b_state.i.i1314) #0
+  %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* elementtype(i64) %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* elementtype(i64) %b_state.i.i1314) #0
   store i8* %0, i8** %input4, align 8
   %cmp.i1312 = icmp eq i32* %input2, %input3
   br i1 %cmp.i1312, label %while.end418, label %while.body392

diff  --git a/llvm/test/CodeGen/PowerPC/subreg-postra.ll b/llvm/test/CodeGen/PowerPC/subreg-postra.ll
index 38e27c73c907e..9f5f9e70808d5 100644
--- a/llvm/test/CodeGen/PowerPC/subreg-postra.ll
+++ b/llvm/test/CodeGen/PowerPC/subreg-postra.ll
@@ -138,7 +138,7 @@ wait_on_buffer.exit1319:                          ; preds = %while.body392
   %conv.i.i1322 = and i64 %1, 1
   %lnot404 = icmp eq i64 %conv.i.i1322, 0
   %.err.4 = select i1 %lnot404, i32 -5, i32 %inp2
-  %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* %b_state.i.i1314) #1
+  %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* elementtype(i64) %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* elementtype(i64) %b_state.i.i1314) #1
   %prev.i.i.i1325 = getelementptr inbounds i8, i8* %0, i64 8
   %3 = load i32*, i32** %inp4, align 8
   store i32* %3, i32** %inp5, align 8

diff  --git a/llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll b/llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll
index 9418ce58a49a5..1f176f6f36676 100644
--- a/llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll
+++ b/llvm/test/CodeGen/PowerPC/xray-ret-is-terminator.ll
@@ -9,12 +9,12 @@ bb:
   br i1 undef, label %bb1, label %bb8
 
 bb1:
-  %tmp = tail call i64 asm sideeffect "", "=&r,=*m,b,r,*m,~{cc}"(i64* nonnull undef, i64* nonnull undef, i64 1, i64* nonnull undef)
+  %tmp = tail call i64 asm sideeffect "", "=&r,=*m,b,r,*m,~{cc}"(i64* elementtype(i64) nonnull undef, i64* nonnull undef, i64 1, i64* elementtype(i64) nonnull undef)
   %tmp2 = icmp eq i64 %tmp, 0
   br i1 %tmp2, label %bb3, label %bb8
 
 bb3:
-  %tmp4 = tail call i64 asm sideeffect "", "=&r,=*m,b,r,r,*m,~{cc}"(i64* undef, i64* undef, i64 0, i64 undef, i64* undef)
+  %tmp4 = tail call i64 asm sideeffect "", "=&r,=*m,b,r,r,*m,~{cc}"(i64* elementtype(i64) undef, i64* undef, i64 0, i64 undef, i64* elementtype(i64) undef)
   %tmp5 = icmp eq i64 0, %tmp4
   br i1 %tmp5, label %bb6, label %bb3
 

diff  --git a/llvm/test/CodeGen/RISCV/inline-asm.ll b/llvm/test/CodeGen/RISCV/inline-asm.ll
index de5d9a5f22a83..4a96f1dbbd270 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm.ll
@@ -60,7 +60,7 @@ define void @constraint_m(i32* %a) nounwind {
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    #NO_APP
 ; RV64I-NEXT:    ret
-  call void asm sideeffect "", "=*m"(i32* %a)
+  call void asm sideeffect "", "=*m"(i32* elementtype(i32) %a)
   ret void
 }
 
@@ -78,7 +78,7 @@ define i32 @constraint_m2(i32* %a) nounwind {
 ; RV64I-NEXT:    lw a0, 0(a0)
 ; RV64I-NEXT:    #NO_APP
 ; RV64I-NEXT:    ret
-  %1 = tail call i32 asm "lw $0, $1", "=r,*m"(i32* %a)
+  %1 = tail call i32 asm "lw $0, $1", "=r,*m"(i32* elementtype(i32) %a)
   ret i32 %1
 }
 
@@ -170,8 +170,8 @@ define void @constraint_A(i8* %a) nounwind {
 ; RV64I-NEXT:    lb s1, 0(a0)
 ; RV64I-NEXT:    #NO_APP
 ; RV64I-NEXT:    ret
-  tail call void asm sideeffect "sb s0, $0", "*A"(i8* %a)
-  tail call void asm sideeffect "lb s1, $0", "*A"(i8* %a)
+  tail call void asm sideeffect "sb s0, $0", "*A"(i8* elementtype(i8) %a)
+  tail call void asm sideeffect "lb s1, $0", "*A"(i8* elementtype(i8) %a)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll b/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
index 32ebc02ce2013..e6712b5fc9a91 100644
--- a/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
+++ b/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
@@ -7,10 +7,10 @@ target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
 define internal void @set_fast_math() nounwind {
 entry:
 	%fsr = alloca i32		; <i32*> [#uses=4]
-	call void asm "st %fsr, $0", "=*m"(i32* %fsr) nounwind
+	call void asm "st %fsr, $0", "=*m"(i32* elementtype(i32) %fsr) nounwind
 	%0 = load i32, i32* %fsr, align 4		; <i32> [#uses=1]
 	%1 = or i32 %0, 4194304		; <i32> [#uses=1]
 	store i32 %1, i32* %fsr, align 4
-	call void asm sideeffect "ld $0, %fsr", "*m"(i32* %fsr) nounwind
+	call void asm sideeffect "ld $0, %fsr", "*m"(i32* elementtype(i32) %fsr) nounwind
 	ret void
 }

diff  --git a/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll b/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
index 6b738e386c3a4..a779ebb7ec92b 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
@@ -182,7 +182,7 @@ entry:
    %4 = add i128 %2, %3
    %5 = bitcast i8* %sum to i128*
    store i128 %4, i128* %5
-   tail call void asm sideeffect "", "=*m,*m"(i128 *%0, i128* %5) nounwind
+   tail call void asm sideeffect "", "=*m,*m"(i128* elementtype(i128) %0, i128* elementtype(i128) %5) nounwind
    %6 = load i128, i128* %0
    %7 = sub i128 %2, %6
    %8 = bitcast i8* %
diff  to i128*

diff  --git a/llvm/test/CodeGen/SPARC/inlineasm.ll b/llvm/test/CodeGen/SPARC/inlineasm.ll
index ca68a5a2317ca..eb95be2519b49 100644
--- a/llvm/test/CodeGen/SPARC/inlineasm.ll
+++ b/llvm/test/CodeGen/SPARC/inlineasm.ll
@@ -117,7 +117,7 @@ entry:
 ; CHECK: std %l0, [%o0]
 define void @test_addressing_mode_i64(i64* %out) {
 entry:
-  call void asm "std %l0, $0", "=*m,r"(i64* nonnull %out, i64 0)
+  call void asm "std %l0, $0", "=*m,r"(i64* elementtype(i64) nonnull %out, i64 0)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll b/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
index 49496c304d23d..8ee5e409f3cb9 100644
--- a/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
+++ b/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
@@ -9,7 +9,7 @@ target triple = "sparc"
 
 define void @single_m() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,*m"(i32* @mout0, i32* @min1) nounwind
+  call void asm "foo $1,$0", "=*m,*m"(i32* elementtype(i32) @mout0, i32* elementtype(i32) @min1) nounwind
   ret void
 }
 
@@ -167,7 +167,7 @@ entry:
 define void @multi_m() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*m|r,m|r"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/SystemZ/asm-01.ll b/llvm/test/CodeGen/SystemZ/asm-01.ll
index 3dbc8ac268b7b..f003eb2cca134 100644
--- a/llvm/test/CodeGen/SystemZ/asm-01.ll
+++ b/llvm/test/CodeGen/SystemZ/asm-01.ll
@@ -9,7 +9,7 @@ define void @f1(i64 %base) {
 ; CHECK: blah 0(%r2)
 ; CHECK: br %r14
   %addr = inttoptr i64 %base to i64 *
-  call void asm "blah $0", "=*Q" (i64 *%addr)
+  call void asm "blah $0", "=*Q" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -21,7 +21,7 @@ define void @f2(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, -1
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*Q" (i64 *%addr)
+  call void asm "blah $0", "=*Q" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -32,7 +32,7 @@ define void @f3(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, 4095
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*Q" (i64 *%addr)
+  call void asm "blah $0", "=*Q" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -44,7 +44,7 @@ define void @f4(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, 4096
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*Q" (i64 *%addr)
+  call void asm "blah $0", "=*Q" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -56,6 +56,6 @@ define void @f5(i64 %base, i64 %index) {
 ; CHECK: br %r14
   %add = add i64 %base, %index
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*Q" (i64 *%addr)
+  call void asm "blah $0", "=*Q" (i64* elementtype(i64) %addr)
   ret void
 }

diff  --git a/llvm/test/CodeGen/SystemZ/asm-02.ll b/llvm/test/CodeGen/SystemZ/asm-02.ll
index c916d827cde6f..dd92e6b06c280 100644
--- a/llvm/test/CodeGen/SystemZ/asm-02.ll
+++ b/llvm/test/CodeGen/SystemZ/asm-02.ll
@@ -9,7 +9,7 @@ define void @f1(i64 %base) {
 ; CHECK: blah 0(%r2)
 ; CHECK: br %r14
   %addr = inttoptr i64 %base to i64 *
-  call void asm "blah $0", "=*R" (i64 *%addr)
+  call void asm "blah $0", "=*R" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -21,7 +21,7 @@ define void @f2(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, -1
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*R" (i64 *%addr)
+  call void asm "blah $0", "=*R" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -32,7 +32,7 @@ define void @f3(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, 4095
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*R" (i64 *%addr)
+  call void asm "blah $0", "=*R" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -44,7 +44,7 @@ define void @f4(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, 4096
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*R" (i64 *%addr)
+  call void asm "blah $0", "=*R" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -55,7 +55,7 @@ define void @f5(i64 %base, i64 %index) {
 ; CHECK: br %r14
   %add = add i64 %base, %index
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*R" (i64 *%addr)
+  call void asm "blah $0", "=*R" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -67,7 +67,7 @@ define void @f6(i64 %base, i64 %index) {
   %add = add i64 %base, 4095
   %addi = add i64 %add, %index
   %addr = inttoptr i64 %addi to i64 *
-  call void asm "blah $0", "=*R" (i64 *%addr)
+  call void asm "blah $0", "=*R" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -80,6 +80,6 @@ define void @f7(i64 %base, i64 %index) {
   %add = add i64 %base, 4096
   %addi = add i64 %add, %index
   %addr = inttoptr i64 %addi to i64 *
-  call void asm "blah $0", "=*R" (i64 *%addr)
+  call void asm "blah $0", "=*R" (i64* elementtype(i64) %addr)
   ret void
 }

diff  --git a/llvm/test/CodeGen/SystemZ/asm-03.ll b/llvm/test/CodeGen/SystemZ/asm-03.ll
index d4fd564ce1930..eebab2643f285 100644
--- a/llvm/test/CodeGen/SystemZ/asm-03.ll
+++ b/llvm/test/CodeGen/SystemZ/asm-03.ll
@@ -10,7 +10,7 @@ define void @f1(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, -524288
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*S" (i64 *%addr)
+  call void asm "blah $0", "=*S" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -22,7 +22,7 @@ define void @f2(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, -524289
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*S" (i64 *%addr)
+  call void asm "blah $0", "=*S" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -33,7 +33,7 @@ define void @f3(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, 524287
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*S" (i64 *%addr)
+  call void asm "blah $0", "=*S" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -45,6 +45,6 @@ define void @f4(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, 524288
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*S" (i64 *%addr)
+  call void asm "blah $0", "=*S" (i64* elementtype(i64) %addr)
   ret void
 }

diff  --git a/llvm/test/CodeGen/SystemZ/asm-04.ll b/llvm/test/CodeGen/SystemZ/asm-04.ll
index eb91bef83769a..0322fe700060b 100644
--- a/llvm/test/CodeGen/SystemZ/asm-04.ll
+++ b/llvm/test/CodeGen/SystemZ/asm-04.ll
@@ -10,7 +10,7 @@ define void @f1(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, -524288
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*T" (i64 *%addr)
+  call void asm "blah $0", "=*T" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -22,7 +22,7 @@ define void @f2(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, -524289
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*T" (i64 *%addr)
+  call void asm "blah $0", "=*T" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -33,7 +33,7 @@ define void @f3(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, 524287
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*T" (i64 *%addr)
+  call void asm "blah $0", "=*T" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -45,7 +45,7 @@ define void @f4(i64 %base) {
 ; CHECK: br %r14
   %add = add i64 %base, 524288
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*T" (i64 *%addr)
+  call void asm "blah $0", "=*T" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -56,7 +56,7 @@ define void @f5(i64 %base, i64 %index) {
 ; CHECK: br %r14
   %add = add i64 %base, %index
   %addr = inttoptr i64 %add to i64 *
-  call void asm "blah $0", "=*T" (i64 *%addr)
+  call void asm "blah $0", "=*T" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -68,6 +68,6 @@ define void @f6(i64 %base, i64 %index) {
   %add = add i64 %base, 524287
   %addi = add i64 %add, %index
   %addr = inttoptr i64 %addi to i64 *
-  call void asm "blah $0", "=*T" (i64 *%addr)
+  call void asm "blah $0", "=*T" (i64* elementtype(i64) %addr)
   ret void
 }

diff  --git a/llvm/test/CodeGen/SystemZ/asm-05.ll b/llvm/test/CodeGen/SystemZ/asm-05.ll
index 832ae2fba4205..6b8556832cfeb 100644
--- a/llvm/test/CodeGen/SystemZ/asm-05.ll
+++ b/llvm/test/CodeGen/SystemZ/asm-05.ll
@@ -8,7 +8,7 @@ define void @f1(i64 %base) {
 ; CHECK: blah 0(%r2)
 ; CHECK: br %r14
   %addr = inttoptr i64 %base to i64 *
-  call void asm "blah $0", "=*m" (i64 *%addr)
+  call void asm "blah $0", "=*m" (i64* elementtype(i64) %addr)
   ret void
 }
 
@@ -17,6 +17,6 @@ define void @f2(i64 %base) {
 ; CHECK: blah 0(%r2)
 ; CHECK: br %r14
   %addr = inttoptr i64 %base to i64 *
-  call void asm "blah $0", "=*o" (i64 *%addr)
+  call void asm "blah $0", "=*o" (i64* elementtype(i64) %addr)
   ret void
 }

diff  --git a/llvm/test/CodeGen/SystemZ/frame-25.ll b/llvm/test/CodeGen/SystemZ/frame-25.ll
index 64c175bd4ecaa..f6b54c0b25075 100644
--- a/llvm/test/CodeGen/SystemZ/frame-25.ll
+++ b/llvm/test/CodeGen/SystemZ/frame-25.ll
@@ -17,7 +17,7 @@ define void @fun0() #0 {
 entry:
   %b = alloca [16 x i8], align 1
   %0 = getelementptr inbounds [16 x i8], [16 x i8]* %b, i64 0, i64 0
-  call void asm "stcke $0", "=*Q"([16 x i8]* nonnull %b) #2
+  call void asm "stcke $0", "=*Q"([16 x i8]* elementtype([16 x i8]) nonnull %b) #2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/WebAssembly/inline-asm.ll b/llvm/test/CodeGen/WebAssembly/inline-asm.ll
index 7a219febb59d5..038a03a71ddc5 100644
--- a/llvm/test/CodeGen/WebAssembly/inline-asm.ll
+++ b/llvm/test/CodeGen/WebAssembly/inline-asm.ll
@@ -77,7 +77,7 @@ entry:
 ; CHECK-NEXT: local.get $push[[S1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.store16 0($pop[[S0]]), $pop[[S1]]{{$}}
 define void @X_i16(i16 * %t) {
-  call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16* %t)
+  call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16* elementtype(i16) %t)
   ret void
 }
 
@@ -87,7 +87,7 @@ define void @X_i16(i16 * %t) {
 ; CHECK-NEXT: local.get $push[[S1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.store 0($pop[[S0]]), $pop[[S1]]{{$}}
 define void @X_ptr(i16 ** %t) {
-  call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16** %t)
+  call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16** elementtype(i16*) %t)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll b/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll
index 944fae68abc45..727407897fed1 100644
--- a/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll
+++ b/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll
@@ -7,7 +7,7 @@ define i32 @foo(i32 %X) {
 entry:
 	%X_addr = alloca i32		; <i32*> [#uses=3]
 	store i32 %X, i32* %X_addr
-	call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,m,1,~{dirflag},~{fpsr},~{flags}"( i32* @G, i32* %X_addr, i32* @G, i32 %X )
+	call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,m,1,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @G, i32* elementtype(i32) %X_addr, i32* @G, i32 %X )
 	%tmp1 = load i32, i32* %X_addr		; <i32> [#uses=1]
 	ret i32 %tmp1
 }
@@ -16,7 +16,7 @@ define i32 @foo2(i32 %X) {
 entry:
 	%X_addr = alloca i32		; <i32*> [#uses=3]
 	store i32 %X, i32* %X_addr
-	call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,1,~{dirflag},~{fpsr},~{flags}"( i32* @G, i32* %X_addr, i32 %X )
+	call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,1,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @G, i32* elementtype(i32) %X_addr, i32 %X )
 	%tmp1 = load i32, i32* %X_addr		; <i32> [#uses=1]
 	ret i32 %tmp1
 }

diff  --git a/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll b/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
index 60bffdd908c2a..b08b0e573afc9 100644
--- a/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
+++ b/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
@@ -23,7 +23,7 @@ define i1 @_ZNK12QImageWriter8canWriteEv() {
 	%tmp62 = load %struct.QImageWriterPrivate*, %struct.QImageWriterPrivate** null		; <%struct.QImageWriterPrivate*> [#uses=1]
 	%tmp = getelementptr %struct.QImageWriterPrivate, %struct.QImageWriterPrivate* %tmp62, i32 0, i32 9		; <%struct.QString*> [#uses=1]
 	%tmp75 = call %struct.QString* @_ZN7QStringaSERKS_( %struct.QString* %tmp, %struct.QString* null )		; <%struct.QString*> [#uses=0]
-	call void asm sideeffect "lock\0Adecl $0\0Asetne 1", "=*m"( i32* null )
+	call void asm sideeffect "lock\0Adecl $0\0Asetne 1", "=*m"( i32* elementtype( i32) null )
 	ret i1 false
 }
 

diff  --git a/llvm/test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll b/llvm/test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll
index 176b566fe0ae4..0b55af9c5ed96 100644
--- a/llvm/test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll
+++ b/llvm/test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define fastcc i32 @bc_divide(%struct.bc_struct* %n1, %struct.bc_struct* %n2, %struct.bc_struct** %quot, i32 %scale) nounwind {
 entry:
-	%tmp7.i46 = tail call i64 asm sideeffect ".byte 0x0f,0x31", "={dx},=*{ax},~{dirflag},~{fpsr},~{flags}"( i64* getelementptr (%struct.CycleCount, %struct.CycleCount* @_programStartTime, i32 0, i32 1) )		; <i64> [#uses=0]
+	%tmp7.i46 = tail call i64 asm sideeffect ".byte 0x0f,0x31", "={dx},=*{ax},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) getelementptr (%struct.CycleCount, %struct.CycleCount* @_programStartTime, i32 0, i32 1) )		; <i64> [#uses=0]
 	%tmp221 = sdiv i32 10, 0		; <i32> [#uses=1]
 	tail call fastcc void @_one_mult( i8* null, i32 0, i32 %tmp221, i8* null )
 	ret i32 0

diff  --git a/llvm/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll b/llvm/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll
index d02346d103c14..21872289dfade 100644
--- a/llvm/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll
+++ b/llvm/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define i32 @kernel_init(i8* %unused) {
 entry:
-	call void asm sideeffect "foo ${0:q}", "=*imr"( i64* null )
+	call void asm sideeffect "foo ${0:q}", "=*imr"( i64* elementtype( i64) null )
 	ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/X86/2007-11-03-x86-64-q-constraint.ll b/llvm/test/CodeGen/X86/2007-11-03-x86-64-q-constraint.ll
index 27ec8260d06b7..a81e2701e2e92 100644
--- a/llvm/test/CodeGen/X86/2007-11-03-x86-64-q-constraint.ll
+++ b/llvm/test/CodeGen/X86/2007-11-03-x86-64-q-constraint.ll
@@ -4,6 +4,6 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "x86_64-unknown-linux-gnu"
 
 define void @yield() {
-        %tmp9 = call i64 asm sideeffect "xchgb ${0:b},$1", "=q,*m,0,~{dirflag},~{fpsr},~{flags},~{memory}"( i64* null, i64 0 )   ; <i64>
+        %tmp9 = call i64 asm sideeffect "xchgb ${0:b},$1", "=q,*m,0,~{dirflag},~{fpsr},~{flags},~{memory}"( i64* elementtype( i64) null, i64 0 )   ; <i64>
         ret void
 }

diff  --git a/llvm/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll b/llvm/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll
index ec3bce9c666af..8331c34bc32a1 100644
--- a/llvm/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll
+++ b/llvm/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll
@@ -10,7 +10,6 @@ entry:
         %tmp12 = trunc i64 %tmp1 to i32         ; <i32> [#uses=2]
         store i32 %tmp12, i32* %lines, align 4
         %tmp6 = call i64* asm sideeffect "foo",
-"=r,=*r,=*r,r,0,1,2,~{dirflag},~{fpsr},~{flags},~{memory}"( i64** %p2_addr,
-i32* %lines, i64 256, i64* %p1, i64* %p2, i32 %tmp12 )              ; <i64*> [#uses=0]
+"=r,=*r,=*r,r,0,1,2,~{dirflag},~{fpsr},~{flags},~{memory}"(i64** elementtype(i64*) %p2_addr, i32* elementtype(i32) %lines, i64 256, i64* %p1, i64* %p2, i32 %tmp12 )              ; <i64*> [#uses=0]
         ret void
 }

diff  --git a/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll b/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
index c4572d3f64b4f..bac724d9edeaa 100644
--- a/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
+++ b/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
@@ -90,7 +90,7 @@ entry:
 	%tmp32 = load i8*, i8** %src_addr, align 4		; <i8*> [#uses=1]
 	%tmp33 = getelementptr i8, i8* %tmp32, i32 %tmp31		; <i8*> [#uses=1]
 	%tmp3334 = bitcast i8* %tmp33 to i32*		; <i32*> [#uses=1]
-	call void asm sideeffect "movd  $4, %mm0                \0A\09movd  $5, %mm1                \0A\09movd  $6, %mm2                \0A\09movd  $7, %mm3                \0A\09punpcklbw %mm1, %mm0         \0A\09punpcklbw %mm3, %mm2         \0A\09movq %mm0, %mm1              \0A\09punpcklwd %mm2, %mm0         \0A\09punpckhwd %mm2, %mm1         \0A\09movd  %mm0, $0                \0A\09punpckhdq %mm0, %mm0         \0A\09movd  %mm0, $1                \0A\09movd  %mm1, $2                \0A\09punpckhdq %mm1, %mm1         \0A\09movd  %mm1, $3                \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* %tmp12, i32* %tmp56, i32* %tmp1011, i32* %tmp1617, i32* %tmp1920, i32* %tmp2324, i32* %tmp2829, i32* %tmp3334 ) nounwind
+	call void asm sideeffect "movd  $4, %mm0                \0A\09movd  $5, %mm1                \0A\09movd  $6, %mm2                \0A\09movd  $7, %mm3                \0A\09punpcklbw %mm1, %mm0         \0A\09punpcklbw %mm3, %mm2         \0A\09movq %mm0, %mm1              \0A\09punpcklwd %mm2, %mm0         \0A\09punpckhwd %mm2, %mm1         \0A\09movd  %mm0, $0                \0A\09punpckhdq %mm0, %mm0         \0A\09movd  %mm0, $1                \0A\09movd  %mm1, $2                \0A\09punpckhdq %mm1, %mm1         \0A\09movd  %mm1, $3                \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* elementtype( i32) %tmp12, i32* elementtype(i32) %tmp56, i32* elementtype(i32) %tmp1011, i32* elementtype(i32) %tmp1617, i32* elementtype(i32) %tmp1920, i32* elementtype(i32) %tmp2324, i32* elementtype(i32) %tmp2829, i32* elementtype(i32) %tmp3334 ) nounwind
 	br label %return
 
 return:		; preds = %entry

diff  --git a/llvm/test/CodeGen/X86/2008-02-25-InlineAsmBug.ll b/llvm/test/CodeGen/X86/2008-02-25-InlineAsmBug.ll
index 1251e3fda8c7d..822b79d97efe2 100644
--- a/llvm/test/CodeGen/X86/2008-02-25-InlineAsmBug.ll
+++ b/llvm/test/CodeGen/X86/2008-02-25-InlineAsmBug.ll
@@ -25,7 +25,7 @@ bb32:		; preds = %entry
 	%pix_addr.0327.sum337 = add i32 %pix_addr.0327.rec, %tmp154.sum		; <i32> [#uses=1]
 	%tmp191 = getelementptr i8, i8* %pix, i32 %pix_addr.0327.sum337		; <i8*> [#uses=1]
 	%tmp191192 = bitcast i8* %tmp191 to i32*		; <i32*> [#uses=1]
-	call void asm sideeffect "movd  $4, %mm0                \0A\09movd  $5, %mm1                \0A\09movd  $6, %mm2                \0A\09movd  $7, %mm3                \0A\09punpcklbw %mm1, %mm0         \0A\09punpcklbw %mm3, %mm2         \0A\09movq %mm0, %mm1              \0A\09punpcklwd %mm2, %mm0         \0A\09punpckhwd %mm2, %mm1         \0A\09movd  %mm0, $0                \0A\09punpckhdq %mm0, %mm0         \0A\09movd  %mm0, $1                \0A\09movd  %mm1, $2                \0A\09punpckhdq %mm1, %mm1         \0A\09movd  %mm1, $3                \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* null, i32* %tmp164, i32* %tmp169, i32* %tmp174, i32* %tmp177178, i32* %tmp181182, i32* %tmp186187, i32* %tmp191192 ) nounwind 
+	call void asm sideeffect "movd  $4, %mm0                \0A\09movd  $5, %mm1                \0A\09movd  $6, %mm2                \0A\09movd  $7, %mm3                \0A\09punpcklbw %mm1, %mm0         \0A\09punpcklbw %mm3, %mm2         \0A\09movq %mm0, %mm1              \0A\09punpcklwd %mm2, %mm0         \0A\09punpckhwd %mm2, %mm1         \0A\09movd  %mm0, $0                \0A\09punpckhdq %mm0, %mm0         \0A\09movd  %mm0, $1                \0A\09movd  %mm1, $2                \0A\09punpckhdq %mm1, %mm1         \0A\09movd  %mm1, $3                \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* elementtype( i32) null, i32* elementtype(i32) %tmp164, i32* elementtype(i32) %tmp169, i32* elementtype(i32) %tmp174, i32* elementtype(i32) %tmp177178, i32* elementtype(i32) %tmp181182, i32* elementtype(i32) %tmp186187, i32* elementtype(i32) %tmp191192 ) nounwind 
 	unreachable
 
 bb292:		; preds = %entry

diff  --git a/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll b/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
index 1ba17254c3c3c..e77e8456746bf 100644
--- a/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
+++ b/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
@@ -20,8 +20,8 @@ target triple = "i386-apple-darwin8"
 define i32 @aci(i32* %pw) nounwind {
 entry:
 	%0 = load i32, i32* @x, align 4
-	%asmtmp = tail call { i32, i32 } asm "movl $0, %eax\0A\090:\0A\09test %eax, %eax\0A\09je 1f\0A\09movl %eax, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{ax},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind
-	%asmtmp2 = tail call { i32, i32 } asm "movl $0, %edx\0A\090:\0A\09test %edx, %edx\0A\09je 1f\0A\09movl %edx, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{dx},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind
+	%asmtmp = tail call { i32, i32 } asm "movl $0, %eax\0A\090:\0A\09test %eax, %eax\0A\09je 1f\0A\09movl %eax, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{ax},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* elementtype(i32) %pw, i32* elementtype(i32) %pw) nounwind
+	%asmtmp2 = tail call { i32, i32 } asm "movl $0, %edx\0A\090:\0A\09test %edx, %edx\0A\09je 1f\0A\09movl %edx, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{dx},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* elementtype(i32) %pw, i32* elementtype(i32) %pw) nounwind
 	%asmresult2 = extractvalue { i32, i32 } %asmtmp, 0
 	%asmresult3 = extractvalue { i32, i32 } %asmtmp2, 0
 	%1 = add i32 %asmresult2, %asmresult3

diff  --git a/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll b/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
index 51f2dfbfdb582..552adfda42d74 100644
--- a/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
+++ b/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
@@ -38,7 +38,7 @@ entry:
 	%3 = load i32, i32* %0, align 4		; <i32> [#uses=1]
 	%4 = load i32, i32* %1, align 4		; <i32> [#uses=1]
 	%5 = load i8, i8* %state, align 1		; <i8> [#uses=1]
-	%asmtmp = tail call { i32, i32, i32, i32 } asm sideeffect "#1st=$0 $1 2nd=$1 $2 3rd=$2 $4 5th=$4 $3=4th 1$0 1%eXx 5$4 5%eXx 6th=$5", "=&r,=r,=r,=*m,=&q,=*imr,1,2,*m,5,~{dirflag},~{fpsr},~{flags},~{cx}"(i8** %2, i8* %state, i32 %3, i32 %4, i8** %2, i8 %5) nounwind		; <{ i32, i32, i32, i32 }> [#uses=3]
+	%asmtmp = tail call { i32, i32, i32, i32 } asm sideeffect "#1st=$0 $1 2nd=$1 $2 3rd=$2 $4 5th=$4 $3=4th 1$0 1%eXx 5$4 5%eXx 6th=$5", "=&r,=r,=r,=*m,=&q,=*imr,1,2,*m,5,~{dirflag},~{fpsr},~{flags},~{cx}"(i8** elementtype(i8*) %2, i8* elementtype(i8) %state, i32 %3, i32 %4, i8** elementtype(i8*) %2, i8 %5) nounwind		; <{ i32, i32, i32, i32 }> [#uses=3]
 	%asmresult = extractvalue { i32, i32, i32, i32 } %asmtmp, 0		; <i32> [#uses=1]
 	%asmresult1 = extractvalue { i32, i32, i32, i32 } %asmtmp, 1		; <i32> [#uses=1]
 	store i32 %asmresult1, i32* %0

diff  --git a/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll b/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll
index bd1b47a588ef4..f0e922d87c4b7 100644
--- a/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll
+++ b/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll
@@ -6,7 +6,7 @@
 
 define void @bn_sqr_comba8(i32* nocapture %r, i32* %a) nounwind {
 entry:
-	%asmtmp23 = tail call %0 asm "mulq $3", "={ax},={dx},{ax},*m,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 0, i32* %a) nounwind		; <%0> [#uses=1]
+	%asmtmp23 = tail call %0 asm "mulq $3", "={ax},={dx},{ax},*m,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 0, i32* elementtype(i32) %a) nounwind		; <%0> [#uses=1]
 	%asmresult25 = extractvalue %0 %asmtmp23, 1		; <i32> [#uses=1]
 	%asmtmp26 = tail call %0 asm "addq $0,$0; adcq $2,$1", "={dx},=r,imr,0,1,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 0, i32 %asmresult25, i32 0) nounwind		; <%0> [#uses=1]
 	%asmresult27 = extractvalue %0 %asmtmp26, 0		; <i32> [#uses=1]

diff  --git a/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll b/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
index 2615164c6d4bd..b4edc115ec01f 100644
--- a/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
+++ b/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
@@ -36,7 +36,7 @@ entry:
 	%tmp15 = load i32, i32* %arrayidx14		; <i32> [#uses=1]
 	%arrayidx17 = getelementptr i32, i32* %data, i32 8		; <i32*> [#uses=1]
 	%tmp18 = load i32, i32* %arrayidx17		; <i32> [#uses=1]
-	%0 = call i32 asm "cpuid", "={ax},=*{bx},=*{cx},=*{dx},{ax},{bx},{cx},{dx},~{dirflag},~{fpsr},~{flags}"(i32* %arrayidx2, i32* %arrayidx4, i32* %arrayidx6, i32 %tmp9, i32 %tmp12, i32 %tmp15, i32 %tmp18) nounwind		; <i32> [#uses=1]
+	%0 = call i32 asm "cpuid", "={ax},=*{bx},=*{cx},=*{dx},{ax},{bx},{cx},{dx},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %arrayidx2, i32* elementtype(i32) %arrayidx4, i32* elementtype(i32) %arrayidx6, i32 %tmp9, i32 %tmp12, i32 %tmp15, i32 %tmp18) nounwind		; <i32> [#uses=1]
 	store i32 %0, i32* %arrayidx
 	ret void
 }

diff  --git a/llvm/test/CodeGen/X86/2009-07-19-AsmExtraOperands.ll b/llvm/test/CodeGen/X86/2009-07-19-AsmExtraOperands.ll
index 2f5c898ce2217..3faf9b2afabdc 100644
--- a/llvm/test/CodeGen/X86/2009-07-19-AsmExtraOperands.ll
+++ b/llvm/test/CodeGen/X86/2009-07-19-AsmExtraOperands.ll
@@ -3,7 +3,7 @@
 
 define i32 @atomic_cmpset_long(i64* %dst, i64 %exp, i64 %src) nounwind ssp noredzone noimplicitfloat {
 entry:
-	%0 = call i8 asm sideeffect "\09lock ; \09\09\09cmpxchgq $2,$1 ;\09       sete\09$0 ;\09\091:\09\09\09\09# atomic_cmpset_long", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* undef, i64 undef, i64 undef, i64* undef) nounwind		; <i8> [#uses=0]
+	%0 = call i8 asm sideeffect "\09lock ; \09\09\09cmpxchgq $2,$1 ;\09       sete\09$0 ;\09\091:\09\09\09\09# atomic_cmpset_long", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) undef, i64 undef, i64 undef, i64* elementtype(i64) undef) nounwind		; <i8> [#uses=0]
 	br label %1
 
 ; <label>:1		; preds = %entry

diff  --git a/llvm/test/CodeGen/X86/2009-10-14-LiveVariablesBug.ll b/llvm/test/CodeGen/X86/2009-10-14-LiveVariablesBug.ll
index c1aa17ce8700b..68759a9f7b34b 100644
--- a/llvm/test/CodeGen/X86/2009-10-14-LiveVariablesBug.ll
+++ b/llvm/test/CodeGen/X86/2009-10-14-LiveVariablesBug.ll
@@ -10,6 +10,6 @@ entry:
   store i16 %source, i16* %source_addr
   store i32 4, i32* @i, align 4
   call void asm sideeffect "# top of block", "~{dirflag},~{fpsr},~{flags},~{edi},~{esi},~{edx},~{ecx},~{eax}"() nounwind
-  %asmtmp = call i16 asm sideeffect "movw $1, $0", "=={ax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i16* %source_addr) nounwind ; <i16> [#uses=0]
+  %asmtmp = call i16 asm sideeffect "movw $1, $0", "=={ax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i16* elementtype(i16) %source_addr) nounwind ; <i16> [#uses=0]
   ret void
 }

diff  --git a/llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll b/llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
index 023c77aedd4ae..ccbbdce110c6c 100644
--- a/llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
+++ b/llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
@@ -5,7 +5,7 @@ define i32 @foo() nounwind ssp {
 entry:
 ; CHECK: GCROOT %eax
   %_r = alloca i32, align 4                       ; <i32*> [#uses=2]
-  call void asm "/* GCROOT $0 */", "=*imr,0,~{dirflag},~{fpsr},~{flags}"(i32* %_r, i32 4) nounwind
+  call void asm "/* GCROOT $0 */", "=*imr,0,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %_r, i32 4) nounwind
   %0 = load i32, i32* %_r, align 4                     ; <i32> [#uses=1]
   ret i32 %0
 }

diff  --git a/llvm/test/CodeGen/X86/2010-07-13-indirectXconstraint.ll b/llvm/test/CodeGen/X86/2010-07-13-indirectXconstraint.ll
index 306e22ae5f15b..f3491c507c2b2 100644
--- a/llvm/test/CodeGen/X86/2010-07-13-indirectXconstraint.ll
+++ b/llvm/test/CodeGen/X86/2010-07-13-indirectXconstraint.ll
@@ -10,7 +10,7 @@
 define void @weighting_filter() nounwind ssp {
 entry:
 ; CHECK: leaq _gsm_H.1466(%rip),%rax;
-  call void asm sideeffect "leaq $0,%rax;\0A", "*X,~{dirflag},~{fpsr},~{flags},~{memory},~{rax}"(%union..0anon* bitcast (%0* @gsm_H.1466 to %union..0anon*)) nounwind
+  call void asm sideeffect "leaq $0,%rax;\0A", "*X,~{dirflag},~{fpsr},~{flags},~{memory},~{rax}"(%union..0anon* elementtype(%union..0anon) bitcast (%0* @gsm_H.1466 to %union..0anon*)) nounwind
   br label %return
 
 return:                                           ; preds = %entry

diff  --git a/llvm/test/CodeGen/X86/2010-09-16-asmcrash.ll b/llvm/test/CodeGen/X86/2010-09-16-asmcrash.ll
index 81b0fc560ee70..f233564e41532 100644
--- a/llvm/test/CodeGen/X86/2010-09-16-asmcrash.ll
+++ b/llvm/test/CodeGen/X86/2010-09-16-asmcrash.ll
@@ -40,7 +40,7 @@ while.cond:                                       ; preds = %while.body, %while.
   br i1 undef, label %while.body, label %while.end
 
 while.body:                                       ; preds = %while.cond
-  %0 = call i8 asm sideeffect "\09lock ; \09\09\09cmpxchgl $2,$1 ;\09       sete\09$0 ;\09\091:\09\09\09\09# atomic_cmpset_int", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %tmp4, i32 undef, i32 undef, i32* %tmp4) nounwind, !srcloc !0
+  %0 = call i8 asm sideeffect "\09lock ; \09\09\09cmpxchgl $2,$1 ;\09       sete\09$0 ;\09\091:\09\09\09\09# atomic_cmpset_int", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %tmp4, i32 undef, i32 undef, i32* elementtype(i32) %tmp4) nounwind, !srcloc !0
   br i1 undef, label %while.cond, label %return
 
 while.end:                                        ; preds = %while.cond

diff  --git a/llvm/test/CodeGen/X86/9601.ll b/llvm/test/CodeGen/X86/9601.ll
index cd65a030701ac..e54da99007eae 100644
--- a/llvm/test/CodeGen/X86/9601.ll
+++ b/llvm/test/CodeGen/X86/9601.ll
@@ -7,6 +7,6 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define void @test() {
 entry:
-  %0 = call float asm sideeffect "xchg $0, $1", "=r,*m,0,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* undef, float 2.000000e+00) nounwind
+  %0 = call float asm sideeffect "xchg $0, $1", "=r,*m,0,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) undef, float 2.000000e+00) nounwind
   unreachable
 }

diff  --git a/llvm/test/CodeGen/X86/asm-indirect-mem.ll b/llvm/test/CodeGen/X86/asm-indirect-mem.ll
index c57aa995e8a8d..03d979f64ad02 100644
--- a/llvm/test/CodeGen/X86/asm-indirect-mem.ll
+++ b/llvm/test/CodeGen/X86/asm-indirect-mem.ll
@@ -5,7 +5,7 @@ target triple = "i386-apple-darwin8"
 
 define void @atomic_store_rel_int(i32* %p, i32 %v) nounwind  {
 entry:
-	%asmtmp = tail call i32 asm sideeffect "xchgl $1,$0", "=*m,=r,*m,1,~{dirflag},~{fpsr},~{flags}"( i32* %p, i32* %p, i32 %v ) nounwind 		; <i32> [#uses=0]
+	%asmtmp = tail call i32 asm sideeffect "xchgl $1,$0", "=*m,=r,*m,1,~{dirflag},~{fpsr},~{flags}"( i32* elementtype( i32) %p, i32* elementtype(i32) %p, i32 %v ) nounwind 		; <i32> [#uses=0]
 	ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/asm-reg-type-mismatch-avx512.ll b/llvm/test/CodeGen/X86/asm-reg-type-mismatch-avx512.ll
index 1c5e1ce8a6b99..5bb7d5561077a 100644
--- a/llvm/test/CodeGen/X86/asm-reg-type-mismatch-avx512.ll
+++ b/llvm/test/CodeGen/X86/asm-reg-type-mismatch-avx512.ll
@@ -10,6 +10,6 @@ define i64 @test1() nounwind {
 ; CHECK-NEXT:    vmovq %xmm16, %rax
 ; CHECK-NEXT:    retq
 entry:
-  %0 = tail call i64 asm sideeffect "vmovq $1, $0", "={xmm16},*m,~{dirflag},~{fpsr},~{flags}"(i64* null) nounwind
+  %0 = tail call i64 asm sideeffect "vmovq $1, $0", "={xmm16},*m,~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) null) nounwind
   ret i64 %0
 }

diff  --git a/llvm/test/CodeGen/X86/asm-reg-type-mismatch.ll b/llvm/test/CodeGen/X86/asm-reg-type-mismatch.ll
index ced074015acef..108a1c14689ae 100644
--- a/llvm/test/CodeGen/X86/asm-reg-type-mismatch.ll
+++ b/llvm/test/CodeGen/X86/asm-reg-type-mismatch.ll
@@ -23,7 +23,7 @@ entry:
 
 define i64 @test2() nounwind {
 entry:
-  %0 = tail call i64 asm sideeffect "movq $1, $0", "={xmm7},*m,~{dirflag},~{fpsr},~{flags}"(i64* null) nounwind
+  %0 = tail call i64 asm sideeffect "movq $1, $0", "={xmm7},*m,~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) null) nounwind
   ret i64 %0
   ; CHECK: test2
 	; CHECK: movq {{.*}}, %xmm7

diff  --git a/llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll b/llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll
index c4cb4ada5dc7c..9e9acc71d5e44 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll
@@ -39,7 +39,7 @@ define i64 @early_ioremap_pmd(i64 %addr) {
 ; CHECK-NEXT:    .zero (-(((.Ltmp5-.Ltmp6)-(.Ltmp4-.Ltmp2))>0))*((.Ltmp5-.Ltmp6)-(.Ltmp4-.Ltmp2)),144
 ; CHECK-NEXT:  .Ltmp7:
 entry:
-  %0 = tail call i64 asm sideeffect "mov %cr3,$0\0A\09", "=r,=*m,~{dirflag},~{fpsr},~{flags}"(i64* nonnull @__force_order)
+  %0 = tail call i64 asm sideeffect "mov %cr3,$0\0A\09", "=r,=*m,~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) nonnull @__force_order)
   %and.i = and i64 %0, 9223372036854771712
   %1 = load i64, i64* @page_offset_base, align 8
   %add = add i64 %and.i, %1
@@ -49,7 +49,7 @@ entry:
   %shr = lshr i64 %addr, %sh_prom
   %and = and i64 %shr, 511
   %arrayidx = getelementptr %struct.pgd_t, %struct.pgd_t* %2, i64 %and
-  callbr void asm sideeffect "1: jmp 6f\0A2:\0A.skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90\0A3:\0A.section .altinstructions,\22a\22\0A .long 1b - .\0A .long 4f - .\0A .word ${1:P}\0A .byte 3b - 1b\0A .byte 5f - 4f\0A .byte 3b - 2b\0A.previous\0A.section .altinstr_replacement,\22ax\22\0A4: jmp ${5:l}\0A5:\0A.previous\0A.section .altinstructions,\22a\22\0A .long 1b - .\0A .long 0\0A .word ${0:P}\0A .byte 3b - 1b\0A .byte 0\0A .byte 0\0A.previous\0A.section .altinstr_aux,\22ax\22\0A6:\0A testb $2,$3\0A jnz ${4:l}\0A jmp ${5:l}\0A.previous\0A", "i,i,i,*m,X,X,~{dirflag},~{fpsr},~{flags}"(i16 528, i32 117, i32 1, i8* getelementptr inbounds (%struct.cpuinfo_x86, %struct.cpuinfo_x86* @boot_cpu_data, i64 0, i32 12, i32 1, i64 58), i8* blockaddress(@early_ioremap_pmd, %if.end.i), i8* blockaddress(@early_ioremap_pmd, %if.then.i))
+  callbr void asm sideeffect "1: jmp 6f\0A2:\0A.skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90\0A3:\0A.section .altinstructions,\22a\22\0A .long 1b - .\0A .long 4f - .\0A .word ${1:P}\0A .byte 3b - 1b\0A .byte 5f - 4f\0A .byte 3b - 2b\0A.previous\0A.section .altinstr_replacement,\22ax\22\0A4: jmp ${5:l}\0A5:\0A.previous\0A.section .altinstructions,\22a\22\0A .long 1b - .\0A .long 0\0A .word ${0:P}\0A .byte 3b - 1b\0A .byte 0\0A .byte 0\0A.previous\0A.section .altinstr_aux,\22ax\22\0A6:\0A testb $2,$3\0A jnz ${4:l}\0A jmp ${5:l}\0A.previous\0A", "i,i,i,*m,X,X,~{dirflag},~{fpsr},~{flags}"(i16 528, i32 117, i32 1, i8* elementtype(i8) getelementptr inbounds (%struct.cpuinfo_x86, %struct.cpuinfo_x86* @boot_cpu_data, i64 0, i32 12, i32 1, i64 58), i8* blockaddress(@early_ioremap_pmd, %if.end.i), i8* blockaddress(@early_ioremap_pmd, %if.then.i))
           to label %_static_cpu_has.exit.thread.i [label %if.end.i, label %if.then.i]
 
 _static_cpu_has.exit.thread.i:                    ; preds = %entry

diff  --git a/llvm/test/CodeGen/X86/callbr-asm-kill.mir b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
index 58b34fc89d1ee..969ca6950e872 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-kill.mir
+++ b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
@@ -21,7 +21,7 @@
     %a = phi i8* [ %arg, %entry ], [ %b, %loop ]
     %b = load i8*, i8** %mem, align 8
     call void @foo(i8* %a)
-    callbr void asm sideeffect "", "*m,X"(i8* %b, i8* blockaddress(@test1, %loop))
+    callbr void asm sideeffect "", "*m,X"(i8* elementtype(i8) %b, i8* blockaddress(@test1, %loop))
             to label %end [label %loop]
 
   end:                                              ; preds = %loop

diff  --git a/llvm/test/CodeGen/X86/callbr-asm-phi-placement.ll b/llvm/test/CodeGen/X86/callbr-asm-phi-placement.ll
index e12c4f1dfb26e..2c2d9f9403cb7 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-phi-placement.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-phi-placement.ll
@@ -36,7 +36,7 @@ loop:
   %a = phi i8* [ %arg, %entry ], [ %b, %loop ]
   %b = load i8*, i8** %mem, align 8
   call void @foo(i8* %a)
-  callbr void asm sideeffect "", "*m,X"(i8* %b, i8* blockaddress(@test1, %loop))
+  callbr void asm sideeffect "", "*m,X"(i8* elementtype(i8) %b, i8* blockaddress(@test1, %loop))
           to label %end [label %loop]
 
 end:

diff  --git a/llvm/test/CodeGen/X86/callbr-asm-sink.ll b/llvm/test/CodeGen/X86/callbr-asm-sink.ll
index 758ac37f8ba43..d1a9dfb8a296d 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-sink.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-sink.ll
@@ -21,7 +21,7 @@ define void @klist_dec_and_del(%struct1*) {
 ; CHECK-NEXT:    movq $0, -8(%rax)
 ; CHECK-NEXT:    retq
   %2 = getelementptr inbounds %struct1, %struct1* %0, i64 0, i32 1
-  callbr void asm sideeffect "# $0 $1", "*m,X,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %2, i8* blockaddress(@klist_dec_and_del, %3))
+  callbr void asm sideeffect "# $0 $1", "*m,X,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %2, i8* blockaddress(@klist_dec_and_del, %3))
           to label %6 [label %3]
 
 3:

diff  --git a/llvm/test/CodeGen/X86/cas.ll b/llvm/test/CodeGen/X86/cas.ll
index 7807bb97f5b90..a38e7196b9e42 100644
--- a/llvm/test/CodeGen/X86/cas.ll
+++ b/llvm/test/CodeGen/X86/cas.ll
@@ -29,7 +29,7 @@ entry:
   %2 = load float, float* %1, align 4
   %3 = load float, float* %desired.addr, align 4
   %4 = load float*, float** %p.addr, align 8
-  %5 = call i8 asm sideeffect "lock; cmpxchg $3, $4; mov $2, $1; sete $0", "={ax},=*rm,{ax},q,*m,~{memory},~{cc},~{dirflag},~{fpsr},~{flags}"(float* %0, float %2, float %3, float* %4) nounwind
+  %5 = call i8 asm sideeffect "lock; cmpxchg $3, $4; mov $2, $1; sete $0", "={ax},=*rm,{ax},q,*m,~{memory},~{cc},~{dirflag},~{fpsr},~{flags}"(float* elementtype(float) %0, float %2, float %3, float* elementtype(float) %4) nounwind
   store i8 %5, i8* %success, align 1
   %6 = load i8, i8* %success, align 1
   %tobool = trunc i8 %6 to i1
@@ -59,7 +59,7 @@ entry:
   %3 = load i8, i8* %desired.addr, align 1
   %tobool1 = trunc i8 %3 to i1
   %4 = load i8*, i8** %p.addr, align 8
-  %5 = call i8 asm sideeffect "lock; cmpxchg $3, $4; mov $2, $1; sete $0", "={ax},=*rm,{ax},q,*m,~{memory},~{cc},~{dirflag},~{fpsr},~{flags}"(i8* %0, i1 %tobool, i1 %tobool1, i8* %4) nounwind
+  %5 = call i8 asm sideeffect "lock; cmpxchg $3, $4; mov $2, $1; sete $0", "={ax},=*rm,{ax},q,*m,~{memory},~{cc},~{dirflag},~{fpsr},~{flags}"(i8* elementtype(i8) %0, i1 %tobool, i1 %tobool1, i8* elementtype(i8) %4) nounwind
   store i8 %5, i8* %success, align 1
   %6 = load i8, i8* %success, align 1
   %tobool2 = trunc i8 %6 to i1

diff  --git a/llvm/test/CodeGen/X86/complex-asm.ll b/llvm/test/CodeGen/X86/complex-asm.ll
index d7b5879309dac..ba5e719d68ae6 100644
--- a/llvm/test/CodeGen/X86/complex-asm.ll
+++ b/llvm/test/CodeGen/X86/complex-asm.ll
@@ -6,7 +6,7 @@
 define %0 @f() nounwind ssp {
 entry:
   %v = alloca %0, align 8
-  call void asm sideeffect "", "=*r,r,r,0,~{dirflag},~{fpsr},~{flags}"(%0* %v, i32 0, i32 1, i128 undef) nounwind
+  call void asm sideeffect "", "=*r,r,r,0,~{dirflag},~{fpsr},~{flags}"(%0* elementtype(%0) %v, i32 0, i32 1, i128 undef) nounwind
   %0 = getelementptr inbounds %0, %0* %v, i64 0, i32 0
   %1 = load i64, i64* %0, align 8
   %2 = getelementptr inbounds %0, %0* %v, i64 0, i32 1

diff  --git a/llvm/test/CodeGen/X86/crash.ll b/llvm/test/CodeGen/X86/crash.ll
index b46856374fb1b..29042d9d17b6b 100644
--- a/llvm/test/CodeGen/X86/crash.ll
+++ b/llvm/test/CodeGen/X86/crash.ll
@@ -384,12 +384,12 @@ entry:
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:
-  %t1 = tail call i32 asm sideeffect "bar", "=r,=*m,~{dirflag},~{fpsr},~{flags}"(i32* @__force_order) nounwind
+  %t1 = tail call i32 asm sideeffect "bar", "=r,=*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @__force_order) nounwind
   br label %if.end
 
 if.end:
   %t6 = inttoptr i32 %t0 to i64*
-  %t11 = tail call i64 asm sideeffect "foo", "=*m,=A,{bx},{cx},1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %t6, i32 0, i32 0, i64 0) nounwind
+  %t11 = tail call i64 asm sideeffect "foo", "=*m,=A,{bx},{cx},1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %t6, i32 0, i32 0, i64 0) nounwind
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/inline-asm-A-constraint.ll b/llvm/test/CodeGen/X86/inline-asm-A-constraint.ll
index f07a13cf54ae8..3ae049d67c3e7 100644
--- a/llvm/test/CodeGen/X86/inline-asm-A-constraint.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-A-constraint.ll
@@ -12,7 +12,7 @@ entry:
   %conv = trunc i128 %shr to i64
   %conv1 = trunc i128 %0 to i64
   %1 = load i128, i128* %src, align 16, !tbaa !1
-  %2 = tail call i128 asm sideeffect "lock; cmpxchg16b $1", "=A,=*m,{cx},{bx},0,*m,~{dirflag},~{fpsr},~{flags}"(i8* %ptr, i64 %conv, i64 %conv1, i128 %1, i8* %ptr) #1, !srcloc !5
+  %2 = tail call i128 asm sideeffect "lock; cmpxchg16b $1", "=A,=*m,{cx},{bx},0,*m,~{dirflag},~{fpsr},~{flags}"(i8* elementtype(i8) %ptr, i64 %conv, i64 %conv1, i128 %1, i8* elementtype(i8) %ptr) #1, !srcloc !5
   %retval.sroa.0.0.extract.trunc = trunc i128 %2 to i64
   %retval.sroa.2.0.extract.shift = lshr i128 %2, 64
   %retval.sroa.2.0.extract.trunc = trunc i128 %retval.sroa.2.0.extract.shift to i64

diff  --git a/llvm/test/CodeGen/X86/inline-asm-R-constraint.ll b/llvm/test/CodeGen/X86/inline-asm-R-constraint.ll
index 218638c0e6539..c3ed4887f9132 100644
--- a/llvm/test/CodeGen/X86/inline-asm-R-constraint.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-R-constraint.ll
@@ -12,7 +12,7 @@ entry:
   %b_addr = alloca i8, align 1                    ; <i8*> [#uses=2]
   store i16 %a, i16* %a_addr
   store i8 %b, i8* %b_addr
-  call void asm "\09\09movw\09$2, %ax\09\09\0A\09\09divb\09$3\09\09\09\0A\09\09movb\09%al, $0\09\0A\09\09movb %ah, ($4)", "=*m,=*m,*m,*m,R,~{dirflag},~{fpsr},~{flags},~{ax}"(i8* %quotient, i8* %remainder, i16* %a_addr, i8* %b_addr, i8* %remainder) nounwind
+  call void asm "\09\09movw\09$2, %ax\09\09\0A\09\09divb\09$3\09\09\09\0A\09\09movb\09%al, $0\09\0A\09\09movb %ah, ($4)", "=*m,=*m,*m,*m,R,~{dirflag},~{fpsr},~{flags},~{ax}"(i8* elementtype(i8) %quotient, i8* elementtype(i8) %remainder, i16* elementtype(i16) %a_addr, i8* elementtype(i8) %b_addr, i8* %remainder) nounwind
   ret void
 ; CHECK: ret
 }

diff  --git a/llvm/test/CodeGen/X86/inline-asm-duplicated-constraint.ll b/llvm/test/CodeGen/X86/inline-asm-duplicated-constraint.ll
index 0228f45ce96c1..e47c015b77dc7 100644
--- a/llvm/test/CodeGen/X86/inline-asm-duplicated-constraint.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-duplicated-constraint.ll
@@ -7,6 +7,6 @@
 ; CHECK: ret
 define void @test1(i32* %l) {
   %load = load i32, i32* %l
-  call void asm "nop", "=*rmrm,0m0m,~{dirflag},~{fpsr},~{flags}"(i32* %l, i32 %load)
+  call void asm "nop", "=*rmrm,0m0m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %l, i32 %load)
   ret void
 }

diff  --git a/llvm/test/CodeGen/X86/inline-asm-flag-output.ll b/llvm/test/CodeGen/X86/inline-asm-flag-output.ll
index 1c89ce94c86c7..1e325760755e2 100644
--- a/llvm/test/CodeGen/X86/inline-asm-flag-output.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-flag-output.ll
@@ -26,7 +26,7 @@ define i32 @test_cca(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    seta %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@cca},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@cca},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -57,7 +57,7 @@ define i32 @test_ccae(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setae %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccae},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccae},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -88,7 +88,7 @@ define i32 @test_ccb(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccb},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccb},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -119,7 +119,7 @@ define i32 @test_ccbe(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setbe %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccbe},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccbe},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -150,7 +150,7 @@ define i32 @test_ccc(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccc},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccc},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -181,7 +181,7 @@ define i32 @test_cce(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@cce},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@cce},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -212,7 +212,7 @@ define i32 @test_ccz(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    sete %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccz},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccz},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -243,7 +243,7 @@ define i32 @test_ccg(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setg %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccg},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccg},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -274,7 +274,7 @@ define i32 @test_ccge(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setge %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccge},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccge},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -305,7 +305,7 @@ define i32 @test_ccl(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setl %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccl},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccl},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -336,7 +336,7 @@ define i32 @test_ccle(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setle %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccle},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccle},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -367,7 +367,7 @@ define i32 @test_ccna(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setbe %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccna},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccna},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -398,7 +398,7 @@ define i32 @test_ccnae(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccnae},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccnae},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -429,7 +429,7 @@ define i32 @test_ccnb(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setae %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccnb},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccnb},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -460,7 +460,7 @@ define i32 @test_ccnbe(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    seta %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccnbe},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccnbe},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -491,7 +491,7 @@ define i32 @test_ccnc(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setae %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccnc},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccnc},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -522,7 +522,7 @@ define i32 @test_ccne(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccne},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccne},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -553,7 +553,7 @@ define i32 @test_ccnz(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccnz},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccnz},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -584,7 +584,7 @@ define i32 @test_ccng(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setle %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccng},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccng},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -615,7 +615,7 @@ define i32 @test_ccnge(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setl %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccnge},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccnge},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -646,7 +646,7 @@ define i32 @test_ccnl(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setge %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccnl},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccnl},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -677,7 +677,7 @@ define i32 @test_ccnle(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setg %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccnle},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccnle},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -708,7 +708,7 @@ define i32 @test_ccno(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setno %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccno},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccno},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -739,7 +739,7 @@ define i32 @test_ccnp(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setnp %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccnp},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccnp},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -770,7 +770,7 @@ define i32 @test_ccns(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setns %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccns},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccns},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -801,7 +801,7 @@ define i32 @test_cco(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    seto %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@cco},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@cco},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -832,7 +832,7 @@ define i32 @test_ccp(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    setp %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccp},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccp},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -863,7 +863,7 @@ define i32 @test_ccs(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    sets %al
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i32 asm "cmp $2,$1", "={@ccs},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i32 asm "cmp $2,$1", "={@ccs},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i32 %cc, 0
   %rv = zext i1 %tobool to i32
   ret i32 %rv
@@ -899,7 +899,7 @@ define void @test_cca_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@cca},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@cca},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -939,7 +939,7 @@ define void @test_ccae_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccae},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccae},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -979,7 +979,7 @@ define void @test_ccb_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccb},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccb},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1019,7 +1019,7 @@ define void @test_ccbe_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccbe},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccbe},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1059,7 +1059,7 @@ define void @test_ccc_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccc},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccc},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1099,7 +1099,7 @@ define void @test_cce_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@cce},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@cce},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1139,7 +1139,7 @@ define void @test_ccz_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccz},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccz},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1179,7 +1179,7 @@ define void @test_ccg_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccg},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccg},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1219,7 +1219,7 @@ define void @test_ccge_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccge},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccge},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1259,7 +1259,7 @@ define void @test_ccl_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccl},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccl},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1299,7 +1299,7 @@ define void @test_ccle_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccle},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccle},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1339,7 +1339,7 @@ define void @test_ccna_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccna},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccna},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1379,7 +1379,7 @@ define void @test_ccnae_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccnae},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccnae},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1419,7 +1419,7 @@ define void @test_ccnb_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccnb},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccnb},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1459,7 +1459,7 @@ define void @test_ccnbe_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccnbe},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccnbe},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1499,7 +1499,7 @@ define void @test_ccnc_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccnc},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccnc},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1539,7 +1539,7 @@ define void @test_ccne_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccne},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccne},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1579,7 +1579,7 @@ define void @test_ccnz_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccnz},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccnz},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1619,7 +1619,7 @@ define void @test_ccng_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccng},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccng},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1659,7 +1659,7 @@ define void @test_ccnge_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccnge},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccnge},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1699,7 +1699,7 @@ define void @test_ccnl_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccnl},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccnl},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1739,7 +1739,7 @@ define void @test_ccnle_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccnle},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccnle},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1779,7 +1779,7 @@ define void @test_ccno_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccno},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccno},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1819,7 +1819,7 @@ define void @test_ccnp_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccnp},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccnp},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1859,7 +1859,7 @@ define void @test_ccns_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccns},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccns},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1899,7 +1899,7 @@ define void @test_cco_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@cco},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@cco},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1939,7 +1939,7 @@ define void @test_ccp_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccp},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccp},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 
@@ -1979,7 +1979,7 @@ define void @test_ccs_branch(i64 %nr, i64* %addr) nounwind {
 ; X64-NEXT:    popq %rax
 ; X64-NEXT:    retq
 entry:
-  %cc = tail call i8 asm "cmp $2,$1", "={@ccs},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %addr, i64 %nr) nounwind
+  %cc = tail call i8 asm "cmp $2,$1", "={@ccs},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %addr, i64 %nr) nounwind
   %tobool = icmp ne i8 %cc, 0
   br i1 %tobool, label %then, label %exit
 

diff  --git a/llvm/test/CodeGen/X86/inline-asm-fpstack.ll b/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
index 757f259d99b45..ed68d78b45aef 100644
--- a/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
@@ -258,7 +258,7 @@ define void @fist1(x86_fp80 %x, i32* %p) nounwind ssp {
 ; CHECK-NEXT:    addl $12, %esp
 ; CHECK-NEXT:    retl
 entry:
-  tail call void asm sideeffect "fistl $1", "{st},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(x86_fp80 %x, i32* %p) nounwind
+  tail call void asm sideeffect "fistl $1", "{st},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(x86_fp80 %x, i32* elementtype(i32) %p) nounwind
   ret void
 }
 
@@ -282,7 +282,7 @@ define x86_fp80 @fist2(x86_fp80 %x, i32* %p) nounwind ssp {
 ; CHECK-NEXT:    addl $12, %esp
 ; CHECK-NEXT:    retl
 entry:
-  %0 = tail call x86_fp80 asm "fistl $2", "=&{st},0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(x86_fp80 %x, i32* %p) nounwind
+  %0 = tail call x86_fp80 asm "fistl $2", "=&{st},0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(x86_fp80 %x, i32* elementtype(i32) %p) nounwind
   ret x86_fp80 %0
 }
 
@@ -485,7 +485,7 @@ entry:
 
 sw.bb4.i:
   %1 = call x86_fp80 asm sideeffect "frndint", "={st},0,~{dirflag},~{fpsr},~{flags}"(x86_fp80 %0)
-  call void asm sideeffect "fldcw $0", "*m,~{dirflag},~{fpsr},~{flags}"(i32* undef)
+  call void asm sideeffect "fldcw $0", "*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) undef)
   br label %_Z5tointRKe.exit
 
 _Z5tointRKe.exit:

diff  --git a/llvm/test/CodeGen/X86/inline-asm-h.ll b/llvm/test/CodeGen/X86/inline-asm-h.ll
index afb0be8765c5f..dc9d35647d57b 100644
--- a/llvm/test/CodeGen/X86/inline-asm-h.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-h.ll
@@ -4,7 +4,7 @@
 
 define dso_local void @zed() nounwind {
 entry:
-  call void asm "movq %mm2,${0:H}", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* @foobar) nounwind
+  call void asm "movq %mm2,${0:H}", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @foobar) nounwind
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/inline-asm-pic.ll b/llvm/test/CodeGen/X86/inline-asm-pic.ll
index a079748c56c94..7aeb1bfbdf416 100644
--- a/llvm/test/CodeGen/X86/inline-asm-pic.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-pic.ll
@@ -15,6 +15,6 @@ define void @func2() nounwind {
 ; CHECK-NEXT:    ## InlineAsm End
 ; CHECK-NEXT:    retl
 entry:
-	tail call void asm "mov $1,%gs:$0", "=*m,ri,~{dirflag},~{fpsr},~{flags}"(i8** inttoptr (i32 152 to i8**), i8* bitcast (i8** @main_q to i8*)) nounwind
+	tail call void asm "mov $1,%gs:$0", "=*m,ri,~{dirflag},~{fpsr},~{flags}"(i8** elementtype(i8*) inttoptr (i32 152 to i8**), i8* bitcast (i8** @main_q to i8*)) nounwind
 	ret void
 }

diff  --git a/llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll b/llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll
index 2120ab9f188bc..c395e20dc044e 100644
--- a/llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll
@@ -22,6 +22,6 @@ entry:
   %tmp1 = load i8, i8* %1
   %2 = bitcast i32* %dst to i8*
   %tmp2 = load i8, i8* %2
-  call void asm "pushfq \0Aandq $2, (%rsp) \0Aorq  $3, (%rsp) \0Apopfq \0Aaddb $4, $1 \0Apushfq \0Apopq $0 \0A", "=*&rm,=*&rm,i,r,r,1,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %newflags, i8* %0, i64 -2, i64 %and, i8 %tmp1, i8 %tmp2) nounwind
+  call void asm "pushfq \0Aandq $2, (%rsp) \0Aorq  $3, (%rsp) \0Apopfq \0Aaddb $4, $1 \0Apushfq \0Apopq $0 \0A", "=*&rm,=*&rm,i,r,r,1,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %newflags, i8* elementtype(i8) %0, i64 -2, i64 %and, i8 %tmp1, i8 %tmp2) nounwind
   ret void
 }

diff  --git a/llvm/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll b/llvm/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll
index 006d87eaab8f6..e6d2454b811d9 100644
--- a/llvm/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll
@@ -8,7 +8,7 @@ declare void @bar(i8* nocapture, %struct.foo* align 4 byval(%struct.foo)) nounwi
 ; Don't clobber %esi if we have inline asm that clobbers %esp.
 define void @test1(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind {
   call void @bar(i8* %z, %struct.foo* align 4 byval(%struct.foo) %x)
-  call void asm sideeffect inteldialect "xor esp, esp", "=*m,~{flags},~{esp},~{esp},~{dirflag},~{fpsr},~{flags}"(i8* %z)
+  call void asm sideeffect inteldialect "xor esp, esp", "=*m,~{flags},~{esp},~{esp},~{dirflag},~{fpsr},~{flags}"(i8* elementtype(i8) %z)
   ret void
 
 ; CHECK-LABEL: test1:

diff  --git a/llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll b/llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll
index 29034a63ed9b8..668668c0f54ce 100644
--- a/llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll
@@ -9,7 +9,7 @@ entry:
   br i1 %cond, label %doit, label %skip
 
 doit:
-  call void asm sideeffect "xor %ecx, %ecx\0A\09mov %ecx, $0", "=*m,~{ecx},~{flags}"(i32* %r)
+  call void asm sideeffect "xor %ecx, %ecx\0A\09mov %ecx, $0", "=*m,~{ecx},~{flags}"(i32* elementtype(i32) %r)
   %junk = alloca i32
   call void @bar(i32* %junk)
   br label %skip

diff  --git a/llvm/test/CodeGen/X86/inline-asm.ll b/llvm/test/CodeGen/X86/inline-asm.ll
index e4442388b082d..7810736a4be6c 100644
--- a/llvm/test/CodeGen/X86/inline-asm.ll
+++ b/llvm/test/CodeGen/X86/inline-asm.ll
@@ -40,7 +40,7 @@ entry:
 
 define void @test7(i1 zeroext %desired, i32* %p) nounwind {
 entry:
-  %0 = tail call i8 asm sideeffect "xchg $0, $1", "=r,*m,0,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %p, i1 %desired) nounwind
+  %0 = tail call i8 asm sideeffect "xchg $0, $1", "=r,*m,0,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %p, i1 %desired) nounwind
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/leaf-fp-elim.ll b/llvm/test/CodeGen/X86/leaf-fp-elim.ll
index 4a4e9b444eb67..470e4a3027a26 100644
--- a/llvm/test/CodeGen/X86/leaf-fp-elim.ll
+++ b/llvm/test/CodeGen/X86/leaf-fp-elim.ll
@@ -19,7 +19,7 @@ entry:
   br i1 %0, label %return, label %bb
 
 bb:                                               ; preds = %entry
-  tail call void asm "mov $1, $0", "=*m,{cx},~{dirflag},~{fpsr},~{flags}"(i8** @msg, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i64 0, i64 0)) nounwind
+  tail call void asm "mov $1, $0", "=*m,{cx},~{dirflag},~{fpsr},~{flags}"(i8** elementtype(i8*) @msg, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i64 0, i64 0)) nounwind
   tail call void @llvm.trap()
   unreachable
 

diff  --git a/llvm/test/CodeGen/X86/ms-inline-asm-PR44272.ll b/llvm/test/CodeGen/X86/ms-inline-asm-PR44272.ll
index 3d8a4fe385c58..8a953d31c7f88 100644
--- a/llvm/test/CodeGen/X86/ms-inline-asm-PR44272.ll
+++ b/llvm/test/CodeGen/X86/ms-inline-asm-PR44272.ll
@@ -8,7 +8,7 @@ entry:
 
 define dso_local void @main() {
 entry:
-  call void asm sideeffect inteldialect "call ${0:P}", "*m,~{dirflag},~{fpsr},~{flags}"(void ()* @func)
+  call void asm sideeffect inteldialect "call ${0:P}", "*m,~{dirflag},~{fpsr},~{flags}"(void ()* elementtype(void ()) @func)
   ret void
 ; CHECK-LABEL: main:
 ; CHECK: {{## InlineAsm Start|#APP}}

diff  --git a/llvm/test/CodeGen/X86/ms-inline-asm-array.ll b/llvm/test/CodeGen/X86/ms-inline-asm-array.ll
index 0abfe780f7186..2d08af23d2405 100644
--- a/llvm/test/CodeGen/X86/ms-inline-asm-array.ll
+++ b/llvm/test/CodeGen/X86/ms-inline-asm-array.ll
@@ -5,7 +5,7 @@
 ; CHECK: movl    %edx, arr(,%rdx,4)
 define dso_local i32 @main() #0 {
 entry:
-  call void asm sideeffect inteldialect "mov dword ptr arr[rdx * $$4],edx", "=*m,~{dirflag},~{fpsr},~{flags}"([10 x i32]* @arr) #1, !srcloc !4
+  call void asm sideeffect inteldialect "mov dword ptr arr[rdx * $$4],edx", "=*m,~{dirflag},~{fpsr},~{flags}"([10 x i32]* elementtype([10 x i32]) @arr) #1, !srcloc !4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/X86/ms-inline-asm-avx512.ll b/llvm/test/CodeGen/X86/ms-inline-asm-avx512.ll
index 4eb966f9c1c83..5a4fba8546c7e 100644
--- a/llvm/test/CodeGen/X86/ms-inline-asm-avx512.ll
+++ b/llvm/test/CodeGen/X86/ms-inline-asm-avx512.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-pc-windows-msvc"
 define void @ignore_fe_size() #0 {
 entry:
   %c = alloca i8, align 1
-  call void asm sideeffect inteldialect "vaddps xmm1, xmm2, $1{1to4}\0A\09vaddps xmm1, xmm2, $2\0A\09mov eax, $3\0A\09mov $0, rax", "=*m,*m,*m,*m,~{eax},~{xmm1},~{dirflag},~{fpsr},~{flags}"(i8* %c, i8* %c, i8* %c, i8* %c) #1
+  call void asm sideeffect inteldialect "vaddps xmm1, xmm2, $1{1to4}\0A\09vaddps xmm1, xmm2, $2\0A\09mov eax, $3\0A\09mov $0, rax", "=*m,*m,*m,*m,~{eax},~{xmm1},~{dirflag},~{fpsr},~{flags}"(i8* elementtype(i8) %c, i8* elementtype(i8) %c, i8* elementtype(i8) %c, i8* elementtype(i8) %c) #1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/ms-inline-asm-redundant-clobber.ll b/llvm/test/CodeGen/X86/ms-inline-asm-redundant-clobber.ll
index 2acda02f9d6a9..83d80375b31b3 100644
--- a/llvm/test/CodeGen/X86/ms-inline-asm-redundant-clobber.ll
+++ b/llvm/test/CodeGen/X86/ms-inline-asm-redundant-clobber.ll
@@ -21,6 +21,6 @@ define dso_local void @foo() local_unnamed_addr {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    retq
 entry:
-  tail call void asm sideeffect inteldialect "clc\0A\09cmpxchg8b $0\0A\09cmpxchg16b $1\0A\09clc", "=*m,=*m,~{eax},~{edx},~{flags},~{rax},~{rdx},~{dirflag},~{fpsr},~{flags}"([16 x i8]* nonnull @test_mem, [16 x i8]* nonnull @test_mem) #1
+  tail call void asm sideeffect inteldialect "clc\0A\09cmpxchg8b $0\0A\09cmpxchg16b $1\0A\09clc", "=*m,=*m,~{eax},~{edx},~{flags},~{rax},~{rdx},~{dirflag},~{fpsr},~{flags}"([16 x i8]* elementtype([16 x i8]) nonnull @test_mem, [16 x i8]* elementtype([16 x i8]) nonnull @test_mem) #1
   ret void
 }

diff  --git a/llvm/test/CodeGen/X86/ms-inline-asm.ll b/llvm/test/CodeGen/X86/ms-inline-asm.ll
index 828a76e6ad1f2..27f55a90641da 100644
--- a/llvm/test/CodeGen/X86/ms-inline-asm.ll
+++ b/llvm/test/CodeGen/X86/ms-inline-asm.ll
@@ -29,7 +29,7 @@ define void @t3(i32 %V) nounwind {
 entry:
   %V.addr = alloca i32, align 4
   store i32 %V, i32* %V.addr, align 4
-  call void asm sideeffect inteldialect "mov eax, DWORD PTR [$0]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %V.addr) nounwind
+  call void asm sideeffect inteldialect "mov eax, DWORD PTR [$0]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %V.addr) nounwind
   ret void
 ; CHECK: t3
 ; CHECK: {{## InlineAsm Start|#APP}}
@@ -85,8 +85,8 @@ entry:
 define i32* @t30() nounwind ssp {
 entry:
   %res = alloca i32*, align 4
-  call void asm sideeffect inteldialect "lea edi, dword ptr $0", "*m,~{edi},~{dirflag},~{fpsr},~{flags}"([2 x i32]* @results) nounwind
-  call void asm sideeffect inteldialect "mov dword ptr $0, edi", "=*m,~{dirflag},~{fpsr},~{flags}"(i32** %res) nounwind
+  call void asm sideeffect inteldialect "lea edi, dword ptr $0", "*m,~{edi},~{dirflag},~{fpsr},~{flags}"([2 x i32]* elementtype([2 x i32]) @results) nounwind
+  call void asm sideeffect inteldialect "mov dword ptr $0, edi", "=*m,~{dirflag},~{fpsr},~{flags}"(i32** elementtype(i32*) %res) nounwind
   %0 = load i32*, i32** %res, align 4
   ret i32* %0
 ; CHECK-LABEL: t30:
@@ -110,7 +110,7 @@ define i32 @t31() {
 entry:
   %val = alloca i32, align 64
   store i32 -1, i32* %val, align 64
-  call void asm sideeffect inteldialect "mov dword ptr $0, esp", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %val)
+  call void asm sideeffect inteldialect "mov dword ptr $0, esp", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %val)
   %sp = load i32, i32* %val, align 64
   ret i32 %sp
 ; CHECK-LABEL: t31:
@@ -145,7 +145,7 @@ define i32 @uid() {
 entry:
   %r = alloca i32, align 4
   %0 = bitcast i32* %r to i8*
-  call void asm sideeffect inteldialect "xor eax, eax\0A\09.L__MSASMLABEL_.${:uid}__wloop:\0A\09inc eax\0A\09cmp eax, $$42\0A\09jne .L__MSASMLABEL_.${:uid}__wloop\0A\09mov dword ptr $0, eax", "=*m,~{eax},~{flags},~{dirflag},~{fpsr},~{flags}"(i32* nonnull %r)
+  call void asm sideeffect inteldialect "xor eax, eax\0A\09.L__MSASMLABEL_.${:uid}__wloop:\0A\09inc eax\0A\09cmp eax, $$42\0A\09jne .L__MSASMLABEL_.${:uid}__wloop\0A\09mov dword ptr $0, eax", "=*m,~{eax},~{flags},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) nonnull %r)
   %1 = load i32, i32* %r, align 4
   ret i32 %1
 ; CHECK-LABEL: uid:
@@ -160,7 +160,7 @@ entry:
 declare hidden void @other_func()
 
 define void @naked() #0 {
-  call void asm sideeffect inteldialect "call dword ptr $0", "*m,~{eax},~{ebx},~{ecx},~{edx},~{edi},~{esi},~{esp},~{ebp},~{dirflag},~{fpsr},~{flags}"(void()* @other_func)
+  call void asm sideeffect inteldialect "call dword ptr $0", "*m,~{eax},~{ebx},~{ecx},~{edx},~{edi},~{esi},~{esp},~{ebp},~{dirflag},~{fpsr},~{flags}"(void()* elementtype(void()) @other_func)
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/X86/mult-alt-generic-i686.ll b/llvm/test/CodeGen/X86/mult-alt-generic-i686.ll
index e55a3a2d35b3b..65ff8915b06be 100644
--- a/llvm/test/CodeGen/X86/mult-alt-generic-i686.ll
+++ b/llvm/test/CodeGen/X86/mult-alt-generic-i686.ll
@@ -9,7 +9,7 @@ target triple = "i686"
 
 define void @single_m() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32* @min1) nounwind
+  call void asm "foo $1,$0", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32* elementtype(i32) @min1) nounwind
   ret void
 }
 
@@ -166,7 +166,7 @@ entry:
 define void @multi_m() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*m|r,m|r,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*m|r,m|r,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll b/llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll
index 026d0a636e884..28becc34a8799 100644
--- a/llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll
+++ b/llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll
@@ -9,7 +9,7 @@ target triple = "x86_64"
 
 define void @single_m() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32* @min1) nounwind
+  call void asm "foo $1,$0", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32* elementtype(i32) @min1) nounwind
   ret void
 }
 
@@ -166,7 +166,7 @@ entry:
 define void @multi_m() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*m|r,m|r,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*m|r,m|r,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/mult-alt-x86.ll b/llvm/test/CodeGen/X86/mult-alt-x86.ll
index 18e245e80dc46..25ba861ba4891 100644
--- a/llvm/test/CodeGen/X86/mult-alt-x86.ll
+++ b/llvm/test/CodeGen/X86/mult-alt-x86.ll
@@ -127,135 +127,135 @@ entry:
 
 define void @single_I() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,I,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*m,I,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @single_J() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,J,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*m,J,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @single_K() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,K,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*m,K,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @single_L() nounwind {
 entry:
 ; Missing lowering support for 'L'.
-;  call void asm "foo $1,$0", "=*m,L,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+;  call void asm "foo $1,$0", "=*m,L,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @single_M() nounwind {
 entry:
 ; Missing lowering support for 'M'.
-;  call void asm "foo $1,$0", "=*m,M,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+;  call void asm "foo $1,$0", "=*m,M,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @single_N() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,N,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*m,N,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @single_G() nounwind {
 entry:
 ; Missing lowering support for 'G'.
-;  call void asm "foo $1,$0", "=*m,G,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, double 1.000000e+000) nounwind
+;  call void asm "foo $1,$0", "=*m,G,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, double 1.000000e+000) nounwind
   ret void
 }
 
 define void @single_C() nounwind {
 entry:
 ; Missing lowering support for 'C'.
-;  call void asm "foo $1,$0", "=*m,C,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, double 1.000000e+000) nounwind
+;  call void asm "foo $1,$0", "=*m,C,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, double 1.000000e+000) nounwind
   ret void
 }
 
 define void @single_e() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,e,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*m,e,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @single_Z() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*m,Z,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*m,Z,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @multi_R() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|R|m,r|R|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|R|m,r|R|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
 define void @multi_q() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|q|m,r|q|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|q|m,r|q|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
 define void @multi_Q() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|Q|m,r|Q|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|Q|m,r|Q|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
 define void @multi_a() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|{ax}|m,r|{ax}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|{ax}|m,r|{ax}|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
 define void @multi_b() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|{bx}|m,r|{bx}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|{bx}|m,r|{bx}|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
 define void @multi_c() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|{cx}|m,r|{cx}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|{cx}|m,r|{cx}|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
 define void @multi_d() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|{dx}|m,r|{dx},~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|{dx}|m,r|{dx},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
 define void @multi_S() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|{si}|m,r|{si}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|{si}|m,r|{si}|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
 define void @multi_D() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|{di}|m,r|{di}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|{di}|m,r|{di}|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
 define void @multi_A() nounwind {
 entry:
   %tmp = load i32, i32* @min1, align 4
-  call void asm "foo $1,$0", "=*r|A|m,r|A|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|A|m,r|A|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 %tmp) nounwind
   ret void
 }
 
@@ -277,14 +277,14 @@ entry:
 define void @multi_y() nounwind {
 entry:
   %tmp = load double, double* @din1, align 8
-  call void asm "foo $1,$0", "=*r|y|m,r|y|m,~{dirflag},~{fpsr},~{flags}"(double* @dout0, double %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|y|m,r|y|m,~{dirflag},~{fpsr},~{flags}"(double* elementtype(double) @dout0, double %tmp) nounwind
   ret void
 }
 
 define void @multi_x() nounwind {
 entry:
   %tmp = load double, double* @din1, align 8
-  call void asm "foo $1,$0", "=*r|x|m,r|x|m,~{dirflag},~{fpsr},~{flags}"(double* @dout0, double %tmp) nounwind
+  call void asm "foo $1,$0", "=*r|x|m,r|x|m,~{dirflag},~{fpsr},~{flags}"(double* elementtype(double) @dout0, double %tmp) nounwind
   ret void
 }
 
@@ -295,64 +295,64 @@ entry:
 
 define void @multi_I() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*r|m|m,r|I|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*r|m|m,r|I|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @multi_J() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*r|m|m,r|J|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*r|m|m,r|J|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @multi_K() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*r|m|m,r|K|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*r|m|m,r|K|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @multi_L() nounwind {
 entry:
 ; Missing lowering support for 'L'.
-;  call void asm "foo $1,$0", "=*r|m|m,r|L|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+;  call void asm "foo $1,$0", "=*r|m|m,r|L|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @multi_M() nounwind {
 entry:
 ; Missing lowering support for 'M'.
-;  call void asm "foo $1,$0", "=*r|m|m,r|M|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+;  call void asm "foo $1,$0", "=*r|m|m,r|M|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @multi_N() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*r|m|m,r|N|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*r|m|m,r|N|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @multi_G() nounwind {
 entry:
 ; Missing lowering support for 'G'.
-;  call void asm "foo $1,$0", "=*r|m|m,r|G|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, double 1.000000e+000) nounwind
+;  call void asm "foo $1,$0", "=*r|m|m,r|G|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, double 1.000000e+000) nounwind
   ret void
 }
 
 define void @multi_C() nounwind {
 entry:
 ; Missing lowering support for 'C'.
-;  call void asm "foo $1,$0", "=*r|m|m,r|C|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, double 1.000000e+000) nounwind
+;  call void asm "foo $1,$0", "=*r|m|m,r|C|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, double 1.000000e+000) nounwind
   ret void
 }
 
 define void @multi_e() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*r|m|m,r|e|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*r|m|m,r|e|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }
 
 define void @multi_Z() nounwind {
 entry:
-  call void asm "foo $1,$0", "=*r|m|m,r|Z|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 1) nounwind
+  call void asm "foo $1,$0", "=*r|m|m,r|Z|m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @mout0, i32 1) nounwind
   ret void
 }

diff  --git a/llvm/test/CodeGen/X86/multiple-loop-post-inc.ll b/llvm/test/CodeGen/X86/multiple-loop-post-inc.ll
index d54aea160c7e3..f93bb0f5618c4 100644
--- a/llvm/test/CodeGen/X86/multiple-loop-post-inc.ll
+++ b/llvm/test/CodeGen/X86/multiple-loop-post-inc.ll
@@ -76,11 +76,11 @@ bb4:                                              ; preds = %bb3, %bb2
   %29 = insertelement <4 x float> %28, float %23, i32 1 ; <<4 x float>> [#uses=1]
   %30 = insertelement <4 x float> %29, float %25, i32 2 ; <<4 x float>> [#uses=1]
   %31 = insertelement <4 x float> %30, float %27, i32 3 ; <<4 x float>> [#uses=5]
-  %asmtmp.i = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* %times4) nounwind ; <<4 x float>> [#uses=3]
+  %asmtmp.i = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* elementtype(float) %times4) nounwind ; <<4 x float>> [#uses=3]
   %32 = fadd <4 x float> %31, %asmtmp.i           ; <<4 x float>> [#uses=3]
   %33 = fadd <4 x float> %32, %asmtmp.i           ; <<4 x float>> [#uses=3]
   %34 = fadd <4 x float> %33, %asmtmp.i           ; <<4 x float>> [#uses=2]
-  %asmtmp.i18 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* %timesN) nounwind ; <<4 x float>> [#uses=8]
+  %asmtmp.i18 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* elementtype(float) %timesN) nounwind ; <<4 x float>> [#uses=8]
   %35 = icmp sgt i64 %N_addr.0, 15                ; <i1> [#uses=2]
   br i1 %3, label %bb6.preheader, label %bb8
 
@@ -158,7 +158,7 @@ bb7:                                              ; preds = %bb6.bb7_crit_edge,
   %O_addr.1.lcssa = phi float* [ %scevgep125, %bb6.bb7_crit_edge ], [ %O_addr.0, %bb6.preheader ] ; <float*> [#uses=1]
   %vX0.0.lcssa = phi <4 x float> [ %41, %bb6.bb7_crit_edge ], [ %31, %bb6.preheader ] ; <<4 x float>> [#uses=1]
   %N_addr.1.lcssa = phi i64 [ %tmp136, %bb6.bb7_crit_edge ], [ %N_addr.0, %bb6.preheader ] ; <i64> [#uses=1]
-  %asmtmp.i17 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* %times4) nounwind ; <<4 x float>> [#uses=0]
+  %asmtmp.i17 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* elementtype(float) %times4) nounwind ; <<4 x float>> [#uses=0]
   br label %bb11
 
 bb8:                                              ; preds = %bb4

diff  --git a/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll b/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll
index a1fc3f9831769..57913769a3a2a 100644
--- a/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll
+++ b/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll
@@ -27,16 +27,16 @@ entry:
   store i32 1, i32* %x, align 4
   store i32 2, i32* %y, align 4
   call void asm sideeffect alignstack "# top of block", "~{dirflag},~{fpsr},~{flags},~{edi},~{esi},~{edx},~{ecx},~{eax}"() nounwind
-  %asmtmp = call i32 asm sideeffect alignstack "movl $1, $0", "=={eax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32* %x) nounwind ; <i32> [#uses=1]
+  %asmtmp = call i32 asm sideeffect alignstack "movl $1, $0", "=={eax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32* elementtype(i32) %x) nounwind ; <i32> [#uses=1]
   store i32 %asmtmp, i32* %"%eax"
-  %asmtmp1 = call i32 asm sideeffect alignstack "movl $1, $0", "=={ebx},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32* %y) nounwind ; <i32> [#uses=1]
+  %asmtmp1 = call i32 asm sideeffect alignstack "movl $1, $0", "=={ebx},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32* elementtype(i32) %y) nounwind ; <i32> [#uses=1]
   store i32 %asmtmp1, i32* %"%ebx"
   %1 = call i32 asm "", "={bx}"() nounwind        ; <i32> [#uses=1]
   %2 = call i32 asm "", "={ax}"() nounwind        ; <i32> [#uses=1]
   %asmtmp2 = call i32 asm sideeffect alignstack "addl $1, $0", "=={eax},{ebx},{eax},~{dirflag},~{fpsr},~{flags},~{memory}"(i32 %1, i32 %2) nounwind ; <i32> [#uses=1]
   store i32 %asmtmp2, i32* %"%eax"
   %3 = call i32 asm "", "={ax}"() nounwind        ; <i32> [#uses=1]
-  call void asm sideeffect alignstack "movl $0, $1", "{eax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32 %3, i32* %result) nounwind
+  call void asm sideeffect alignstack "movl $0, $1", "{eax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32 %3, i32* elementtype(i32) %result) nounwind
   %4 = load i32, i32* %result, align 4                 ; <i32> [#uses=1]
   %5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 %4) nounwind ; <i32> [#uses=0]
   store i32 0, i32* %0, align 4

diff  --git a/llvm/test/CodeGen/X86/pr3154.ll b/llvm/test/CodeGen/X86/pr3154.ll
index fa788b2231f04..f6be512faafb9 100644
--- a/llvm/test/CodeGen/X86/pr3154.ll
+++ b/llvm/test/CodeGen/X86/pr3154.ll
@@ -29,7 +29,7 @@ bb19:		; preds = %bb, %entry
 	%12 = mul i32 %11, -4		; <i32> [#uses=2]
 	%13 = shl i32 %len, 1		; <i32> [#uses=1]
 	%14 = and i32 %13, -4		; <i32> [#uses=2]
-	call void asm sideeffect "movsd   $0,     %xmm7                \0A\09movapd  ff_pd_1, %xmm6     \0A\09movapd  ff_pd_2, %xmm5     \0A\09movlhps %xmm7, %xmm7                \0A\09subpd   %xmm5, %xmm7                \0A\09addsd   %xmm6, %xmm7                \0A\09", "*m,~{dirflag},~{fpsr},~{flags}"(double* %c) nounwind
+	call void asm sideeffect "movsd   $0,     %xmm7                \0A\09movapd  ff_pd_1, %xmm6     \0A\09movapd  ff_pd_2, %xmm5     \0A\09movlhps %xmm7, %xmm7                \0A\09subpd   %xmm5, %xmm7                \0A\09addsd   %xmm6, %xmm7                \0A\09", "*m,~{dirflag},~{fpsr},~{flags}"(double* elementtype(double) %c) nounwind
 	%15 = and i32 %len, 1		; <i32> [#uses=1]
 	%toBool = icmp eq i32 %15, 0		; <i1> [#uses=1]
 	%16 = getelementptr double, double* %data15.0, i32 %11		; <double*> [#uses=2]
@@ -81,7 +81,7 @@ bb31:		; preds = %bb30
 	%28 = getelementptr double, double* %autoc, i32 %j4.141		; <double*> [#uses=1]
 	%29 = getelementptr double, double* %autoc, i32 %25		; <double*> [#uses=1]
 	%30 = getelementptr double, double* %autoc, i32 %26		; <double*> [#uses=1]
-	%asmtmp32 = call i32 asm sideeffect "movsd    ff_pd_1, %xmm0 \0A\09movsd    ff_pd_1, %xmm1 \0A\09movsd    ff_pd_1, %xmm2 \0A\091:                                 \0A\09movapd   ($4,$0), %xmm3           \0A\09movupd -8($5,$0), %xmm4           \0A\09movapd   ($5,$0), %xmm5           \0A\09mulpd     %xmm3, %xmm4           \0A\09mulpd     %xmm3, %xmm5           \0A\09mulpd -16($5,$0), %xmm3           \0A\09addpd     %xmm4, %xmm1           \0A\09addpd     %xmm5, %xmm0           \0A\09addpd     %xmm3, %xmm2           \0A\09add       $$16,    $0               \0A\09jl 1b                              \0A\09movhlps   %xmm0, %xmm3           \0A\09movhlps   %xmm1, %xmm4           \0A\09movhlps   %xmm2, %xmm5           \0A\09addsd     %xmm3, %xmm0           \0A\09addsd     %xmm4, %xmm1           \0A\09addsd     %xmm5, %xmm2           \0A\09movsd     %xmm0, $1               \0A\09movsd     %xmm1, $2               \0A\09movsd     %xmm2, $3               \0A\09", "=&r,=*m,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* %28, double* %29, double* %30, double* %21, double* %27, i32 %22) nounwind		; <i32> [#uses=0]
+	%asmtmp32 = call i32 asm sideeffect "movsd    ff_pd_1, %xmm0 \0A\09movsd    ff_pd_1, %xmm1 \0A\09movsd    ff_pd_1, %xmm2 \0A\091:                                 \0A\09movapd   ($4,$0), %xmm3           \0A\09movupd -8($5,$0), %xmm4           \0A\09movapd   ($5,$0), %xmm5           \0A\09mulpd     %xmm3, %xmm4           \0A\09mulpd     %xmm3, %xmm5           \0A\09mulpd -16($5,$0), %xmm3           \0A\09addpd     %xmm4, %xmm1           \0A\09addpd     %xmm5, %xmm0           \0A\09addpd     %xmm3, %xmm2           \0A\09add       $$16,    $0               \0A\09jl 1b                              \0A\09movhlps   %xmm0, %xmm3           \0A\09movhlps   %xmm1, %xmm4           \0A\09movhlps   %xmm2, %xmm5           \0A\09addsd     %xmm3, %xmm0           \0A\09addsd     %xmm4, %xmm1           \0A\09addsd     %xmm5, %xmm2           \0A\09movsd     %xmm0, $1               \0A\09movsd     %xmm1, $2               \0A\09movsd     %xmm2, $3               \0A\09", "=&r,=*m,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* elementtype(double) %28, double* elementtype(double) %29, double* elementtype(double) %30, double* %21, double* %27, i32 %22) nounwind		; <i32> [#uses=0]
 	br label %bb35
 
 bb33:		; preds = %bb30
@@ -89,7 +89,7 @@ bb33:		; preds = %bb30
 	%31 = getelementptr double, double* %data15.0, i32 %.sum39		; <double*> [#uses=1]
 	%32 = getelementptr double, double* %autoc, i32 %j4.141		; <double*> [#uses=1]
 	%33 = getelementptr double, double* %autoc, i32 %25		; <double*> [#uses=1]
-	%asmtmp34 = call i32 asm sideeffect "movsd    ff_pd_1, %xmm0 \0A\09movsd    ff_pd_1, %xmm1 \0A\091:                                 \0A\09movapd   ($3,$0), %xmm3           \0A\09movupd -8($4,$0), %xmm4           \0A\09mulpd     %xmm3, %xmm4           \0A\09mulpd    ($4,$0), %xmm3           \0A\09addpd     %xmm4, %xmm1           \0A\09addpd     %xmm3, %xmm0           \0A\09add       $$16,    $0               \0A\09jl 1b                              \0A\09movhlps   %xmm0, %xmm3           \0A\09movhlps   %xmm1, %xmm4           \0A\09addsd     %xmm3, %xmm0           \0A\09addsd     %xmm4, %xmm1           \0A\09movsd     %xmm0, $1               \0A\09movsd     %xmm1, $2               \0A\09", "=&r,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* %32, double* %33, double* %21, double* %31, i32 %22) nounwind		; <i32> [#uses=0]
+	%asmtmp34 = call i32 asm sideeffect "movsd    ff_pd_1, %xmm0 \0A\09movsd    ff_pd_1, %xmm1 \0A\091:                                 \0A\09movapd   ($3,$0), %xmm3           \0A\09movupd -8($4,$0), %xmm4           \0A\09mulpd     %xmm3, %xmm4           \0A\09mulpd    ($4,$0), %xmm3           \0A\09addpd     %xmm4, %xmm1           \0A\09addpd     %xmm3, %xmm0           \0A\09add       $$16,    $0               \0A\09jl 1b                              \0A\09movhlps   %xmm0, %xmm3           \0A\09movhlps   %xmm1, %xmm4           \0A\09addsd     %xmm3, %xmm0           \0A\09addsd     %xmm4, %xmm1           \0A\09movsd     %xmm0, $1               \0A\09movsd     %xmm1, $2               \0A\09", "=&r,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* elementtype(double) %32, double* elementtype(double) %33, double* %21, double* %31, i32 %22) nounwind		; <i32> [#uses=0]
 	%.pre = add i32 %j4.141, 2		; <i32> [#uses=1]
 	br label %bb35
 

diff  --git a/llvm/test/CodeGen/X86/regalloc-advanced-split-cost.ll b/llvm/test/CodeGen/X86/regalloc-advanced-split-cost.ll
index d9f3908389a58..631dec230da55 100644
--- a/llvm/test/CodeGen/X86/regalloc-advanced-split-cost.ll
+++ b/llvm/test/CodeGen/X86/regalloc-advanced-split-cost.ll
@@ -48,7 +48,7 @@ entry:
 if.then:                                          ; preds = %entry
   %arrayidx7 = getelementptr inbounds i32, i32* %array, i32 6
   store i32 %shl, i32* %arrayidx7, align 4, !tbaa !7
-  call void asm "nop", "=*m,r,r,r,r,r,*m,~{dirflag},~{fpsr},~{flags}"(i32** nonnull %array.addr, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32** nonnull %array.addr) #1, !srcloc !9
+  call void asm "nop", "=*m,r,r,r,r,r,*m,~{dirflag},~{fpsr},~{flags}"(i32** elementtype(i32*) nonnull %array.addr, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32** elementtype(i32*) nonnull %array.addr) #1, !srcloc !9
   %6 = load i32*, i32** %array.addr, align 4, !tbaa !3
   %arrayidx8 = getelementptr inbounds i32, i32* %6, i32 7
   br label %if.end
@@ -58,7 +58,7 @@ if.else:                                          ; preds = %entry
   %7 = load i32, i32* %arrayidx5, align 4, !tbaa !7
   %arrayidx9 = getelementptr inbounds i32, i32* %array, i32 8
   store i32 %shl, i32* %arrayidx9, align 4, !tbaa !7
-  call void asm "nop", "=*m,{ax},{bx},{cx},{dx},{di},{si},{ebp},*m,~{dirflag},~{fpsr},~{flags}"(i32** nonnull %array.addr, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %7, i32* undef, i32** nonnull %array.addr) #1, !srcloc !10
+  call void asm "nop", "=*m,{ax},{bx},{cx},{dx},{di},{si},{ebp},*m,~{dirflag},~{fpsr},~{flags}"(i32** elementtype(i32*) nonnull %array.addr, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %7, i32* undef, i32** elementtype(i32*) nonnull %array.addr) #1, !srcloc !10
   %8 = load i32*, i32** %array.addr, align 4, !tbaa !3
   %arrayidx10 = getelementptr inbounds i32, i32* %8, i32 9
   br label %if.end

diff  --git a/llvm/test/CodeGen/X86/semantic-interposition-asm.ll b/llvm/test/CodeGen/X86/semantic-interposition-asm.ll
index cf81f71db5fa0..755c28fcf9650 100644
--- a/llvm/test/CodeGen/X86/semantic-interposition-asm.ll
+++ b/llvm/test/CodeGen/X86/semantic-interposition-asm.ll
@@ -28,7 +28,7 @@ define i64 @test_var() nounwind {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    retq
 entry:
-  %0 = tail call i64 asm "movq $1, $0\0Amovq $2, $0", "=r,*m,*m"(i32* @gv0, i32* @gv1)
+  %0 = tail call i64 asm "movq $1, $0\0Amovq $2, $0", "=r,*m,*m"(i32* elementtype(i32) @gv0, i32* elementtype(i32) @gv1)
   ret i64 %0
 }
 
@@ -67,6 +67,6 @@ define i64 @test_fun() nounwind {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    retq
 entry:
-  %0 = tail call i64 asm "movq $1, $0\0Amovq $2, $0", "=r,*m,*m"(void ()* nonnull @fun0, void ()* nonnull @fun1)
+  %0 = tail call i64 asm "movq $1, $0\0Amovq $2, $0", "=r,*m,*m"(void ()* elementtype(void ()) nonnull @fun0, void ()* elementtype(void ()) nonnull @fun1)
   ret i64 %0
 }

diff  --git a/llvm/test/CodeGen/X86/speculative-execution-side-effect-suppression.ll b/llvm/test/CodeGen/X86/speculative-execution-side-effect-suppression.ll
index c1dee72e6d076..53aaee197496e 100644
--- a/llvm/test/CodeGen/X86/speculative-execution-side-effect-suppression.ll
+++ b/llvm/test/CodeGen/X86/speculative-execution-side-effect-suppression.ll
@@ -218,7 +218,7 @@ define dso_local i32 (i32*)* @_Z3bazv() {
 entry:
   %p = alloca i32 (i32*)*, align 8
   store i32 (i32*)* @_Z3barPi, i32 (i32*)** %p, align 8
-  call void asm sideeffect "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(i32 (i32*)** %p, i32 (i32*)** %p) #3, !srcloc !2
+  call void asm sideeffect "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(i32 (i32*)** elementtype(i32 (i32*)*) %p, i32 (i32*)** elementtype(i32 (i32*)*) %p) #3, !srcloc !2
   %0 = load i32 (i32*)*, i32 (i32*)** %p, align 8
   ret i32 (i32*)* %0
 }

diff  --git a/llvm/test/CodeGen/X86/win64_regcall.ll b/llvm/test/CodeGen/X86/win64_regcall.ll
index 4cd051928f366..155657fc5ab9f 100644
--- a/llvm/test/CodeGen/X86/win64_regcall.ll
+++ b/llvm/test/CodeGen/X86/win64_regcall.ll
@@ -3,7 +3,7 @@
 define dso_local x86_regcallcc void @ensure_align() local_unnamed_addr #0 {
 entry:
   %b = alloca i32, align 4
-  call void asm sideeffect "nopl $0", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* nonnull %b)
+  call void asm sideeffect "nopl $0", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) nonnull %b)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/XCore/inline-asm.ll b/llvm/test/CodeGen/XCore/inline-asm.ll
index e9f5b57699976..fceb9aba9c372 100644
--- a/llvm/test/CodeGen/XCore/inline-asm.ll
+++ b/llvm/test/CodeGen/XCore/inline-asm.ll
@@ -39,7 +39,7 @@ entry:
 ; CHECK: retsp 0
 define i32 @f5() nounwind {
 entry:
-  %asmtmp = call i32 asm "ldw $0, $1", "=r,*m"(i32* @x) nounwind
+  %asmtmp = call i32 asm "ldw $0, $1", "=r,*m"(i32* elementtype(i32) @x) nounwind
   ret i32 %asmtmp
 }
 
@@ -48,6 +48,6 @@ entry:
 ; CHECK: retsp 0
 define i32 @f6() nounwind {
 entry:
-  %asmtmp = call i32 asm "ldw $0, $1", "=r,*m"(i32* @y) nounwind
+  %asmtmp = call i32 asm "ldw $0, $1", "=r,*m"(i32* elementtype(i32) @y) nounwind
   ret i32 %asmtmp
 }

diff  --git a/llvm/test/Instrumentation/AddressSanitizer/X86/asm_cpuid.ll b/llvm/test/Instrumentation/AddressSanitizer/X86/asm_cpuid.ll
index 091aad5cc47ef..289187e1f6473 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/X86/asm_cpuid.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/X86/asm_cpuid.ll
@@ -12,7 +12,7 @@ define void @MyCPUID(i32 %fxn, i32* %out) sanitize_address {
   %c.ptr = alloca i32
   %d.ptr = alloca i32
   store i32 %fxn, i32* %fxn.ptr
-  call void asm sideeffect inteldialect "xchg ebx, esi\0A\09mov eax, dword ptr $4\0A\09cpuid\0A\09mov dword ptr $0, eax\0A\09mov dword ptr $1, ebx\0A\09mov dword ptr $2, ecx\0A\09mov dword ptr $3, edx\0A\09xchg ebx, esi", "=*m,=*m,=*m,=*m,*m,~{eax},~{ebx},~{ecx},~{edx},~{esi},~{dirflag},~{fpsr},~{flags}"(i32* %a.ptr, i32* %b.ptr, i32* %c.ptr, i32* %d.ptr, i32* %fxn.ptr)
+  call void asm sideeffect inteldialect "xchg ebx, esi\0A\09mov eax, dword ptr $4\0A\09cpuid\0A\09mov dword ptr $0, eax\0A\09mov dword ptr $1, ebx\0A\09mov dword ptr $2, ecx\0A\09mov dword ptr $3, edx\0A\09xchg ebx, esi", "=*m,=*m,=*m,=*m,*m,~{eax},~{ebx},~{ecx},~{edx},~{esi},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %a.ptr, i32* elementtype(i32) %b.ptr, i32* elementtype(i32) %c.ptr, i32* elementtype(i32) %d.ptr, i32* elementtype(i32) %fxn.ptr)
 
   %a = load i32, i32* %a.ptr
   %a.out = getelementptr inbounds i32, i32* %out, i32 0

diff  --git a/llvm/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll b/llvm/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
index 0a6ba2a260f94..10af8cb79af6a 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
@@ -48,7 +48,7 @@ entry:
   %conv5 = zext i1 %cmp4 to i32
   %conv6 = trunc i32 %conv5 to i8
   store i8 %conv6, i8* %flagDA, align 1
-  call void asm sideeffect "mov\09\09\09$0,\09\09\09\09\09\09\09\09\09\09%rsi\0Amov\09\09\09$2,\09\09\09\09\09\09\09\09\09\09%rcx\0Amov\09\09\09$1,\09\09\09\09\09\09\09\09\09\09%rdi\0Amov\09\09\09$8,\09\09\09\09\09\09\09\09\09\09%rax\0A", "*m,*m,*m,*m,*m,*m,*m,*m,*m,~{rsi},~{rdi},~{rax},~{rcx},~{rdx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8** %S.addr, i8** %D.addr, i32* %pS.addr, i32* %pDiffS, i32* %pDiffD, i32* %sr, i8* %flagSA, i8* %flagDA, i32* %h.addr) #1
+  call void asm sideeffect "mov\09\09\09$0,\09\09\09\09\09\09\09\09\09\09%rsi\0Amov\09\09\09$2,\09\09\09\09\09\09\09\09\09\09%rcx\0Amov\09\09\09$1,\09\09\09\09\09\09\09\09\09\09%rdi\0Amov\09\09\09$8,\09\09\09\09\09\09\09\09\09\09%rax\0A", "*m,*m,*m,*m,*m,*m,*m,*m,*m,~{rsi},~{rdi},~{rax},~{rcx},~{rdx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8** elementtype(i8*) %S.addr, i8** elementtype(i8*) %D.addr, i32* elementtype(i32) %pS.addr, i32* elementtype(i32) %pDiffS, i32* elementtype(i32) %pDiffD, i32* elementtype(i32) %sr, i8* elementtype(i8) %flagSA, i8* elementtype(i8) %flagDA, i32* elementtype(i32) %h.addr) #1
   ret void
 }
 

diff  --git a/llvm/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll b/llvm/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll
index ec63905967f55..1552ad84cb6b3 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll
@@ -60,7 +60,7 @@ entry:
   store i8* %1, i8** %iirjump, align 4, !tbaa !0
   %sub = sub nsw i32 0, %blocksize
   store i32 %sub, i32* %blocksize.addr, align 4, !tbaa !3
-  %2 = call { i32*, i32*, i32* } asm sideeffect "1:                           \0A\09xor           %esi, %esi\0A\09xor           %ecx, %ecx\0A\09jmp  *$5                     \0A\09ff_mlp_firorder_8:            \0A\09mov   0x1c+0($0), %eax\0A\09imull 0x1c+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_7:            \0A\09mov   0x18+0($0), %eax\0A\09imull 0x18+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_6:            \0A\09mov   0x14+0($0), %eax\0A\09imull 0x14+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_5:            \0A\09mov   0x10+0($0), %eax\0A\09imull 0x10+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_4:            \0A\09mov   0x0c+0($0), %eax\0A\09imull 0x0c+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_3:            \0A\09mov   0x08+0($0), %eax\0A\09imull 0x08+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_2:            \0A\09mov   0x04+0($0), %eax\0A\09imull 0x04+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_1:            \0A\09mov   0x00+0($0), %eax\0A\09imull 0x00+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_0:\0A\09jmp  *$6                     \0A\09ff_mlp_iirorder_4:            \0A\09mov   0x0c+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x0c+4* 8($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_iirorder_3:            \0A\09mov   0x08+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x08+4* 8($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_iirorder_2:            \0A\09mov   0x04+4*(8 + (40 * 4))($0), 
%eax\0A\09imull 0x04+4* 8($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_iirorder_1:            \0A\09mov   0x00+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x00+4* 8($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_iirorder_0:\0A\09mov           %ecx, %edx\0A\09mov           %esi, %eax\0A\09movzbl        $7   , %ecx\0A\09shrd    %cl, %edx, %eax\0A\09mov  %eax  ,%edx      \0A\09add  ($2)      ,%eax     \0A\09and   $4       ,%eax     \0A\09sub   $$4       ,  $0         \0A\09mov  %eax, ($0)        \0A\09mov  %eax, ($2)        \0A\09add $$4* 8    ,  $2         \0A\09sub  %edx   ,%eax     \0A\09mov  %eax,4*(8 + (40 * 4))($0)  \0A\09incl              $3         \0A\09js 1b                        \0A\09", "=r,=r,=r,=*m,*m,*m,*m,*m,0,1,2,*m,~{eax},~{edx},~{esi},~{ecx},~{dirflag},~{fpsr},~{flags}"(i32* %blocksize.addr, i32* %mask.addr, i8** %firjump, i8** %iirjump, i32* %filter_shift.addr, i32* %state, i32* %coeff, i32* %sample_buffer, i32* %blocksize.addr) nounwind, !srcloc !4
+  %2 = call { i32*, i32*, i32* } asm sideeffect "1:                           \0A\09xor           %esi, %esi\0A\09xor           %ecx, %ecx\0A\09jmp  *$5                     \0A\09ff_mlp_firorder_8:            \0A\09mov   0x1c+0($0), %eax\0A\09imull 0x1c+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_7:            \0A\09mov   0x18+0($0), %eax\0A\09imull 0x18+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_6:            \0A\09mov   0x14+0($0), %eax\0A\09imull 0x14+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_5:            \0A\09mov   0x10+0($0), %eax\0A\09imull 0x10+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_4:            \0A\09mov   0x0c+0($0), %eax\0A\09imull 0x0c+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_3:            \0A\09mov   0x08+0($0), %eax\0A\09imull 0x08+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_2:            \0A\09mov   0x04+0($0), %eax\0A\09imull 0x04+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_1:            \0A\09mov   0x00+0($0), %eax\0A\09imull 0x00+0($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_firorder_0:\0A\09jmp  *$6                     \0A\09ff_mlp_iirorder_4:            \0A\09mov   0x0c+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x0c+4* 8($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_iirorder_3:            \0A\09mov   0x08+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x08+4* 8($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_iirorder_2:            \0A\09mov   0x04+4*(8 + (40 * 4))($0), 
%eax\0A\09imull 0x04+4* 8($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_iirorder_1:            \0A\09mov   0x00+4*(8 + (40 * 4))($0), %eax\0A\09imull 0x00+4* 8($1)       \0A\09add                %eax , %esi\0A\09adc                %edx , %ecx\0A\09ff_mlp_iirorder_0:\0A\09mov           %ecx, %edx\0A\09mov           %esi, %eax\0A\09movzbl        $7   , %ecx\0A\09shrd    %cl, %edx, %eax\0A\09mov  %eax  ,%edx      \0A\09add  ($2)      ,%eax     \0A\09and   $4       ,%eax     \0A\09sub   $$4       ,  $0         \0A\09mov  %eax, ($0)        \0A\09mov  %eax, ($2)        \0A\09add $$4* 8    ,  $2         \0A\09sub  %edx   ,%eax     \0A\09mov  %eax,4*(8 + (40 * 4))($0)  \0A\09incl              $3         \0A\09js 1b                        \0A\09", "=r,=r,=r,=*m,*m,*m,*m,*m,0,1,2,*m,~{eax},~{edx},~{esi},~{ecx},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %blocksize.addr, i32* elementtype(i32) %mask.addr, i8** elementtype(i8*) %firjump, i8** elementtype(i8*) %iirjump, i32* elementtype(i32) %filter_shift.addr, i32* %state, i32* %coeff, i32* %sample_buffer, i32* elementtype(i32) %blocksize.addr) nounwind, !srcloc !4
   ret void
 }
 

diff  --git a/llvm/test/Instrumentation/AddressSanitizer/localescape.ll b/llvm/test/Instrumentation/AddressSanitizer/localescape.ll
index f85f91e506d06..f22d5a632d7ea 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/localescape.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/localescape.ll
@@ -80,7 +80,7 @@ define void @ScaleFilterCols_SSSE3(i8* %dst_ptr, i8* %src_ptr, i32 %dst_width, i
 entry:
   %dst_width.addr = alloca i32, align 4
   store i32 %dst_width, i32* %dst_width.addr, align 4
-  %0 = call { i8*, i8*, i32, i32, i32 } asm sideeffect "", "=r,=r,={ax},=r,=r,=*rm,rm,rm,0,1,2,3,4,5,~{memory},~{cc},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{dirflag},~{fpsr},~{flags}"(i32* nonnull %dst_width.addr, i32 %x, i32 %dx, i8* %dst_ptr, i8* %src_ptr, i32 0, i32 0, i32 0, i32 %dst_width)
+  %0 = call { i8*, i8*, i32, i32, i32 } asm sideeffect "", "=r,=r,={ax},=r,=r,=*rm,rm,rm,0,1,2,3,4,5,~{memory},~{cc},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) nonnull %dst_width.addr, i32 %x, i32 %dx, i8* %dst_ptr, i8* %src_ptr, i32 0, i32 0, i32 0, i32 %dst_width)
   ret void
 }
 

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
index 749145142445b..862a028aa0188 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
@@ -180,14 +180,14 @@ entry:
 ;  asm("" : "=m" (id1), "=m" (id2) : "m" (is1), "m"(is2))
 define dso_local void @f_2i_2o_mem() sanitize_memory {
 entry:
-  call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32* @id2, i32* @is1, i32* @is2)
+  call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id1, i32* elementtype(i32) @id2, i32* elementtype(i32) @is1, i32* elementtype(i32) @is2)
   ret void
 }
 
 ; CHECK-LABEL: @f_2i_2o_mem
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id1{{.*}}, i64 4)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id2{{.*}}, i64 4)
-; CHECK: call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32* @id2, i32* @is1, i32* @is2)
+; CHECK: call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id1, i32* elementtype(i32) @id2, i32* elementtype(i32) @is1, i32* elementtype(i32) @is2)
 
 
 ; Same input and output passed as both memory and register:
@@ -195,7 +195,7 @@ entry:
 define dso_local void @f_1i_1o_memreg() sanitize_memory {
 entry:
   %0 = load i32, i32* @is1, align 4
-  %1 = call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32 %0, i32* @is1)
+  %1 = call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id1, i32 %0, i32* elementtype(i32) @is1)
   store i32 %1, i32* @id1, align 4
   ret void
 }
@@ -204,14 +204,14 @@ entry:
 ; CHECK: [[IS1_F7:%.*]] = load i32, i32* @is1, align 4
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id1{{.*}}, i64 4)
 ; CHECK: call void @__msan_warning
-; CHECK: call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32 [[IS1_F7]], i32* @is1)
+; CHECK: call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id1, i32 [[IS1_F7]], i32* elementtype(i32) @is1)
 
 
 ; Three outputs, first and last returned via regs, second via mem:
 ;  asm("" : "=r" (id1), "=m"(id2), "=r" (id3):);
 define dso_local void @f_3o_reg_mem_reg() sanitize_memory {
 entry:
-  %0 = call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(i32* @id2)
+  %0 = call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id2)
   %asmresult = extractvalue { i32, i32 } %0, 0
   %asmresult1 = extractvalue { i32, i32 } %0, 1
   store i32 %asmresult, i32* @id1, align 4
@@ -221,7 +221,7 @@ entry:
 
 ; CHECK-LABEL: @f_3o_reg_mem_reg
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id2{{.*}}), i64 4)
-; CHECK: call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(i32* @id2)
+; CHECK: call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id2)
 
 
; Three inputs and three outputs of different types: a pair, a char, a function pointer.
@@ -232,7 +232,7 @@ entry:
   %0 = load i64, i64* bitcast (%struct.pair* @pair1 to i64*), align 4
   %1 = load i8, i8* @c1, align 1
   %2 = load i8* (i8*, i8*, i32)*, i8* (i8*, i8*, i32)** @memcpy_s1, align 8
-  %3 = call { i8, i8* (i8*, i8*, i32)* } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(%struct.pair* @pair2, i64 %0, i8 %1, i8* (i8*, i8*, i32)* %2)
+  %3 = call { i8, i8* (i8*, i8*, i32)* } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(%struct.pair* elementtype(%struct.pair) @pair2, i64 %0, i8 %1, i8* (i8*, i8*, i32)* %2)
   %asmresult = extractvalue { i8, i8* (i8*, i8*, i32)* } %3, 0
   %asmresult1 = extractvalue { i8, i8* (i8*, i8*, i32)* } %3, 1
   store i8 %asmresult, i8* @c2, align 1
@@ -248,14 +248,14 @@ entry:
 ; CHECK: call void @__msan_warning
 ; CHECK: call void @__msan_warning
 ; CHECK: call void @__msan_warning
-; CHECK: call { i8, i8* (i8*, i8*, i32)* } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(%struct.pair* @pair2, {{.*}}[[PAIR1_F9]], i8 [[C1_F9]], {{.*}} [[MEMCPY_S1_F9]])
+; CHECK: call { i8, i8* (i8*, i8*, i32)* } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(%struct.pair* elementtype(%struct.pair) @pair2, {{.*}}[[PAIR1_F9]], i8 [[C1_F9]], {{.*}} [[MEMCPY_S1_F9]])
 
; Three inputs and three outputs of different types: a pair, a char, a function pointer.
 ; Everything is passed in memory:
 ;  asm("" : "=m" (pair2), "=m" (c2), "=m" (memcpy_d1) : "m"(pair1), "m"(c1), "m"(memcpy_s1));
 define dso_local void @f_3i_3o_complex_mem() sanitize_memory {
 entry:
-  call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(%struct.pair* @pair2, i8* @c2, i8* (i8*, i8*, i32)** @memcpy_d1, %struct.pair* @pair1, i8* @c1, i8* (i8*, i8*, i32)** @memcpy_s1)
+  call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(%struct.pair* elementtype(%struct.pair) @pair2, i8* elementtype(i8) @c2, i8* (i8*, i8*, i32)** elementtype(i8* (i8*, i8*, i32)*) @memcpy_d1, %struct.pair* elementtype(%struct.pair) @pair1, i8* elementtype(i8) @c1, i8* (i8*, i8*, i32)** elementtype(i8* (i8*, i8*, i32)*) @memcpy_s1)
   ret void
 }
 
@@ -263,7 +263,7 @@ entry:
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@pair2{{.*}}, i64 8)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@c2{{.*}}, i64 1)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@memcpy_d1{{.*}}, i64 8)
-; CHECK: call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(%struct.pair* @pair2, i8* @c2, i8* (i8*, i8*, i32)** @memcpy_d1, %struct.pair* @pair1, i8* @c1, i8* (i8*, i8*, i32)** @memcpy_s1)
+; CHECK: call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(%struct.pair* elementtype(%struct.pair) @pair2, i8* elementtype(i8) @c2, i8* (i8*, i8*, i32)** elementtype(i8* (i8*, i8*, i32)*) @memcpy_d1, %struct.pair* elementtype(%struct.pair) @pair1, i8* elementtype(i8) @c1, i8* (i8*, i8*, i32)** elementtype(i8* (i8*, i8*, i32)*) @memcpy_s1)
 
 
 ; A simple asm goto construct to check that callbr is handled correctly:

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
index 22c206faa0e96..44af78c1b7146 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
@@ -46,7 +46,7 @@ entry:
   store i64 0, i64* %nr, align 8
   store i64* %value, i64** %addr, align 8
   %0 = load i64, i64* %nr, align 8
-  call void asm "btsq $2, $1; setc $0", "=*qm,=*m,Ir,~{dirflag},~{fpsr},~{flags}"(i8* %bit, i64** %addr, i64 %0)
+  call void asm "btsq $2, $1; setc $0", "=*qm,=*m,Ir,~{dirflag},~{fpsr},~{flags}"(i8* elementtype(i8) %bit, i64** elementtype(i64*) %addr, i64 %0)
   %1 = load i8, i8* %bit, align 1
   %tobool = trunc i8 %1 to i1
   br i1 %tobool, label %if.then, label %if.else

diff  --git a/llvm/test/Instrumentation/SanitizerCoverage/seh.ll b/llvm/test/Instrumentation/SanitizerCoverage/seh.ll
index 8d396723e6328..b1b82313ef771 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/seh.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/seh.ll
@@ -75,7 +75,7 @@ define void @ScaleFilterCols_SSSE3(i8* %dst_ptr, i8* %src_ptr, i32 %dst_width, i
 entry:
   %dst_width.addr = alloca i32, align 4
   store i32 %dst_width, i32* %dst_width.addr, align 4
-  %0 = call { i8*, i8*, i32, i32, i32 } asm sideeffect "", "=r,=r,={ax},=r,=r,=*rm,rm,rm,0,1,2,3,4,5,~{memory},~{cc},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{dirflag},~{fpsr},~{flags}"(i32* nonnull %dst_width.addr, i32 %x, i32 %dx, i8* %dst_ptr, i8* %src_ptr, i32 0, i32 0, i32 0, i32 %dst_width)
+  %0 = call { i8*, i8*, i32, i32, i32 } asm sideeffect "", "=r,=r,={ax},=r,=r,=*rm,rm,rm,0,1,2,3,4,5,~{memory},~{cc},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) nonnull %dst_width.addr, i32 %x, i32 %dx, i8* %dst_ptr, i8* %src_ptr, i32 0, i32 0, i32 0, i32 %dst_width)
   ret void
 }
 

diff  --git a/llvm/test/Linker/inlineasm.ll b/llvm/test/Linker/inlineasm.ll
index d77f3a715b13c..de1902e7e970c 100644
--- a/llvm/test/Linker/inlineasm.ll
+++ b/llvm/test/Linker/inlineasm.ll
@@ -11,7 +11,7 @@ target triple = "i386-apple-macosx10.6.8"
 
 define void @f(%T* %x) nounwind ssp {
 entry:
-call void asm sideeffect "", "=*m"(%T* %x) nounwind
+call void asm sideeffect "", "=*m"(%T* elementtype(%T) %x) nounwind
 unreachable
 }
 

diff  --git a/llvm/test/MC/AsmParser/pr28805.ll b/llvm/test/MC/AsmParser/pr28805.ll
index f370ce73cc6eb..4bfa82297c299 100644
--- a/llvm/test/MC/AsmParser/pr28805.ll
+++ b/llvm/test/MC/AsmParser/pr28805.ll
@@ -8,7 +8,7 @@ entry:
   %res = alloca i32, align 4
   %0 = bitcast i32* %res to i8*
   store i32 -1, i32* %res, align 4
-  call void asm sideeffect inteldialect ".byte 0xC7\0A\09.byte 0xF8\0A\09.byte 2\0A\09.byte 0\0A\09.byte 0\0A\09.byte 0\0A\09jmp .L__MSASMLABEL_.0__L2\0A\09mov dword ptr $0, eax\0A\09.L__MSASMLABEL_.0__L2:", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* nonnull %res)
+  call void asm sideeffect inteldialect ".byte 0xC7\0A\09.byte 0xF8\0A\09.byte 2\0A\09.byte 0\0A\09.byte 0\0A\09.byte 0\0A\09jmp .L__MSASMLABEL_.0__L2\0A\09mov dword ptr $0, eax\0A\09.L__MSASMLABEL_.0__L2:", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) nonnull %res)
   %1 = load i32, i32* %res, align 4
   ret i32 %1
 }

diff  --git a/llvm/test/Transforms/FunctionImport/Inputs/inlineasm.ll b/llvm/test/Transforms/FunctionImport/Inputs/inlineasm.ll
index 1ffc5db5f8b0d..aa9029fd12038 100644
--- a/llvm/test/Transforms/FunctionImport/Inputs/inlineasm.ll
+++ b/llvm/test/Transforms/FunctionImport/Inputs/inlineasm.ll
@@ -6,6 +6,6 @@ entry:
   %v.addr = alloca i64*, align 8
   store i64* %v, i64** %v.addr, align 8
   %0 = load i64*, i64** %v.addr, align 8
-  call void asm sideeffect "movzbl     myvar(%rip), %eax\0A\09movq %rax, $0\0A\09", "=*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i64* %0) #1
+  call void asm sideeffect "movzbl     myvar(%rip), %eax\0A\09movq %rax, $0\0A\09", "=*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %0) #1
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll b/llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll
index 482c4efcf09cc..38b3cdded1216 100644
--- a/llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll
+++ b/llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll
@@ -15,7 +15,7 @@ declare void @c()
 define void @f() {
 ; CHECK-LABEL: define void @f()
 entry:
-  call void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( i32* null, i32* null ) nounwind
+  call void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( i32* elementtype( i32) null, i32* elementtype(i32) null ) nounwind
 ; CHECK: call void asm
   unreachable
 }

diff  --git a/llvm/test/Transforms/Inline/devirtualize-4.ll b/llvm/test/Transforms/Inline/devirtualize-4.ll
index 2205dae7aa238..45867f034185a 100644
--- a/llvm/test/Transforms/Inline/devirtualize-4.ll
+++ b/llvm/test/Transforms/Inline/devirtualize-4.ll
@@ -103,7 +103,7 @@ declare dso_local void @__cxa_pure_virtual() unnamed_addr
 
 define linkonce_odr dso_local void @_Z13DoNotOptimizeIP4ImplEvRKT_(%class.Impl** dereferenceable(8) %var) local_unnamed_addr {
 entry:
-  call void asm sideeffect "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(%class.Impl** nonnull %var, %class.Impl** nonnull %var)
+  call void asm sideeffect "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(%class.Impl** elementtype(%class.Impl*) nonnull %var, %class.Impl** elementtype(%class.Impl*) nonnull %var)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index be3a7b1ac8040..b6b6f02d15959 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -552,7 +552,7 @@ define i32 @test27(%struct.compat_siginfo* %to, %struct.siginfo_t* %from) {
 ; CHECK-NEXT:    [[T349:%.*]] = getelementptr [[STRUCT_SIGINFO_T:%.*]], %struct.siginfo_t* [[T344]], i64 0, i32 3, i32 0, i32 3, i32 0
 ; CHECK-NEXT:    [[T349350:%.*]] = bitcast i8** [[T349]] to i32*
 ; CHECK-NEXT:    [[T351:%.*]] = load i32, i32* [[T349350]], align 8
-; CHECK-NEXT:    [[T360:%.*]] = call i32 asm sideeffect "...", "=r,ir,*m,i,0,~{dirflag},~{fpsr},~{flags}"(i32 [[T351]], %struct.__large_struct* null, i32 -14, i32 0) #[[ATTR0:[0-9]+]]
+; CHECK-NEXT:    [[T360:%.*]] = call i32 asm sideeffect "...", "=r,ir,*m,i,0,~{dirflag},~{fpsr},~{flags}"(i32 [[T351]], %struct.__large_struct* elementtype(%struct.__large_struct) null, i32 -14, i32 0) #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    unreachable
 ;
 entry:
@@ -566,8 +566,7 @@ entry:
   %t349350 = bitcast i8** %t349 to i32*
   %t351 = load i32, i32* %t349350, align 8
   %t360 = call i32 asm sideeffect "...",
-  "=r,ir,*m,i,0,~{dirflag},~{fpsr},~{flags}"( i32 %t351,
-  %struct.__large_struct* null, i32 -14, i32 0 )
+  "=r,ir,*m,i,0,~{dirflag},~{fpsr},~{flags}"( i32 %t351, %struct.__large_struct* elementtype(%struct.__large_struct) null, i32 -14, i32 0 )
   unreachable
 }
 

diff  --git a/llvm/test/Verifier/inline-asm-indirect-operand.ll b/llvm/test/Verifier/inline-asm-indirect-operand.ll
index 4be6f50b9ef54..1f78c171dccb3 100644
--- a/llvm/test/Verifier/inline-asm-indirect-operand.ll
+++ b/llvm/test/Verifier/inline-asm-indirect-operand.ll
@@ -20,9 +20,9 @@ define void @not_pointer_arg(i32 %p, i32 %x) {
 }
 
 ; CHECK: Elementtype attribute can only be applied for indirect constraints
-; CHECK-NEXT: call void asm "addl $1, $0", "=*rm,r"(i32* %p, i32* elementtype(i32) %x)
+; CHECK-NEXT: call void asm "addl $1, $0", "=*rm,r"(i32* elementtype(i32) %p, i32* elementtype(i32) %x)
 define void @not_indirect(i32* %p, i32* %x) {
-	call void asm "addl $1, $0", "=*rm,r"(i32* %p, i32* elementtype(i32) %x)
+	call void asm "addl $1, $0", "=*rm,r"(i32* elementtype(i32) %p, i32* elementtype(i32) %x)
   ret void
 }
 


        


More information about the llvm-commits mailing list