[clang] e6bf48d - [X86] Don't request 0x90 nop filling in p2align directives (#110134)

via cfe-commits cfe-commits at lists.llvm.org
Wed Oct 2 03:14:15 PDT 2024


Author: Jeremy Morse
Date: 2024-10-02T11:14:05+01:00
New Revision: e6bf48d11047e970cb24554a01b65b566d6b5d22

URL: https://github.com/llvm/llvm-project/commit/e6bf48d11047e970cb24554a01b65b566d6b5d22
DIFF: https://github.com/llvm/llvm-project/commit/e6bf48d11047e970cb24554a01b65b566d6b5d22.diff

LOG: [X86] Don't request 0x90 nop filling in p2align directives (#110134)

As of rev ea222be0d, LLVM's assembler actually honours the "fill value"
part of p2align directives. X86 printed these directives with an explicit
0x90 fill value, which isn't what it really wants: we want multi-byte
nops for .text padding. Since ea222be0d, compiling via a textual assembly
file therefore produces single-byte nop padding, whereas the built-in
assembler produces multi-byte nops. This divergent behaviour is
undesirable.

To fix: don't set the fill-value field (TextAlignFillValue) for X86,
which lets the assembler pick multi-byte nops. Test that we get the same
multi-byte padding whether compiling via textual assembly or directly to
an object file: same-align-bytes-with-llasm-llobj.ll is added to that
effect, and numerous other tests are updated to drop check lines for the
explicit fill value.
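As an illustration of the divergence, here is a minimal sketch (the exact
multi-byte nop encoding chosen depends on the subtarget):

    .text
    ret
    .p2align 4, 0x90   # explicit fill: padded with single-byte 0x90 nops
    ret

    .p2align 4         # no fill value: assembler may pick multi-byte nops
    ret

Assembling this text with LLVM's assembler since ea222be0d pads the first
alignment with a run of one-byte nops, while for the second it is free to
use longer nop encodings (e.g. nopw/nopl forms), matching what the
built-in assembler emits when compiling directly to an object file.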

Added: 
    llvm/test/CodeGen/X86/same-align-bytes-with-llasm-llobj.ll

Modified: 
    clang/test/CodeGen/align-loops.c
    llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
    llvm/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll
    llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
    llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
    llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
    llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
    llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll
    llvm/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll
    llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
    llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll
    llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
    llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
    llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
    llvm/test/CodeGen/X86/2009-08-12-badswitch.ll
    llvm/test/CodeGen/X86/2020_12_02_decrementing_loop.ll
    llvm/test/CodeGen/X86/AMX/amx-across-func.ll
    llvm/test/CodeGen/X86/AMX/amx-ldtilecfg-insert.ll
    llvm/test/CodeGen/X86/AMX/amx-lower-tile-copy.ll
    llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll
    llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
    llvm/test/CodeGen/X86/MachineSink-Issue98477.ll
    llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
    llvm/test/CodeGen/X86/PR71178-register-coalescer-crash.ll
    llvm/test/CodeGen/X86/SwitchLowering.ll
    llvm/test/CodeGen/X86/addr-mode-matcher-2.ll
    llvm/test/CodeGen/X86/align-basic-block-sections.mir
    llvm/test/CodeGen/X86/and-sink.ll
    llvm/test/CodeGen/X86/apx/push2-pop2.ll
    llvm/test/CodeGen/X86/apx/setzucc.ll
    llvm/test/CodeGen/X86/assertzext-demanded.ll
    llvm/test/CodeGen/X86/atom-pad-short-functions.ll
    llvm/test/CodeGen/X86/atomic-bit-test.ll
    llvm/test/CodeGen/X86/atomic-flags.ll
    llvm/test/CodeGen/X86/atomic-idempotent.ll
    llvm/test/CodeGen/X86/atomic-minmax-i6432.ll
    llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
    llvm/test/CodeGen/X86/atomic-non-integer.ll
    llvm/test/CodeGen/X86/atomic-rm-bit-test-64.ll
    llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
    llvm/test/CodeGen/X86/atomic-xor.ll
    llvm/test/CodeGen/X86/atomic128.ll
    llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
    llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
    llvm/test/CodeGen/X86/atomicrmw-uinc-udec-wrap.ll
    llvm/test/CodeGen/X86/avx-cmp.ll
    llvm/test/CodeGen/X86/avx-vbroadcast.ll
    llvm/test/CodeGen/X86/avx-vzeroupper.ll
    llvm/test/CodeGen/X86/avx2-vbroadcast.ll
    llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll
    llvm/test/CodeGen/X86/avx512-i1test.ll
    llvm/test/CodeGen/X86/avx512vnni-combine.ll
    llvm/test/CodeGen/X86/avxvnni-combine.ll
    llvm/test/CodeGen/X86/block-placement.ll
    llvm/test/CodeGen/X86/break-false-dep.ll
    llvm/test/CodeGen/X86/callbr-asm-blockplacement.ll
    llvm/test/CodeGen/X86/cast-vsel.ll
    llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll
    llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll
    llvm/test/CodeGen/X86/coalesce-esp.ll
    llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness-reduced.ll
    llvm/test/CodeGen/X86/coalescer-commute1.ll
    llvm/test/CodeGen/X86/coalescer-commute4.ll
    llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
    llvm/test/CodeGen/X86/code-align-loops.ll
    llvm/test/CodeGen/X86/code_placement_align_all.ll
    llvm/test/CodeGen/X86/combine-pmuldq.ll
    llvm/test/CodeGen/X86/constant-pool-sharing.ll
    llvm/test/CodeGen/X86/copy-eflags.ll
    llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
    llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
    llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
    llvm/test/CodeGen/X86/fdiv-combine.ll
    llvm/test/CodeGen/X86/fixup-lea.ll
    llvm/test/CodeGen/X86/fma-commute-loop.ll
    llvm/test/CodeGen/X86/fma-intrinsics-phi-213-to-231.ll
    llvm/test/CodeGen/X86/fold-call-3.ll
    llvm/test/CodeGen/X86/fold-loop-of-urem.ll
    llvm/test/CodeGen/X86/fp-une-cmp.ll
    llvm/test/CodeGen/X86/hoist-invariant-load.ll
    llvm/test/CodeGen/X86/i128-mul.ll
    llvm/test/CodeGen/X86/i386-shrink-wrapping.ll
    llvm/test/CodeGen/X86/icmp-shift-opt.ll
    llvm/test/CodeGen/X86/ifunc-asm.ll
    llvm/test/CodeGen/X86/innermost-loop-alignment.ll
    llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll
    llvm/test/CodeGen/X86/issue76416.ll
    llvm/test/CodeGen/X86/kcfi-patchable-function-prefix.ll
    llvm/test/CodeGen/X86/kcfi.ll
    llvm/test/CodeGen/X86/known-bits.ll
    llvm/test/CodeGen/X86/lea-opt-cse2.ll
    llvm/test/CodeGen/X86/lea-opt-cse4.ll
    llvm/test/CodeGen/X86/licm-symbol.ll
    llvm/test/CodeGen/X86/loop-search.ll
    llvm/test/CodeGen/X86/loop-strength-reduce5.ll
    llvm/test/CodeGen/X86/loop-strength-reduce7.ll
    llvm/test/CodeGen/X86/lsr-addrecloops.ll
    llvm/test/CodeGen/X86/lsr-interesting-step.ll
    llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
    llvm/test/CodeGen/X86/lsr-negative-stride.ll
    llvm/test/CodeGen/X86/lsr-sort.ll
    llvm/test/CodeGen/X86/lsr-static-addr.ll
    llvm/test/CodeGen/X86/machine-cp.ll
    llvm/test/CodeGen/X86/machine-cse.ll
    llvm/test/CodeGen/X86/madd.ll
    llvm/test/CodeGen/X86/masked-iv-safe.ll
    llvm/test/CodeGen/X86/masked-iv-unsafe.ll
    llvm/test/CodeGen/X86/merge_store.ll
    llvm/test/CodeGen/X86/min-legal-vector-width.ll
    llvm/test/CodeGen/X86/mmx-arith.ll
    llvm/test/CodeGen/X86/negative-stride-fptosi-user.ll
    llvm/test/CodeGen/X86/optimize-max-0.ll
    llvm/test/CodeGen/X86/optimize-max-1.ll
    llvm/test/CodeGen/X86/optimize-max-2.ll
    llvm/test/CodeGen/X86/or-address.ll
    llvm/test/CodeGen/X86/overflowing-iv-codegen.ll
    llvm/test/CodeGen/X86/patchable-prologue.ll
    llvm/test/CodeGen/X86/pcsections-atomics.ll
    llvm/test/CodeGen/X86/peep-test-0.ll
    llvm/test/CodeGen/X86/peep-test-1.ll
    llvm/test/CodeGen/X86/peephole-copy.ll
    llvm/test/CodeGen/X86/pic-load-remat.ll
    llvm/test/CodeGen/X86/postalloc-coalescing.ll
    llvm/test/CodeGen/X86/pr14314.ll
    llvm/test/CodeGen/X86/pr22338.ll
    llvm/test/CodeGen/X86/pr30562.ll
    llvm/test/CodeGen/X86/pr32108.ll
    llvm/test/CodeGen/X86/pr33290.ll
    llvm/test/CodeGen/X86/pr33747.ll
    llvm/test/CodeGen/X86/pr37916.ll
    llvm/test/CodeGen/X86/pr38185.ll
    llvm/test/CodeGen/X86/pr38217.ll
    llvm/test/CodeGen/X86/pr38539.ll
    llvm/test/CodeGen/X86/pr38795.ll
    llvm/test/CodeGen/X86/pr42565.ll
    llvm/test/CodeGen/X86/pr42909.ll
    llvm/test/CodeGen/X86/pr43529.ll
    llvm/test/CodeGen/X86/pr44140.ll
    llvm/test/CodeGen/X86/pr44412.ll
    llvm/test/CodeGen/X86/pr47874.ll
    llvm/test/CodeGen/X86/pr49393.ll
    llvm/test/CodeGen/X86/pr49451.ll
    llvm/test/CodeGen/X86/pr50374.ll
    llvm/test/CodeGen/X86/pr50782.ll
    llvm/test/CodeGen/X86/pr51371.ll
    llvm/test/CodeGen/X86/pr5145.ll
    llvm/test/CodeGen/X86/pr51615.ll
    llvm/test/CodeGen/X86/pr53842.ll
    llvm/test/CodeGen/X86/pr53990-incorrect-machine-sink.ll
    llvm/test/CodeGen/X86/pr55648.ll
    llvm/test/CodeGen/X86/pr61923.ll
    llvm/test/CodeGen/X86/pr63108.ll
    llvm/test/CodeGen/X86/pr63692.ll
    llvm/test/CodeGen/X86/pr65895.ll
    llvm/test/CodeGen/X86/pr68539.ll
    llvm/test/CodeGen/X86/pr93000.ll
    llvm/test/CodeGen/X86/promote-sra-by-itself.ll
    llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
    llvm/test/CodeGen/X86/rdrand.ll
    llvm/test/CodeGen/X86/retpoline.ll
    llvm/test/CodeGen/X86/reverse_branches.ll
    llvm/test/CodeGen/X86/sad.ll
    llvm/test/CodeGen/X86/saddo-redundant-add.ll
    llvm/test/CodeGen/X86/scalar_widen_div.ll
    llvm/test/CodeGen/X86/setcc-lowering.ll
    llvm/test/CodeGen/X86/setcc-non-simple-type.ll
    llvm/test/CodeGen/X86/shift-parts.ll
    llvm/test/CodeGen/X86/sink-out-of-loop.ll
    llvm/test/CodeGen/X86/speculative-load-hardening.ll
    llvm/test/CodeGen/X86/split-extend-vector-inreg.ll
    llvm/test/CodeGen/X86/sse-domains.ll
    llvm/test/CodeGen/X86/stack-coloring-wineh.ll
    llvm/test/CodeGen/X86/switch.ll
    llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
    llvm/test/CodeGen/X86/tail-dup-multiple-latch-loop.ll
    llvm/test/CodeGen/X86/tail-dup-partial.ll
    llvm/test/CodeGen/X86/tail-dup-repeat.ll
    llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
    llvm/test/CodeGen/X86/tls-loads-control3.ll
    llvm/test/CodeGen/X86/trunc-store.ll
    llvm/test/CodeGen/X86/twoaddr-coalesce.ll
    llvm/test/CodeGen/X86/twoaddr-lea.ll
    llvm/test/CodeGen/X86/unaligned-load.ll
    llvm/test/CodeGen/X86/undef-label.ll
    llvm/test/CodeGen/X86/vec_setcc-2.ll
    llvm/test/CodeGen/X86/vector-fshl-128.ll
    llvm/test/CodeGen/X86/vector-fshl-256.ll
    llvm/test/CodeGen/X86/vector-pack-128.ll
    llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll
    llvm/test/CodeGen/X86/vector-shuffle-combining.ll
    llvm/test/CodeGen/X86/vselect-avx.ll
    llvm/test/CodeGen/X86/widen_arith-1.ll
    llvm/test/CodeGen/X86/widen_arith-2.ll
    llvm/test/CodeGen/X86/widen_arith-3.ll
    llvm/test/CodeGen/X86/widen_arith-4.ll
    llvm/test/CodeGen/X86/widen_arith-5.ll
    llvm/test/CodeGen/X86/widen_arith-6.ll
    llvm/test/CodeGen/X86/widen_cast-1.ll
    llvm/test/CodeGen/X86/widen_cast-2.ll
    llvm/test/CodeGen/X86/widen_cast-4.ll
    llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
    llvm/test/CodeGen/X86/x86-win64-shrink-wrapping.ll
    llvm/test/CodeGen/X86/xor.ll
    llvm/test/CodeGen/X86/xray-attribute-instrumentation.ll
    llvm/test/CodeGen/X86/xray-custom-log.ll
    llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-entry.ll
    llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-exit.ll
    llvm/test/CodeGen/X86/xray-selective-instrumentation.ll
    llvm/test/CodeGen/X86/xray-tail-call-sled.ll
    llvm/test/DebugInfo/COFF/pieces.ll
    llvm/test/DebugInfo/X86/header.ll
    llvm/test/DebugInfo/X86/loop-align-debug.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/2011-11-29-postincphi.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/macro-fuse-cmp.ll

Removed: 
    


################################################################################
diff  --git a/clang/test/CodeGen/align-loops.c b/clang/test/CodeGen/align-loops.c
index 7744b4ab0f24ec..25f8cded59f323 100644
--- a/clang/test/CodeGen/align-loops.c
+++ b/clang/test/CodeGen/align-loops.c
@@ -5,8 +5,8 @@
 // RUN: %clang_cc1 -triple=x86_64 -S %s -falign-loops=32 -O -o - | FileCheck %s --check-prefixes=CHECK,CHECK_32
 
 // CHECK-LABEL: foo:
-// CHECK_8:       .p2align 3, 0x90
-// CHECK_32:      .p2align 5, 0x90
+// CHECK_8:       .p2align 3
+// CHECK_32:      .p2align 5
 
 void bar(void);
 void foo(void) {

diff  --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index 3ce044387ada29..47eb617c06ac5b 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -43,8 +43,6 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
 
   AssemblerDialect = AsmWriterFlavor;
 
-  TextAlignFillValue = 0x90;
-
   if (!is64Bit)
     Data64bitsDirective = nullptr;       // we can't emit a 64-bit unit
 
@@ -93,8 +91,6 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
 
   AssemblerDialect = AsmWriterFlavor;
 
-  TextAlignFillValue = 0x90;
-
   // Debug Information
   SupportsDebugInformation = true;
 
@@ -132,8 +128,6 @@ X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) {
 
   AssemblerDialect = AsmWriterFlavor;
 
-  TextAlignFillValue = 0x90;
-
   AllowAtInName = true;
 }
 
@@ -167,7 +161,5 @@ X86MCAsmInfoGNUCOFF::X86MCAsmInfoGNUCOFF(const Triple &Triple) {
 
   AssemblerDialect = AsmWriterFlavor;
 
-  TextAlignFillValue = 0x90;
-
   AllowAtInName = true;
 }

diff  --git a/llvm/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll b/llvm/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll
index ac749bccb3c55d..f3bdf561a94569 100644
--- a/llvm/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll
+++ b/llvm/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll
@@ -7,7 +7,7 @@ define i32 @foo(i32 %t, i32 %C) {
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    decl %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %cond_true
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    incl %eax

diff  --git a/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll b/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
index 46dddd8fcd851a..1e5ee2f71d9b47 100644
--- a/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
+++ b/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
@@ -27,7 +27,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    js .LBB0_14
 ; CHECK-NEXT:  # %bb.12:
 ; CHECK-NEXT:    xorl %r8d, %r8d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_13: # %a25b
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %r8b, %r8b
@@ -38,7 +38,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    jne .LBB0_1
 ; CHECK-NEXT:  # %bb.15:
 ; CHECK-NEXT:    xorl %r8d, %r8d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_16: # %a25b140
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %r8b, %r8b
@@ -56,7 +56,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    movb $1, %r10b
 ; CHECK-NEXT:    jmp .LBB0_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_9: # %b1606
 ; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; CHECK-NEXT:    testb %r9b, %r9b
@@ -83,7 +83,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; CHECK-NEXT:    testq %rdx, %rdx
 ; CHECK-NEXT:    js .LBB0_18
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_36: # %a30b
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -93,7 +93,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; CHECK-NEXT:    testb %r10b, %r10b
 ; CHECK-NEXT:    jne .LBB0_4
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_19: # %a30b294
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -115,7 +115,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; CHECK-NEXT:    testb %r8b, %r8b
 ; CHECK-NEXT:    jne .LBB0_8
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_33: # %a74b
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -128,7 +128,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; CHECK-NEXT:    testl %eax, %eax
 ; CHECK-NEXT:    js .LBB0_9
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_35: # %a97b
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -142,7 +142,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    testb %r9b, %r9b
 ; CHECK-NEXT:    jne .LBB0_35
 ; CHECK-NEXT:    jmp .LBB0_9
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_21: # %b377
 ; CHECK-NEXT:    # in Loop: Header=BB0_20 Depth=2
 ; CHECK-NEXT:    testb %r9b, %r9b
@@ -153,7 +153,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # Child Loop BB0_37 Depth 3
 ; CHECK-NEXT:    testq %rsi, %rsi
 ; CHECK-NEXT:    js .LBB0_21
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_37: # %a35b
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # Parent Loop BB0_20 Depth=2
@@ -161,7 +161,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    testb %r9b, %r9b
 ; CHECK-NEXT:    je .LBB0_37
 ; CHECK-NEXT:    jmp .LBB0_21
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_27: # %b1016
 ; CHECK-NEXT:    # in Loop: Header=BB0_25 Depth=2
 ; CHECK-NEXT:    testq %rsi, %rsi
@@ -173,7 +173,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # Child Loop BB0_28 Depth 3
 ; CHECK-NEXT:    testq %rdx, %rdx
 ; CHECK-NEXT:    js .LBB0_26
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_38: # %a53b
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # Parent Loop BB0_25 Depth=2
@@ -184,7 +184,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # in Loop: Header=BB0_25 Depth=2
 ; CHECK-NEXT:    testb %r10b, %r10b
 ; CHECK-NEXT:    jne .LBB0_27
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_28: # %a53b1019
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # Parent Loop BB0_25 Depth=2
@@ -192,7 +192,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    testq %rdx, %rdx
 ; CHECK-NEXT:    jle .LBB0_28
 ; CHECK-NEXT:    jmp .LBB0_27
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_31: # %b1263
 ; CHECK-NEXT:    # in Loop: Header=BB0_29 Depth=2
 ; CHECK-NEXT:    testq %rdx, %rdx
@@ -204,7 +204,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # Child Loop BB0_32 Depth 3
 ; CHECK-NEXT:    testq %rsi, %rsi
 ; CHECK-NEXT:    js .LBB0_30
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_39: # %a63b
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # Parent Loop BB0_29 Depth=2
@@ -215,7 +215,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # in Loop: Header=BB0_29 Depth=2
 ; CHECK-NEXT:    testq %rsi, %rsi
 ; CHECK-NEXT:    jle .LBB0_31
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_32: # %a63b1266
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # Parent Loop BB0_29 Depth=2
@@ -223,7 +223,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    testq %rsi, %rsi
 ; CHECK-NEXT:    jle .LBB0_32
 ; CHECK-NEXT:    jmp .LBB0_31
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_24: # %b712
 ; CHECK-NEXT:    # in Loop: Header=BB0_22 Depth=2
 ; CHECK-NEXT:    testb %r9b, %r9b
@@ -234,7 +234,7 @@ define dso_local void @foo(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5)
 ; CHECK-NEXT:    # Child Loop BB0_23 Depth 3
 ; CHECK-NEXT:    testq %rdx, %rdx
 ; CHECK-NEXT:    js .LBB0_24
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_23: # %a45b
 ; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
 ; CHECK-NEXT:    # Parent Loop BB0_22 Depth=2

diff  --git a/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll b/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
index f21aaca7ca5f17..49e2bf207e52a8 100644
--- a/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
+++ b/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
@@ -15,7 +15,7 @@ define void @foo(ptr %buf, i32 %size, i32 %col, ptr %p) nounwind {
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; CHECK-NEXT:    addl $8, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_2: ## %bb
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl (%esi), %edi

diff  --git a/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll b/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
index 2f75ab29e708fe..cfb3e508576dda 100644
--- a/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
+++ b/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
@@ -9,7 +9,7 @@ define signext i16 @f(ptr %bp, ptr %ss)   {
 ; CHECK-NEXT:    .cfi_offset %esi, -8
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %cond_next127
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl (%eax), %edx

diff  --git a/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll b/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
index f9996e2df50e0e..6ebb97d63e7c65 100644
--- a/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
+++ b/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
@@ -6,7 +6,7 @@ define signext   i16 @t(ptr %qmatrix, ptr %dct, ptr %acBaseTable, ptr %acExtTabl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %cond_next127
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %eax, %edx

diff  --git a/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll b/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll
index 750d06d9e6031f..bbce246a5d394a 100644
--- a/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll
+++ b/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll
@@ -14,7 +14,7 @@ define float @foo(ptr %x, ptr %y, i32 %c) nounwind {
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %bb18
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    xorps %xmm1, %xmm1

diff  --git a/llvm/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll b/llvm/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll
index 68566c7b370979..8d690ba06e3bd6 100644
--- a/llvm/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll
+++ b/llvm/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll
@@ -16,7 +16,7 @@ define fastcc void @mp_sqrt(i32 %n, i32 %radix, ptr %in, ptr %out, ptr %tmp1, pt
 ; CHECK-NEXT:    movb $1, %cl
 ; CHECK-NEXT:    movl $1, %ebx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb.i5
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %ecx, %eax
@@ -37,7 +37,7 @@ define fastcc void @mp_sqrt(i32 %n, i32 %radix, ptr %in, ptr %out, ptr %tmp1, pt
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    xorpd %xmm1, %xmm1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_7: # %bb.i28.i
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cvttsd2si %xmm1, %edi
@@ -85,7 +85,7 @@ define fastcc void @mp_sqrt(i32 %n, i32 %radix, ptr %in, ptr %out, ptr %tmp1, pt
 ; CHECK-NEXT:    popl %ebx
 ; CHECK-NEXT:    popl %ebp
 ; CHECK-NEXT:    retl
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_9: # %bb.i.i
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_9

diff  --git a/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll b/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
index 8e6d2c11b7b3dc..c95fc00b3ee6d4 100644
--- a/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
@@ -16,13 +16,13 @@ define void @t(ptr %depth, ptr %bop, i32 %mode) nounwind  {
 ; CHECK-NEXT:  ## %bb.1: ## %entry
 ; CHECK-NEXT:    cmpl $1, %edx
 ; CHECK-NEXT:    jne LBB0_10
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_2: ## %bb2898.us
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp LBB0_2
 ; CHECK-NEXT:  LBB0_3: ## %bb13086.preheader
 ; CHECK-NEXT:    movb $1, %al
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_4: ## %bb13088
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %al, %al
@@ -31,7 +31,7 @@ define void @t(ptr %depth, ptr %bop, i32 %mode) nounwind  {
 ; CHECK-NEXT:    ## in Loop: Header=BB0_4 Depth=1
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    jmp LBB0_7
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_5: ## in Loop: Header=BB0_4 Depth=1
 ; CHECK-NEXT:    movl $65535, %ecx ## imm = 0xFFFF
 ; CHECK-NEXT:  LBB0_7: ## %bb13107

diff  --git a/llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll b/llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll
index ca92c555058abb..5086ed40a43a21 100644
--- a/llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll
+++ b/llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll
@@ -10,7 +10,7 @@ define i32 @main() nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movl $10271, %ebx # imm = 0x281F
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %forbody
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl $.str, %edi

diff  --git a/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll b/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
index 18d3cec442c6c2..c2a7d6be8baa00 100644
--- a/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
+++ b/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
@@ -11,7 +11,7 @@ define ptr @test(ptr %Q, ptr %L) nounwind {
 ; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    jmp LBB0_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %bb
 ; CHECK-NEXT:    ## in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    incl %eax

diff  --git a/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll b/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
index 7807d49269e64c..c421541001c5d8 100644
--- a/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
+++ b/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
@@ -20,7 +20,7 @@ define ptr @t(ptr %desc, i64 %p) nounwind ssp {
 ; CHECK-NEXT:    movq %rdi, %r14
 ; CHECK-NEXT:    orq $2097152, %rbx ## imm = 0x200000
 ; CHECK-NEXT:    andl $15728640, %ebx ## imm = 0xF00000
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %bb4
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    xorl %eax, %eax

diff  --git a/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll b/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
index ce28893090c43f..1dd30e82630992 100644
--- a/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
@@ -8,7 +8,7 @@ define i64 @test(ptr %tmp13) nounwind {
 ; CHECK-NEXT:    movl (%rdi), %ecx
 ; CHECK-NEXT:    movl %ecx, %eax
 ; CHECK-NEXT:    shrl %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %while.cond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb $1, %cl

diff  --git a/llvm/test/CodeGen/X86/2009-08-12-badswitch.ll b/llvm/test/CodeGen/X86/2009-08-12-badswitch.ll
index 4b8085a995f083..7050889d71029c 100644
--- a/llvm/test/CodeGen/X86/2009-08-12-badswitch.ll
+++ b/llvm/test/CodeGen/X86/2009-08-12-badswitch.ll
@@ -123,7 +123,7 @@ define internal fastcc i32 @foo(i64 %bar) nounwind ssp {
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    popq %rcx
 ; CHECK-NEXT:    retq
-; CHECK-NEXT:    .p2align 2, 0x90
+; CHECK-NEXT:    .p2align 2
 ; CHECK-NEXT:    .data_region jt32
 ; CHECK-NEXT:  .set L0_0_set_3, LBB0_3-LJTI0_0
 ; CHECK-NEXT:  .set L0_0_set_4, LBB0_4-LJTI0_0

diff  --git a/llvm/test/CodeGen/X86/2020_12_02_decrementing_loop.ll b/llvm/test/CodeGen/X86/2020_12_02_decrementing_loop.ll
index 43b52898c79a2c..22bf4581c6b42a 100644
--- a/llvm/test/CodeGen/X86/2020_12_02_decrementing_loop.ll
+++ b/llvm/test/CodeGen/X86/2020_12_02_decrementing_loop.ll
@@ -4,7 +4,7 @@
 define i32 @test_01(ptr %p, i64 %len, i32 %x) {
 ; CHECK-LABEL: test_01:
 ; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %loop
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subq $1, %rsi
@@ -44,7 +44,7 @@ failure:                                          ; preds = %backedge
 define i32 @test_01a(ptr %p, i64 %len, i32 %x) {
 ; CHECK-LABEL: test_01a:
 ; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB1_1: ## %loop
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subq $1, %rsi
@@ -84,7 +84,7 @@ failure:                                          ; preds = %backedge
 define i32 @test_02(ptr %p, i64 %len, i32 %x) {
 ; CHECK-LABEL: test_02:
 ; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB2_1: ## %loop
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subq $1, %rsi
@@ -126,7 +126,7 @@ failure:                                          ; preds = %backedge
 define i32 @test_03(ptr %p, i64 %len, i32 %x) {
 ; CHECK-LABEL: test_03:
 ; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB3_1: ## %loop
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subq $1, %rsi

diff  --git a/llvm/test/CodeGen/X86/AMX/amx-across-func.ll b/llvm/test/CodeGen/X86/AMX/amx-across-func.ll
index ae0be9b5a5bcd9..320c96535abba0 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-across-func.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-across-func.ll
@@ -235,7 +235,7 @@ define dso_local i32 @test_loop(i32 %0) nounwind {
 ; CHECK-NEXT:    movl $32, %r15d
 ; CHECK-NEXT:    movw $8, %r12w
 ; CHECK-NEXT:    movl $buf+2048, %r13d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_2: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    tileloadd (%r14,%r15), %tmm0
 ; CHECK-NEXT:    movabsq $64, %rax
@@ -300,7 +300,7 @@ define dso_local i32 @test_loop(i32 %0) nounwind {
 ; IPRA-NEXT:    movl $32, %esi
 ; IPRA-NEXT:    movw $8, %di
 ; IPRA-NEXT:    movl $buf+2048, %r8d
-; IPRA-NEXT:    .p2align 4, 0x90
+; IPRA-NEXT:    .p2align 4
 ; IPRA-NEXT:  .LBB2_2: # =>This Inner Loop Header: Depth=1
 ; IPRA-NEXT:    tileloadd (%rdx,%rsi), %tmm0
 ; IPRA-NEXT:    callq foo
@@ -494,7 +494,7 @@ define dso_local void @test_loop2(i32 %0) nounwind {
 ; CHECK-NEXT:    movl $32, %r15d
 ; CHECK-NEXT:    movw $8, %bp
 ; CHECK-NEXT:    movl $buf+2048, %r12d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    callq foo
@@ -535,7 +535,7 @@ define dso_local void @test_loop2(i32 %0) nounwind {
 ; IPRA-NEXT:    movl $32, %ecx
 ; IPRA-NEXT:    movw $8, %dx
 ; IPRA-NEXT:    movl $buf+2048, %esi
-; IPRA-NEXT:    .p2align 4, 0x90
+; IPRA-NEXT:    .p2align 4
 ; IPRA-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
 ; IPRA-NEXT:    callq foo
 ; IPRA-NEXT:    testl %edi, %edi

diff  --git a/llvm/test/CodeGen/X86/AMX/amx-ldtilecfg-insert.ll b/llvm/test/CodeGen/X86/AMX/amx-ldtilecfg-insert.ll
index 714590d3c156b6..8595024103dbd3 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-ldtilecfg-insert.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-ldtilecfg-insert.ll
@@ -241,7 +241,7 @@ define dso_local void @test5(i16 signext %0, i16 signext %1) nounwind {
 ; CHECK-NEXT:    movl $32, %edx
 ; CHECK-NEXT:    leal -1(%rsi), %r8d
 ; CHECK-NEXT:    jmp .LBB4_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_3: # %if.false
 ; CHECK-NEXT:    # in Loop: Header=BB4_1 Depth=1
 ; CHECK-NEXT:    movl %r8d, %esi
@@ -301,7 +301,7 @@ define dso_local void @test6(i16 signext %0) nounwind {
 ; CHECK-NEXT:    movl $32, %edx
 ; CHECK-NEXT:    xorl %esi, %esi
 ; CHECK-NEXT:    jmp .LBB5_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB5_3: # %if.false
 ; CHECK-NEXT:    # in Loop: Header=BB5_1 Depth=1
 ; CHECK-NEXT:    decl %esi

diff  --git a/llvm/test/CodeGen/X86/AMX/amx-lower-tile-copy.ll b/llvm/test/CodeGen/X86/AMX/amx-lower-tile-copy.ll
index a0085afbaf0253..e9bb68e711f889 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-lower-tile-copy.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-lower-tile-copy.ll
@@ -32,7 +32,7 @@ define dso_local void @test1(ptr%buf) nounwind {
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    movl $32, %r15d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %loop.header
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movabsq $64, %rax
@@ -92,7 +92,7 @@ define dso_local void @test1(ptr%buf) nounwind {
 ; EGPR-NEXT:    movq %rdi, %rbx # encoding: [0x48,0x89,0xfb]
 ; EGPR-NEXT:    xorl %r14d, %r14d # encoding: [0x45,0x31,0xf6]
 ; EGPR-NEXT:    movl $32, %r15d # encoding: [0x41,0xbf,0x20,0x00,0x00,0x00]
-; EGPR-NEXT:    .p2align 4, 0x90
+; EGPR-NEXT:    .p2align 4
 ; EGPR-NEXT:  .LBB0_2: # %loop.header
 ; EGPR-NEXT:    # =>This Inner Loop Header: Depth=1
 ; EGPR-NEXT:    movabsq $64, %rax # encoding: [0x48,0xb8,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
@@ -179,7 +179,7 @@ define dso_local void @test2(ptr%buf) nounwind {
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    movl $32, %r15d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_2: # %loop.header
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    tilezero %tmm0
@@ -231,7 +231,7 @@ define dso_local void @test2(ptr%buf) nounwind {
 ; EGPR-NEXT:    movq %rdi, %rbx # encoding: [0x48,0x89,0xfb]
 ; EGPR-NEXT:    xorl %r14d, %r14d # encoding: [0x45,0x31,0xf6]
 ; EGPR-NEXT:    movl $32, %r15d # encoding: [0x41,0xbf,0x20,0x00,0x00,0x00]
-; EGPR-NEXT:    .p2align 4, 0x90
+; EGPR-NEXT:    .p2align 4
 ; EGPR-NEXT:  .LBB1_2: # %loop.header
 ; EGPR-NEXT:    # =>This Inner Loop Header: Depth=1
 ; EGPR-NEXT:    tilezero %tmm0 # encoding: [0xc4,0xe2,0x7b,0x49,0xc0]

diff  --git a/llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll b/llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll
index c7c919c7cbb30d..4fb0a4445862ff 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll
@@ -244,7 +244,7 @@ define dso_local void @test3(ptr%buf) nounwind {
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    movl $32, %r14d
 ; CHECK-NEXT:    xorl %r15d, %r15d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_2: # %loop.header
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    tilestored %tmm0, (%rbx,%r14)
@@ -299,7 +299,7 @@ define dso_local void @test3(ptr%buf) nounwind {
 ; EGPR-NEXT:    movq %rdi, %rbx # encoding: [0x48,0x89,0xfb]
 ; EGPR-NEXT:    movl $32, %r14d # encoding: [0x41,0xbe,0x20,0x00,0x00,0x00]
 ; EGPR-NEXT:    xorl %r15d, %r15d # encoding: [0x45,0x31,0xff]
-; EGPR-NEXT:    .p2align 4, 0x90
+; EGPR-NEXT:    .p2align 4
 ; EGPR-NEXT:  .LBB1_2: # %loop.header
 ; EGPR-NEXT:    # =>This Inner Loop Header: Depth=1
 ; EGPR-NEXT:    tilestored %tmm0, (%rbx,%r14) # EVEX TO VEX Compression encoding: [0xc4,0xa2,0x7a,0x4b,0x04,0x33]

diff  --git a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
index 15e7136f4a5030..ec087f23248c4c 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
@@ -85,7 +85,7 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind {
 ; CHECK-NEXT:    xorl %ebx, %ebx
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    jmp .LBB1_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_5: # in Loop: Header=BB1_1 Depth=1
 ; CHECK-NEXT:    incq %r14
 ; CHECK-NEXT:    addl %edx, %ebx
@@ -96,7 +96,7 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind {
 ; CHECK-NEXT:    xorl %r12d, %r12d
 ; CHECK-NEXT:    xorl %r13d, %r13d
 ; CHECK-NEXT:    jmp .LBB1_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_4: # in Loop: Header=BB1_2 Depth=2
 ; CHECK-NEXT:    tilestored %tmm1, (%r15,%rax)
 ; CHECK-NEXT:    incq %r13

diff  --git a/llvm/test/CodeGen/X86/MachineSink-Issue98477.ll b/llvm/test/CodeGen/X86/MachineSink-Issue98477.ll
index 4df32854cdae44..4abb1f8e1ca429 100644
--- a/llvm/test/CodeGen/X86/MachineSink-Issue98477.ll
+++ b/llvm/test/CodeGen/X86/MachineSink-Issue98477.ll
@@ -13,7 +13,7 @@ define i32 @main(i1 %tobool.not, i32 %0) {
 ; CHECK-NEXT:  .LBB0_1: # %j.preheader
 ; CHECK-NEXT:    xorl %r9d, %r9d
 ; CHECK-NEXT:    jmp .LBB0_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_5: # %if.then4
 ; CHECK-NEXT:    # in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    movl $1, %eax

diff  --git a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
index fa9ed49da1b7f9..0103d2bf3cc2c6 100644
--- a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -17,7 +17,7 @@ define void @merge_const_store(i32 %count, ptr nocapture %p) nounwind uwtable no
 ; X86-NEXT:    jle .LBB0_3
 ; X86-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_2: # %.lr.ph
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl $67305985, (%ecx) # imm = 0x4030201
@@ -34,7 +34,7 @@ define void @merge_const_store(i32 %count, ptr nocapture %p) nounwind uwtable no
 ; X64-NEXT:    jle .LBB0_3
 ; X64-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; X64-NEXT:    movabsq $578437695752307201, %rax # imm = 0x807060504030201
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_2: # %.lr.ph
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movq %rax, (%rsi)
@@ -81,7 +81,7 @@ define void @merge_const_store_no_vec(i32 %count, ptr nocapture %p) noimplicitfl
 ; X86-NEXT:    jle .LBB1_3
 ; X86-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB1_2: # %.lr.ph
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl $0, (%ecx)
@@ -102,7 +102,7 @@ define void @merge_const_store_no_vec(i32 %count, ptr nocapture %p) noimplicitfl
 ; X64:       # %bb.0:
 ; X64-NEXT:    testl %edi, %edi
 ; X64-NEXT:    jle .LBB1_2
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB1_1: # %.lr.ph
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movq $0, (%rsi)
@@ -153,7 +153,7 @@ define void @merge_const_store_vec(i32 %count, ptr nocapture %p) nounwind uwtabl
 ; X86-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB2_2: # %.lr.ph
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    vmovups %ymm0, (%ecx)
@@ -170,7 +170,7 @@ define void @merge_const_store_vec(i32 %count, ptr nocapture %p) nounwind uwtabl
 ; X64-NEXT:    jle .LBB2_3
 ; X64-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB2_2: # %.lr.ph
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    vmovups %ymm0, (%rsi)
@@ -219,7 +219,7 @@ define void @merge_nonconst_store(i32 %count, i8 %zz, ptr nocapture %p) nounwind
 ; X86-BWON-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-BWON-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
-; X86-BWON-NEXT:    .p2align 4, 0x90
+; X86-BWON-NEXT:    .p2align 4
 ; X86-BWON-NEXT:  .LBB3_2: # %.lr.ph
 ; X86-BWON-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-BWON-NEXT:    movl $67305985, (%ecx) # imm = 0x4030201
@@ -240,7 +240,7 @@ define void @merge_nonconst_store(i32 %count, i8 %zz, ptr nocapture %p) nounwind
 ; X86-BWOFF-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-BWOFF-NEXT:    movb {{[0-9]+}}(%esp), %dl
-; X86-BWOFF-NEXT:    .p2align 4, 0x90
+; X86-BWOFF-NEXT:    .p2align 4
 ; X86-BWOFF-NEXT:  .LBB3_2: # %.lr.ph
 ; X86-BWOFF-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-BWOFF-NEXT:    movl $67305985, (%ecx) # imm = 0x4030201
@@ -257,7 +257,7 @@ define void @merge_nonconst_store(i32 %count, i8 %zz, ptr nocapture %p) nounwind
 ; X64:       # %bb.0:
 ; X64-NEXT:    testl %edi, %edi
 ; X64-NEXT:    jle .LBB3_2
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_1: # %.lr.ph
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl $67305985, (%rdx) # imm = 0x4030201
@@ -310,7 +310,7 @@ define void @merge_loads_i16(i32 %count, ptr noalias nocapture %q, ptr noalias n
 ; X86-BWON-NEXT:  # %bb.1: # %.lr.ph
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-BWON-NEXT:    .p2align 4, 0x90
+; X86-BWON-NEXT:    .p2align 4
 ; X86-BWON-NEXT:  .LBB4_2: # =>This Inner Loop Header: Depth=1
 ; X86-BWON-NEXT:    movzwl (%edx), %esi
 ; X86-BWON-NEXT:    movw %si, (%ecx)
@@ -333,7 +333,7 @@ define void @merge_loads_i16(i32 %count, ptr noalias nocapture %q, ptr noalias n
 ; X86-BWOFF-NEXT:  # %bb.1: # %.lr.ph
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-BWOFF-NEXT:    .p2align 4, 0x90
+; X86-BWOFF-NEXT:    .p2align 4
 ; X86-BWOFF-NEXT:  .LBB4_2: # =>This Inner Loop Header: Depth=1
 ; X86-BWOFF-NEXT:    movw (%edx), %si
 ; X86-BWOFF-NEXT:    movw %si, (%ecx)
@@ -349,7 +349,7 @@ define void @merge_loads_i16(i32 %count, ptr noalias nocapture %q, ptr noalias n
 ; X64-BWON:       # %bb.0:
 ; X64-BWON-NEXT:    testl %edi, %edi
 ; X64-BWON-NEXT:    jle .LBB4_2
-; X64-BWON-NEXT:    .p2align 4, 0x90
+; X64-BWON-NEXT:    .p2align 4
 ; X64-BWON-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWON-NEXT:    movzwl (%rsi), %eax
 ; X64-BWON-NEXT:    movw %ax, (%rdx)
@@ -363,7 +363,7 @@ define void @merge_loads_i16(i32 %count, ptr noalias nocapture %q, ptr noalias n
 ; X64-BWOFF:       # %bb.0:
 ; X64-BWOFF-NEXT:    testl %edi, %edi
 ; X64-BWOFF-NEXT:    jle .LBB4_2
-; X64-BWOFF-NEXT:    .p2align 4, 0x90
+; X64-BWOFF-NEXT:    .p2align 4
 ; X64-BWOFF-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWOFF-NEXT:    movw (%rsi), %ax
 ; X64-BWOFF-NEXT:    movw %ax, (%rdx)
@@ -411,7 +411,7 @@ define void @no_merge_loads(i32 %count, ptr noalias nocapture %q, ptr noalias no
 ; X86-BWON-NEXT:  # %bb.1: # %.lr.ph
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-BWON-NEXT:    .p2align 4, 0x90
+; X86-BWON-NEXT:    .p2align 4
 ; X86-BWON-NEXT:  .LBB5_2: # %a4
 ; X86-BWON-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-BWON-NEXT:    movzbl (%edx), %ebx
@@ -437,7 +437,7 @@ define void @no_merge_loads(i32 %count, ptr noalias nocapture %q, ptr noalias no
 ; X86-BWOFF-NEXT:  # %bb.1: # %.lr.ph
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-BWOFF-NEXT:    .p2align 4, 0x90
+; X86-BWOFF-NEXT:    .p2align 4
 ; X86-BWOFF-NEXT:  .LBB5_2: # %a4
 ; X86-BWOFF-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-BWOFF-NEXT:    movb (%edx), %bl
@@ -456,7 +456,7 @@ define void @no_merge_loads(i32 %count, ptr noalias nocapture %q, ptr noalias no
 ; X64-BWON:       # %bb.0:
 ; X64-BWON-NEXT:    testl %edi, %edi
 ; X64-BWON-NEXT:    jle .LBB5_2
-; X64-BWON-NEXT:    .p2align 4, 0x90
+; X64-BWON-NEXT:    .p2align 4
 ; X64-BWON-NEXT:  .LBB5_1: # %a4
 ; X64-BWON-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-BWON-NEXT:    movzbl (%rsi), %eax
@@ -473,7 +473,7 @@ define void @no_merge_loads(i32 %count, ptr noalias nocapture %q, ptr noalias no
 ; X64-BWOFF:       # %bb.0:
 ; X64-BWOFF-NEXT:    testl %edi, %edi
 ; X64-BWOFF-NEXT:    jle .LBB5_2
-; X64-BWOFF-NEXT:    .p2align 4, 0x90
+; X64-BWOFF-NEXT:    .p2align 4
 ; X64-BWOFF-NEXT:  .LBB5_1: # %a4
 ; X64-BWOFF-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-BWOFF-NEXT:    movb (%rsi), %al
@@ -526,7 +526,7 @@ define void @merge_loads_integer(i32 %count, ptr noalias nocapture %q, ptr noali
 ; X86-NEXT:  # %bb.1: # %.lr.ph
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB6_2: # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl (%edx), %esi
 ; X86-NEXT:    movl 4(%edx), %edi
@@ -546,7 +546,7 @@ define void @merge_loads_integer(i32 %count, ptr noalias nocapture %q, ptr noali
 ; X64:       # %bb.0:
 ; X64-NEXT:    testl %edi, %edi
 ; X64-NEXT:    jle .LBB6_2
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movq (%rsi), %rax
 ; X64-NEXT:    movq %rax, (%rdx)
@@ -590,7 +590,7 @@ define void @merge_loads_vector(i32 %count, ptr noalias nocapture %q, ptr noalia
 ; X86-NEXT:  # %bb.1: # %.lr.ph
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB7_2: # %block4
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    vmovups (%edx), %xmm0
@@ -605,7 +605,7 @@ define void @merge_loads_vector(i32 %count, ptr noalias nocapture %q, ptr noalia
 ; X64:       # %bb.0:
 ; X64-NEXT:    testl %edi, %edi
 ; X64-NEXT:    jle .LBB7_2
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB7_1: # %block4
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    vmovups (%rsi), %xmm0
@@ -659,7 +659,7 @@ define void @merge_loads_no_align(i32 %count, ptr noalias nocapture %q, ptr noal
 ; X86-NEXT:  # %bb.1: # %.lr.ph
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB8_2: # %block4
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    vmovups (%edx), %xmm0
@@ -674,7 +674,7 @@ define void @merge_loads_no_align(i32 %count, ptr noalias nocapture %q, ptr noal
 ; X64:       # %bb.0:
 ; X64-NEXT:    testl %edi, %edi
 ; X64-NEXT:    jle .LBB8_2
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB8_1: # %block4
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    vmovups (%rsi), %xmm0
@@ -737,7 +737,7 @@ define void @MergeLoadStoreBaseIndexOffset(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-BWON-NEXT:    .p2align 4, 0x90
+; X86-BWON-NEXT:    .p2align 4
 ; X86-BWON-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
 ; X86-BWON-NEXT:    movl (%edi,%eax,8), %ebx
 ; X86-BWON-NEXT:    movzwl (%edx,%ebx), %ebx
@@ -770,7 +770,7 @@ define void @MergeLoadStoreBaseIndexOffset(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-BWOFF-NEXT:    .p2align 4, 0x90
+; X86-BWOFF-NEXT:    .p2align 4
 ; X86-BWOFF-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
 ; X86-BWOFF-NEXT:    movl (%edi,%eax,8), %ebx
 ; X86-BWOFF-NEXT:    movw (%edx,%ebx), %bx
@@ -791,7 +791,7 @@ define void @MergeLoadStoreBaseIndexOffset(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X64-BWON:       # %bb.0:
 ; X64-BWON-NEXT:    movl %ecx, %eax
 ; X64-BWON-NEXT:    xorl %ecx, %ecx
-; X64-BWON-NEXT:    .p2align 4, 0x90
+; X64-BWON-NEXT:    .p2align 4
 ; X64-BWON-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWON-NEXT:    movq (%rdi,%rcx,8), %r8
 ; X64-BWON-NEXT:    movzwl (%rdx,%r8), %r8d
@@ -806,7 +806,7 @@ define void @MergeLoadStoreBaseIndexOffset(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X64-BWOFF:       # %bb.0:
 ; X64-BWOFF-NEXT:    movl %ecx, %eax
 ; X64-BWOFF-NEXT:    xorl %ecx, %ecx
-; X64-BWOFF-NEXT:    .p2align 4, 0x90
+; X64-BWOFF-NEXT:    .p2align 4
 ; X64-BWOFF-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWOFF-NEXT:    movq (%rdi,%rcx,8), %r8
 ; X64-BWOFF-NEXT:    movw (%rdx,%r8), %r8w
@@ -863,7 +863,7 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(ptr %a, ptr %b, ptr %c, i6
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ebx
 ; X86-BWON-NEXT:    xorl %ebp, %ebp
-; X86-BWON-NEXT:    .p2align 4, 0x90
+; X86-BWON-NEXT:    .p2align 4
 ; X86-BWON-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; X86-BWON-NEXT:    movsbl (%edi), %ecx
 ; X86-BWON-NEXT:    movzbl (%esi,%ecx), %edx
@@ -909,7 +909,7 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(ptr %a, ptr %b, ptr %c, i6
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ebx
 ; X86-BWOFF-NEXT:    xorl %ebp, %ebp
-; X86-BWOFF-NEXT:    .p2align 4, 0x90
+; X86-BWOFF-NEXT:    .p2align 4
 ; X86-BWOFF-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; X86-BWOFF-NEXT:    movsbl (%edi), %ecx
 ; X86-BWOFF-NEXT:    movb (%esi,%ecx), %dl
@@ -939,7 +939,7 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(ptr %a, ptr %b, ptr %c, i6
 ; X64-BWON-LABEL: MergeLoadStoreBaseIndexOffsetComplicated:
 ; X64-BWON:       # %bb.0:
 ; X64-BWON-NEXT:    xorl %eax, %eax
-; X64-BWON-NEXT:    .p2align 4, 0x90
+; X64-BWON-NEXT:    .p2align 4
 ; X64-BWON-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWON-NEXT:    movsbq (%rsi), %r8
 ; X64-BWON-NEXT:    movzwl (%rdx,%r8), %r8d
@@ -954,7 +954,7 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(ptr %a, ptr %b, ptr %c, i6
 ; X64-BWOFF-LABEL: MergeLoadStoreBaseIndexOffsetComplicated:
 ; X64-BWOFF:       # %bb.0:
 ; X64-BWOFF-NEXT:    xorl %eax, %eax
-; X64-BWOFF-NEXT:    .p2align 4, 0x90
+; X64-BWOFF-NEXT:    .p2align 4
 ; X64-BWOFF-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWOFF-NEXT:    movsbq (%rsi), %r8
 ; X64-BWOFF-NEXT:    movw (%rdx,%r8), %r8w
@@ -1011,7 +1011,7 @@ define void @MergeLoadStoreBaseIndexOffsetSext(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-BWON-NEXT:    .p2align 4, 0x90
+; X86-BWON-NEXT:    .p2align 4
 ; X86-BWON-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; X86-BWON-NEXT:    movsbl (%edi,%eax), %ebx
 ; X86-BWON-NEXT:    movzwl (%edx,%ebx), %ebx
@@ -1044,7 +1044,7 @@ define void @MergeLoadStoreBaseIndexOffsetSext(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-BWOFF-NEXT:    .p2align 4, 0x90
+; X86-BWOFF-NEXT:    .p2align 4
 ; X86-BWOFF-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; X86-BWOFF-NEXT:    movsbl (%edi,%eax), %ebx
 ; X86-BWOFF-NEXT:    movw (%edx,%ebx), %bx
@@ -1065,7 +1065,7 @@ define void @MergeLoadStoreBaseIndexOffsetSext(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X64-BWON:       # %bb.0:
 ; X64-BWON-NEXT:    movl %ecx, %eax
 ; X64-BWON-NEXT:    xorl %ecx, %ecx
-; X64-BWON-NEXT:    .p2align 4, 0x90
+; X64-BWON-NEXT:    .p2align 4
 ; X64-BWON-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWON-NEXT:    movsbq (%rdi,%rcx), %r8
 ; X64-BWON-NEXT:    movzwl (%rdx,%r8), %r8d
@@ -1080,7 +1080,7 @@ define void @MergeLoadStoreBaseIndexOffsetSext(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X64-BWOFF:       # %bb.0:
 ; X64-BWOFF-NEXT:    movl %ecx, %eax
 ; X64-BWOFF-NEXT:    xorl %ecx, %ecx
-; X64-BWOFF-NEXT:    .p2align 4, 0x90
+; X64-BWOFF-NEXT:    .p2align 4
 ; X64-BWOFF-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWOFF-NEXT:    movsbq (%rdi,%rcx), %r8
 ; X64-BWOFF-NEXT:    movw (%rdx,%r8), %r8w
@@ -1138,7 +1138,7 @@ define void @loadStoreBaseIndexOffsetSextNoSex(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-BWON-NEXT:    .p2align 4, 0x90
+; X86-BWON-NEXT:    .p2align 4
 ; X86-BWON-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; X86-BWON-NEXT:    movsbl (%edi,%eax), %ebx
 ; X86-BWON-NEXT:    movzbl (%edx,%ebx), %ecx
@@ -1180,7 +1180,7 @@ define void @loadStoreBaseIndexOffsetSextNoSex(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-BWOFF-NEXT:    .p2align 4, 0x90
+; X86-BWOFF-NEXT:    .p2align 4
 ; X86-BWOFF-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; X86-BWOFF-NEXT:    movsbl (%edi,%eax), %ebx
 ; X86-BWOFF-NEXT:    movb (%edx,%ebx), %cl
@@ -1207,7 +1207,7 @@ define void @loadStoreBaseIndexOffsetSextNoSex(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X64-BWON:       # %bb.0:
 ; X64-BWON-NEXT:    movl %ecx, %eax
 ; X64-BWON-NEXT:    xorl %ecx, %ecx
-; X64-BWON-NEXT:    .p2align 4, 0x90
+; X64-BWON-NEXT:    .p2align 4
 ; X64-BWON-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWON-NEXT:    movsbq (%rdi,%rcx), %r8
 ; X64-BWON-NEXT:    movzbl (%rdx,%r8), %r9d
@@ -1226,7 +1226,7 @@ define void @loadStoreBaseIndexOffsetSextNoSex(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X64-BWOFF:       # %bb.0:
 ; X64-BWOFF-NEXT:    movl %ecx, %eax
 ; X64-BWOFF-NEXT:    xorl %ecx, %ecx
-; X64-BWOFF-NEXT:    .p2align 4, 0x90
+; X64-BWOFF-NEXT:    .p2align 4
 ; X64-BWOFF-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; X64-BWOFF-NEXT:    movsbq (%rdi,%rcx), %r8
 ; X64-BWOFF-NEXT:    movb (%rdx,%r8), %r9b
@@ -1561,7 +1561,7 @@ define void @merge_const_store_heterogeneous(i32 %count, ptr nocapture %p) nounw
 ; X86-NEXT:    jle .LBB23_3
 ; X86-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB23_2: # %.lr.ph
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl $67305985, (%ecx) # imm = 0x4030201
@@ -1578,7 +1578,7 @@ define void @merge_const_store_heterogeneous(i32 %count, ptr nocapture %p) nounw
 ; X64-NEXT:    jle .LBB23_3
 ; X64-NEXT:  # %bb.1: # %.lr.ph.preheader
 ; X64-NEXT:    movabsq $578437695752307201, %rax # imm = 0x807060504030201
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB23_2: # %.lr.ph
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movq %rax, (%rsi)

diff  --git a/llvm/test/CodeGen/X86/PR71178-register-coalescer-crash.ll b/llvm/test/CodeGen/X86/PR71178-register-coalescer-crash.ll
index 0ce346f7c2e02c..12d66f64cb73d5 100644
--- a/llvm/test/CodeGen/X86/PR71178-register-coalescer-crash.ll
+++ b/llvm/test/CodeGen/X86/PR71178-register-coalescer-crash.ll
@@ -8,7 +8,7 @@ define i32 @h(i1 %arg, i32 %arg1) {
 ; CHECK-NEXT:    movabsq $9166129423, %rcx # imm = 0x22258090F
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    jmp .LBB0_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_9: # %bb18
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    xorl %eax, %eax

diff --git a/llvm/test/CodeGen/X86/SwitchLowering.ll b/llvm/test/CodeGen/X86/SwitchLowering.ll
index ac80fda69fccdb..921b4e3d24d114 100644
--- a/llvm/test/CodeGen/X86/SwitchLowering.ll
+++ b/llvm/test/CodeGen/X86/SwitchLowering.ll
@@ -13,7 +13,7 @@ define ptr @FindChar(ptr %CurPtr) {
 ; CHECK-NEXT:    .cfi_offset %edi, -8
 ; CHECK-NEXT:    xorl %edi, %edi
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl (%esi,%edi), %eax

diff --git a/llvm/test/CodeGen/X86/addr-mode-matcher-2.ll b/llvm/test/CodeGen/X86/addr-mode-matcher-2.ll
index daba729bf040f2..c810fe137024ce 100644
--- a/llvm/test/CodeGen/X86/addr-mode-matcher-2.ll
+++ b/llvm/test/CodeGen/X86/addr-mode-matcher-2.ll
@@ -30,7 +30,7 @@ define void @foo_sext_nsw(i1 zeroext, i32) nounwind {
 ; X86-NEXT:    retl
 ; X86-NEXT:  .LBB0_1: # %.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_2: # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    leal (%eax,%eax,4), %eax
 ; X86-NEXT:    leal 20(,%eax,4), %eax
@@ -49,7 +49,7 @@ define void @foo_sext_nsw(i1 zeroext, i32) nounwind {
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB0_1: # %.preheader
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_2: # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    cltq
 ; X64-NEXT:    shlq $2, %rax
@@ -77,7 +77,7 @@ define void @foo_sext_nuw(i1 zeroext, i32) nounwind {
 ; X86-NEXT:    retl
 ; X86-NEXT:  .LBB1_1: # %.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB1_2: # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    leal (%eax,%eax,4), %eax
 ; X86-NEXT:    leal 20(,%eax,4), %eax
@@ -96,7 +96,7 @@ define void @foo_sext_nuw(i1 zeroext, i32) nounwind {
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB1_1: # %.preheader
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB1_2: # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    incl %eax
 ; X64-NEXT:    cltq
@@ -125,7 +125,7 @@ define void @foo_zext_nsw(i1 zeroext, i32) nounwind {
 ; X86-NEXT:    retl
 ; X86-NEXT:  .LBB2_1: # %.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB2_2: # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    leal (%eax,%eax,4), %eax
 ; X86-NEXT:    leal 20(,%eax,4), %eax
@@ -144,7 +144,7 @@ define void @foo_zext_nsw(i1 zeroext, i32) nounwind {
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB2_1: # %.preheader
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB2_2: # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    incl %eax
 ; X64-NEXT:    shlq $2, %rax
@@ -173,7 +173,7 @@ define void @foo_zext_nuw(i1 zeroext, i32) nounwind {
 ; X86-NEXT:    retl
 ; X86-NEXT:  .LBB3_1: # %.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB3_2: # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    leal (%eax,%eax,4), %eax
 ; X86-NEXT:    leal 20(,%eax,4), %eax
@@ -192,7 +192,7 @@ define void @foo_zext_nuw(i1 zeroext, i32) nounwind {
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB3_1: # %.preheader
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_2: # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %eax
 ; X64-NEXT:    shlq $2, %rax
@@ -220,7 +220,7 @@ define void @foo_sext(i1 zeroext, i32) nounwind {
 ; X86-NEXT:    retl
 ; X86-NEXT:  .LBB4_1: # %.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB4_2: # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    leal (%eax,%eax,4), %eax
 ; X86-NEXT:    leal 20(,%eax,4), %eax
@@ -239,7 +239,7 @@ define void @foo_sext(i1 zeroext, i32) nounwind {
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB4_1: # %.preheader
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB4_2: # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    incl %eax
 ; X64-NEXT:    cltq
@@ -268,7 +268,7 @@ define void @foo_zext(i1 zeroext, i32) nounwind {
 ; X86-NEXT:    retl
 ; X86-NEXT:  .LBB5_1: # %.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB5_2: # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    leal (%eax,%eax,4), %eax
 ; X86-NEXT:    leal 20(,%eax,4), %eax
@@ -287,7 +287,7 @@ define void @foo_zext(i1 zeroext, i32) nounwind {
 ; X64-NEXT:    retq
 ; X64-NEXT:  .LBB5_1: # %.preheader
 ; X64-NEXT:    movl %esi, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB5_2: # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    incl %eax
 ; X64-NEXT:    shlq $2, %rax

diff --git a/llvm/test/CodeGen/X86/align-basic-block-sections.mir b/llvm/test/CodeGen/X86/align-basic-block-sections.mir
index e7ad5dfe6478f8..7521341ff3ea01 100644
--- a/llvm/test/CodeGen/X86/align-basic-block-sections.mir
+++ b/llvm/test/CodeGen/X86/align-basic-block-sections.mir
@@ -132,5 +132,5 @@ body:             |
 ...
 
 # CHECK: .section	.text.test,"ax", at progbits,unique,2
-# CHECK-NEXT: .p2align	8, 0x90
+# CHECK-NEXT: .p2align	8
 # CHECK-NEXT: test.__part.2: # %entry

diff --git a/llvm/test/CodeGen/X86/and-sink.ll b/llvm/test/CodeGen/X86/and-sink.ll
index 965e84844758fc..d9a34d743ac657 100644
--- a/llvm/test/CodeGen/X86/and-sink.ll
+++ b/llvm/test/CodeGen/X86/and-sink.ll
@@ -54,7 +54,7 @@ define i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
 ; CHECK-NEXT:  # %bb.1: # %bb0.preheader
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_2: # %bb0
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl $0, B

diff --git a/llvm/test/CodeGen/X86/apx/push2-pop2.ll b/llvm/test/CodeGen/X86/apx/push2-pop2.ll
index 6bd9f525090ee2..f5be484be2b1a6 100644
--- a/llvm/test/CodeGen/X86/apx/push2-pop2.ll
+++ b/llvm/test/CodeGen/X86/apx/push2-pop2.ll
@@ -286,7 +286,7 @@ define void @lea_in_epilog(i1 %arg, ptr %arg1, ptr %arg2, i64 %arg3, i64 %arg4,
 ; CHECK-NEXT:    xorl %ebp, %ebp
 ; CHECK-NEXT:    xorl %r12d, %r12d
 ; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB6_2: # %bb15
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    incq %r12
@@ -338,7 +338,7 @@ define void @lea_in_epilog(i1 %arg, ptr %arg1, ptr %arg2, i64 %arg3, i64 %arg4,
 ; PPX-NEXT:    xorl %ebp, %ebp
 ; PPX-NEXT:    xorl %r12d, %r12d
 ; PPX-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; PPX-NEXT:    .p2align 4, 0x90
+; PPX-NEXT:    .p2align 4
 ; PPX-NEXT:  .LBB6_2: # %bb15
 ; PPX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; PPX-NEXT:    incq %r12
@@ -390,7 +390,7 @@ define void @lea_in_epilog(i1 %arg, ptr %arg1, ptr %arg2, i64 %arg3, i64 %arg4,
 ; FRAME-NEXT:    xorl %r13d, %r13d
 ; FRAME-NEXT:    xorl %r14d, %r14d
 ; FRAME-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; FRAME-NEXT:    .p2align 4, 0x90
+; FRAME-NEXT:    .p2align 4
 ; FRAME-NEXT:  .LBB6_2: # %bb15
 ; FRAME-NEXT:    # =>This Inner Loop Header: Depth=1
 ; FRAME-NEXT:    movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill

diff --git a/llvm/test/CodeGen/X86/apx/setzucc.ll b/llvm/test/CodeGen/X86/apx/setzucc.ll
index 084e54235330c2..1f2b612a170883 100644
--- a/llvm/test/CodeGen/X86/apx/setzucc.ll
+++ b/llvm/test/CodeGen/X86/apx/setzucc.ll
@@ -53,7 +53,7 @@ define i32 @flags_copy_lowering() nounwind {
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    addl %edx, 0

diff --git a/llvm/test/CodeGen/X86/assertzext-demanded.ll b/llvm/test/CodeGen/X86/assertzext-demanded.ll
index 3d2120578b49e4..33a8c543ed9ab0 100644
--- a/llvm/test/CodeGen/X86/assertzext-demanded.ll
+++ b/llvm/test/CodeGen/X86/assertzext-demanded.ll
@@ -7,7 +7,7 @@ define void @simplify_assertzext(ptr %0) {
 ; CHECK:       # %bb.0: # %BB
 ; CHECK-NEXT:    movl $275047, %eax # imm = 0x43267
 ; CHECK-NEXT:    movb $1, %cl
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %CF246
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %cl, %cl
@@ -15,7 +15,7 @@ define void @simplify_assertzext(ptr %0) {
 ; CHECK-NEXT:  # %bb.2: # %CF260
 ; CHECK-NEXT:    orl $278403, %eax # imm = 0x43F83
 ; CHECK-NEXT:    movl %eax, (%rdi)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %CF242
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_3

diff --git a/llvm/test/CodeGen/X86/atom-pad-short-functions.ll b/llvm/test/CodeGen/X86/atom-pad-short-functions.ll
index 06800bd244de3c..301995dfef0a3d 100644
--- a/llvm/test/CodeGen/X86/atom-pad-short-functions.ll
+++ b/llvm/test/CodeGen/X86/atom-pad-short-functions.ll
@@ -110,7 +110,7 @@ define void @test_branch_to_same_bb(i32 %x, i32 %y) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    jle .LBB7_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_1: # %while.cond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB7_1

diff --git a/llvm/test/CodeGen/X86/atomic-bit-test.ll b/llvm/test/CodeGen/X86/atomic-bit-test.ll
index 10b6605c3fb05e..8f91f4120842b3 100644
--- a/llvm/test/CodeGen/X86/atomic-bit-test.ll
+++ b/llvm/test/CodeGen/X86/atomic-bit-test.ll
@@ -106,7 +106,7 @@ define i64 @bts63() nounwind {
 ; X86-NEXT:    movl $-2147483648, %esi # imm = 0x80000000
 ; X86-NEXT:    movl v64+4, %edx
 ; X86-NEXT:    movl v64, %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB4_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %edx, %ecx
@@ -230,7 +230,7 @@ define i64 @btc63() nounwind {
 ; X86-NEXT:    movl $-2147483648, %esi # imm = 0x80000000
 ; X86-NEXT:    movl v64+4, %edx
 ; X86-NEXT:    movl v64, %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB9_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %edx, %ecx
@@ -360,7 +360,7 @@ define i64 @btr63() nounwind {
 ; X86-NEXT:    movl $-1, %edi
 ; X86-NEXT:    movl v64+4, %edx
 ; X86-NEXT:    movl v64, %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB14_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ebx
@@ -396,7 +396,7 @@ define i16 @multi_use1() nounwind {
 ; X86-LABEL: multi_use1:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzwl v16, %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB15_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -416,7 +416,7 @@ define i16 @multi_use1() nounwind {
 ; X64-LABEL: multi_use1:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzwl v16(%rip), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB15_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -471,7 +471,7 @@ define i16 @use_in_diff_bb() nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movzwl v16, %esi
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB17_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %esi, %ecx
@@ -496,7 +496,7 @@ define i16 @use_in_diff_bb() nounwind {
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %rbx
 ; X64-NEXT:    movzwl v16(%rip), %ebx
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB17_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %ebx, %ecx

diff --git a/llvm/test/CodeGen/X86/atomic-flags.ll b/llvm/test/CodeGen/X86/atomic-flags.ll
index 00a6308a749694..317af67deb1632 100644
--- a/llvm/test/CodeGen/X86/atomic-flags.ll
+++ b/llvm/test/CodeGen/X86/atomic-flags.ll
@@ -144,7 +144,7 @@ define zeroext i1 @xadd_cmp0_i64(ptr %x) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl (%esi), %eax
 ; X86-NEXT:    movl 4(%esi), %edx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB2_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ebx

diff --git a/llvm/test/CodeGen/X86/atomic-idempotent.ll b/llvm/test/CodeGen/X86/atomic-idempotent.ll
index d5c46485068a64..55b4d1af094f67 100644
--- a/llvm/test/CodeGen/X86/atomic-idempotent.ll
+++ b/llvm/test/CodeGen/X86/atomic-idempotent.ll
@@ -64,7 +64,7 @@ define i16 @or16(ptr %p) {
 ; X86-SLM:       # %bb.0:
 ; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SLM-NEXT:    movzwl (%ecx), %eax
-; X86-SLM-NEXT:    .p2align 4, 0x90
+; X86-SLM-NEXT:    .p2align 4
 ; X86-SLM-NEXT:  .LBB1_1: # %atomicrmw.start
 ; X86-SLM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-SLM-NEXT:    lock cmpxchgw %ax, (%ecx)
@@ -76,7 +76,7 @@ define i16 @or16(ptr %p) {
 ; X86-ATOM:       # %bb.0:
 ; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-ATOM-NEXT:    movzwl (%ecx), %eax
-; X86-ATOM-NEXT:    .p2align 4, 0x90
+; X86-ATOM-NEXT:    .p2align 4
 ; X86-ATOM-NEXT:  .LBB1_1: # %atomicrmw.start
 ; X86-ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-ATOM-NEXT:    lock cmpxchgw %ax, (%ecx)
@@ -105,7 +105,7 @@ define i32 @xor32(ptr %p) {
 ; X86-SLM:       # %bb.0:
 ; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SLM-NEXT:    movl (%ecx), %eax
-; X86-SLM-NEXT:    .p2align 4, 0x90
+; X86-SLM-NEXT:    .p2align 4
 ; X86-SLM-NEXT:  .LBB2_1: # %atomicrmw.start
 ; X86-SLM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-SLM-NEXT:    lock cmpxchgl %eax, (%ecx)
@@ -117,7 +117,7 @@ define i32 @xor32(ptr %p) {
 ; X86-ATOM:       # %bb.0:
 ; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-ATOM-NEXT:    movl (%ecx), %eax
-; X86-ATOM-NEXT:    .p2align 4, 0x90
+; X86-ATOM-NEXT:    .p2align 4
 ; X86-ATOM-NEXT:  .LBB2_1: # %atomicrmw.start
 ; X86-ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-ATOM-NEXT:    lock cmpxchgl %eax, (%ecx)
@@ -146,7 +146,7 @@ define i64 @sub64(ptr %p) {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl (%esi), %eax
 ; X86-NEXT:    movl 4(%esi), %edx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB3_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %edx, %ecx
@@ -196,7 +196,7 @@ define i128 @or128(ptr %p) {
 ; X86-GENERIC-NEXT:    movl 8(%edi), %edx
 ; X86-GENERIC-NEXT:    movl (%edi), %ebx
 ; X86-GENERIC-NEXT:    movl 4(%edi), %esi
-; X86-GENERIC-NEXT:    .p2align 4, 0x90
+; X86-GENERIC-NEXT:    .p2align 4
 ; X86-GENERIC-NEXT:  .LBB4_1: # %atomicrmw.start
 ; X86-GENERIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-GENERIC-NEXT:    movl %ebx, (%esp)
@@ -257,7 +257,7 @@ define i128 @or128(ptr %p) {
 ; X86-ATOM-NEXT:    movl 8(%edi), %edx
 ; X86-ATOM-NEXT:    movl (%edi), %esi
 ; X86-ATOM-NEXT:    movl 4(%edi), %ebx
-; X86-ATOM-NEXT:    .p2align 4, 0x90
+; X86-ATOM-NEXT:    .p2align 4
 ; X86-ATOM-NEXT:  .LBB4_1: # %atomicrmw.start
 ; X86-ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-ATOM-NEXT:    movl %esi, (%esp)
@@ -320,7 +320,7 @@ define i32 @and32 (ptr %p) {
 ; X86-SLM:       # %bb.0:
 ; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SLM-NEXT:    movl (%ecx), %eax
-; X86-SLM-NEXT:    .p2align 4, 0x90
+; X86-SLM-NEXT:    .p2align 4
 ; X86-SLM-NEXT:  .LBB5_1: # %atomicrmw.start
 ; X86-SLM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-SLM-NEXT:    lock cmpxchgl %eax, (%ecx)
@@ -332,7 +332,7 @@ define i32 @and32 (ptr %p) {
 ; X86-ATOM:       # %bb.0:
 ; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-ATOM-NEXT:    movl (%ecx), %eax
-; X86-ATOM-NEXT:    .p2align 4, 0x90
+; X86-ATOM-NEXT:    .p2align 4
 ; X86-ATOM-NEXT:  .LBB5_1: # %atomicrmw.start
 ; X86-ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-ATOM-NEXT:    lock cmpxchgl %eax, (%ecx)
@@ -495,7 +495,7 @@ define void @or64_nouse_seq_cst(ptr %p) {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl (%esi), %eax
 ; X86-NEXT:    movl 4(%esi), %edx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB11_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %edx, %ecx
@@ -546,7 +546,7 @@ define void @or128_nouse_seq_cst(ptr %p) {
 ; X86-GENERIC-NEXT:    movl 8(%esi), %edi
 ; X86-GENERIC-NEXT:    movl (%esi), %edx
 ; X86-GENERIC-NEXT:    movl 4(%esi), %ebx
-; X86-GENERIC-NEXT:    .p2align 4, 0x90
+; X86-GENERIC-NEXT:    .p2align 4
 ; X86-GENERIC-NEXT:  .LBB12_1: # %atomicrmw.start
 ; X86-GENERIC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-GENERIC-NEXT:    movl %edx, (%esp)
@@ -603,7 +603,7 @@ define void @or128_nouse_seq_cst(ptr %p) {
 ; X86-ATOM-NEXT:    movl 8(%esi), %edx
 ; X86-ATOM-NEXT:    movl (%esi), %eax
 ; X86-ATOM-NEXT:    movl 4(%esi), %edi
-; X86-ATOM-NEXT:    .p2align 4, 0x90
+; X86-ATOM-NEXT:    .p2align 4
 ; X86-ATOM-NEXT:  .LBB12_1: # %atomicrmw.start
 ; X86-ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-ATOM-NEXT:    movl %eax, (%esp)

diff --git a/llvm/test/CodeGen/X86/atomic-minmax-i6432.ll b/llvm/test/CodeGen/X86/atomic-minmax-i6432.ll
index 809457f4d8291a..f8cbd0a6a9ee0f 100644
--- a/llvm/test/CodeGen/X86/atomic-minmax-i6432.ll
+++ b/llvm/test/CodeGen/X86/atomic-minmax-i6432.ll
@@ -12,7 +12,7 @@ define i64 @atomic_max_i64() nounwind {
 ; LINUX-NEXT:    movl sc64+4, %edx
 ; LINUX-NEXT:    movl sc64, %eax
 ; LINUX-NEXT:    movl $5, %esi
-; LINUX-NEXT:    .p2align 4, 0x90
+; LINUX-NEXT:    .p2align 4
 ; LINUX-NEXT:  .LBB0_1: # %atomicrmw.start
 ; LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LINUX-NEXT:    cmpl %eax, %esi
@@ -41,7 +41,7 @@ define i64 @atomic_max_i64() nounwind {
 ; PIC-NEXT:    movl (%esi), %eax
 ; PIC-NEXT:    movl 4(%esi), %edx
 ; PIC-NEXT:    movl $5, %edi
-; PIC-NEXT:    .p2align 4, 0x90
+; PIC-NEXT:    .p2align 4
 ; PIC-NEXT:  LBB0_1: ## %atomicrmw.start
 ; PIC-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; PIC-NEXT:    cmpl %eax, %edi
@@ -69,7 +69,7 @@ define i64 @atomic_min_i64() nounwind {
 ; LINUX-NEXT:    pushl %ebx
 ; LINUX-NEXT:    movl sc64+4, %edx
 ; LINUX-NEXT:    movl sc64, %eax
-; LINUX-NEXT:    .p2align 4, 0x90
+; LINUX-NEXT:    .p2align 4
 ; LINUX-NEXT:  .LBB1_1: # %atomicrmw.start
 ; LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LINUX-NEXT:    cmpl $7, %eax
@@ -95,7 +95,7 @@ define i64 @atomic_min_i64() nounwind {
 ; PIC-NEXT:    movl L_sc64$non_lazy_ptr-L1$pb(%eax), %esi
 ; PIC-NEXT:    movl (%esi), %eax
 ; PIC-NEXT:    movl 4(%esi), %edx
-; PIC-NEXT:    .p2align 4, 0x90
+; PIC-NEXT:    .p2align 4
 ; PIC-NEXT:  LBB1_1: ## %atomicrmw.start
 ; PIC-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; PIC-NEXT:    cmpl $7, %eax
@@ -124,7 +124,7 @@ define i64 @atomic_umax_i64() nounwind {
 ; LINUX-NEXT:    movl sc64+4, %edx
 ; LINUX-NEXT:    movl sc64, %eax
 ; LINUX-NEXT:    movl $7, %esi
-; LINUX-NEXT:    .p2align 4, 0x90
+; LINUX-NEXT:    .p2align 4
 ; LINUX-NEXT:  .LBB2_1: # %atomicrmw.start
 ; LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LINUX-NEXT:    cmpl %eax, %esi
@@ -153,7 +153,7 @@ define i64 @atomic_umax_i64() nounwind {
 ; PIC-NEXT:    movl (%esi), %eax
 ; PIC-NEXT:    movl 4(%esi), %edx
 ; PIC-NEXT:    movl $7, %edi
-; PIC-NEXT:    .p2align 4, 0x90
+; PIC-NEXT:    .p2align 4
 ; PIC-NEXT:  LBB2_1: ## %atomicrmw.start
 ; PIC-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; PIC-NEXT:    cmpl %eax, %edi
@@ -181,7 +181,7 @@ define i64 @atomic_umin_i64() nounwind {
 ; LINUX-NEXT:    pushl %ebx
 ; LINUX-NEXT:    movl sc64+4, %edx
 ; LINUX-NEXT:    movl sc64, %eax
-; LINUX-NEXT:    .p2align 4, 0x90
+; LINUX-NEXT:    .p2align 4
 ; LINUX-NEXT:  .LBB3_1: # %atomicrmw.start
 ; LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LINUX-NEXT:    cmpl $9, %eax
@@ -207,7 +207,7 @@ define i64 @atomic_umin_i64() nounwind {
 ; PIC-NEXT:    movl L_sc64$non_lazy_ptr-L3$pb(%eax), %esi
 ; PIC-NEXT:    movl (%esi), %eax
 ; PIC-NEXT:    movl 4(%esi), %edx
-; PIC-NEXT:    .p2align 4, 0x90
+; PIC-NEXT:    .p2align 4
 ; PIC-NEXT:  LBB3_1: ## %atomicrmw.start
 ; PIC-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; PIC-NEXT:    cmpl $9, %eax
@@ -238,7 +238,7 @@ define void @tf_bug(ptr %ptr) nounwind {
 ; LINUX-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; LINUX-NEXT:    movl id+4, %edx
 ; LINUX-NEXT:    movl id, %eax
-; LINUX-NEXT:    .p2align 4, 0x90
+; LINUX-NEXT:    .p2align 4
 ; LINUX-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LINUX-NEXT:    movl %eax, %ebx
@@ -267,7 +267,7 @@ define void @tf_bug(ptr %ptr) nounwind {
 ; PIC-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; PIC-NEXT:    movl (_id-L4$pb)+4(%edi), %edx
 ; PIC-NEXT:    movl _id-L4$pb(%edi), %eax
-; PIC-NEXT:    .p2align 4, 0x90
+; PIC-NEXT:    .p2align 4
 ; PIC-NEXT:  LBB4_1: ## %atomicrmw.start
 ; PIC-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; PIC-NEXT:    movl %eax, %ebx

diff --git a/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll b/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
index 5960787fe30de1..e8b4d3f6812d26 100644
--- a/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
+++ b/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
@@ -16,7 +16,7 @@ define void @store_fp128(ptr %fptr, fp128 %v) {
 ; X64-SSE-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
 ; X64-SSE-NEXT:    movq (%rdi), %rax
 ; X64-SSE-NEXT:    movq 8(%rdi), %rdx
-; X64-SSE-NEXT:    .p2align 4, 0x90
+; X64-SSE-NEXT:    .p2align 4
 ; X64-SSE-NEXT:  .LBB0_1: # %atomicrmw.start
 ; X64-SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-SSE-NEXT:    lock cmpxchg16b (%rdi)
@@ -73,7 +73,7 @@ define fp128 @exchange_fp128(ptr %fptr, fp128 %x) {
 ; X64-SSE-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
 ; X64-SSE-NEXT:    movq (%rdi), %rax
 ; X64-SSE-NEXT:    movq 8(%rdi), %rdx
-; X64-SSE-NEXT:    .p2align 4, 0x90
+; X64-SSE-NEXT:    .p2align 4
 ; X64-SSE-NEXT:  .LBB2_1: # %atomicrmw.start
 ; X64-SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-SSE-NEXT:    lock cmpxchg16b (%rdi)
@@ -96,7 +96,7 @@ define fp128 @exchange_fp128(ptr %fptr, fp128 %x) {
 ; X64-AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
 ; X64-AVX-NEXT:    movq (%rdi), %rax
 ; X64-AVX-NEXT:    movq 8(%rdi), %rdx
-; X64-AVX-NEXT:    .p2align 4, 0x90
+; X64-AVX-NEXT:    .p2align 4
 ; X64-AVX-NEXT:  .LBB2_1: # %atomicrmw.start
 ; X64-AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-AVX-NEXT:    lock cmpxchg16b (%rdi)

diff --git a/llvm/test/CodeGen/X86/atomic-non-integer.ll b/llvm/test/CodeGen/X86/atomic-non-integer.ll
index bd794712a31096..ed882cbbc52546 100644
--- a/llvm/test/CodeGen/X86/atomic-non-integer.ll
+++ b/llvm/test/CodeGen/X86/atomic-non-integer.ll
@@ -440,7 +440,7 @@ define double @exchange_double(ptr %fptr, double %x) {
 ; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE1-NEXT:    movl (%esi), %eax
 ; X86-SSE1-NEXT:    movl 4(%esi), %edx
-; X86-SSE1-NEXT:    .p2align 4, 0x90
+; X86-SSE1-NEXT:    .p2align 4
 ; X86-SSE1-NEXT:  .LBB8_1: # %atomicrmw.start
 ; X86-SSE1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-SSE1-NEXT:    lock cmpxchg8b (%esi)
@@ -472,7 +472,7 @@ define double @exchange_double(ptr %fptr, double %x) {
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE2-NEXT:    movl (%esi), %eax
 ; X86-SSE2-NEXT:    movl 4(%esi), %edx
-; X86-SSE2-NEXT:    .p2align 4, 0x90
+; X86-SSE2-NEXT:    .p2align 4
 ; X86-SSE2-NEXT:  .LBB8_1: # %atomicrmw.start
 ; X86-SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-SSE2-NEXT:    lock cmpxchg8b (%esi)
@@ -506,7 +506,7 @@ define double @exchange_double(ptr %fptr, double %x) {
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-AVX-NEXT:    movl (%esi), %eax
 ; X86-AVX-NEXT:    movl 4(%esi), %edx
-; X86-AVX-NEXT:    .p2align 4, 0x90
+; X86-AVX-NEXT:    .p2align 4
 ; X86-AVX-NEXT:  .LBB8_1: # %atomicrmw.start
 ; X86-AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-AVX-NEXT:    lock cmpxchg8b (%esi)
@@ -539,7 +539,7 @@ define double @exchange_double(ptr %fptr, double %x) {
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NOSSE-NEXT:    movl (%esi), %eax
 ; X86-NOSSE-NEXT:    movl 4(%esi), %edx
-; X86-NOSSE-NEXT:    .p2align 4, 0x90
+; X86-NOSSE-NEXT:    .p2align 4
 ; X86-NOSSE-NEXT:  .LBB8_1: # %atomicrmw.start
 ; X86-NOSSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NOSSE-NEXT:    lock cmpxchg8b (%esi)

diff --git a/llvm/test/CodeGen/X86/atomic-rm-bit-test-64.ll b/llvm/test/CodeGen/X86/atomic-rm-bit-test-64.ll
index 8ff4f4067dabda..3fe5b70edc7183 100644
--- a/llvm/test/CodeGen/X86/atomic-rm-bit-test-64.ll
+++ b/llvm/test/CodeGen/X86/atomic-rm-bit-test-64.ll
@@ -27,7 +27,7 @@ define i64 @atomic_shl2_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    # kill: def $cl killed $cl killed $rcx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
@@ -51,7 +51,7 @@ define i64 @atomic_shl1_neq_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -159,7 +159,7 @@ define i64 @atomic_blsi_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    negq %rcx
 ; CHECK-NEXT:    andq %rsi, %rcx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rdx
@@ -184,7 +184,7 @@ define i64 @atomic_shl1_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB8_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -214,7 +214,7 @@ define i64 @atomic_shl2_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    # kill: def $cl killed $cl killed $rcx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB9_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
@@ -243,7 +243,7 @@ define i64 @atomic_shl1_neq_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB10_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -276,7 +276,7 @@ define i64 @atomic_shl1_small_mask_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB11_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -306,7 +306,7 @@ define i64 @atomic_shl1_mask0_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB12_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -336,7 +336,7 @@ define i64 @atomic_shl1_mask1_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB13_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -367,7 +367,7 @@ define i64 @atomic_shl1_mask01_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB14_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -397,7 +397,7 @@ define i64 @atomic_blsi_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    negq %rdx
 ; CHECK-NEXT:    andq %rsi, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB15_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
@@ -427,7 +427,7 @@ define i64 @atomic_shl1_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB16_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -456,7 +456,7 @@ define i64 @atomic_shl2_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    # kill: def $cl killed $cl killed $rcx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB17_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
@@ -485,7 +485,7 @@ define i64 @atomic_shl1_neq_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB18_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -517,7 +517,7 @@ define i64 @atomic_shl1_small_mask_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB19_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -546,7 +546,7 @@ define i64 @atomic_shl1_mask0_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB20_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -575,7 +575,7 @@ define i64 @atomic_shl1_mask1_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB21_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -605,7 +605,7 @@ define i64 @atomic_shl1_mask01_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    shlq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB22_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -634,7 +634,7 @@ define i64 @atomic_blsi_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    negq %rdx
 ; CHECK-NEXT:    andq %rsi, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB23_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
@@ -686,7 +686,7 @@ define i64 @atomic_shl2_and_64_gpr_val(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq %rdx, %rcx
 ; CHECK-NEXT:    notq %rcx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB25_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -711,7 +711,7 @@ define i64 @atomic_shl1_neq_and_64_gpr_val(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq $-2, %rdx
 ; CHECK-NEXT:    rolq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB26_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -826,7 +826,7 @@ define i64 @atomic_blsi_and_64_gpr_val(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq %rcx, %rdx
 ; CHECK-NEXT:    notq %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB31_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -853,7 +853,7 @@ define i64 @atomic_shl1_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq $-2, %rdx
 ; CHECK-NEXT:    rolq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB32_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -885,7 +885,7 @@ define i64 @atomic_shl2_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq %rdx, %rcx
 ; CHECK-NEXT:    notq %rcx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB33_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -915,7 +915,7 @@ define i64 @atomic_shl1_neq_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq $-2, %rdx
 ; CHECK-NEXT:    rolq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB34_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -948,7 +948,7 @@ define i64 @atomic_shl1_small_mask_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq $-2, %rdx
 ; CHECK-NEXT:    rolq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB35_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -978,7 +978,7 @@ define i64 @atomic_shl1_mask0_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq $-2, %rdx
 ; CHECK-NEXT:    rolq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB36_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -1008,7 +1008,7 @@ define i64 @atomic_shl1_mask1_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq $-2, %rdx
 ; CHECK-NEXT:    rolq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB37_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -1041,7 +1041,7 @@ define i64 @atomic_shl1_mask01_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    # kill: def $cl killed $cl killed $rcx
 ; CHECK-NEXT:    rolq %cl, %rsi
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB38_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
@@ -1073,7 +1073,7 @@ define i64 @atomic_blsi_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq %rdx, %rcx
 ; CHECK-NEXT:    notq %rcx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB39_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -1137,7 +1137,7 @@ define i64 @atomic_shl2_and_64_gpr_brnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq %rdx, %rsi
 ; CHECK-NEXT:    notq %rsi
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB41_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %r8
@@ -1178,7 +1178,7 @@ define i64 @atomic_shl1_neq_and_64_gpr_brnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq $-2, %rdx
 ; CHECK-NEXT:    rolq %cl, %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB42_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rsi
@@ -1354,7 +1354,7 @@ define i64 @atomic_blsi_and_64_gpr_brnz(ptr %v, i64 %c) nounwind {
 ; CHECK-NEXT:    movq %rcx, %rdx
 ; CHECK-NEXT:    notq %rdx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB47_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %r8
@@ -1420,7 +1420,7 @@ define i64 @atomic_shl1_neq_xor_64_const_br(ptr %v) nounwind {
 ; CHECK-LABEL: atomic_shl1_neq_xor_64_const_br:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB49_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
@@ -1483,7 +1483,7 @@ define i64 @atomic_shl1_neq_xor_64_const_brz(ptr %v) nounwind {
 ; CHECK-LABEL: atomic_shl1_neq_xor_64_const_brz:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB51_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
@@ -1546,7 +1546,7 @@ define i64 @atomic_shl1_neq_xor_64_const_brnz(ptr %v) nounwind {
 ; CHECK-LABEL: atomic_shl1_neq_xor_64_const_brnz:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB53_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rcx
@@ -1583,7 +1583,7 @@ define i64 @atomic_and_with_not_arg(ptr %v, i64 %c) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq $-1, %rcx
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB54_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rdx

diff --git a/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll b/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
index 5594d13a234d02..b4d40fee01e419 100644
--- a/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
+++ b/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
@@ -11,7 +11,7 @@ define zeroext i8 @atomic_shl1_or_8_gpr_val(ptr %v, i8 zeroext %c) nounwind {
 ; X86-NEXT:    movl $1, %edx
 ; X86-NEXT:    shll %cl, %edx
 ; X86-NEXT:    movzbl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -31,7 +31,7 @@ define zeroext i8 @atomic_shl1_or_8_gpr_val(ptr %v, i8 zeroext %c) nounwind {
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -63,7 +63,7 @@ define zeroext i8 @atomic_shl1_mask0_or_8_gpr_val(ptr %v, i8 zeroext %c) nounwin
 ; X86-NEXT:    movb $1, %ah
 ; X86-NEXT:    shlb %cl, %ah
 ; X86-NEXT:    movb (%esi), %al
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB1_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -86,7 +86,7 @@ define zeroext i8 @atomic_shl1_mask0_or_8_gpr_val(ptr %v, i8 zeroext %c) nounwin
 ; X64-NEXT:    movb $1, %dl
 ; X64-NEXT:    shlb %cl, %dl
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB1_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -120,7 +120,7 @@ define zeroext i8 @atomic_shl1_mask01_or_8_gpr_val(ptr %v, i8 zeroext %c) nounwi
 ; X86-NEXT:    movb $1, %dl
 ; X86-NEXT:    shlb %cl, %dl
 ; X86-NEXT:    movzbl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB2_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -141,7 +141,7 @@ define zeroext i8 @atomic_shl1_mask01_or_8_gpr_val(ptr %v, i8 zeroext %c) nounwi
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shlb %cl, %dl
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB2_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -169,7 +169,7 @@ define zeroext i8 @atomic_shl1_xor_8_gpr_valz(ptr %v, i8 zeroext %c) nounwind {
 ; X86-NEXT:    movl $1, %edx
 ; X86-NEXT:    shll %cl, %edx
 ; X86-NEXT:    movzbl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB3_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -190,7 +190,7 @@ define zeroext i8 @atomic_shl1_xor_8_gpr_valz(ptr %v, i8 zeroext %c) nounwind {
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -225,7 +225,7 @@ define zeroext i8 @atomic_shl1_mask0_xor_8_gpr_valz(ptr %v, i8 zeroext %c) nounw
 ; X86-NEXT:    movb $1, %ah
 ; X86-NEXT:    shlb %cl, %ah
 ; X86-NEXT:    movb (%esi), %al
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB4_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -247,7 +247,7 @@ define zeroext i8 @atomic_shl1_mask0_xor_8_gpr_valz(ptr %v, i8 zeroext %c) nounw
 ; X64-NEXT:    movb $1, %dl
 ; X64-NEXT:    shlb %cl, %dl
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB4_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -283,7 +283,7 @@ define zeroext i8 @atomic_shl1_mask01_xor_8_gpr_valz(ptr %v, i8 zeroext %c) noun
 ; X86-NEXT:    movl $1, %ebx
 ; X86-NEXT:    shll %cl, %ebx
 ; X86-NEXT:    movzbl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB5_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -305,7 +305,7 @@ define zeroext i8 @atomic_shl1_mask01_xor_8_gpr_valz(ptr %v, i8 zeroext %c) noun
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB5_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -341,7 +341,7 @@ define zeroext i8 @atomic_shl1_and_8_gpr_brnz(ptr %v, i8 zeroext %c) nounwind {
 ; X86-NEXT:    movb %bl, %ah
 ; X86-NEXT:    notb %ah
 ; X86-NEXT:    movb (%edx), %al
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB6_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movb %al, %ch
@@ -370,7 +370,7 @@ define zeroext i8 @atomic_shl1_and_8_gpr_brnz(ptr %v, i8 zeroext %c) nounwind {
 ; X64-NEXT:    movl %edx, %esi
 ; X64-NEXT:    notb %sil
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB6_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %r8d
@@ -419,7 +419,7 @@ define zeroext i8 @atomic_shl1_mask0_and_8_gpr_brnz(ptr %v, i8 zeroext %c) nounw
 ; X86-NEXT:    movb $-2, %ah
 ; X86-NEXT:    rolb %cl, %ah
 ; X86-NEXT:    movb (%edx), %al
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB7_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movb %al, %ch
@@ -446,7 +446,7 @@ define zeroext i8 @atomic_shl1_mask0_and_8_gpr_brnz(ptr %v, i8 zeroext %c) nounw
 ; X64-NEXT:    movb $-2, %dl
 ; X64-NEXT:    rolb %cl, %dl
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB7_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -500,7 +500,7 @@ define zeroext i8 @atomic_shl1_mask01_and_8_gpr_brnz(ptr %v, i8 zeroext %c) noun
 ; X86-NEXT:    movl %ebx, %ecx
 ; X86-NEXT:    notb %cl
 ; X86-NEXT:    movb (%edx), %al
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB8_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movb %al, %ch
@@ -530,7 +530,7 @@ define zeroext i8 @atomic_shl1_mask01_and_8_gpr_brnz(ptr %v, i8 zeroext %c) noun
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    notb %cl
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB8_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %r8d
@@ -576,7 +576,7 @@ define zeroext i8 @atomic_shl1_and_8_gpr_val(ptr %v) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzbl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB9_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edx
@@ -590,7 +590,7 @@ define zeroext i8 @atomic_shl1_and_8_gpr_val(ptr %v) nounwind {
 ; X64-LABEL: atomic_shl1_and_8_gpr_val:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB9_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -611,7 +611,7 @@ define zeroext i8 @atomic_shl1_or_8_gpr_valnz(ptr %v) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzbl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB10_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edx
@@ -626,7 +626,7 @@ define zeroext i8 @atomic_shl1_or_8_gpr_valnz(ptr %v) nounwind {
 ; X64-LABEL: atomic_shl1_or_8_gpr_valnz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB10_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -649,7 +649,7 @@ define zeroext i8 @atomic_shl1_xor_8_gpr_brz(ptr %v) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movzbl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB11_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -669,7 +669,7 @@ define zeroext i8 @atomic_shl1_xor_8_gpr_brz(ptr %v) nounwind {
 ; X64-LABEL: atomic_shl1_xor_8_gpr_brz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB11_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -710,7 +710,7 @@ define zeroext i16 @atomic_shl1_xor_16_gpr_val(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    movl $1, %edx
 ; X86-NEXT:    shll %cl, %edx
 ; X86-NEXT:    movzwl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB12_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -732,7 +732,7 @@ define zeroext i16 @atomic_shl1_xor_16_gpr_val(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB12_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -799,7 +799,7 @@ define zeroext i16 @atomic_shl1_mask0_xor_16_gpr_val(ptr %v, i16 zeroext %c) nou
 ; X86-NEXT:    movl $1, %edi
 ; X86-NEXT:    shll %cl, %edi
 ; X86-NEXT:    movzwl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB14_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -825,7 +825,7 @@ define zeroext i16 @atomic_shl1_mask0_xor_16_gpr_val(ptr %v, i16 zeroext %c) nou
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB14_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -862,7 +862,7 @@ define zeroext i16 @atomic_shl1_mask1_xor_16_gpr_val(ptr %v, i16 zeroext %c) nou
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB15_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -888,7 +888,7 @@ define zeroext i16 @atomic_shl1_mask1_xor_16_gpr_val(ptr %v, i16 zeroext %c) nou
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB15_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -959,7 +959,7 @@ define zeroext i16 @atomic_blsi_xor_16_gpr_val(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    negl %ecx
 ; X86-NEXT:    andl %eax, %ecx
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB17_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %esi
@@ -980,7 +980,7 @@ define zeroext i16 @atomic_blsi_xor_16_gpr_val(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    negl %ecx
 ; X64-NEXT:    andl %esi, %ecx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB17_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %edx
@@ -1010,7 +1010,7 @@ define zeroext i16 @atomic_shl1_xor_16_gpr_valz(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    movl $1, %edx
 ; X86-NEXT:    shll %cl, %edx
 ; X86-NEXT:    movzwl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB18_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -1035,7 +1035,7 @@ define zeroext i16 @atomic_shl1_xor_16_gpr_valz(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB18_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -1075,7 +1075,7 @@ define zeroext i16 @atomic_shl1_small_mask_xor_16_gpr_valz(ptr %v, i16 zeroext %
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
 ; X86-NEXT:    movzwl %si, %esi
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB19_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -1102,7 +1102,7 @@ define zeroext i16 @atomic_shl1_small_mask_xor_16_gpr_valz(ptr %v, i16 zeroext %
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzwl %dx, %edx
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB19_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -1143,7 +1143,7 @@ define zeroext i16 @atomic_shl1_mask0_xor_16_gpr_valz(ptr %v, i16 zeroext %c) no
 ; X86-NEXT:    movl $1, %edi
 ; X86-NEXT:    shll %cl, %edi
 ; X86-NEXT:    movzwl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB20_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -1169,7 +1169,7 @@ define zeroext i16 @atomic_shl1_mask0_xor_16_gpr_valz(ptr %v, i16 zeroext %c) no
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB20_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -1208,7 +1208,7 @@ define zeroext i16 @atomic_shl1_mask1_xor_16_gpr_valz(ptr %v, i16 zeroext %c) no
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB21_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -1234,7 +1234,7 @@ define zeroext i16 @atomic_shl1_mask1_xor_16_gpr_valz(ptr %v, i16 zeroext %c) no
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB21_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -1276,7 +1276,7 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_valz(ptr %v, i16 zeroext %c) n
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB22_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -1302,7 +1302,7 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_valz(ptr %v, i16 zeroext %c) n
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB22_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -1341,7 +1341,7 @@ define zeroext i16 @atomic_blsi_xor_16_gpr_valz(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    negl %ecx
 ; X86-NEXT:    andl %eax, %ecx
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB23_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %esi
@@ -1365,7 +1365,7 @@ define zeroext i16 @atomic_blsi_xor_16_gpr_valz(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    negl %ecx
 ; X64-NEXT:    andl %esi, %ecx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB23_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %edx
@@ -1403,7 +1403,7 @@ define zeroext i16 @atomic_shl1_xor_16_gpr_valnz(ptr %v, i16 zeroext %c) nounwin
 ; X86-NEXT:    movl $1, %edx
 ; X86-NEXT:    shll %cl, %edx
 ; X86-NEXT:    movzwl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB24_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -1428,7 +1428,7 @@ define zeroext i16 @atomic_shl1_xor_16_gpr_valnz(ptr %v, i16 zeroext %c) nounwin
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB24_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -1468,7 +1468,7 @@ define zeroext i16 @atomic_shl1_small_mask_xor_16_gpr_valnz(ptr %v, i16 zeroext
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
 ; X86-NEXT:    movzwl %si, %esi
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB25_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -1495,7 +1495,7 @@ define zeroext i16 @atomic_shl1_small_mask_xor_16_gpr_valnz(ptr %v, i16 zeroext
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzwl %dx, %edx
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB25_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -1536,7 +1536,7 @@ define zeroext i16 @atomic_shl1_mask0_xor_16_gpr_valnz(ptr %v, i16 zeroext %c) n
 ; X86-NEXT:    movl $1, %edi
 ; X86-NEXT:    shll %cl, %edi
 ; X86-NEXT:    movzwl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB26_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -1562,7 +1562,7 @@ define zeroext i16 @atomic_shl1_mask0_xor_16_gpr_valnz(ptr %v, i16 zeroext %c) n
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB26_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -1601,7 +1601,7 @@ define zeroext i16 @atomic_shl1_mask1_xor_16_gpr_valnz(ptr %v, i16 zeroext %c) n
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB27_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -1627,7 +1627,7 @@ define zeroext i16 @atomic_shl1_mask1_xor_16_gpr_valnz(ptr %v, i16 zeroext %c) n
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB27_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -1669,7 +1669,7 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_valnz(ptr %v, i16 zeroext %c)
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB28_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -1695,7 +1695,7 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_valnz(ptr %v, i16 zeroext %c)
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB28_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -1734,7 +1734,7 @@ define zeroext i16 @atomic_blsi_xor_16_gpr_valnz(ptr %v, i16 zeroext %c) nounwin
 ; X86-NEXT:    negl %ecx
 ; X86-NEXT:    andl %eax, %ecx
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB29_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %esi
@@ -1758,7 +1758,7 @@ define zeroext i16 @atomic_blsi_xor_16_gpr_valnz(ptr %v, i16 zeroext %c) nounwin
 ; X64-NEXT:    negl %ecx
 ; X64-NEXT:    andl %esi, %ecx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB29_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %edx
@@ -1797,7 +1797,7 @@ define zeroext i16 @atomic_shl1_xor_16_gpr_brz(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB30_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -1825,7 +1825,7 @@ define zeroext i16 @atomic_shl1_xor_16_gpr_brz(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB30_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -1878,7 +1878,7 @@ define zeroext i16 @atomic_shl1_small_mask_xor_16_gpr_brz(ptr %v, i16 zeroext %c
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
 ; X86-NEXT:    movzwl %si, %esi
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB31_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -1908,7 +1908,7 @@ define zeroext i16 @atomic_shl1_small_mask_xor_16_gpr_brz(ptr %v, i16 zeroext %c
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzwl %dx, %edx
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB31_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -1962,7 +1962,7 @@ define zeroext i16 @atomic_shl1_mask0_xor_16_gpr_brz(ptr %v, i16 zeroext %c) nou
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB32_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -1991,7 +1991,7 @@ define zeroext i16 @atomic_shl1_mask0_xor_16_gpr_brz(ptr %v, i16 zeroext %c) nou
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB32_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -2043,7 +2043,7 @@ define zeroext i16 @atomic_shl1_mask1_xor_16_gpr_brz(ptr %v, i16 zeroext %c) nou
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB33_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -2073,7 +2073,7 @@ define zeroext i16 @atomic_shl1_mask1_xor_16_gpr_brz(ptr %v, i16 zeroext %c) nou
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB33_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -2131,7 +2131,7 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_brz(ptr %v, i16 zeroext %c) no
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB34_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -2160,7 +2160,7 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_brz(ptr %v, i16 zeroext %c) no
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB34_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -2213,7 +2213,7 @@ define zeroext i16 @atomic_blsi_xor_16_gpr_brz(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    negl %esi
 ; X86-NEXT:    andl %ecx, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB35_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -2241,7 +2241,7 @@ define zeroext i16 @atomic_blsi_xor_16_gpr_brz(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    negl %ecx
 ; X64-NEXT:    andl %esi, %ecx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB35_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %edx
@@ -2295,7 +2295,7 @@ define zeroext i16 @atomic_shl1_and_16_gpr_val(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    movl $-2, %edi
 ; X86-NEXT:    roll %cl, %edi
 ; X86-NEXT:    movzwl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB36_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -2320,7 +2320,7 @@ define zeroext i16 @atomic_shl1_and_16_gpr_val(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    roll %cl, %esi
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB36_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -2387,7 +2387,7 @@ define zeroext i16 @atomic_shl1_mask0_and_16_gpr_val(ptr %v, i16 zeroext %c) nou
 ; X86-NEXT:    movw $-2, %si
 ; X86-NEXT:    rolw %cl, %si
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB38_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -2412,7 +2412,7 @@ define zeroext i16 @atomic_shl1_mask0_and_16_gpr_val(ptr %v, i16 zeroext %c) nou
 ; X64-NEXT:    movw $-2, %dx
 ; X64-NEXT:    rolw %cl, %dx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB38_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -2450,7 +2450,7 @@ define zeroext i16 @atomic_shl1_mask1_and_16_gpr_val(ptr %v, i16 zeroext %c) nou
 ; X86-NEXT:    movl $-2, %esi
 ; X86-NEXT:    roll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB39_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -2476,7 +2476,7 @@ define zeroext i16 @atomic_shl1_mask1_and_16_gpr_val(ptr %v, i16 zeroext %c) nou
 ; X64-NEXT:    movl $-2, %edx
 ; X64-NEXT:    roll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB39_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -2552,7 +2552,7 @@ define zeroext i16 @atomic_blsi_and_16_gpr_val(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    movl %ecx, %esi
 ; X86-NEXT:    notl %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB41_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -2576,7 +2576,7 @@ define zeroext i16 @atomic_blsi_and_16_gpr_val(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    movl %ecx, %edx
 ; X64-NEXT:    notl %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB41_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -2610,7 +2610,7 @@ define zeroext i16 @atomic_shl1_and_16_gpr_valnz(ptr %v, i16 zeroext %c) nounwin
 ; X86-NEXT:    movl $-2, %edi
 ; X86-NEXT:    roll %cl, %edi
 ; X86-NEXT:    movzwl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB42_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -2638,7 +2638,7 @@ define zeroext i16 @atomic_shl1_and_16_gpr_valnz(ptr %v, i16 zeroext %c) nounwin
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    roll %cl, %esi
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB42_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -2678,7 +2678,7 @@ define zeroext i16 @atomic_shl1_small_mask_and_16_gpr_valnz(ptr %v, i16 zeroext
 ; X86-NEXT:    movw $-2, %si
 ; X86-NEXT:    rolw %cl, %si
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB43_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -2704,7 +2704,7 @@ define zeroext i16 @atomic_shl1_small_mask_and_16_gpr_valnz(ptr %v, i16 zeroext
 ; X64-NEXT:    movw $-2, %dx
 ; X64-NEXT:    rolw %cl, %dx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB43_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -2744,7 +2744,7 @@ define zeroext i16 @atomic_shl1_mask0_and_16_gpr_valnz(ptr %v, i16 zeroext %c) n
 ; X86-NEXT:    movw $-2, %si
 ; X86-NEXT:    rolw %cl, %si
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB44_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -2769,7 +2769,7 @@ define zeroext i16 @atomic_shl1_mask0_and_16_gpr_valnz(ptr %v, i16 zeroext %c) n
 ; X64-NEXT:    movw $-2, %dx
 ; X64-NEXT:    rolw %cl, %dx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB44_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -2809,7 +2809,7 @@ define zeroext i16 @atomic_shl1_mask1_and_16_gpr_valnz(ptr %v, i16 zeroext %c) n
 ; X86-NEXT:    movl $-2, %esi
 ; X86-NEXT:    roll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB45_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -2835,7 +2835,7 @@ define zeroext i16 @atomic_shl1_mask1_and_16_gpr_valnz(ptr %v, i16 zeroext %c) n
 ; X64-NEXT:    movl $-2, %edx
 ; X64-NEXT:    roll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB45_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -2881,7 +2881,7 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_valnz(ptr %v, i16 zeroext %c)
 ; X86-NEXT:    movl $-2, %edi
 ; X86-NEXT:    roll %cl, %edi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB46_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -2910,7 +2910,7 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_valnz(ptr %v, i16 zeroext %c)
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    roll %cl, %esi
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB46_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -2953,7 +2953,7 @@ define zeroext i16 @atomic_blsi_and_16_gpr_valnz(ptr %v, i16 zeroext %c) nounwin
 ; X86-NEXT:    movl %ecx, %esi
 ; X86-NEXT:    notl %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB47_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -2980,7 +2980,7 @@ define zeroext i16 @atomic_blsi_and_16_gpr_valnz(ptr %v, i16 zeroext %c) nounwin
 ; X64-NEXT:    movl %ecx, %edx
 ; X64-NEXT:    notl %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB47_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -3023,7 +3023,7 @@ define zeroext i16 @atomic_shl1_and_16_gpr_brnz(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    movl $-2, %edi
 ; X86-NEXT:    roll %cl, %edi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB48_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ebx
@@ -3056,7 +3056,7 @@ define zeroext i16 @atomic_shl1_and_16_gpr_brnz(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    movl $-2, %esi
 ; X64-NEXT:    roll %cl, %esi
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB48_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %r8d
@@ -3109,7 +3109,7 @@ define zeroext i16 @atomic_shl1_small_mask_and_16_gpr_brnz(ptr %v, i16 zeroext %
 ; X86-NEXT:    movw $-2, %si
 ; X86-NEXT:    rolw %cl, %si
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB49_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -3140,7 +3140,7 @@ define zeroext i16 @atomic_shl1_small_mask_and_16_gpr_brnz(ptr %v, i16 zeroext %
 ; X64-NEXT:    movw $-2, %dx
 ; X64-NEXT:    rolw %cl, %dx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB49_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -3193,7 +3193,7 @@ define zeroext i16 @atomic_shl1_mask0_and_16_gpr_brnz(ptr %v, i16 zeroext %c) no
 ; X86-NEXT:    movw $-2, %si
 ; X86-NEXT:    rolw %cl, %si
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB50_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -3223,7 +3223,7 @@ define zeroext i16 @atomic_shl1_mask0_and_16_gpr_brnz(ptr %v, i16 zeroext %c) no
 ; X64-NEXT:    movw $-2, %dx
 ; X64-NEXT:    rolw %cl, %dx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB50_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -3276,7 +3276,7 @@ define zeroext i16 @atomic_shl1_mask1_and_16_gpr_brnz(ptr %v, i16 zeroext %c) no
 ; X86-NEXT:    movl $-2, %esi
 ; X86-NEXT:    roll %cl, %esi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB51_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -3308,7 +3308,7 @@ define zeroext i16 @atomic_shl1_mask1_and_16_gpr_brnz(ptr %v, i16 zeroext %c) no
 ; X64-NEXT:    movl $-2, %edx
 ; X64-NEXT:    roll %cl, %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB51_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -3370,7 +3370,7 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_brnz(ptr %v, i16 zeroext %c) n
 ; X86-NEXT:    movl $-2, %edi
 ; X86-NEXT:    roll %cl, %edi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB52_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -3404,7 +3404,7 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_brnz(ptr %v, i16 zeroext %c) n
 ; X64-NEXT:    movl $-2, %r8d
 ; X64-NEXT:    roll %cl, %r8d
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB52_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -3461,7 +3461,7 @@ define zeroext i16 @atomic_blsi_and_16_gpr_brnz(ptr %v, i16 zeroext %c) nounwind
 ; X86-NEXT:    movl %esi, %edi
 ; X86-NEXT:    notl %edi
 ; X86-NEXT:    movzwl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB53_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ebx
@@ -3494,7 +3494,7 @@ define zeroext i16 @atomic_blsi_and_16_gpr_brnz(ptr %v, i16 zeroext %c) nounwind
 ; X64-NEXT:    movl %ecx, %edx
 ; X64-NEXT:    notl %edx
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB53_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %r8d
@@ -3567,7 +3567,7 @@ define zeroext i16 @atomic_shl1_or_16_const_valnz(ptr %v) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB55_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edx
@@ -3585,7 +3585,7 @@ define zeroext i16 @atomic_shl1_or_16_const_valnz(ptr %v) nounwind {
 ; X64-LABEL: atomic_shl1_or_16_const_valnz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB55_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -3675,7 +3675,7 @@ define zeroext i16 @atomic_shl1_and_16_const_valz(ptr %v) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB58_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edx
@@ -3694,7 +3694,7 @@ define zeroext i16 @atomic_shl1_and_16_const_valz(ptr %v) nounwind {
 ; X64-LABEL: atomic_shl1_and_16_const_valz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB58_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -3922,7 +3922,7 @@ define i32 @atomic_blsi_or_32_gpr_val(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    negl %ecx
 ; X86-NEXT:    andl %eax, %ecx
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB65_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %esi
@@ -3940,7 +3940,7 @@ define i32 @atomic_blsi_or_32_gpr_val(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    negl %ecx
 ; X64-NEXT:    andl %esi, %ecx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB65_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %edx
@@ -3968,7 +3968,7 @@ define i32 @atomic_shl1_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB66_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -3990,7 +3990,7 @@ define i32 @atomic_shl1_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB66_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4023,7 +4023,7 @@ define i32 @atomic_shl1_small_mask_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB67_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4046,7 +4046,7 @@ define i32 @atomic_shl1_small_mask_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB67_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4079,7 +4079,7 @@ define i32 @atomic_shl1_mask0_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB68_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4101,7 +4101,7 @@ define i32 @atomic_shl1_mask0_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB68_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4134,7 +4134,7 @@ define i32 @atomic_shl1_mask1_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB69_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4156,7 +4156,7 @@ define i32 @atomic_shl1_mask1_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB69_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4190,7 +4190,7 @@ define i32 @atomic_shl1_mask01_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB70_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4213,7 +4213,7 @@ define i32 @atomic_shl1_mask01_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB70_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4246,7 +4246,7 @@ define i32 @atomic_blsi_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    negl %edx
 ; X86-NEXT:    andl %eax, %edx
 ; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB71_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %esi
@@ -4267,7 +4267,7 @@ define i32 @atomic_blsi_or_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    negl %edx
 ; X64-NEXT:    andl %esi, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB71_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -4300,7 +4300,7 @@ define i32 @atomic_shl1_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB72_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4322,7 +4322,7 @@ define i32 @atomic_shl1_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB72_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4354,7 +4354,7 @@ define i32 @atomic_shl1_small_mask_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB73_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4377,7 +4377,7 @@ define i32 @atomic_shl1_small_mask_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB73_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4409,7 +4409,7 @@ define i32 @atomic_shl1_mask0_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB74_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4431,7 +4431,7 @@ define i32 @atomic_shl1_mask0_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB74_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4463,7 +4463,7 @@ define i32 @atomic_shl1_mask1_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB75_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4485,7 +4485,7 @@ define i32 @atomic_shl1_mask1_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB75_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4518,7 +4518,7 @@ define i32 @atomic_shl1_mask01_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB76_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4541,7 +4541,7 @@ define i32 @atomic_shl1_mask01_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB76_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -4573,7 +4573,7 @@ define i32 @atomic_blsi_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    negl %edx
 ; X86-NEXT:    andl %eax, %edx
 ; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB77_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %esi
@@ -4594,7 +4594,7 @@ define i32 @atomic_blsi_or_32_gpr_valnz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    negl %edx
 ; X64-NEXT:    andl %esi, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB77_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -4867,7 +4867,7 @@ define i32 @atomic_blsi_or_32_gpr_br(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    negl %esi
 ; X86-NEXT:    andl %ecx, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB83_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -4893,7 +4893,7 @@ define i32 @atomic_blsi_or_32_gpr_br(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    negl %ecx
 ; X64-NEXT:    andl %esi, %ecx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB83_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %edx
@@ -5179,7 +5179,7 @@ define i32 @atomic_blsi_or_32_gpr_brz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    negl %edi
 ; X86-NEXT:    andl %edx, %edi
 ; X86-NEXT:    movl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB89_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -5204,7 +5204,7 @@ define i32 @atomic_blsi_or_32_gpr_brz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    negl %edx
 ; X64-NEXT:    andl %esi, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB89_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -5492,7 +5492,7 @@ define i32 @atomic_blsi_or_32_gpr_brnz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    negl %esi
 ; X86-NEXT:    andl %ecx, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB95_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -5518,7 +5518,7 @@ define i32 @atomic_blsi_or_32_gpr_brnz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    negl %ecx
 ; X64-NEXT:    andl %esi, %ecx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB95_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %edx
@@ -5564,7 +5564,7 @@ define i32 @atomic_shl1_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB96_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -5586,7 +5586,7 @@ define i32 @atomic_shl1_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB96_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -5619,7 +5619,7 @@ define i32 @atomic_shl1_small_mask_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB97_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -5642,7 +5642,7 @@ define i32 @atomic_shl1_small_mask_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB97_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -5675,7 +5675,7 @@ define i32 @atomic_shl1_mask0_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB98_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -5697,7 +5697,7 @@ define i32 @atomic_shl1_mask0_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB98_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -5730,7 +5730,7 @@ define i32 @atomic_shl1_mask1_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB99_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -5752,7 +5752,7 @@ define i32 @atomic_shl1_mask1_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB99_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -5786,7 +5786,7 @@ define i32 @atomic_shl1_mask01_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB100_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -5809,7 +5809,7 @@ define i32 @atomic_shl1_mask01_and_32_gpr_valz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB100_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -5969,7 +5969,7 @@ define i32 @atomic_shl1_mask0_and_32_gpr_br(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB104_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -5995,7 +5995,7 @@ define i32 @atomic_shl1_mask0_and_32_gpr_br(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB104_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -6042,7 +6042,7 @@ define i32 @atomic_shl1_mask1_and_32_gpr_br(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB105_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -6068,7 +6068,7 @@ define i32 @atomic_shl1_mask1_and_32_gpr_br(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB105_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -6305,7 +6305,7 @@ define i32 @atomic_shl1_mask0_and_32_gpr_brz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %edx
 ; X86-NEXT:    shll %cl, %edx
 ; X86-NEXT:    movl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB110_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -6330,7 +6330,7 @@ define i32 @atomic_shl1_mask0_and_32_gpr_brz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB110_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -6379,7 +6379,7 @@ define i32 @atomic_shl1_mask1_and_32_gpr_brz(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %edx
 ; X86-NEXT:    shll %cl, %edx
 ; X86-NEXT:    movl (%esi), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB111_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edi
@@ -6404,7 +6404,7 @@ define i32 @atomic_shl1_mask1_and_32_gpr_brz(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    movl $1, %edx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB111_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %esi
@@ -6566,7 +6566,7 @@ define i32 @atomic_shl1_xor_32_const_valz(ptr %v) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB115_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edx
@@ -6583,7 +6583,7 @@ define i32 @atomic_shl1_xor_32_const_valz(ptr %v) nounwind {
 ; X64-LABEL: atomic_shl1_xor_32_const_valz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB115_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -6609,7 +6609,7 @@ define i32 @atomic_shl1_xor_32_const_valnz(ptr %v) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB116_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edx
@@ -6624,7 +6624,7 @@ define i32 @atomic_shl1_xor_32_const_valnz(ptr %v) nounwind {
 ; X64-LABEL: atomic_shl1_xor_32_const_valnz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB116_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -6670,7 +6670,7 @@ define i32 @atomic_shl1_and_32_const_valz(ptr %v) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB118_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edx
@@ -6687,7 +6687,7 @@ define i32 @atomic_shl1_and_32_const_valz(ptr %v) nounwind {
 ; X64-LABEL: atomic_shl1_and_32_const_valz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB118_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -6713,7 +6713,7 @@ define i32 @atomic_shl1_and_32_const_valnz(ptr %v) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB119_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edx
@@ -6728,7 +6728,7 @@ define i32 @atomic_shl1_and_32_const_valnz(ptr %v) nounwind {
 ; X64-LABEL: atomic_shl1_and_32_const_valnz:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB119_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -6835,7 +6835,7 @@ define i32 @atomic_xor_dead_and(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl $1, %esi
 ; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    movl (%edx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB122_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %ecx
@@ -6855,7 +6855,7 @@ define i32 @atomic_xor_dead_and(ptr %v, i32 %c) nounwind {
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %edx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB122_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -6882,7 +6882,7 @@ define i32 @atomic_xor_with_not_arg(ptr %v, i32 %c) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    notl %edx
 ; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB123_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %esi
@@ -6897,7 +6897,7 @@ define i32 @atomic_xor_with_not_arg(ptr %v, i32 %c) nounwind {
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    notl %esi
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB123_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -6920,7 +6920,7 @@ define i16 @atomic_or_with_not_arg(ptr %v, i16 %c) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    notl %edx
 ; X86-NEXT:    movzwl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB124_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %esi
@@ -6938,7 +6938,7 @@ define i16 @atomic_or_with_not_arg(ptr %v, i16 %c) nounwind {
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    notl %esi
 ; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB124_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx
@@ -6963,7 +6963,7 @@ define i8 @atomic_and_with_not_arg(ptr %v, i8 %c) nounwind {
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    notb %dl
 ; X86-NEXT:    movzbl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB125_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movb %al, %ah
@@ -6977,7 +6977,7 @@ define i8 @atomic_and_with_not_arg(ptr %v, i8 %c) nounwind {
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    notb %sil
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB125_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx

diff --git a/llvm/test/CodeGen/X86/atomic-xor.ll b/llvm/test/CodeGen/X86/atomic-xor.ll
index 930286c8e5fb34..c648ecdfbe674b 100644
--- a/llvm/test/CodeGen/X86/atomic-xor.ll
+++ b/llvm/test/CodeGen/X86/atomic-xor.ll
@@ -32,7 +32,7 @@ define i128 @xor128_signbit_used(ptr %p) nounwind {
 ; X86-NEXT:    movl 8(%edi), %edx
 ; X86-NEXT:    movl (%edi), %ebx
 ; X86-NEXT:    movl 4(%edi), %esi
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB1_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %ebx, (%esp)
@@ -94,7 +94,7 @@ define i64 @xor64_signbit_used(ptr %p) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl (%esi), %eax
 ; X86-NEXT:    movl 4(%esi), %edx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB2_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    leal -2147483648(%edx), %ecx
@@ -171,7 +171,7 @@ define i32 @xor32_not_signbit_used(ptr %p) nounwind {
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB6_1: # %atomicrmw.start
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %eax, %edx
@@ -184,7 +184,7 @@ define i32 @xor32_not_signbit_used(ptr %p) nounwind {
 ; X64-LABEL: xor32_not_signbit_used:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB6_1: # %atomicrmw.start
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, %ecx

diff --git a/llvm/test/CodeGen/X86/atomic128.ll b/llvm/test/CodeGen/X86/atomic128.ll
index 949ee9e276a43c..8d43c612aa07fc 100644
--- a/llvm/test/CodeGen/X86/atomic128.ll
+++ b/llvm/test/CodeGen/X86/atomic128.ll
@@ -47,7 +47,6 @@ define i128 @load_global_with_offset() nounwind {
 ; CHECK-AVX-NEXT:    vmovq %xmm0, %rax
 ; CHECK-AVX-NEXT:    vpextrq $1, %xmm0, %rdx
 ; CHECK-AVX-NEXT:    retq
-;
 entry:
   %0 = load atomic i128, ptr getelementptr inbounds ({i128, i128}, ptr @cmpxchg16b_global, i64 0, i32 1) acquire, align 16
   ret i128 %0
@@ -62,7 +61,7 @@ define void @fetch_and_nand(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, %r8
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB2_1: ## %atomicrmw.start
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rdx, %rcx
@@ -92,7 +91,7 @@ define void @fetch_and_or(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, %r8
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB3_1: ## %atomicrmw.start
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rbx
@@ -120,7 +119,7 @@ define void @fetch_and_add(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, %r8
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB4_1: ## %atomicrmw.start
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rbx
@@ -148,7 +147,7 @@ define void @fetch_and_sub(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, %r8
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB5_1: ## %atomicrmw.start
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %rbx
@@ -176,7 +175,7 @@ define void @fetch_and_min(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, %r8
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB6_1: ## %atomicrmw.start
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpq %rax, %rsi
@@ -207,7 +206,7 @@ define void @fetch_and_max(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, %r8
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB7_1: ## %atomicrmw.start
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpq %rax, %rsi
@@ -238,7 +237,7 @@ define void @fetch_and_umin(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, %r8
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB8_1: ## %atomicrmw.start
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpq %rax, %rsi
@@ -269,7 +268,7 @@ define void @fetch_and_umax(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, %r8
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB9_1: ## %atomicrmw.start
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpq %rax, %rsi
@@ -349,7 +348,7 @@ define void @atomic_store_seq_cst(ptr %p, i128 %in) {
 ; CHECK-NOAVX-NEXT:    movq %rsi, %rbx
 ; CHECK-NOAVX-NEXT:    movq (%rdi), %rax
 ; CHECK-NOAVX-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NOAVX-NEXT:    .p2align 4, 0x90
+; CHECK-NOAVX-NEXT:    .p2align 4
 ; CHECK-NOAVX-NEXT:  LBB12_1: ## %atomicrmw.start
 ; CHECK-NOAVX-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NOAVX-NEXT:    lock cmpxchg16b (%rdi)
@@ -380,7 +379,7 @@ define void @atomic_store_release(ptr %p, i128 %in) {
 ; CHECK-NOAVX-NEXT:    movq %rsi, %rbx
 ; CHECK-NOAVX-NEXT:    movq (%rdi), %rax
 ; CHECK-NOAVX-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NOAVX-NEXT:    .p2align 4, 0x90
+; CHECK-NOAVX-NEXT:    .p2align 4
 ; CHECK-NOAVX-NEXT:  LBB13_1: ## %atomicrmw.start
 ; CHECK-NOAVX-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NOAVX-NEXT:    lock cmpxchg16b (%rdi)
@@ -410,7 +409,7 @@ define void @atomic_store_relaxed(ptr %p, i128 %in) {
 ; CHECK-NOAVX-NEXT:    movq %rsi, %rbx
 ; CHECK-NOAVX-NEXT:    movq (%rdi), %rax
 ; CHECK-NOAVX-NEXT:    movq 8(%rdi), %rdx
-; CHECK-NOAVX-NEXT:    .p2align 4, 0x90
+; CHECK-NOAVX-NEXT:    .p2align 4
 ; CHECK-NOAVX-NEXT:  LBB14_1: ## %atomicrmw.start
 ; CHECK-NOAVX-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NOAVX-NEXT:    lock cmpxchg16b (%rdi)

diff --git a/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
index 04bfb4e367b9d7..3e1a631e39b069 100644
--- a/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll
@@ -9,7 +9,7 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
 ; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-32-NEXT:    movzbl (%edx), %eax
 ; CHECK-32-NEXT:    jmp .LBB0_1
-; CHECK-32-NEXT:    .p2align 4, 0x90
+; CHECK-32-NEXT:    .p2align 4
 ; CHECK-32-NEXT:  .LBB0_3: # %atomicrmw.start
 ; CHECK-32-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-32-NEXT:    lock cmpxchgb %ah, (%edx)
@@ -29,7 +29,7 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
 ; CHECK-64-LABEL: atomicrmw_usub_cond_i8:
 ; CHECK-64:       # %bb.0:
 ; CHECK-64-NEXT:    movzbl (%rdi), %eax
-; CHECK-64-NEXT:    .p2align 4, 0x90
+; CHECK-64-NEXT:    .p2align 4
 ; CHECK-64-NEXT:  .LBB0_1: # %atomicrmw.start
 ; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-64-NEXT:    movzbl %al, %ecx
@@ -55,7 +55,7 @@ define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
 ; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-32-NEXT:    movzwl (%edx), %eax
 ; CHECK-32-NEXT:    jmp .LBB1_1
-; CHECK-32-NEXT:    .p2align 4, 0x90
+; CHECK-32-NEXT:    .p2align 4
 ; CHECK-32-NEXT:  .LBB1_3: # %atomicrmw.start
 ; CHECK-32-NEXT:    # in Loop: Header=BB1_1 Depth=1
 ; CHECK-32-NEXT:    # kill: def $ax killed $ax killed $eax
@@ -80,7 +80,7 @@ define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
 ; CHECK-64-LABEL: atomicrmw_usub_cond_i16:
 ; CHECK-64:       # %bb.0:
 ; CHECK-64-NEXT:    movzwl (%rdi), %eax
-; CHECK-64-NEXT:    .p2align 4, 0x90
+; CHECK-64-NEXT:    .p2align 4
 ; CHECK-64-NEXT:  .LBB1_1: # %atomicrmw.start
 ; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-64-NEXT:    movl %eax, %ecx
@@ -107,7 +107,7 @@ define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
 ; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-32-NEXT:    movl (%edx), %eax
 ; CHECK-32-NEXT:    jmp .LBB2_1
-; CHECK-32-NEXT:    .p2align 4, 0x90
+; CHECK-32-NEXT:    .p2align 4
 ; CHECK-32-NEXT:  .LBB2_3: # %atomicrmw.start
 ; CHECK-32-NEXT:    # in Loop: Header=BB2_1 Depth=1
 ; CHECK-32-NEXT:    lock cmpxchgl %esi, (%edx)
@@ -129,7 +129,7 @@ define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
 ; CHECK-64-LABEL: atomicrmw_usub_cond_i32:
 ; CHECK-64:       # %bb.0:
 ; CHECK-64-NEXT:    movl (%rdi), %eax
-; CHECK-64-NEXT:    .p2align 4, 0x90
+; CHECK-64-NEXT:    .p2align 4
 ; CHECK-64-NEXT:  .LBB2_1: # %atomicrmw.start
 ; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-64-NEXT:    movl %eax, %ecx
@@ -164,7 +164,7 @@ define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
 ; CHECK-32-NEXT:    movl (%ebp), %eax
 ; CHECK-32-NEXT:    movl 4(%ebp), %edx
 ; CHECK-32-NEXT:    jmp .LBB3_1
-; CHECK-32-NEXT:    .p2align 4, 0x90
+; CHECK-32-NEXT:    .p2align 4
 ; CHECK-32-NEXT:  .LBB3_3: # %atomicrmw.start
 ; CHECK-32-NEXT:    # in Loop: Header=BB3_1 Depth=1
 ; CHECK-32-NEXT:    lock cmpxchg8b (%ebp)
@@ -195,7 +195,7 @@ define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
 ; CHECK-64-LABEL: atomicrmw_usub_cond_i64:
 ; CHECK-64:       # %bb.0:
 ; CHECK-64-NEXT:    movq (%rdi), %rax
-; CHECK-64-NEXT:    .p2align 4, 0x90
+; CHECK-64-NEXT:    .p2align 4
 ; CHECK-64-NEXT:  .LBB3_1: # %atomicrmw.start
 ; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-64-NEXT:    movq %rax, %rcx
@@ -219,7 +219,7 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
 ; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-32-NEXT:    movzbl (%edx), %eax
 ; CHECK-32-NEXT:    jmp .LBB4_1
-; CHECK-32-NEXT:    .p2align 4, 0x90
+; CHECK-32-NEXT:    .p2align 4
 ; CHECK-32-NEXT:  .LBB4_3: # %atomicrmw.start
 ; CHECK-32-NEXT:    # in Loop: Header=BB4_1 Depth=1
 ; CHECK-32-NEXT:    lock cmpxchgb %bl, (%edx)
@@ -241,7 +241,7 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
 ; CHECK-64:       # %bb.0:
 ; CHECK-64-NEXT:    movzbl (%rdi), %eax
 ; CHECK-64-NEXT:    xorl %ecx, %ecx
-; CHECK-64-NEXT:    .p2align 4, 0x90
+; CHECK-64-NEXT:    .p2align 4
 ; CHECK-64-NEXT:  .LBB4_1: # %atomicrmw.start
 ; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-64-NEXT:    movl %eax, %edx
@@ -269,7 +269,7 @@ define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
 ; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-32-NEXT:    movzwl (%edx), %eax
 ; CHECK-32-NEXT:    jmp .LBB5_1
-; CHECK-32-NEXT:    .p2align 4, 0x90
+; CHECK-32-NEXT:    .p2align 4
 ; CHECK-32-NEXT:  .LBB5_3: # %atomicrmw.start
 ; CHECK-32-NEXT:    # in Loop: Header=BB5_1 Depth=1
 ; CHECK-32-NEXT:    lock cmpxchgw %si, (%edx)
@@ -295,7 +295,7 @@ define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
 ; CHECK-64:       # %bb.0:
 ; CHECK-64-NEXT:    movzwl (%rdi), %eax
 ; CHECK-64-NEXT:    xorl %ecx, %ecx
-; CHECK-64-NEXT:    .p2align 4, 0x90
+; CHECK-64-NEXT:    .p2align 4
 ; CHECK-64-NEXT:  .LBB5_1: # %atomicrmw.start
 ; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-64-NEXT:    movl %eax, %edx
@@ -322,7 +322,7 @@ define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
 ; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-32-NEXT:    movl (%edx), %eax
 ; CHECK-32-NEXT:    jmp .LBB6_1
-; CHECK-32-NEXT:    .p2align 4, 0x90
+; CHECK-32-NEXT:    .p2align 4
 ; CHECK-32-NEXT:  .LBB6_3: # %atomicrmw.start
 ; CHECK-32-NEXT:    # in Loop: Header=BB6_1 Depth=1
 ; CHECK-32-NEXT:    lock cmpxchgl %esi, (%edx)
@@ -348,7 +348,7 @@ define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
 ; CHECK-64:       # %bb.0:
 ; CHECK-64-NEXT:    movl (%rdi), %eax
 ; CHECK-64-NEXT:    xorl %ecx, %ecx
-; CHECK-64-NEXT:    .p2align 4, 0x90
+; CHECK-64-NEXT:    .p2align 4
 ; CHECK-64-NEXT:  .LBB6_1: # %atomicrmw.start
 ; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-64-NEXT:    movl %eax, %edx
@@ -381,7 +381,7 @@ define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
 ; CHECK-32-NEXT:    movl (%ebp), %esi
 ; CHECK-32-NEXT:    movl 4(%ebp), %edi
 ; CHECK-32-NEXT:    jmp .LBB7_1
-; CHECK-32-NEXT:    .p2align 4, 0x90
+; CHECK-32-NEXT:    .p2align 4
 ; CHECK-32-NEXT:  .LBB7_3: # %atomicrmw.start
 ; CHECK-32-NEXT:    # in Loop: Header=BB7_1 Depth=1
 ; CHECK-32-NEXT:    movl %esi, %eax
@@ -421,7 +421,7 @@ define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
 ; CHECK-64:       # %bb.0:
 ; CHECK-64-NEXT:    movq (%rdi), %rax
 ; CHECK-64-NEXT:    xorl %ecx, %ecx
-; CHECK-64-NEXT:    .p2align 4, 0x90
+; CHECK-64-NEXT:    .p2align 4
 ; CHECK-64-NEXT:  .LBB7_1: # %atomicrmw.start
 ; CHECK-64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-64-NEXT:    movq %rax, %rdx

diff --git a/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll b/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
index 4f8cd5a52ed4c7..105ee7f82ee79d 100644
--- a/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
+++ b/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
@@ -13,7 +13,7 @@ define <2 x half> @test_atomicrmw_fadd_v2f16_align4(ptr addrspace(1) %ptr, <2 x
 ; CHECK-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; CHECK-NEXT:    pinsrw $0, 2(%rdi), %xmm1
 ; CHECK-NEXT:    pinsrw $0, (%rdi), %xmm0
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -65,7 +65,7 @@ define <2 x float> @test_atomicrmw_fadd_v2f32_align8(ptr addrspace(1) %ptr, <2 x
 ; CHECK-LABEL: test_atomicrmw_fadd_v2f32_align8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %xmm1, %rax

diff --git a/llvm/test/CodeGen/X86/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/X86/atomicrmw-uinc-udec-wrap.ll
index 3126917917fdec..f6fc65e3db4595 100644
--- a/llvm/test/CodeGen/X86/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/X86/atomicrmw-uinc-udec-wrap.ll
@@ -6,7 +6,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzbl (%rdi), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leal 1(%rax), %edx
@@ -29,7 +29,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzwl (%rdi), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leal 1(%rax), %edx
@@ -51,7 +51,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl (%rdi), %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leal 1(%rax), %edx
@@ -73,7 +73,7 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leaq 1(%rax), %rdx
@@ -92,7 +92,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzbl (%rdi), %eax
 ; CHECK-NEXT:    movzbl %sil, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %eax, %edx
@@ -114,7 +114,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
 ; CHECK-LABEL: atomicrmw_udec_wrap_i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzwl (%rdi), %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB5_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %eax, %ecx
@@ -135,7 +135,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
 ; CHECK-LABEL: atomicrmw_udec_wrap_i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl (%rdi), %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB6_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leal -1(%rax), %ecx
@@ -158,7 +158,7 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
 ; CHECK-LABEL: atomicrmw_udec_wrap_i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq (%rdi), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leaq -1(%rax), %rcx

diff --git a/llvm/test/CodeGen/X86/avx-cmp.ll b/llvm/test/CodeGen/X86/avx-cmp.ll
index 4ab9c545ed90da..d31107bfeb7bb7 100644
--- a/llvm/test/CodeGen/X86/avx-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx-cmp.ll
@@ -34,7 +34,7 @@ define void @render(double %a0) nounwind {
 ; CHECK-NEXT:    jne .LBB2_5
 ; CHECK-NEXT:  # %bb.1: # %for.cond5.preheader
 ; CHECK-NEXT:    movb $1, %bl
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_2: # %for.cond5
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %bl, %bl

diff --git a/llvm/test/CodeGen/X86/avx-vbroadcast.ll b/llvm/test/CodeGen/X86/avx-vbroadcast.ll
index c69886df82bdfb..0bfd8921e8b42a 100644
--- a/llvm/test/CodeGen/X86/avx-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx-vbroadcast.ll
@@ -923,7 +923,7 @@ define void @fmul_by_v2f32_broadcast() nounwind {
 ; X86-NEXT:    vmovddup {{.*#+}} xmm0 = [3.1E+1,0.0E+0,3.1E+1,0.0E+0]
 ; X86-NEXT:    ## xmm0 = mem[0,0]
 ; X86-NEXT:    ## implicit-def: $xmm1
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  LBB42_1: ## =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; X86-NEXT:    vmulps %xmm0, %xmm2, %xmm2
@@ -937,7 +937,7 @@ define void @fmul_by_v2f32_broadcast() nounwind {
 ; X64-NEXT:    vmovddup {{.*#+}} xmm0 = [3.1E+1,0.0E+0,3.1E+1,0.0E+0]
 ; X64-NEXT:    ## xmm0 = mem[0,0]
 ; X64-NEXT:    ## implicit-def: $xmm1
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  LBB42_1: ## =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; X64-NEXT:    vmulps %xmm0, %xmm2, %xmm2

diff --git a/llvm/test/CodeGen/X86/avx-vzeroupper.ll b/llvm/test/CodeGen/X86/avx-vzeroupper.ll
index 2fa71386c8056d..b84b0c2c5cc305 100644
--- a/llvm/test/CodeGen/X86/avx-vzeroupper.ll
+++ b/llvm/test/CodeGen/X86/avx-vzeroupper.ll
@@ -131,7 +131,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
 ; VZ-NEXT:    subq $16, %rsp
 ; VZ-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; VZ-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; VZ-NEXT:    .p2align 4, 0x90
+; VZ-NEXT:    .p2align 4
 ; VZ-NEXT:  .LBB3_1: # %while.cond
 ; VZ-NEXT:    # =>This Inner Loop Header: Depth=1
 ; VZ-NEXT:    callq foo
@@ -140,7 +140,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
 ; VZ-NEXT:  # %bb.2: # %for.body.preheader
 ; VZ-NEXT:    movl $4, %ebx
 ; VZ-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
-; VZ-NEXT:    .p2align 4, 0x90
+; VZ-NEXT:    .p2align 4
 ; VZ-NEXT:  .LBB3_3: # %for.body
 ; VZ-NEXT:    # =>This Inner Loop Header: Depth=1
 ; VZ-NEXT:    callq do_sse
@@ -160,7 +160,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
 ; DISABLE-VZ-NEXT:    subq $16, %rsp
 ; DISABLE-VZ-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; DISABLE-VZ-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; DISABLE-VZ-NEXT:    .p2align 4, 0x90
+; DISABLE-VZ-NEXT:    .p2align 4
 ; DISABLE-VZ-NEXT:  .LBB3_1: # %while.cond
 ; DISABLE-VZ-NEXT:    # =>This Inner Loop Header: Depth=1
 ; DISABLE-VZ-NEXT:    callq foo
@@ -169,7 +169,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
 ; DISABLE-VZ-NEXT:  # %bb.2: # %for.body.preheader
 ; DISABLE-VZ-NEXT:    movl $4, %ebx
 ; DISABLE-VZ-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
-; DISABLE-VZ-NEXT:    .p2align 4, 0x90
+; DISABLE-VZ-NEXT:    .p2align 4
 ; DISABLE-VZ-NEXT:  .LBB3_3: # %for.body
 ; DISABLE-VZ-NEXT:    # =>This Inner Loop Header: Depth=1
 ; DISABLE-VZ-NEXT:    callq do_sse
@@ -189,7 +189,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
 ; BDVER2-NEXT:    subq $16, %rsp
 ; BDVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; BDVER2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; BDVER2-NEXT:    .p2align 4, 0x90
+; BDVER2-NEXT:    .p2align 4
 ; BDVER2-NEXT:  .LBB3_1: # %while.cond
 ; BDVER2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; BDVER2-NEXT:    callq foo
@@ -198,7 +198,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
 ; BDVER2-NEXT:  # %bb.2: # %for.body.preheader
 ; BDVER2-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
 ; BDVER2-NEXT:    movl $4, %ebx
-; BDVER2-NEXT:    .p2align 4, 0x90
+; BDVER2-NEXT:    .p2align 4
 ; BDVER2-NEXT:  .LBB3_3: # %for.body
 ; BDVER2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; BDVER2-NEXT:    callq do_sse
@@ -218,7 +218,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
 ; BTVER2-NEXT:    subq $16, %rsp
 ; BTVER2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; BTVER2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; BTVER2-NEXT:    .p2align 4, 0x90
+; BTVER2-NEXT:    .p2align 4
 ; BTVER2-NEXT:  .LBB3_1: # %while.cond
 ; BTVER2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; BTVER2-NEXT:    callq foo
@@ -227,7 +227,7 @@ define <4 x float> @test03(<4 x float> %a, <4 x float> %b) nounwind {
 ; BTVER2-NEXT:  # %bb.2: # %for.body.preheader
 ; BTVER2-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
 ; BTVER2-NEXT:    movl $4, %ebx
-; BTVER2-NEXT:    .p2align 4, 0x90
+; BTVER2-NEXT:    .p2align 4
 ; BTVER2-NEXT:  .LBB3_3: # %for.body
 ; BTVER2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; BTVER2-NEXT:    callq do_sse

diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
index 9ac0503831eb7f..c50af6968f5bb2 100644
--- a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -683,7 +683,7 @@ define void @crash() nounwind alwaysinline {
 ; X86-NEXT:    je LBB33_1
 ; X86-NEXT:  ## %bb.2: ## %ret
 ; X86-NEXT:    retl
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  LBB33_1: ## %footer329VF
 ; X86-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    jmp LBB33_1
@@ -695,7 +695,7 @@ define void @crash() nounwind alwaysinline {
 ; X64-NEXT:    je LBB33_1
 ; X64-NEXT:  ## %bb.2: ## %ret
 ; X64-NEXT:    retq
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  LBB33_1: ## %footer329VF
 ; X64-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    jmp LBB33_1

diff --git a/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll b/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll
index ac65a1112be439..d7ecbb41e3b149 100644
--- a/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll
+++ b/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll
@@ -9,7 +9,7 @@ define void @bcast_unfold_add_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpaddd 4096(%rdi,%rax), %zmm0, %zmm1
@@ -41,7 +41,7 @@ define void @bcast_unfold_add_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpaddd 4096(%rdi,%rax), %ymm0, %ymm1
@@ -73,7 +73,7 @@ define void @bcast_unfold_add_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpaddd 4096(%rdi,%rax), %xmm0, %xmm1
@@ -104,7 +104,7 @@ define void @bcast_unfold_add_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpaddq 8192(%rdi,%rax), %zmm0, %zmm1
@@ -136,7 +136,7 @@ define void @bcast_unfold_add_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpaddq 8192(%rdi,%rax), %ymm0, %ymm1
@@ -168,7 +168,7 @@ define void @bcast_unfold_add_v2i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB5_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpaddq 8192(%rdi,%rax), %xmm0, %xmm1
@@ -198,7 +198,7 @@ define void @bcast_unfold_mul_v16i32(ptr %arg) {
 ; CHECK-LABEL: bcast_unfold_mul_v16i32:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB6_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm0
@@ -231,7 +231,7 @@ define void @bcast_unfold_mul_v8i32(ptr %arg) {
 ; CHECK-LABEL: bcast_unfold_mul_v8i32:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm0
@@ -264,7 +264,7 @@ define void @bcast_unfold_mul_v4i32(ptr %arg) {
 ; CHECK-LABEL: bcast_unfold_mul_v4i32:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB8_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm0
@@ -296,7 +296,7 @@ define void @bcast_unfold_mul_v8i64(ptr %arg) {
 ; CHECK-LABEL: bcast_unfold_mul_v8i64:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB9_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm0
@@ -329,7 +329,7 @@ define void @bcast_unfold_mul_v4i64(ptr %arg) {
 ; CHECK-LABEL: bcast_unfold_mul_v4i64:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB10_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm0
@@ -362,7 +362,7 @@ define void @bcast_unfold_mul_v2i64(ptr %arg) {
 ; CHECK-LABEL: bcast_unfold_mul_v2i64:
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB11_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %xmm0
@@ -395,7 +395,7 @@ define void @bcast_unfold_or_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB12_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpord 4096(%rdi,%rax), %zmm0, %zmm1
@@ -427,7 +427,7 @@ define void @bcast_unfold_or_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [3,3,3,3,3,3,3,3]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB13_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vorps 4096(%rdi,%rax), %ymm0, %ymm1
@@ -459,7 +459,7 @@ define void @bcast_unfold_or_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [3,3,3,3]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB14_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vorps 4096(%rdi,%rax), %xmm0, %xmm1
@@ -490,7 +490,7 @@ define void @bcast_unfold_or_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [3,3,3,3,3,3,3,3]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB15_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vporq 8192(%rdi,%rax), %zmm0, %zmm1
@@ -522,7 +522,7 @@ define void @bcast_unfold_or_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [3,3,3,3]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB16_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vorps 8192(%rdi,%rax), %ymm0, %ymm1
@@ -555,7 +555,7 @@ define void @bcast_unfold_or_v2i64(ptr %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [3,3]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB17_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vorps 8192(%rdi,%rax), %xmm0, %xmm1
@@ -586,7 +586,7 @@ define void @bcast_unfold_fneg_v16f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB18_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpxord 4096(%rdi,%rax), %zmm0, %zmm1
@@ -618,7 +618,7 @@ define void @bcast_unfold_fneg_v8f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB19_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vxorps 4096(%rdi,%rax), %ymm0, %ymm1
@@ -650,7 +650,7 @@ define void @bcast_unfold_fneg_v4f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB20_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vxorps 4096(%rdi,%rax), %xmm0, %xmm1
@@ -681,7 +681,7 @@ define void @bcast_unfold_fneg_v8f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB21_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpxorq 8192(%rdi,%rax), %zmm0, %zmm1
@@ -713,7 +713,7 @@ define void @bcast_unfold_fneg_v4f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB22_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vxorps 8192(%rdi,%rax), %ymm0, %ymm1
@@ -746,7 +746,7 @@ define void @bcast_unfold_fneg_v2f64(ptr %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB23_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vxorps 8192(%rdi,%rax), %xmm0, %xmm1
@@ -777,7 +777,7 @@ define void @bcast_unfold_fabs_v16f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB24_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpandd 4096(%rdi,%rax), %zmm0, %zmm1
@@ -812,7 +812,7 @@ define void @bcast_unfold_fabs_v8f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB25_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vandps 4096(%rdi,%rax), %ymm0, %ymm1
@@ -847,7 +847,7 @@ define void @bcast_unfold_fabs_v4f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB26_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vandps 4096(%rdi,%rax), %xmm0, %xmm1
@@ -881,7 +881,7 @@ define void @bcast_unfold_fabs_v8f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB27_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpandq 8192(%rdi,%rax), %zmm0, %zmm1
@@ -916,7 +916,7 @@ define void @bcast_unfold_fabs_v4f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB28_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vandps 8192(%rdi,%rax), %ymm0, %ymm1
@@ -952,7 +952,7 @@ define void @bcast_unfold_fabs_v2f64(ptr %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [NaN,NaN]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB29_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vandps 8192(%rdi,%rax), %xmm0, %xmm1
@@ -986,7 +986,7 @@ define void @bcast_unfold_fadd_v16f32(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB30_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vaddps 4096(%rdi,%rax), %zmm0, %zmm1
@@ -1018,7 +1018,7 @@ define void @bcast_unfold_fadd_v8f32(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB31_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vaddps 4096(%rdi,%rax), %ymm0, %ymm1
@@ -1050,7 +1050,7 @@ define void @bcast_unfold_fadd_v4f32(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB32_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vaddps 4096(%rdi,%rax), %xmm0, %xmm1
@@ -1081,7 +1081,7 @@ define void @bcast_unfold_fadd_v8f64(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB33_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vaddpd 8192(%rdi,%rax), %zmm0, %zmm1
@@ -1113,7 +1113,7 @@ define void @bcast_unfold_fadd_v4f64(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB34_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vaddpd 8192(%rdi,%rax), %ymm0, %ymm1
@@ -1146,7 +1146,7 @@ define void @bcast_unfold_fadd_v2f64(ptr nocapture %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [2.0E+0,2.0E+0]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB35_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vaddpd 8192(%rdi,%rax), %xmm0, %xmm1
@@ -1177,7 +1177,7 @@ define void @bcast_unfold_fmul_v16f32(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} zmm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB36_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmulps 4096(%rdi,%rax), %zmm0, %zmm1
@@ -1209,7 +1209,7 @@ define void @bcast_unfold_fmul_v8f32(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB37_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmulps 4096(%rdi,%rax), %ymm0, %ymm1
@@ -1241,7 +1241,7 @@ define void @bcast_unfold_fmul_v4f32(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB38_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmulps 4096(%rdi,%rax), %xmm0, %xmm1
@@ -1272,7 +1272,7 @@ define void @bcast_unfold_fmul_v8f64(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB39_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmulpd 8192(%rdi,%rax), %zmm0, %zmm1
@@ -1304,7 +1304,7 @@ define void @bcast_unfold_fmul_v4f64(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB40_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmulpd 8192(%rdi,%rax), %ymm0, %ymm1
@@ -1337,7 +1337,7 @@ define void @bcast_unfold_fmul_v2f64(ptr nocapture %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [3.0E+0,3.0E+0]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB41_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmulpd 8192(%rdi,%rax), %xmm0, %xmm1
@@ -1368,7 +1368,7 @@ define void @bcast_unfold_fdiv_v16f32(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} zmm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB42_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm1
@@ -1401,7 +1401,7 @@ define void @bcast_unfold_fdiv_v8f32(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB43_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm1
@@ -1434,7 +1434,7 @@ define void @bcast_unfold_fdiv_v4f32(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB44_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm1
@@ -1466,7 +1466,7 @@ define void @bcast_unfold_fdiv_v8f64(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB45_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm1
@@ -1499,7 +1499,7 @@ define void @bcast_unfold_fdiv_v4f64(ptr nocapture %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB46_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm1
@@ -1533,7 +1533,7 @@ define void @bcast_unfold_fdiv_v2f64(ptr nocapture %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [3.0E+0,3.0E+0]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB47_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm1
@@ -1565,7 +1565,7 @@ define void @bcast_unfold_fma213_v4f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB48_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm1
@@ -1598,7 +1598,7 @@ define void @bcast_unfold_fma231_v4f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB49_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm1
@@ -1631,7 +1631,7 @@ define void @bcast_unfold_fma213_v8f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB50_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm1
@@ -1665,7 +1665,7 @@ define void @bcast_unfold_fma231_v8f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB51_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm1
@@ -1699,7 +1699,7 @@ define void @bcast_unfold_fma213_v16f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB52_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm1
@@ -1733,7 +1733,7 @@ define void @bcast_unfold_fma231_v16f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB53_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm1
@@ -1768,7 +1768,7 @@ define void @bcast_unfold_fma213_v2f64(ptr %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [2.0E+0,2.0E+0]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB54_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm1
@@ -1802,7 +1802,7 @@ define void @bcast_unfold_fma231_v2f64(ptr %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [2.0E+0,2.0E+0]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB55_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm1
@@ -1835,7 +1835,7 @@ define void @bcast_unfold_fma213_v4f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB56_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm1
@@ -1869,7 +1869,7 @@ define void @bcast_unfold_fma231_v4f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB57_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm1
@@ -1903,7 +1903,7 @@ define void @bcast_unfold_fma213_v8f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB58_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm1
@@ -1937,7 +1937,7 @@ define void @bcast_unfold_fma231_v8f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB59_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm1
@@ -1971,7 +1971,7 @@ define void @bcast_unfold_fmax_v4f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB60_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm1
@@ -2004,7 +2004,7 @@ define void @bcast_unfold_fmax_v8f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB61_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm1
@@ -2038,7 +2038,7 @@ define void @bcast_unfold_fmax_v16f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB62_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm1
@@ -2073,7 +2073,7 @@ define void @bcast_unfold_fmax_v2f64(ptr %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [2.0E+0,2.0E+0]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB63_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm1
@@ -2106,7 +2106,7 @@ define void @bcast_unfold_fmax_v4f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB64_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm1
@@ -2140,7 +2140,7 @@ define void @bcast_unfold_fmax_v8f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB65_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm1
@@ -2174,7 +2174,7 @@ define void @bcast_unfold_fmin_v4f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB66_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm1
@@ -2207,7 +2207,7 @@ define void @bcast_unfold_fmin_v8f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB67_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm1
@@ -2241,7 +2241,7 @@ define void @bcast_unfold_fmin_v16f32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB68_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm1
@@ -2276,7 +2276,7 @@ define void @bcast_unfold_fmin_v2f64(ptr %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [2.0E+0,2.0E+0]
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB69_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm1
@@ -2309,7 +2309,7 @@ define void @bcast_unfold_fmin_v4f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB70_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm1
@@ -2343,7 +2343,7 @@ define void @bcast_unfold_fmin_v8f64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB71_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm1
@@ -2377,7 +2377,7 @@ define void @bcast_unfold_smin_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB72_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminsd 4096(%rdi,%rax), %xmm0, %xmm1
@@ -2409,7 +2409,7 @@ define void @bcast_unfold_smin_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB73_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminsd 4096(%rdi,%rax), %ymm0, %ymm1
@@ -2442,7 +2442,7 @@ define void @bcast_unfold_smin_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB74_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminsd 4096(%rdi,%rax), %zmm0, %zmm1
@@ -2475,7 +2475,7 @@ define void @bcast_unfold_smin_v2i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB75_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminsq 8192(%rdi,%rax), %xmm0, %xmm1
@@ -2507,7 +2507,7 @@ define void @bcast_unfold_smin_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB76_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminsq 8192(%rdi,%rax), %ymm0, %ymm1
@@ -2540,7 +2540,7 @@ define void @bcast_unfold_smin_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB77_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminsq 8192(%rdi,%rax), %zmm0, %zmm1
@@ -2573,7 +2573,7 @@ define void @bcast_unfold_smax_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB78_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxsd 4096(%rdi,%rax), %xmm0, %xmm1
@@ -2605,7 +2605,7 @@ define void @bcast_unfold_smax_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB79_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxsd 4096(%rdi,%rax), %ymm0, %ymm1
@@ -2638,7 +2638,7 @@ define void @bcast_unfold_smax_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB80_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxsd 4096(%rdi,%rax), %zmm0, %zmm1
@@ -2671,7 +2671,7 @@ define void @bcast_unfold_smax_v2i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB81_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxsq 8192(%rdi,%rax), %xmm0, %xmm1
@@ -2703,7 +2703,7 @@ define void @bcast_unfold_smax_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB82_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxsq 8192(%rdi,%rax), %ymm0, %ymm1
@@ -2736,7 +2736,7 @@ define void @bcast_unfold_smax_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB83_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxsq 8192(%rdi,%rax), %zmm0, %zmm1
@@ -2769,7 +2769,7 @@ define void @bcast_unfold_umin_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB84_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminud 4096(%rdi,%rax), %xmm0, %xmm1
@@ -2801,7 +2801,7 @@ define void @bcast_unfold_umin_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB85_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminud 4096(%rdi,%rax), %ymm0, %ymm1
@@ -2834,7 +2834,7 @@ define void @bcast_unfold_umin_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB86_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminud 4096(%rdi,%rax), %zmm0, %zmm1
@@ -2867,7 +2867,7 @@ define void @bcast_unfold_umin_v2i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB87_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminuq 8192(%rdi,%rax), %xmm0, %xmm1
@@ -2899,7 +2899,7 @@ define void @bcast_unfold_umin_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB88_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminuq 8192(%rdi,%rax), %ymm0, %ymm1
@@ -2932,7 +2932,7 @@ define void @bcast_unfold_umin_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB89_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpminuq 8192(%rdi,%rax), %zmm0, %zmm1
@@ -2965,7 +2965,7 @@ define void @bcast_unfold_umax_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB90_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxud 4096(%rdi,%rax), %xmm0, %xmm1
@@ -2997,7 +2997,7 @@ define void @bcast_unfold_umax_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB91_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxud 4096(%rdi,%rax), %ymm0, %ymm1
@@ -3030,7 +3030,7 @@ define void @bcast_unfold_umax_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB92_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxud 4096(%rdi,%rax), %zmm0, %zmm1
@@ -3063,7 +3063,7 @@ define void @bcast_unfold_umax_v2i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB93_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxuq 8192(%rdi,%rax), %xmm0, %xmm1
@@ -3095,7 +3095,7 @@ define void @bcast_unfold_umax_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB94_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxuq 8192(%rdi,%rax), %ymm0, %ymm1
@@ -3128,7 +3128,7 @@ define void @bcast_unfold_umax_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB95_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpmaxuq 8192(%rdi,%rax), %zmm0, %zmm1
@@ -3161,7 +3161,7 @@ define void @bcast_unfold_pcmpgt_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB96_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm1
@@ -3195,7 +3195,7 @@ define void @bcast_unfold_pcmpgt_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB97_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm1
@@ -3230,7 +3230,7 @@ define void @bcast_unfold_pcmpgt_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB98_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm1
@@ -3265,7 +3265,7 @@ define void @bcast_unfold_pcmpgt_v2i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB99_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %xmm1
@@ -3299,7 +3299,7 @@ define void @bcast_unfold_pcmpgt_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB100_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm1
@@ -3334,7 +3334,7 @@ define void @bcast_unfold_pcmpgt_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB101_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm1
@@ -3369,7 +3369,7 @@ define void @bcast_unfold_pcmpeq_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB102_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm1
@@ -3403,7 +3403,7 @@ define void @bcast_unfold_pcmpeq_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB103_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm1
@@ -3438,7 +3438,7 @@ define void @bcast_unfold_pcmpeq_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB104_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm1
@@ -3473,7 +3473,7 @@ define void @bcast_unfold_pcmpeq_v2i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB105_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %xmm1
@@ -3507,7 +3507,7 @@ define void @bcast_unfold_pcmpeq_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB106_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm1
@@ -3542,7 +3542,7 @@ define void @bcast_unfold_pcmpeq_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB107_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm1
@@ -3577,7 +3577,7 @@ define void @bcast_unfold_pcmp_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB108_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,4), %xmm1
@@ -3612,7 +3612,7 @@ define void @bcast_unfold_pcmp_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB109_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,4), %ymm1
@@ -3648,7 +3648,7 @@ define void @bcast_unfold_pcmp_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB110_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 (%rdi,%rax,4), %zmm1
@@ -3684,7 +3684,7 @@ define void @bcast_unfold_pcmp_v2i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB111_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,8), %xmm1
@@ -3719,7 +3719,7 @@ define void @bcast_unfold_pcmp_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB112_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,8), %ymm1
@@ -3755,7 +3755,7 @@ define void @bcast_unfold_pcmp_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB113_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 (%rdi,%rax,8), %zmm1
@@ -3791,7 +3791,7 @@ define void @bcast_unfold_pcmpu_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB114_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,4), %xmm1
@@ -3826,7 +3826,7 @@ define void @bcast_unfold_pcmpu_v8i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB115_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,4), %ymm1
@@ -3862,7 +3862,7 @@ define void @bcast_unfold_pcmpu_v16i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB116_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 (%rdi,%rax,4), %zmm1
@@ -3898,7 +3898,7 @@ define void @bcast_unfold_pcmpu_v2i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB117_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,8), %xmm1
@@ -3933,7 +3933,7 @@ define void @bcast_unfold_pcmpu_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB118_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,8), %ymm1
@@ -3969,7 +3969,7 @@ define void @bcast_unfold_pcmpu_v8i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB119_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 (%rdi,%rax,8), %zmm1
@@ -4006,7 +4006,7 @@ define void @bcast_unfold_cmp_v4f32(ptr %arg) {
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB120_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm2
@@ -4041,7 +4041,7 @@ define void @bcast_unfold_cmp_v8f32(ptr %arg) {
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB121_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm2
@@ -4077,7 +4077,7 @@ define void @bcast_unfold_cmp_v16f32(ptr %arg) {
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} zmm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB122_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm2
@@ -4115,7 +4115,7 @@ define void @bcast_unfold_cmp_v2f64(ptr %arg) {
 ; CHECK-NEXT:    # xmm0 = mem[0,0]
 ; CHECK-NEXT:    vmovddup {{.*#+}} xmm1 = [3.0E+0,3.0E+0]
 ; CHECK-NEXT:    # xmm1 = mem[0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB123_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm2
@@ -4150,7 +4150,7 @@ define void @bcast_unfold_cmp_v4f64(ptr %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB124_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm2
@@ -4186,7 +4186,7 @@ define void @bcast_unfold_cmp_v8f64(ptr %arg) {
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
 ; CHECK-NEXT:    vbroadcastsd {{.*#+}} zmm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB125_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm2
@@ -4222,7 +4222,7 @@ define void @bcast_unfold_cmp_v8f32_refold(ptr nocapture %0) {
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB126_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vcmpgtps 4096(%rdi,%rax), %ymm0, %k1
 ; CHECK-NEXT:    vblendmps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm2 {%k1}
@@ -4254,7 +4254,7 @@ define void @bcast_unfold_ptestm_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB127_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm1
@@ -4289,7 +4289,7 @@ define void @bcast_unfold_ptestnm_v4i32(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB128_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm1
@@ -4324,7 +4324,7 @@ define void @bcast_unfold_ptestm_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB129_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm1
@@ -4360,7 +4360,7 @@ define void @bcast_unfold_ptestnm_v4i64(ptr %arg) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
 ; CHECK-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB130_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm1
@@ -4396,7 +4396,7 @@ define void @bcast_unfold_vpternlog_v16i32(ptr %arg, ptr %arg1) {
 ; CHECK:       # %bb.0: # %bb
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB131_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm1

diff  --git a/llvm/test/CodeGen/X86/avx512-i1test.ll b/llvm/test/CodeGen/X86/avx512-i1test.ll
index bf31b2da56b62e..3cd733181599e4 100644
--- a/llvm/test/CodeGen/X86/avx512-i1test.ll
+++ b/llvm/test/CodeGen/X86/avx512-i1test.ll
@@ -15,7 +15,7 @@ define void @func() {
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB0_1: # %bb56
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %bb33
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %al, %al

diff  --git a/llvm/test/CodeGen/X86/avx512vnni-combine.ll b/llvm/test/CodeGen/X86/avx512vnni-combine.ll
index c491a952682d53..b7d950e9942416 100644
--- a/llvm/test/CodeGen/X86/avx512vnni-combine.ll
+++ b/llvm/test/CodeGen/X86/avx512vnni-combine.ll
@@ -54,7 +54,7 @@ define <8 x i64> @foo_512(i32 %0, <8 x i64> %1, <8 x i64> %2, ptr %3) {
 ; CHECK-NEXT:    andl $-4, %edx
 ; CHECK-NEXT:    leaq 192(%rsi), %rdi
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_8: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpdpwssd -192(%rdi), %zmm1, %zmm0
 ; CHECK-NEXT:    vpmaddwd -128(%rdi), %zmm1, %zmm2
@@ -75,7 +75,7 @@ define <8 x i64> @foo_512(i32 %0, <8 x i64> %1, <8 x i64> %2, ptr %3) {
 ; CHECK-NEXT:    addq %rcx, %rsi
 ; CHECK-NEXT:    shll $6, %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_5: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpdpwssd (%rsi,%rcx), %zmm1, %zmm0
 ; CHECK-NEXT:    addq $64, %rcx
@@ -179,7 +179,7 @@ define void @bar_512(i32 %0, ptr %1, <8 x i64> %2, ptr %3) {
 ; CHECK-NEXT:    andl $-2, %edi
 ; CHECK-NEXT:    movl $64, %r8d
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_7: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqa64 (%rsi,%r8), %zmm1
 ; CHECK-NEXT:    vpmaddwd -64(%rdx,%r8), %zmm0, %zmm2

diff  --git a/llvm/test/CodeGen/X86/avxvnni-combine.ll b/llvm/test/CodeGen/X86/avxvnni-combine.ll
index 158a1fdf44fc6a..45f9a6475244ee 100644
--- a/llvm/test/CodeGen/X86/avxvnni-combine.ll
+++ b/llvm/test/CodeGen/X86/avxvnni-combine.ll
@@ -59,7 +59,7 @@ define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
 ; AVX-NEXT:    andl $-4, %edx
 ; AVX-NEXT:    leaq 48(%rsi), %rdi
 ; AVX-NEXT:    xorl %ecx, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB1_8: # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    {vex} vpdpwssd -48(%rdi), %xmm1, %xmm0
 ; AVX-NEXT:    vpmaddwd -32(%rdi), %xmm1, %xmm2
@@ -80,7 +80,7 @@ define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
 ; AVX-NEXT:    addq %rcx, %rsi
 ; AVX-NEXT:    shll $4, %eax
 ; AVX-NEXT:    xorl %ecx, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB1_5: # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    {vex} vpdpwssd (%rsi,%rcx), %xmm1, %xmm0
 ; AVX-NEXT:    addq $16, %rcx
@@ -106,7 +106,7 @@ define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    andl $-4, %edx
 ; AVX512-NEXT:    leaq 48(%rsi), %rdi
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB1_8: # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vpdpwssd -48(%rdi), %xmm1, %xmm0
 ; AVX512-NEXT:    vpmaddwd -32(%rdi), %xmm1, %xmm2
@@ -127,7 +127,7 @@ define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    addq %rcx, %rsi
 ; AVX512-NEXT:    shll $4, %eax
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB1_5: # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vpdpwssd (%rsi,%rcx), %xmm1, %xmm0
 ; AVX512-NEXT:    addq $16, %rcx
@@ -225,7 +225,7 @@ define void @bar_128(i32 %0, ptr %1, <2 x i64> %2, ptr %3) {
 ; AVX-NEXT:    andl $-2, %edi
 ; AVX-NEXT:    movl $16, %r8d
 ; AVX-NEXT:    xorl %ecx, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB2_7: # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    vmovdqa (%rsi,%r8), %xmm1
 ; AVX-NEXT:    vpmaddwd -16(%rdx,%r8), %xmm0, %xmm2
@@ -265,7 +265,7 @@ define void @bar_128(i32 %0, ptr %1, <2 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    andl $-2, %edi
 ; AVX512-NEXT:    movl $16, %r8d
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB2_7: # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vmovdqa (%rsi,%r8), %xmm1
 ; AVX512-NEXT:    vpmaddwd -16(%rdx,%r8), %xmm0, %xmm2
@@ -406,7 +406,7 @@ define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
 ; AVX-NEXT:    andl $-4, %edx
 ; AVX-NEXT:    leaq 96(%rsi), %rdi
 ; AVX-NEXT:    xorl %ecx, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB4_8: # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    {vex} vpdpwssd -96(%rdi), %ymm1, %ymm0
 ; AVX-NEXT:    vpmaddwd -64(%rdi), %ymm1, %ymm2
@@ -427,7 +427,7 @@ define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
 ; AVX-NEXT:    addq %rcx, %rsi
 ; AVX-NEXT:    shll $5, %eax
 ; AVX-NEXT:    xorl %ecx, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB4_5: # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    {vex} vpdpwssd (%rsi,%rcx), %ymm1, %ymm0
 ; AVX-NEXT:    addq $32, %rcx
@@ -453,7 +453,7 @@ define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    andl $-4, %edx
 ; AVX512-NEXT:    leaq 96(%rsi), %rdi
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB4_8: # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vpdpwssd -96(%rdi), %ymm1, %ymm0
 ; AVX512-NEXT:    vpmaddwd -64(%rdi), %ymm1, %ymm2
@@ -474,7 +474,7 @@ define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    addq %rcx, %rsi
 ; AVX512-NEXT:    shll $5, %eax
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB4_5: # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vpdpwssd (%rsi,%rcx), %ymm1, %ymm0
 ; AVX512-NEXT:    addq $32, %rcx
@@ -579,7 +579,7 @@ define void @bar_256(i32 %0, ptr %1, <4 x i64> %2, ptr %3) {
 ; AVX-NEXT:    andl $-2, %edi
 ; AVX-NEXT:    movl $32, %r8d
 ; AVX-NEXT:    xorl %ecx, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB5_7: # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    vmovdqa (%rsi,%r8), %ymm1
 ; AVX-NEXT:    vpmaddwd -32(%rdx,%r8), %ymm0, %ymm2
@@ -620,7 +620,7 @@ define void @bar_256(i32 %0, ptr %1, <4 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    andl $-2, %edi
 ; AVX512-NEXT:    movl $32, %r8d
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB5_7: # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vmovdqa (%rsi,%r8), %ymm1
 ; AVX512-NEXT:    vpmaddwd -32(%rdx,%r8), %ymm0, %ymm2

diff  --git a/llvm/test/CodeGen/X86/block-placement.ll b/llvm/test/CodeGen/X86/block-placement.ll
index e479ad20d6e4c2..675293410dfe71 100644
--- a/llvm/test/CodeGen/X86/block-placement.ll
+++ b/llvm/test/CodeGen/X86/block-placement.ll
@@ -251,7 +251,7 @@ define i32 @test_loop_align(i32 %i, ptr %a) {
 ; pass.
 ; CHECK-LABEL: test_loop_align:
 ; CHECK: %entry
-; CHECK: .p2align [[ALIGN:[0-9]+]],
+; CHECK: .p2align [[ALIGN:[0-9]+]]
 ; CHECK-NEXT: %body
 ; CHECK: %exit
 
@@ -276,9 +276,9 @@ define i32 @test_nested_loop_align(i32 %i, ptr %a, ptr %b) {
 ; Check that we provide nested loop body alignment.
 ; CHECK-LABEL: test_nested_loop_align:
 ; CHECK: %entry
-; CHECK: .p2align [[ALIGN]],
+; CHECK: .p2align [[ALIGN]]
 ; CHECK-NEXT: %loop.body.1
-; CHECK: .p2align [[ALIGN]],
+; CHECK: .p2align [[ALIGN]]
 ; CHECK-NEXT: %inner.loop.body
 ; CHECK-NOT: .p2align
 ; CHECK: %exit

diff  --git a/llvm/test/CodeGen/X86/break-false-dep.ll b/llvm/test/CodeGen/X86/break-false-dep.ll
index 8ff7fb2d351ad1..0a367e1916ff62 100644
--- a/llvm/test/CodeGen/X86/break-false-dep.ll
+++ b/llvm/test/CodeGen/X86/break-false-dep.ll
@@ -138,7 +138,7 @@ define dso_local float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
 ; SSE-LINUX-NEXT:    movl $1, %eax
 ; SSE-LINUX-NEXT:    xorps %xmm0, %xmm0
 ; SSE-LINUX-NEXT:    xorps %xmm1, %xmm1
-; SSE-LINUX-NEXT:    .p2align 4, 0x90
+; SSE-LINUX-NEXT:    .p2align 4
 ; SSE-LINUX-NEXT:  .LBB6_3: # %for.body
 ; SSE-LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-LINUX-NEXT:    xorps %xmm2, %xmm2
@@ -167,7 +167,7 @@ define dso_local float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
 ; SSE-WIN-NEXT:    movl $1, %eax
 ; SSE-WIN-NEXT:    xorps %xmm0, %xmm0
 ; SSE-WIN-NEXT:    xorps %xmm1, %xmm1
-; SSE-WIN-NEXT:    .p2align 4, 0x90
+; SSE-WIN-NEXT:    .p2align 4
 ; SSE-WIN-NEXT:  .LBB6_3: # %for.body
 ; SSE-WIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-WIN-NEXT:    xorps %xmm2, %xmm2
@@ -196,7 +196,7 @@ define dso_local float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
 ; AVX1-NEXT:    movl $1, %eax
 ; AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB6_3: # %for.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vcvtsi2ss %eax, %xmm4, %xmm2
@@ -223,7 +223,7 @@ define dso_local float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
 ; AVX512VL-NEXT:    movl $1, %eax
 ; AVX512VL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT:    .p2align 4, 0x90
+; AVX512VL-NEXT:    .p2align 4
 ; AVX512VL-NEXT:  .LBB6_3: # %for.body
 ; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VL-NEXT:    vcvtsi2ss %eax, %xmm3, %xmm2
@@ -276,7 +276,7 @@ define i64 @loopdep2(ptr nocapture %x, ptr nocapture %y) nounwind {
 ; SSE-LINUX:       # %bb.0: # %entry
 ; SSE-LINUX-NEXT:    movq (%rdi), %rax
 ; SSE-LINUX-NEXT:    movl $1, %ecx
-; SSE-LINUX-NEXT:    .p2align 4, 0x90
+; SSE-LINUX-NEXT:    .p2align 4
 ; SSE-LINUX-NEXT:  .LBB7_1: # %loop
 ; SSE-LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-LINUX-NEXT:    xorps %xmm0, %xmm0
@@ -310,7 +310,7 @@ define i64 @loopdep2(ptr nocapture %x, ptr nocapture %y) nounwind {
 ; SSE-WIN-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE-WIN-NEXT:    movq (%rcx), %rax
 ; SSE-WIN-NEXT:    movl $1, %ecx
-; SSE-WIN-NEXT:    .p2align 4, 0x90
+; SSE-WIN-NEXT:    .p2align 4
 ; SSE-WIN-NEXT:  .LBB7_1: # %loop
 ; SSE-WIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-WIN-NEXT:    xorps %xmm0, %xmm0
@@ -355,7 +355,7 @@ define i64 @loopdep2(ptr nocapture %x, ptr nocapture %y) nounwind {
 ; AVX-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX-NEXT:    movq (%rcx), %rax
 ; AVX-NEXT:    movl $1, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB7_1: # %loop
 ; AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
@@ -418,12 +418,12 @@ define dso_local void @loopdep3() {
 ; SSE-LINUX-LABEL: loopdep3:
 ; SSE-LINUX:       # %bb.0: # %entry
 ; SSE-LINUX-NEXT:    xorl %eax, %eax
-; SSE-LINUX-NEXT:    .p2align 4, 0x90
+; SSE-LINUX-NEXT:    .p2align 4
 ; SSE-LINUX-NEXT:  .LBB8_1: # %for.cond1.preheader
 ; SSE-LINUX-NEXT:    # =>This Loop Header: Depth=1
 ; SSE-LINUX-NEXT:    # Child Loop BB8_2 Depth 2
 ; SSE-LINUX-NEXT:    movq $-4096, %rcx # imm = 0xF000
-; SSE-LINUX-NEXT:    .p2align 4, 0x90
+; SSE-LINUX-NEXT:    .p2align 4
 ; SSE-LINUX-NEXT:  .LBB8_2: # %for.body3
 ; SSE-LINUX-NEXT:    # Parent Loop BB8_1 Depth=1
 ; SSE-LINUX-NEXT:    # => This Inner Loop Header: Depth=2
@@ -478,13 +478,13 @@ define dso_local void @loopdep3() {
 ; SSE-WIN-NEXT:    leaq y(%rip), %r8
 ; SSE-WIN-NEXT:    leaq z(%rip), %r9
 ; SSE-WIN-NEXT:    leaq w(%rip), %r10
-; SSE-WIN-NEXT:    .p2align 4, 0x90
+; SSE-WIN-NEXT:    .p2align 4
 ; SSE-WIN-NEXT:  .LBB8_1: # %for.cond1.preheader
 ; SSE-WIN-NEXT:    # =>This Loop Header: Depth=1
 ; SSE-WIN-NEXT:    # Child Loop BB8_2 Depth 2
 ; SSE-WIN-NEXT:    movq %rcx, %r11
 ; SSE-WIN-NEXT:    xorl %esi, %esi
-; SSE-WIN-NEXT:    .p2align 4, 0x90
+; SSE-WIN-NEXT:    .p2align 4
 ; SSE-WIN-NEXT:  .LBB8_2: # %for.body3
 ; SSE-WIN-NEXT:    # Parent Loop BB8_1 Depth=1
 ; SSE-WIN-NEXT:    # => This Inner Loop Header: Depth=2
@@ -554,13 +554,13 @@ define dso_local void @loopdep3() {
 ; AVX-NEXT:    leaq y(%rip), %r8
 ; AVX-NEXT:    leaq z(%rip), %r9
 ; AVX-NEXT:    leaq w(%rip), %r10
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB8_1: # %for.cond1.preheader
 ; AVX-NEXT:    # =>This Loop Header: Depth=1
 ; AVX-NEXT:    # Child Loop BB8_2 Depth 2
 ; AVX-NEXT:    movq %rcx, %r11
 ; AVX-NEXT:    xorl %esi, %esi
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB8_2: # %for.body3
 ; AVX-NEXT:    # Parent Loop BB8_1 Depth=1
 ; AVX-NEXT:    # => This Inner Loop Header: Depth=2
@@ -1114,7 +1114,7 @@ define i64 @loopclearence(ptr nocapture %x, ptr nocapture %y) nounwind {
 ; SSE-LINUX:       # %bb.0: # %entry
 ; SSE-LINUX-NEXT:    movq (%rdi), %rax
 ; SSE-LINUX-NEXT:    movl $1, %ecx
-; SSE-LINUX-NEXT:    .p2align 4, 0x90
+; SSE-LINUX-NEXT:    .p2align 4
 ; SSE-LINUX-NEXT:  .LBB12_1: # %loop
 ; SSE-LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-LINUX-NEXT:    xorps %xmm4, %xmm4
@@ -1155,7 +1155,7 @@ define i64 @loopclearence(ptr nocapture %x, ptr nocapture %y) nounwind {
 ; SSE-WIN-NEXT:    movaps %xmm8, (%rsp) # 16-byte Spill
 ; SSE-WIN-NEXT:    movq (%rcx), %rax
 ; SSE-WIN-NEXT:    movl $1, %ecx
-; SSE-WIN-NEXT:    .p2align 4, 0x90
+; SSE-WIN-NEXT:    .p2align 4
 ; SSE-WIN-NEXT:  .LBB12_1: # %loop
 ; SSE-WIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-WIN-NEXT:    xorps %xmm4, %xmm4
@@ -1205,7 +1205,7 @@ define i64 @loopclearence(ptr nocapture %x, ptr nocapture %y) nounwind {
 ; AVX-NEXT:    vmovaps %xmm8, (%rsp) # 16-byte Spill
 ; AVX-NEXT:    movq (%rcx), %rax
 ; AVX-NEXT:    movl $1, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB12_1: # %loop
 ; AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    vcvtsi2sd %rcx, %xmm5, %xmm4
@@ -1288,7 +1288,7 @@ define dso_local void @loopclearance2(ptr nocapture %y, ptr %x, double %c1, doub
 ; SSE-LINUX-NEXT:    #NO_APP
 ; SSE-LINUX-NEXT:    movl $1, %eax
 ; SSE-LINUX-NEXT:    xorl %ecx, %ecx
-; SSE-LINUX-NEXT:    .p2align 4, 0x90
+; SSE-LINUX-NEXT:    .p2align 4
 ; SSE-LINUX-NEXT:  .LBB13_1: # %inner_loop
 ; SSE-LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-LINUX-NEXT:    movq %rcx, %r8
@@ -1360,7 +1360,7 @@ define dso_local void @loopclearance2(ptr nocapture %y, ptr %x, double %c1, doub
 ; SSE-WIN-NEXT:    #NO_APP
 ; SSE-WIN-NEXT:    movl $1, %r8d
 ; SSE-WIN-NEXT:    xorl %r9d, %r9d
-; SSE-WIN-NEXT:    .p2align 4, 0x90
+; SSE-WIN-NEXT:    .p2align 4
 ; SSE-WIN-NEXT:  .LBB13_1: # %inner_loop
 ; SSE-WIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-WIN-NEXT:    movq %r9, %r10
@@ -1443,7 +1443,7 @@ define dso_local void @loopclearance2(ptr nocapture %y, ptr %x, double %c1, doub
 ; AVX1-NEXT:    #NO_APP
 ; AVX1-NEXT:    movl $1, %r8d
 ; AVX1-NEXT:    xorl %r9d, %r9d
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB13_1: # %inner_loop
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    movq %r9, %r10
@@ -1523,7 +1523,7 @@ define dso_local void @loopclearance2(ptr nocapture %y, ptr %x, double %c1, doub
 ; AVX512VL-NEXT:    #NO_APP
 ; AVX512VL-NEXT:    movl $1, %r8d
 ; AVX512VL-NEXT:    xorl %r9d, %r9d
-; AVX512VL-NEXT:    .p2align 4, 0x90
+; AVX512VL-NEXT:    .p2align 4
 ; AVX512VL-NEXT:  .LBB13_1: # %inner_loop
 ; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VL-NEXT:    movq %r9, %r10

diff  --git a/llvm/test/CodeGen/X86/callbr-asm-blockplacement.ll b/llvm/test/CodeGen/X86/callbr-asm-blockplacement.ll
index 941f331833c199..7f91b846447b3b 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-blockplacement.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-blockplacement.ll
@@ -29,7 +29,7 @@ define i32 @foo(i32 %arg, ptr %arg3) nounwind {
 ; CHECK-NEXT:    leaq global(%rax,%rax,2), %r14
 ; CHECK-NEXT:    leaq global+4(%rax,%rax,2), %r15
 ; CHECK-NEXT:    xorl %r13d, %r13d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %bb8
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    callq bar at PLT

diff  --git a/llvm/test/CodeGen/X86/cast-vsel.ll b/llvm/test/CodeGen/X86/cast-vsel.ll
index 3067ea674c00d9..d5ccc152abc239 100644
--- a/llvm/test/CodeGen/X86/cast-vsel.ll
+++ b/llvm/test/CodeGen/X86/cast-vsel.ll
@@ -278,7 +278,7 @@ define dso_local void @example25() nounwind {
 ; SSE2-LABEL: example25:
 ; SSE2:       # %bb.0: # %vector.ph
 ; SSE2-NEXT:    movq $-4096, %rax # imm = 0xF000
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB5_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movaps da+4112(%rax), %xmm0
@@ -303,7 +303,7 @@ define dso_local void @example25() nounwind {
 ; SSE41-LABEL: example25:
 ; SSE41:       # %bb.0: # %vector.ph
 ; SSE41-NEXT:    movq $-4096, %rax # imm = 0xF000
-; SSE41-NEXT:    .p2align 4, 0x90
+; SSE41-NEXT:    .p2align 4
 ; SSE41-NEXT:  .LBB5_1: # %vector.body
 ; SSE41-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE41-NEXT:    movaps da+4112(%rax), %xmm0
@@ -329,7 +329,7 @@ define dso_local void @example25() nounwind {
 ; AVX1:       # %bb.0: # %vector.ph
 ; AVX1-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB5_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovups da+4096(%rax), %ymm1
@@ -348,7 +348,7 @@ define dso_local void @example25() nounwind {
 ; AVX2-LABEL: example25:
 ; AVX2:       # %bb.0: # %vector.ph
 ; AVX2-NEXT:    movq $-4096, %rax # imm = 0xF000
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB5_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovups da+4096(%rax), %ymm0
@@ -402,7 +402,7 @@ define dso_local void @example24(i16 signext %x, i16 signext %y) nounwind {
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; SSE2-NEXT:    movq $-4096, %rax # imm = 0xF000
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB6_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movaps da+4096(%rax), %xmm2
@@ -434,7 +434,7 @@ define dso_local void @example24(i16 signext %x, i16 signext %y) nounwind {
 ; SSE41-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
 ; SSE41-NEXT:    movq $-4096, %rax # imm = 0xF000
-; SSE41-NEXT:    .p2align 4, 0x90
+; SSE41-NEXT:    .p2align 4
 ; SSE41-NEXT:  .LBB6_1: # %vector.body
 ; SSE41-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE41-NEXT:    movaps da+4096(%rax), %xmm0
@@ -469,7 +469,7 @@ define dso_local void @example24(i16 signext %x, i16 signext %y) nounwind {
 ; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm2
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
 ; AVX1-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB6_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovups da+4096(%rax), %ymm4
@@ -494,7 +494,7 @@ define dso_local void @example24(i16 signext %x, i16 signext %y) nounwind {
 ; AVX2-NEXT:    movq $-4096, %rax # imm = 0xF000
 ; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX2-NEXT:    vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB6_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovups da+4096(%rax), %ymm2

diff  --git a/llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll b/llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll
index 29751dcfca5d6b..4877d15e44a4fc 100644
--- a/llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll
+++ b/llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll
@@ -127,12 +127,12 @@ define i32 @test_control_flow(ptr %p, i32 %i, i32 %j) nounwind {
 ; X86-ALL-NEXT:    jle .LBB1_6
 ; X86-ALL-NEXT:  # %bb.1: # %loop_start
 ; X86-ALL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-ALL-NEXT:    .p2align 4, 0x90
+; X86-ALL-NEXT:    .p2align 4
 ; X86-ALL-NEXT:  .LBB1_2: # %while.condthread-pre-split.i
 ; X86-ALL-NEXT:    # =>This Loop Header: Depth=1
 ; X86-ALL-NEXT:    # Child Loop BB1_3 Depth 2
 ; X86-ALL-NEXT:    movl (%ecx), %edx
-; X86-ALL-NEXT:    .p2align 4, 0x90
+; X86-ALL-NEXT:    .p2align 4
 ; X86-ALL-NEXT:  .LBB1_3: # %while.cond.i
 ; X86-ALL-NEXT:    # Parent Loop BB1_2 Depth=1
 ; X86-ALL-NEXT:    # => This Inner Loop Header: Depth=2
@@ -154,12 +154,12 @@ define i32 @test_control_flow(ptr %p, i32 %i, i32 %j) nounwind {
 ; X64-ALL-NEXT:    movl %esi, %eax
 ; X64-ALL-NEXT:    cmpl %edx, %esi
 ; X64-ALL-NEXT:    jle .LBB1_5
-; X64-ALL-NEXT:    .p2align 4, 0x90
+; X64-ALL-NEXT:    .p2align 4
 ; X64-ALL-NEXT:  .LBB1_1: # %while.condthread-pre-split.i
 ; X64-ALL-NEXT:    # =>This Loop Header: Depth=1
 ; X64-ALL-NEXT:    # Child Loop BB1_2 Depth 2
 ; X64-ALL-NEXT:    movl (%rdi), %ecx
-; X64-ALL-NEXT:    .p2align 4, 0x90
+; X64-ALL-NEXT:    .p2align 4
 ; X64-ALL-NEXT:  .LBB1_2: # %while.cond.i
 ; X64-ALL-NEXT:    # Parent Loop BB1_1 Depth=1
 ; X64-ALL-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll b/llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll
index 3fb561d00f97d1..bb07640dd285fa 100644
--- a/llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll
+++ b/llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll
@@ -113,7 +113,7 @@ define i128 @cmpxchg_use_eflags_and_val(ptr %addr, i128 %offset) {
 ; CHECK-NEXT:    vmovdqa (%rdi), %xmm0
 ; CHECK-NEXT:    vpextrq $1, %xmm0, %rdx
 ; CHECK-NEXT:    vmovq %xmm0, %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rdx, %r9

diff  --git a/llvm/test/CodeGen/X86/coalesce-esp.ll b/llvm/test/CodeGen/X86/coalesce-esp.ll
index 34964c0afcc696..0c495c9cf2b009 100644
--- a/llvm/test/CodeGen/X86/coalesce-esp.ll
+++ b/llvm/test/CodeGen/X86/coalesce-esp.ll
@@ -27,7 +27,7 @@ define void @_ZSt17__gslice_to_indexjRKSt8valarrayIjES2_RS0_(i32 %__o, ptr nocap
 ; CHECK-NEXT:  .LBB0_1: # %bb4.preheader
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    jmp .LBB0_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_4: # %bb7.backedge
 ; CHECK-NEXT:    # in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    addl $-4, %edx

diff  --git a/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness-reduced.ll b/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness-reduced.ll
index a5295d44b07c8e..e9f529eea7d3f1 100644
--- a/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness-reduced.ll
+++ b/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness-reduced.ll
@@ -46,7 +46,7 @@ define void @foo(ptr %arg3, i1 %icmp16) #0 {
 ; CHECK-NEXT:    xorl %r12d, %r12d
 ; CHECK-NEXT:    # implicit-def: $r13
 ; CHECK-NEXT:    jmp .LBB0_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb5
 ; CHECK-NEXT:    # in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    orl $1, %r12d

diff  --git a/llvm/test/CodeGen/X86/coalescer-commute1.ll b/llvm/test/CodeGen/X86/coalescer-commute1.ll
index 93c6756131fd0f..28502782cf6426 100644
--- a/llvm/test/CodeGen/X86/coalescer-commute1.ll
+++ b/llvm/test/CodeGen/X86/coalescer-commute1.ll
@@ -13,7 +13,7 @@ define void @runcont(ptr %source) nounwind  {
 ; CHECK-NEXT:    movl (%ecx), %ecx
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %bb
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vcvtsi2ssl (%eax,%edx,4), %xmm2, %xmm1

diff  --git a/llvm/test/CodeGen/X86/coalescer-commute4.ll b/llvm/test/CodeGen/X86/coalescer-commute4.ll
index 72ca9f17841b61..0ec99dcba88aeb 100644
--- a/llvm/test/CodeGen/X86/coalescer-commute4.ll
+++ b/llvm/test/CodeGen/X86/coalescer-commute4.ll
@@ -15,7 +15,7 @@ define float @foo(ptr %x, ptr %y, i32 %c) nounwind  {
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_3: ## %bb
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    xorps %xmm1, %xmm1

diff  --git a/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll b/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
index 4a396599a4444d..4d41c8406f6e0b 100644
--- a/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
+++ b/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
@@ -11,7 +11,7 @@ define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0_(ptr %r) {
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    jmp .LBB0_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_4: # %_ZNK4llvm5APInt13getActiveBitsEv.exit.i.i
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl %edx, %edx
@@ -26,7 +26,7 @@ define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0_(ptr %r) {
 ; CHECK-NEXT:    movq %rcx, %r8
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB0_4
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %for.body.i.i.i.i.i.3
 ; CHECK-NEXT:    # Parent Loop BB0_1 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -72,7 +72,7 @@ define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0__assert(ptr %r) {
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    jmp .LBB1_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_4: # %_ZNK4llvm5APInt13getActiveBitsEv.exit.i.i
 ; CHECK-NEXT:    # in Loop: Header=BB1_1 Depth=1
 ; CHECK-NEXT:    movl %edx, %edx
@@ -89,7 +89,7 @@ define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0__assert(ptr %r) {
 ; CHECK-NEXT:    xorl %r8d, %r8d
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB1_4
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_3: # %for.body.i.i.i.i.i.3
 ; CHECK-NEXT:    # Parent Loop BB1_1 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/code-align-loops.ll b/llvm/test/CodeGen/X86/code-align-loops.ll
index 164df14df4173f..68ae49792ed189 100644
--- a/llvm/test/CodeGen/X86/code-align-loops.ll
+++ b/llvm/test/CodeGen/X86/code-align-loops.ll
@@ -20,19 +20,19 @@
 ; The difference between test1 and test2 is test2 only set one loop metadata node for the second loop.
 
 ; CHECK-LABEL: test1:
-; ALIGN: .p2align 6, 0x90
+; ALIGN: .p2align 6
 ; ALIGN-NEXT: .LBB0_2: # %for.body
-; ALIGN: .p2align 9, 0x90
+; ALIGN: .p2align 9
 ; ALIGN-NEXT: .LBB0_3: # %for.body
 
-; ALIGN32: .p2align 6, 0x90
+; ALIGN32: .p2align 6
 ; ALIGN32-NEXT: .LBB0_2: # %for.body
-; ALIGN32: .p2align 9, 0x90
+; ALIGN32: .p2align 9
 ; ALIGN32-NEXT: .LBB0_3: # %for.body
 
-; ALIGN256: .p2align 8, 0x90
+; ALIGN256: .p2align 8
 ; ALIGN256-NEXT: .LBB0_2: # %for.body
-; ALIGN256: .p2align 9, 0x90
+; ALIGN256: .p2align 9
 ; ALIGN256-NEXT: .LBB0_3: # %for.body
 
 define void @test1(i32 %a) nounwind {
@@ -59,19 +59,19 @@ for.body5:                                        ; preds = %for.body, %for.body
 }
 
 ; CHECK-LABEL: test2:
-; ALIGN: .p2align 4, 0x90
+; ALIGN: .p2align 4
 ; ALIGN-NEXT: .LBB1_2: # %for.body
-; ALIGN: .p2align 9, 0x90
+; ALIGN: .p2align 9
 ; ALIGN-NEXT: .LBB1_3: # %for.body
 
-; ALIGN32: .p2align 5, 0x90
+; ALIGN32: .p2align 5
 ; ALIGN32-NEXT: .LBB1_2: # %for.body
-; ALIGN32: .p2align 9, 0x90
+; ALIGN32: .p2align 9
 ; ALIGN32-NEXT: .LBB1_3: # %for.body
 
-; ALIGN256: .p2align 8, 0x90
+; ALIGN256: .p2align 8
 ; ALIGN256-NEXT: .LBB1_2: # %for.body
-; ALIGN256: .p2align 9, 0x90
+; ALIGN256: .p2align 9
 ; ALIGN256-NEXT: .LBB1_3: # %for.body
 define void @test2(i32 %a) nounwind {
 entry:
@@ -111,7 +111,7 @@ for.body5:                                        ; preds = %for.body, %for.body
 ;     }
 ; }
 ; CHECK-LABEL: test3_multilatch:
-; ALIGN: .p2align 6, 0x90
+; ALIGN: .p2align 6
 ; ALIGN-NEXT: .LBB2_1: # %while.cond
 define dso_local i32 @test3_multilatch() #0 {
 entry:
@@ -147,7 +147,7 @@ while.end:                                        ; preds = %while.cond
 }
 
 ; CHECK-LABEL: test4_multilatch:
-; ALIGN: .p2align 6, 0x90
+; ALIGN: .p2align 6
 ; ALIGN-NEXT: .LBB3_4: # %bb4
 define void @test4_multilatch(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
 entry:

diff  --git a/llvm/test/CodeGen/X86/code_placement_align_all.ll b/llvm/test/CodeGen/X86/code_placement_align_all.ll
index 11dc59a3bab9d5..0e57b0e8ebd32d 100644
--- a/llvm/test/CodeGen/X86/code_placement_align_all.ll
+++ b/llvm/test/CodeGen/X86/code_placement_align_all.ll
@@ -1,9 +1,9 @@
 ; RUN: llc  -mcpu=corei7 -mtriple=x86_64-linux -align-all-blocks=16 < %s | FileCheck %s
 
 ;CHECK-LABEL: foo:
-;CHECK: .p2align  16, 0x90
-;CHECK: .p2align  16, 0x90
-;CHECK: .p2align  16, 0x90
+;CHECK: .p2align  16
+;CHECK: .p2align  16
+;CHECK: .p2align  16
 ;CHECK: ret
 define i32 @foo(i32 %t, i32 %l) nounwind readnone ssp uwtable {
   %1 = icmp eq i32 %t, 0

diff  --git a/llvm/test/CodeGen/X86/combine-pmuldq.ll b/llvm/test/CodeGen/X86/combine-pmuldq.ll
index aa3bea2791416a..2a52b9eabc7b48 100644
--- a/llvm/test/CodeGen/X86/combine-pmuldq.ll
+++ b/llvm/test/CodeGen/X86/combine-pmuldq.ll
@@ -157,7 +157,7 @@ define <8 x i64> @combine_zext_pmuludq_256(<8 x i32> %a) {
 define void @PR39398(i32 %a0) {
 ; SSE-LABEL: PR39398:
 ; SSE:       # %bb.0: # %bb
-; SSE-NEXT:    .p2align 4, 0x90
+; SSE-NEXT:    .p2align 4
 ; SSE-NEXT:  .LBB5_1: # %bb10
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-NEXT:    cmpl $232, %edi
@@ -167,7 +167,7 @@ define void @PR39398(i32 %a0) {
 ;
 ; AVX-LABEL: PR39398:
 ; AVX:       # %bb.0: # %bb
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB5_1: # %bb10
 ; AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    cmpl $232, %edi
@@ -338,7 +338,7 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; SSE-NEXT:    pxor %xmm0, %xmm0
 ; SSE-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
 ; SSE-NEXT:    pxor %xmm1, %xmm1
-; SSE-NEXT:    .p2align 4, 0x90
+; SSE-NEXT:    .p2align 4
 ; SSE-NEXT:  .LBB7_1: # %loop
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-NEXT:    pmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
@@ -367,7 +367,7 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB7_1: # %loop
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
@@ -396,7 +396,7 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %ymm1
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB7_1: # %loop
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -418,7 +418,7 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; AVX512VL-NEXT:    vpbroadcastq %rax, %zmm1
 ; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
-; AVX512VL-NEXT:    .p2align 4, 0x90
+; AVX512VL-NEXT:    .p2align 4
 ; AVX512VL-NEXT:  .LBB7_1: # %loop
 ; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -437,7 +437,7 @@ define <8 x i32> @PR49658_zext(ptr %ptr, i32 %mul) {
 ; AVX512DQVL-NEXT:    vpbroadcastq %rax, %zmm1
 ; AVX512DQVL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512DQVL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
-; AVX512DQVL-NEXT:    .p2align 4, 0x90
+; AVX512DQVL-NEXT:    .p2align 4
 ; AVX512DQVL-NEXT:  .LBB7_1: # %loop
 ; AVX512DQVL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512DQVL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -482,7 +482,7 @@ define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
 ; SSE-NEXT:    movdqa %xmm2, %xmm3
 ; SSE-NEXT:    psrlq $32, %xmm3
 ; SSE-NEXT:    pxor %xmm1, %xmm1
-; SSE-NEXT:    .p2align 4, 0x90
+; SSE-NEXT:    .p2align 4
 ; SSE-NEXT:  .LBB8_1: # %loop
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-NEXT:    pmovsxdq 2097176(%rdi,%rax), %xmm5
@@ -543,7 +543,7 @@ define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB8_1: # %loop
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovsxdq 2097152(%rdi,%rax), %xmm3
@@ -572,7 +572,7 @@ define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %ymm1
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB8_1: # %loop
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovsxdq 2097168(%rdi,%rax), %ymm2
@@ -594,7 +594,7 @@ define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
 ; AVX512VL-NEXT:    vpbroadcastq %rax, %zmm1
 ; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512VL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
-; AVX512VL-NEXT:    .p2align 4, 0x90
+; AVX512VL-NEXT:    .p2align 4
 ; AVX512VL-NEXT:  .LBB8_1: # %loop
 ; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -613,7 +613,7 @@ define <8 x i32> @PR49658_sext(ptr %ptr, i32 %mul) {
 ; AVX512DQVL-NEXT:    vpbroadcastq %rax, %zmm1
 ; AVX512DQVL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512DQVL-NEXT:    movq $-2097152, %rax # imm = 0xFFE00000
-; AVX512DQVL-NEXT:    .p2align 4, 0x90
+; AVX512DQVL-NEXT:    .p2align 4
 ; AVX512DQVL-NEXT:  .LBB8_1: # %loop
 ; AVX512DQVL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512DQVL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero

diff  --git a/llvm/test/CodeGen/X86/constant-pool-sharing.ll b/llvm/test/CodeGen/X86/constant-pool-sharing.ll
index 062d87ed035fd6..9e0114c017419f 100644
--- a/llvm/test/CodeGen/X86/constant-pool-sharing.ll
+++ b/llvm/test/CodeGen/X86/constant-pool-sharing.ll
@@ -12,7 +12,7 @@ define void @share_v4i32_v4f32(ptr %p, ptr %q, i1 %t) nounwind {
 ; SSE-LINUX-LABEL: share_v4i32_v4f32:
 ; SSE-LINUX:       # %bb.0: # %entry
 ; SSE-LINUX-NEXT:    movaps {{.*#+}} xmm0 = [1073741824,1073741824,1073741824,1073741824]
-; SSE-LINUX-NEXT:    .p2align 4, 0x90
+; SSE-LINUX-NEXT:    .p2align 4
 ; SSE-LINUX-NEXT:  .LBB0_1: # %loop
 ; SSE-LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-LINUX-NEXT:    movaps %xmm0, (%rdi)
@@ -25,7 +25,7 @@ define void @share_v4i32_v4f32(ptr %p, ptr %q, i1 %t) nounwind {
 ; SSE-MSVC-LABEL: share_v4i32_v4f32:
 ; SSE-MSVC:       # %bb.0: # %entry
 ; SSE-MSVC-NEXT:    movaps {{.*#+}} xmm0 = [1073741824,1073741824,1073741824,1073741824]
-; SSE-MSVC-NEXT:    .p2align 4, 0x90
+; SSE-MSVC-NEXT:    .p2align 4
 ; SSE-MSVC-NEXT:  .LBB0_1: # %loop
 ; SSE-MSVC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-MSVC-NEXT:    movaps %xmm0, (%rcx)
@@ -39,7 +39,7 @@ define void @share_v4i32_v4f32(ptr %p, ptr %q, i1 %t) nounwind {
 ; AVX-LINUX:       # %bb.0: # %entry
 ; AVX-LINUX-NEXT:    vbroadcastss {{.*#+}} xmm0 = [1073741824,1073741824,1073741824,1073741824]
 ; AVX-LINUX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1073741824,1073741824,1073741824,1073741824]
-; AVX-LINUX-NEXT:    .p2align 4, 0x90
+; AVX-LINUX-NEXT:    .p2align 4
 ; AVX-LINUX-NEXT:  .LBB0_1: # %loop
 ; AVX-LINUX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-LINUX-NEXT:    vmovaps %xmm0, (%rdi)
@@ -53,7 +53,7 @@ define void @share_v4i32_v4f32(ptr %p, ptr %q, i1 %t) nounwind {
 ; AVX-MSVC:       # %bb.0: # %entry
 ; AVX-MSVC-NEXT:    vbroadcastss {{.*#+}} xmm0 = [1073741824,1073741824,1073741824,1073741824]
 ; AVX-MSVC-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1073741824,1073741824,1073741824,1073741824]
-; AVX-MSVC-NEXT:    .p2align 4, 0x90
+; AVX-MSVC-NEXT:    .p2align 4
 ; AVX-MSVC-NEXT:  .LBB0_1: # %loop
 ; AVX-MSVC-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-MSVC-NEXT:    vmovaps %xmm0, (%rcx)

diff  --git a/llvm/test/CodeGen/X86/copy-eflags.ll b/llvm/test/CodeGen/X86/copy-eflags.ll
index 6af80860401afd..e1711ccdbe13fb 100644
--- a/llvm/test/CodeGen/X86/copy-eflags.ll
+++ b/llvm/test/CodeGen/X86/copy-eflags.ll
@@ -211,7 +211,7 @@ define dso_local void @PR37100(i8 %arg1, i16 %arg2, i64 %arg3, i8 %arg4, ptr %pt
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %ch
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT:    jmp .LBB3_1
-; X32-NEXT:    .p2align 4, 0x90
+; X32-NEXT:    .p2align 4
 ; X32-NEXT:  .LBB3_5: # %bb1
 ; X32-NEXT:    # in Loop: Header=BB3_1 Depth=1
 ; X32-NEXT:    movl %esi, %eax
@@ -250,7 +250,7 @@ define dso_local void @PR37100(i8 %arg1, i16 %arg2, i64 %arg3, i8 %arg4, ptr %pt
 ; X64-NEXT:    movq %rdx, %r10
 ; X64-NEXT:    movl {{[0-9]+}}(%rsp), %esi
 ; X64-NEXT:    movzbl %cl, %ecx
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_1: # %bb1
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movsbq %dil, %rax

diff  --git a/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll b/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
index adb7319fe80b11..bf6b09674e1870 100644
--- a/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
+++ b/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
@@ -144,7 +144,7 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    movq x0 at GOTPCREL(%rip), %r15
 ; CHECK-NEXT:    movq %rsi, %r12
 ; CHECK-NEXT:    jmp .LBB1_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_15: # %for.cond1.for.inc3_crit_edge
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movl %edx, (%r8)
@@ -220,7 +220,7 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    subq %rdx, %rax
 ; CHECK-NEXT:    addq %r13, %r10
 ; CHECK-NEXT:    leaq (%r11,%r10,8), %r10
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_12: # %vector.body
 ; CHECK-NEXT:    # Parent Loop BB1_2 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -237,7 +237,7 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    cmpq %rdx, %rbp
 ; CHECK-NEXT:    movq %r13, %rdx
 ; CHECK-NEXT:    je .LBB1_15
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_14: # %for.body2
 ; CHECK-NEXT:    # Parent Loop BB1_2 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
index c350ed64280dd2..f66d81c781fe0d 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
@@ -417,7 +417,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    xorl %edx, %edx
 ; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB4_3: # %udiv-do-while
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %edx, %esi

diff  --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
index 16dc1d6b446cf7..6fdde0b14a9843 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
@@ -377,7 +377,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB4_3: # %udiv-do-while
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill

diff  --git a/llvm/test/CodeGen/X86/fdiv-combine.ll b/llvm/test/CodeGen/X86/fdiv-combine.ll
index 322ef6b393922d..1abcdc3cca9b78 100644
--- a/llvm/test/CodeGen/X86/fdiv-combine.ll
+++ b/llvm/test/CodeGen/X86/fdiv-combine.ll
@@ -128,7 +128,7 @@ define void @PR24141() {
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    # implicit-def: $xmm0
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB8_1: # %while.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    callq g@PLT

diff  --git a/llvm/test/CodeGen/X86/fixup-lea.ll b/llvm/test/CodeGen/X86/fixup-lea.ll
index 35c3976408f645..548e74de392956 100644
--- a/llvm/test/CodeGen/X86/fixup-lea.ll
+++ b/llvm/test/CodeGen/X86/fixup-lea.ll
@@ -165,7 +165,7 @@ define void @foo_nosize(i32 inreg %dns) {
 ; SLOW-LABEL: foo_nosize:
 ; SLOW:       # %bb.0: # %entry
 ; SLOW-NEXT:    movw $-1, %cx
-; SLOW-NEXT:    .p2align 4, 0x90
+; SLOW-NEXT:    .p2align 4
 ; SLOW-NEXT:  .LBB6_1: # %for.body
 ; SLOW-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SLOW-NEXT:    movzwl %cx, %edx
@@ -178,7 +178,7 @@ define void @foo_nosize(i32 inreg %dns) {
 ; FAST-LABEL: foo_nosize:
 ; FAST:       # %bb.0: # %entry
 ; FAST-NEXT:    movw $-1, %cx
-; FAST-NEXT:    .p2align 4, 0x90
+; FAST-NEXT:    .p2align 4
 ; FAST-NEXT:  .LBB6_1: # %for.body
 ; FAST-NEXT:    # =>This Inner Loop Header: Depth=1
 ; FAST-NEXT:    movzwl %cx, %edx
@@ -205,7 +205,7 @@ define void @bar_nosize(i32 inreg %dns) {
 ; SLOW-LABEL: bar_nosize:
 ; SLOW:       # %bb.0: # %entry
 ; SLOW-NEXT:    movw $1, %cx
-; SLOW-NEXT:    .p2align 4, 0x90
+; SLOW-NEXT:    .p2align 4
 ; SLOW-NEXT:  .LBB7_1: # %for.body
 ; SLOW-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SLOW-NEXT:    movzwl %cx, %edx
@@ -218,7 +218,7 @@ define void @bar_nosize(i32 inreg %dns) {
 ; FAST-LABEL: bar_nosize:
 ; FAST:       # %bb.0: # %entry
 ; FAST-NEXT:    movw $1, %cx
-; FAST-NEXT:    .p2align 4, 0x90
+; FAST-NEXT:    .p2align 4
 ; FAST-NEXT:  .LBB7_1: # %for.body
 ; FAST-NEXT:    # =>This Inner Loop Header: Depth=1
 ; FAST-NEXT:    movzwl %cx, %edx

diff  --git a/llvm/test/CodeGen/X86/fma-commute-loop.ll b/llvm/test/CodeGen/X86/fma-commute-loop.ll
index d26aaaa71e28d9..ae0efa320f1e4f 100644
--- a/llvm/test/CodeGen/X86/fma-commute-loop.ll
+++ b/llvm/test/CodeGen/X86/fma-commute-loop.ll
@@ -30,7 +30,7 @@ define void @eggs(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3, ptr %arg4, ptr %arg
 ; CHECK-NEXT:    vxorpd %xmm5, %xmm5, %xmm5
 ; CHECK-NEXT:    vxorpd %xmm6, %xmm6, %xmm6
 ; CHECK-NEXT:    vxorpd %xmm7, %xmm7, %xmm7
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %bb15
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vbroadcastsd (%r11,%rbx,8), %zmm9

diff  --git a/llvm/test/CodeGen/X86/fma-intrinsics-phi-213-to-231.ll b/llvm/test/CodeGen/X86/fma-intrinsics-phi-213-to-231.ll
index a509e93ef3f83f..5f1c5a5690ef20 100644
--- a/llvm/test/CodeGen/X86/fma-intrinsics-phi-213-to-231.ll
+++ b/llvm/test/CodeGen/X86/fma-intrinsics-phi-213-to-231.ll
@@ -7,7 +7,7 @@ define <2 x double> @fmaddsubpd_loop_128(i32 %iter, <2 x double> %a, <2 x double
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB0_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmaddsub231pd {{.*#+}} xmm2 = (xmm0 * xmm1) +/- xmm2
@@ -44,7 +44,7 @@ define <2 x double> @fmsubaddpd_loop_128(i32 %iter, <2 x double> %a, <2 x double
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB1_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmsubadd231pd {{.*#+}} xmm2 = (xmm0 * xmm1) -/+ xmm2
@@ -81,7 +81,7 @@ define <2 x double> @fmaddpd_loop_128(i32 %iter, <2 x double> %a, <2 x double> %
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB2_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmadd231pd {{.*#+}} xmm2 = (xmm0 * xmm1) + xmm2
@@ -118,7 +118,7 @@ define <2 x double> @fmsubpd_loop_128(i32 %iter, <2 x double> %a, <2 x double> %
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB3_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmsub231pd {{.*#+}} xmm2 = (xmm0 * xmm1) - xmm2
@@ -155,7 +155,7 @@ define <2 x double> @fnmaddpd_loop_128(i32 %iter, <2 x double> %a, <2 x double>
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB4_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfnmadd231pd {{.*#+}} xmm2 = -(xmm0 * xmm1) + xmm2
@@ -192,7 +192,7 @@ define <2 x double> @fnmsubpd_loop_128(i32 %iter, <2 x double> %a, <2 x double>
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB5_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB5_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfnmsub231pd {{.*#+}} xmm2 = -(xmm0 * xmm1) - xmm2
@@ -236,7 +236,7 @@ define <4 x float> @fmaddsubps_loop_128(i32 %iter, <4 x float> %a, <4 x float> %
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB6_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB6_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmaddsub231ps {{.*#+}} xmm2 = (xmm0 * xmm1) +/- xmm2
@@ -273,7 +273,7 @@ define <4 x float> @fmsubaddps_loop_128(i32 %iter, <4 x float> %a, <4 x float> %
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB7_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmsubadd231ps {{.*#+}} xmm2 = (xmm0 * xmm1) -/+ xmm2
@@ -310,7 +310,7 @@ define <4 x float> @fmaddps_loop_128(i32 %iter, <4 x float> %a, <4 x float> %b,
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB8_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB8_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmadd231ps {{.*#+}} xmm2 = (xmm0 * xmm1) + xmm2
@@ -347,7 +347,7 @@ define <4 x float> @fmsubps_loop_128(i32 %iter, <4 x float> %a, <4 x float> %b,
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB9_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB9_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmsub231ps {{.*#+}} xmm2 = (xmm0 * xmm1) - xmm2
@@ -384,7 +384,7 @@ define <4 x float> @fnmaddps_loop_128(i32 %iter, <4 x float> %a, <4 x float> %b,
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB10_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB10_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfnmadd231ps {{.*#+}} xmm2 = -(xmm0 * xmm1) + xmm2
@@ -421,7 +421,7 @@ define <4 x float> @fnmsubps_loop_128(i32 %iter, <4 x float> %a, <4 x float> %b,
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB11_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB11_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfnmsub231ps {{.*#+}} xmm2 = -(xmm0 * xmm1) - xmm2
@@ -465,7 +465,7 @@ define <4 x double> @fmaddsubpd_loop_256(i32 %iter, <4 x double> %a, <4 x double
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB12_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB12_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmaddsub231pd {{.*#+}} ymm2 = (ymm0 * ymm1) +/- ymm2
@@ -502,7 +502,7 @@ define <4 x double> @fmsubaddpd_loop_256(i32 %iter, <4 x double> %a, <4 x double
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB13_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB13_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmsubadd231pd {{.*#+}} ymm2 = (ymm0 * ymm1) -/+ ymm2
@@ -539,7 +539,7 @@ define <4 x double> @fmaddpd_loop_256(i32 %iter, <4 x double> %a, <4 x double> %
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB14_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB14_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmadd231pd {{.*#+}} ymm2 = (ymm0 * ymm1) + ymm2
@@ -576,7 +576,7 @@ define <4 x double> @fmsubpd_loop_256(i32 %iter, <4 x double> %a, <4 x double> %
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB15_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB15_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmsub231pd {{.*#+}} ymm2 = (ymm0 * ymm1) - ymm2
@@ -613,7 +613,7 @@ define <4 x double> @fnmaddpd_loop_256(i32 %iter, <4 x double> %a, <4 x double>
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB16_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB16_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfnmadd231pd {{.*#+}} ymm2 = -(ymm0 * ymm1) + ymm2
@@ -650,7 +650,7 @@ define <4 x double> @fnmsubpd_loop_256(i32 %iter, <4 x double> %a, <4 x double>
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB17_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB17_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfnmsub231pd {{.*#+}} ymm2 = -(ymm0 * ymm1) - ymm2
@@ -694,7 +694,7 @@ define <8 x float> @fmaddsubps_loop_256(i32 %iter, <8 x float> %a, <8 x float> %
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB18_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB18_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmaddsub231ps {{.*#+}} ymm2 = (ymm0 * ymm1) +/- ymm2
@@ -731,7 +731,7 @@ define <8 x float> @fmsubaddps_loop_256(i32 %iter, <8 x float> %a, <8 x float> %
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB19_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB19_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmsubadd231ps {{.*#+}} ymm2 = (ymm0 * ymm1) -/+ ymm2
@@ -768,7 +768,7 @@ define <8 x float> @fmaddps_loop_256(i32 %iter, <8 x float> %a, <8 x float> %b,
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB20_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB20_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmadd231ps {{.*#+}} ymm2 = (ymm0 * ymm1) + ymm2
@@ -805,7 +805,7 @@ define <8 x float> @fmsubps_loop_256(i32 %iter, <8 x float> %a, <8 x float> %b,
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB21_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB21_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfmsub231ps {{.*#+}} ymm2 = (ymm0 * ymm1) - ymm2
@@ -842,7 +842,7 @@ define <8 x float> @fnmaddps_loop_256(i32 %iter, <8 x float> %a, <8 x float> %b,
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB22_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB22_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfnmadd231ps {{.*#+}} ymm2 = -(ymm0 * ymm1) + ymm2
@@ -879,7 +879,7 @@ define <8 x float> @fnmsubps_loop_256(i32 %iter, <8 x float> %a, <8 x float> %b,
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    cmpl %edi, %eax
 ; CHECK-NEXT:    jge .LBB23_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB23_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vfnmsub231ps {{.*#+}} ymm2 = -(ymm0 * ymm1) - ymm2

diff  --git a/llvm/test/CodeGen/X86/fold-call-3.ll b/llvm/test/CodeGen/X86/fold-call-3.ll
index 691f46b9eeb0ef..1db5cba8892d12 100644
--- a/llvm/test/CodeGen/X86/fold-call-3.ll
+++ b/llvm/test/CodeGen/X86/fold-call-3.ll
@@ -22,7 +22,7 @@ define void @_Z25RawPointerPerformanceTestPvRN5clang6ActionE(ptr %Val, ptr %Acti
 ; CHECK-NEXT:    movq %rsi, %rbx
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    xorl %ebp, %ebp
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_2: ## %bb
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq (%rbx), %rcx
@@ -52,7 +52,7 @@ define void @_Z25RawPointerPerformanceTestPvRN5clang6ActionE(ptr %Val, ptr %Acti
 ; pre-RA-NEXT:    movq %rsi, %rbx
 ; pre-RA-NEXT:    movq %rdi, %rax
 ; pre-RA-NEXT:    xorl %ebp, %ebp
-; pre-RA-NEXT:    .p2align 4, 0x90
+; pre-RA-NEXT:    .p2align 4
 ; pre-RA-NEXT:  LBB0_2: ## %bb
 ; pre-RA-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; pre-RA-NEXT:    movq (%rbx), %rcx

diff  --git a/llvm/test/CodeGen/X86/fold-loop-of-urem.ll b/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
index c4b130a8b4e717..c1beb7c803b2b3 100644
--- a/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
+++ b/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
@@ -24,7 +24,7 @@ define void @simple_urem_to_sel(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    xorl %r15d, %r15d
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    xorl %r12d, %r12d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %edi
@@ -71,7 +71,7 @@ define void @simple_urem_to_sel_fail_not_in_loop(i32 %N, i32 %rem_amt) nounwind
 ; CHECK-NEXT:  # %bb.3: # %for.body.preheader
 ; CHECK-NEXT:    movl %edi, %r14d
 ; CHECK-NEXT:    xorl %ebp, %ebp
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_4: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %ebp, %edi
@@ -129,7 +129,7 @@ define void @simple_urem_to_sel_inner_loop(i32 %N, i32 %M) nounwind {
 ; CHECK-NEXT:    xorl %r15d, %r15d
 ; CHECK-NEXT:    xorl %r13d, %r13d
 ; CHECK-NEXT:    jmp .LBB2_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_5: # %for.inner.cond.cleanup
 ; CHECK-NEXT:    # in Loop: Header=BB2_2 Depth=1
 ; CHECK-NEXT:    incl %r15d
@@ -147,7 +147,7 @@ define void @simple_urem_to_sel_inner_loop(i32 %N, i32 %M) nounwind {
 ; CHECK-NEXT:  # %bb.3: # %for.inner.body.preheader
 ; CHECK-NEXT:    # in Loop: Header=BB2_2 Depth=1
 ; CHECK-NEXT:    movl %r12d, %ebx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_4: # %for.inner.body
 ; CHECK-NEXT:    # Parent Loop BB2_2 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -208,7 +208,7 @@ define void @simple_urem_to_sel_inner_loop_fail_not_invariant(i32 %N, i32 %M) no
 ; CHECK-NEXT:    movl %edi, %ebp
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    jmp .LBB3_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_5: # %for.inner.cond.cleanup
 ; CHECK-NEXT:    # in Loop: Header=BB3_2 Depth=1
 ; CHECK-NEXT:    incl %r14d
@@ -224,7 +224,7 @@ define void @simple_urem_to_sel_inner_loop_fail_not_invariant(i32 %N, i32 %M) no
 ; CHECK-NEXT:    # in Loop: Header=BB3_2 Depth=1
 ; CHECK-NEXT:    movl %eax, %r15d
 ; CHECK-NEXT:    movl %ebx, %r12d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_4: # %for.inner.body
 ; CHECK-NEXT:    # Parent Loop BB3_2 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -288,7 +288,7 @@ define void @simple_urem_to_sel_nested2(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    xorl %r12d, %r12d
 ; CHECK-NEXT:    jmp .LBB4_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_5: # %for.body1
 ; CHECK-NEXT:    # in Loop: Header=BB4_2 Depth=1
 ; CHECK-NEXT:    movl %r14d, %edi
@@ -363,7 +363,7 @@ define void @simple_urem_fail_bad_incr3(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    jmp .LBB5_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB5_6: # %for.body1
 ; CHECK-NEXT:    # in Loop: Header=BB5_2 Depth=1
 ; CHECK-NEXT:    movl %ebp, %eax
@@ -392,7 +392,7 @@ define void @simple_urem_fail_bad_incr3(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    movl %eax, %ebp
 ; CHECK-NEXT:    incl %ebp
 ; CHECK-NEXT:    jmp .LBB5_6
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB5_5: # %for.body2
 ; CHECK-NEXT:    # in Loop: Header=BB5_2 Depth=1
 ; CHECK-NEXT:    xorl %ebp, %ebp
@@ -442,7 +442,7 @@ define void @simple_urem_to_sel_vec(<2 x i64> %rem_amt) nounwind {
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pxor %xmm0, %xmm0
 ; CHECK-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB6_1: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movdqa %xmm1, (%rsp) # 16-byte Spill
@@ -496,7 +496,7 @@ define void @simple_urem_fail_bad_incr(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    movl %edi, %ebp
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    jmp .LBB7_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_4: # %for.body.tail
 ; CHECK-NEXT:    # in Loop: Header=BB7_2 Depth=1
 ; CHECK-NEXT:    movl %r14d, %eax
@@ -566,7 +566,7 @@ define void @simple_urem_to_sel_second_acc(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    xorl %r12d, %r12d
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    xorl %r13d, %r13d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB8_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %edi
@@ -618,7 +618,7 @@ define void @simple_urem_fail_srem(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %ebp
 ; CHECK-NEXT:    xorl %r14d, %r14d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB9_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %eax
@@ -663,7 +663,7 @@ define void @simple_urem_fail_missing_nuw(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %ebp
 ; CHECK-NEXT:    xorl %r14d, %r14d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB10_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %eax
@@ -708,7 +708,7 @@ define void @simple_urem_fail_bad_incr2(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %ebp
 ; CHECK-NEXT:    xorl %r14d, %r14d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB11_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %eax
@@ -753,7 +753,7 @@ define void @simple_urem_non_zero_entry4(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %ebp
 ; CHECK-NEXT:    movl $4, %r14d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB12_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %eax
@@ -799,7 +799,7 @@ define void @simple_urem_skip_const_rem_amt(i32 %N) nounwind {
 ; CHECK-NEXT:    addl $-4, %ebx
 ; CHECK-NEXT:    movl $4, %ebp
 ; CHECK-NEXT:    movl $2938661835, %r14d # imm = 0xAF286BCB
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB13_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %ebp, %eax
@@ -855,7 +855,7 @@ define void @simple_urem_fail_no_preheader_non_canonical(i32 %N, i32 %rem_amt) n
 ; CHECK-NEXT:    jmp .LBB14_3
 ; CHECK-NEXT:  .LBB14_1:
 ; CHECK-NEXT:    xorl %r14d, %r14d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB14_3: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %eax
@@ -913,7 +913,7 @@ define void @simple_urem_multi_latch_non_canonical(i32 %N, i32 %rem_amt) nounwin
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    xorl %r13d, %r13d
 ; CHECK-NEXT:    jmp .LBB15_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB15_3: # %for.body.backedge
 ; CHECK-NEXT:    # in Loop: Header=BB15_2 Depth=1
 ; CHECK-NEXT:    incl %r14d
@@ -1038,7 +1038,7 @@ define void @simple_urem_fail_intermediate_inc(i32 %N, i32 %rem_amt) nounwind {
 ; CHECK-NEXT:    movl %edi, %r14d
 ; CHECK-NEXT:    negl %r14d
 ; CHECK-NEXT:    movl $1, %r15d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB17_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r15d, %eax
@@ -1078,7 +1078,7 @@ for.body:
 define void @weird_loop(i64 %sub.ptr.div.i56) personality ptr null {
 ; CHECK-LABEL: weird_loop:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB18_1: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB18_1
@@ -1107,7 +1107,7 @@ define void @simple_urem_to_sel_non_zero_start_fail(i32 %N, i32 %rem_amt) nounwi
 ; CHECK-NEXT:    movl %esi, %ebx
 ; CHECK-NEXT:    movl %edi, %ebp
 ; CHECK-NEXT:    movl $2, %r14d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB19_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %eax
@@ -1157,7 +1157,7 @@ define void @simple_urem_to_sel_non_zero_start_okay(i32 %N, i32 %rem_amt_in) nou
 ; CHECK-NEXT:    movl $2, %r14d
 ; CHECK-NEXT:    xorl %r15d, %r15d
 ; CHECK-NEXT:    movl $2, %r12d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB20_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %edi
@@ -1207,7 +1207,7 @@ define void @simple_urem_to_sel_non_zero_start_through_add(i32 %N, i32 %rem_amt_
 ; CHECK-NEXT:    orl $16, %ebx
 ; CHECK-NEXT:    negl %r14d
 ; CHECK-NEXT:    movl $7, %r15d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB21_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r15d, %eax
@@ -1259,7 +1259,7 @@ define void @simple_urem_to_sel_non_zero_start_through_add_fail_missing_nuw(i32
 ; CHECK-NEXT:    orl $16, %ebx
 ; CHECK-NEXT:    negl %r14d
 ; CHECK-NEXT:    movl $7, %r15d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB22_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r15d, %eax
@@ -1310,7 +1310,7 @@ define void @simple_urem_to_sel_non_zero_start_through_add_fail_no_simplify_rem(
 ; CHECK-NEXT:    movl %edi, %r14d
 ; CHECK-NEXT:    negl %r14d
 ; CHECK-NEXT:    movl $7, %r15d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB23_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r15d, %eax
@@ -1363,7 +1363,7 @@ define void @simple_urem_to_sel_non_zero_start_through_sub(i32 %N, i32 %rem_amt,
 ; CHECK-NEXT:    xorl %r15d, %r15d
 ; CHECK-NEXT:    xorl %r14d, %r14d
 ; CHECK-NEXT:    xorl %r12d, %r12d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB24_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r14d, %edi
@@ -1412,7 +1412,7 @@ define void @simple_urem_to_sel_non_zero_start_through_sub_no_simplfy(i32 %N, i3
 ; CHECK-NEXT:    movl %edi, %r14d
 ; CHECK-NEXT:    negl %r14d
 ; CHECK-NEXT:    addl $-2, %r15d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB25_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %r15d, %eax

diff  --git a/llvm/test/CodeGen/X86/fp-une-cmp.ll b/llvm/test/CodeGen/X86/fp-une-cmp.ll
index f1b1732f9092cd..dd32f23d3b9cb4 100644
--- a/llvm/test/CodeGen/X86/fp-une-cmp.ll
+++ b/llvm/test/CodeGen/X86/fp-une-cmp.ll
@@ -109,7 +109,7 @@ define void @pr27750(ptr %b, float %x, i1 %y) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorps %xmm1, %xmm1
 ; CHECK-NEXT:    jmp .LBB3_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_3: # %for.end
 ; CHECK-NEXT:    # in Loop: Header=BB3_1 Depth=1
 ; CHECK-NEXT:    ucomiss %xmm1, %xmm0

diff  --git a/llvm/test/CodeGen/X86/hoist-invariant-load.ll b/llvm/test/CodeGen/X86/hoist-invariant-load.ll
index ee23c922174d64..68e10c0c988710 100644
--- a/llvm/test/CodeGen/X86/hoist-invariant-load.ll
+++ b/llvm/test/CodeGen/X86/hoist-invariant-load.ll
@@ -43,7 +43,7 @@ define void @test(ptr %x) uwtable ssp {
 ; CHECK-NEXT:    movl $10000, %ebp ## imm = 0x2710
 ; CHECK-NEXT:    movq L_OBJC_SELECTOR_REFERENCES_(%rip), %r14
 ; CHECK-NEXT:    movq _objc_msgSend@GOTPCREL(%rip), %r15
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %for.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rbx, %rdi
@@ -94,7 +94,7 @@ define void @test_unordered(ptr %x) uwtable ssp {
 ; CHECK-NEXT:    movl $10000, %ebp ## imm = 0x2710
 ; CHECK-NEXT:    movq L_OBJC_SELECTOR_REFERENCES_(%rip), %r14
 ; CHECK-NEXT:    movq _objc_msgSend@GOTPCREL(%rip), %r15
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB1_1: ## %for.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rbx, %rdi
@@ -139,7 +139,7 @@ define void @test_volatile(ptr %x) uwtable ssp {
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    movl $10000, %ebp ## imm = 0x2710
 ; CHECK-NEXT:    movq _objc_msgSend@GOTPCREL(%rip), %r14
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB2_1: ## %for.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq L_OBJC_SELECTOR_REFERENCES_(%rip), %rsi
@@ -182,7 +182,7 @@ define void @test_seq_cst(ptr %x) uwtable ssp {
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    movl $10000, %ebp ## imm = 0x2710
 ; CHECK-NEXT:    movq _objc_msgSend@GOTPCREL(%rip), %r14
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB3_1: ## %for.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq L_OBJC_SELECTOR_REFERENCES_(%rip), %rsi
@@ -219,7 +219,7 @@ define void @test_multi_def(ptr dereferenceable(8) align(8) %x1,
 ; CHECK-NEXT:    xorl %r8d, %r8d
 ; CHECK-NEXT:    movq (%rdi), %rdx
 ; CHECK-NEXT:    movq (%rsi), %rsi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB4_2: ## %for.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mulxq %rsi, %r9, %rdi
@@ -267,7 +267,7 @@ define void @test_div_def(ptr dereferenceable(8) align(8) %x1,
 ; CHECK-NEXT:    xorl %r9d, %r9d
 ; CHECK-NEXT:    movl (%rdi), %edi
 ; CHECK-NEXT:    movl (%rsi), %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB5_2: ## %for.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %edi, %eax

diff  --git a/llvm/test/CodeGen/X86/i128-mul.ll b/llvm/test/CodeGen/X86/i128-mul.ll
index 9f58ed08433485..cffd88c55bb0a9 100644
--- a/llvm/test/CodeGen/X86/i128-mul.ll
+++ b/llvm/test/CodeGen/X86/i128-mul.ll
@@ -115,7 +115,7 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
 ; X86-NOBMI-NEXT:    xorl %edx, %edx
 ; X86-NOBMI-NEXT:    xorl %ecx, %ecx
 ; X86-NOBMI-NEXT:    movl $0, (%esp) # 4-byte Folded Spill
-; X86-NOBMI-NEXT:    .p2align 4, 0x90
+; X86-NOBMI-NEXT:    .p2align 4
 ; X86-NOBMI-NEXT:  .LBB1_2: # %for.body
 ; X86-NOBMI-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NOBMI-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -194,7 +194,7 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
 ; X86-BMI-NEXT:    xorl %eax, %eax
 ; X86-BMI-NEXT:    xorl %ebx, %ebx
 ; X86-BMI-NEXT:    xorl %ebp, %ebp
-; X86-BMI-NEXT:    .p2align 4, 0x90
+; X86-BMI-NEXT:    .p2align 4
 ; X86-BMI-NEXT:  .LBB1_2: # %for.body
 ; X86-BMI-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-BMI-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -263,7 +263,7 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
 ; X64-NOBMI-NEXT:    movq %rdx, %r8
 ; X64-NOBMI-NEXT:    xorl %r10d, %r10d
 ; X64-NOBMI-NEXT:    xorl %r9d, %r9d
-; X64-NOBMI-NEXT:    .p2align 4, 0x90
+; X64-NOBMI-NEXT:    .p2align 4
 ; X64-NOBMI-NEXT:  .LBB1_2: # %for.body
 ; X64-NOBMI-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NOBMI-NEXT:    movq %rcx, %rax
@@ -287,7 +287,7 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
 ; X64-BMI-NEXT:    movq %rdx, %rax
 ; X64-BMI-NEXT:    xorl %r9d, %r9d
 ; X64-BMI-NEXT:    xorl %r8d, %r8d
-; X64-BMI-NEXT:    .p2align 4, 0x90
+; X64-BMI-NEXT:    .p2align 4
 ; X64-BMI-NEXT:  .LBB1_2: # %for.body
 ; X64-BMI-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-BMI-NEXT:    movq %rcx, %rdx

diff  --git a/llvm/test/CodeGen/X86/i386-shrink-wrapping.ll b/llvm/test/CodeGen/X86/i386-shrink-wrapping.ll
index d282a8f42622b2..7bb3bf42d3361b 100644
--- a/llvm/test/CodeGen/X86/i386-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/X86/i386-shrink-wrapping.ll
@@ -30,7 +30,7 @@ define i32 @eflagsLiveInPrologue() #0 {
 ; ENABLE-NEXT:    movl (%eax), %eax
 ; ENABLE-NEXT:    testl %eax, %eax
 ; ENABLE-NEXT:    je LBB0_4
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB0_3: ## %for.body
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    jmp LBB0_3
@@ -70,7 +70,7 @@ define i32 @eflagsLiveInPrologue() #0 {
 ; DISABLE-NEXT:    movl (%eax), %eax
 ; DISABLE-NEXT:    testl %eax, %eax
 ; DISABLE-NEXT:    je LBB0_4
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB0_3: ## %for.body
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    jmp LBB0_3

diff  --git a/llvm/test/CodeGen/X86/icmp-shift-opt.ll b/llvm/test/CodeGen/X86/icmp-shift-opt.ll
index 1673649f5c07b3..c52b3ed6c926dd 100644
--- a/llvm/test/CodeGen/X86/icmp-shift-opt.ll
+++ b/llvm/test/CodeGen/X86/icmp-shift-opt.ll
@@ -18,7 +18,7 @@ define i128 @opt_setcc_lt_power_of_2(i128 %a) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_1: # %loop
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    addl $1, %edi
@@ -47,7 +47,7 @@ define i128 @opt_setcc_lt_power_of_2(i128 %a) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    movq %rsi, %rdx
 ; X64-NEXT:    movq %rdi, %rax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_1: # %loop
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    addq $1, %rax

diff  --git a/llvm/test/CodeGen/X86/ifunc-asm.ll b/llvm/test/CodeGen/X86/ifunc-asm.ll
index 6cfd37604ff5e1..a4c47da7f4c657 100644
--- a/llvm/test/CodeGen/X86/ifunc-asm.ll
+++ b/llvm/test/CodeGen/X86/ifunc-asm.ll
@@ -8,7 +8,7 @@ entry:
 ; ELF:             .type foo_resolver,@function
 ; ELF-NEXT:    foo_resolver:
 
-; MACHO:           .p2align 4, 0x90
+; MACHO:           .p2align 4
 ; MACHO-NEXT:  _foo_resolver
 
 
@@ -23,10 +23,10 @@ entry:
 ; MACHO-NEXT:      .quad _foo_ifunc.stub_helper
 ; MACHO-NEXT:      .section __TEXT,__text,regular,pure_instructions
 ; MACHO-NEXT:      .globl _foo_ifunc
-; MACHO-NEXT:      .p2align 0, 0x90
+; MACHO-NEXT:      .p2align 0
 ; MACHO-NEXT:  _foo_ifunc:
 ; MACHO-NEXT:      jmpl   *_foo_ifunc.lazy_pointer(%rip)
-; MACHO-NEXT:      .p2align 0, 0x90
+; MACHO-NEXT:      .p2align 0
 ; MACHO-NEXT:  _foo_ifunc.stub_helper:
 ; MACHO-NEXT:      pushq   %rax
 ; MACHO-NEXT:      pushq   %rdi

diff  --git a/llvm/test/CodeGen/X86/innermost-loop-alignment.ll b/llvm/test/CodeGen/X86/innermost-loop-alignment.ll
index 113d5bffe983be..482c8141b97ba7 100644
--- a/llvm/test/CodeGen/X86/innermost-loop-alignment.ll
+++ b/llvm/test/CodeGen/X86/innermost-loop-alignment.ll
@@ -25,12 +25,12 @@ define void @test(i32 %n, i32 %m) {
 ; DEFAULT-NEXT:    xorl %esi, %esi
 ; DEFAULT-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; DEFAULT-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; DEFAULT-NEXT:    .p2align 4, 0x90
+; DEFAULT-NEXT:    .p2align 4
 ; DEFAULT-NEXT:  .LBB0_1: # %outer
 ; DEFAULT-NEXT:    # =>This Loop Header: Depth=1
 ; DEFAULT-NEXT:    # Child Loop BB0_2 Depth 2
 ; DEFAULT-NEXT:    movl %edi, %ebp
-; DEFAULT-NEXT:    .p2align 4, 0x90
+; DEFAULT-NEXT:    .p2align 4
 ; DEFAULT-NEXT:  .LBB0_2: # %inner
 ; DEFAULT-NEXT:    # Parent Loop BB0_1 Depth=1
 ; DEFAULT-NEXT:    # => This Inner Loop Header: Depth=2
@@ -74,12 +74,12 @@ define void @test(i32 %n, i32 %m) {
 ; ALIGN32-NEXT:    xorl %esi, %esi
 ; ALIGN32-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; ALIGN32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; ALIGN32-NEXT:    .p2align 4, 0x90
+; ALIGN32-NEXT:    .p2align 4
 ; ALIGN32-NEXT:  .LBB0_1: # %outer
 ; ALIGN32-NEXT:    # =>This Loop Header: Depth=1
 ; ALIGN32-NEXT:    # Child Loop BB0_2 Depth 2
 ; ALIGN32-NEXT:    movl %edi, %ebp
-; ALIGN32-NEXT:    .p2align 5, 0x90
+; ALIGN32-NEXT:    .p2align 5
 ; ALIGN32-NEXT:  .LBB0_2: # %inner
 ; ALIGN32-NEXT:    # Parent Loop BB0_1 Depth=1
 ; ALIGN32-NEXT:    # => This Inner Loop Header: Depth=2
@@ -123,12 +123,12 @@ define void @test(i32 %n, i32 %m) {
 ; ALIGN64-NEXT:    xorl %esi, %esi
 ; ALIGN64-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; ALIGN64-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; ALIGN64-NEXT:    .p2align 5, 0x90
+; ALIGN64-NEXT:    .p2align 5
 ; ALIGN64-NEXT:  .LBB0_1: # %outer
 ; ALIGN64-NEXT:    # =>This Loop Header: Depth=1
 ; ALIGN64-NEXT:    # Child Loop BB0_2 Depth 2
 ; ALIGN64-NEXT:    movl %edi, %ebp
-; ALIGN64-NEXT:    .p2align 6, 0x90
+; ALIGN64-NEXT:    .p2align 6
 ; ALIGN64-NEXT:  .LBB0_2: # %inner
 ; ALIGN64-NEXT:    # Parent Loop BB0_1 Depth=1
 ; ALIGN64-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll b/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll
index 3660741fef3641..296694ca3c72d4 100644
--- a/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll
+++ b/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll
@@ -23,7 +23,7 @@ define void @FontChange(i1 %foo) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB0_9
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb366
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb $1, %dil
@@ -31,7 +31,7 @@ define void @FontChange(i1 %foo) nounwind {
 ; CHECK-NEXT:  # %bb.2: # %bb428
 ; CHECK-NEXT:    testb $1, %dil
 ; CHECK-NEXT:    je .LBB0_9
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %bb650
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpb $0, 0

diff  --git a/llvm/test/CodeGen/X86/issue76416.ll b/llvm/test/CodeGen/X86/issue76416.ll
index d0f7fe684a8401..7193e54a6ad553 100644
--- a/llvm/test/CodeGen/X86/issue76416.ll
+++ b/llvm/test/CodeGen/X86/issue76416.ll
@@ -12,7 +12,7 @@ define dso_local void @vga_load_state() #0 {
 ; CHECK-NEXT:    movl $0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    cmpl $3, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    jg .LBB0_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    xorl %eax, %eax
@@ -23,7 +23,7 @@ define dso_local void @vga_load_state() #0 {
 ; CHECK-NEXT:    jle .LBB0_2
 ; CHECK-NEXT:  .LBB0_3: # %for.end
 ; CHECK-NEXT:    movl $0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_4: # %for.cond1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    #APP

diff  --git a/llvm/test/CodeGen/X86/kcfi-patchable-function-prefix.ll b/llvm/test/CodeGen/X86/kcfi-patchable-function-prefix.ll
index e5d541f8ed9d97..1b7bd7835e890c 100644
--- a/llvm/test/CodeGen/X86/kcfi-patchable-function-prefix.ll
+++ b/llvm/test/CodeGen/X86/kcfi-patchable-function-prefix.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 
-; CHECK:          .p2align 4, 0x90
+; CHECK:          .p2align 4
 ; CHECK-LABEL:    __cfi_f1:
 ; CHECK-COUNT-11:   nop
 ; CHECK-NEXT:       movl $12345678, %eax
@@ -13,7 +13,7 @@ define void @f1(ptr noundef %x) !kcfi_type !1 {
   ret void
 }
 
-; CHECK:          .p2align 4, 0x90
+; CHECK:          .p2align 4
 ; CHECK-NOT:      __cfi_f2:
 ; CHECK-NOT:        nop
 ; CHECK-LABEL:    f2:
@@ -23,7 +23,7 @@ define void @f2(ptr noundef %x) {
   ret void
 }
 
-; CHECK:          .p2align 4, 0x90
+; CHECK:          .p2align 4
 ; CHECK-LABEL:    __cfi_f3:
 ; CHECK-NOT:        nop
 ; CHECK-NEXT:       movl $12345678, %eax
@@ -35,7 +35,7 @@ define void @f3(ptr noundef %x) #0 !kcfi_type !1 {
   ret void
 }
 
-; CHECK:          .p2align 4, 0x90
+; CHECK:          .p2align 4
 ; CHECK-NOT:      __cfi_f4:
 ; CHECK-COUNT-16:   nop
 ; CHECK-LABEL:    f4:

diff  --git a/llvm/test/CodeGen/X86/kcfi.ll b/llvm/test/CodeGen/X86/kcfi.ll
index 566a88b76c4fc9..059efcc71b0eb8 100644
--- a/llvm/test/CodeGen/X86/kcfi.ll
+++ b/llvm/test/CodeGen/X86/kcfi.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck %s --check-prefixes=MIR,ISEL
 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -verify-machineinstrs -stop-after=kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
 
-; ASM:       .p2align 4, 0x90
+; ASM:       .p2align 4
 ; ASM:       .type __cfi_f1,@function
 ; ASM-LABEL: __cfi_f1:
 ; ASM-NEXT:    nop

diff  --git a/llvm/test/CodeGen/X86/known-bits.ll b/llvm/test/CodeGen/X86/known-bits.ll
index f7b2538e1de2f7..58a0595e4322a4 100644
--- a/llvm/test/CodeGen/X86/known-bits.ll
+++ b/llvm/test/CodeGen/X86/known-bits.ll
@@ -14,13 +14,13 @@ define void @knownbits_zext_in_reg(ptr) nounwind {
 ; X86-NEXT:    shrl $14, %edx
 ; X86-NEXT:    movzbl %al, %ecx
 ; X86-NEXT:    xorl %ebx, %ebx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_1: # %CF
 ; X86-NEXT:    # =>This Loop Header: Depth=1
 ; X86-NEXT:    # Child Loop BB0_2 Depth 2
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    divb %dl
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_2: # %CF237
 ; X86-NEXT:    # Parent Loop BB0_1 Depth=1
 ; X86-NEXT:    # => This Inner Loop Header: Depth=2
@@ -37,13 +37,13 @@ define void @knownbits_zext_in_reg(ptr) nounwind {
 ; X64-NEXT:    shrl $14, %edx
 ; X64-NEXT:    movzbl %cl, %ecx
 ; X64-NEXT:    xorl %esi, %esi
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_1: # %CF
 ; X64-NEXT:    # =>This Loop Header: Depth=1
 ; X64-NEXT:    # Child Loop BB0_2 Depth 2
 ; X64-NEXT:    movl %ecx, %eax
 ; X64-NEXT:    divb %dl
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_2: # %CF237
 ; X64-NEXT:    # Parent Loop BB0_1 Depth=1
 ; X64-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/lea-opt-cse2.ll b/llvm/test/CodeGen/X86/lea-opt-cse2.ll
index 258a12a8b54e95..e39d01f1447f84 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse2.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse2.ll
@@ -7,7 +7,7 @@
 define void @foo(ptr nocapture %ctx, i32 %n) local_unnamed_addr #0 {
 ; X64-LABEL: foo:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_1: # %loop
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl (%rdi), %eax
@@ -32,7 +32,7 @@ define void @foo(ptr nocapture %ctx, i32 %n) local_unnamed_addr #0 {
 ; X86-NEXT:    .cfi_offset %edi, -8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_1: # %loop
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl (%eax), %edx

diff  --git a/llvm/test/CodeGen/X86/lea-opt-cse4.ll b/llvm/test/CodeGen/X86/lea-opt-cse4.ll
index ef8e030d6caf22..4fa9acd99bb2fa 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse4.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse4.ll
@@ -59,7 +59,7 @@ define void @foo(ptr nocapture %ctx, i32 %n) local_unnamed_addr #0 {
 define void @foo_loop(ptr nocapture %ctx, i32 %n) local_unnamed_addr #0 {
 ; X64-LABEL: foo_loop:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB1_1: # %loop
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl (%rdi), %ecx
@@ -88,7 +88,7 @@ define void @foo_loop(ptr nocapture %ctx, i32 %n) local_unnamed_addr #0 {
 ; X86-NEXT:    .cfi_offset %edi, -8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB1_1: # %loop
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl (%eax), %esi

diff  --git a/llvm/test/CodeGen/X86/licm-symbol.ll b/llvm/test/CodeGen/X86/licm-symbol.ll
index 2e62ed9c8f6d93..e6b3f34ee9bb69 100644
--- a/llvm/test/CodeGen/X86/licm-symbol.ll
+++ b/llvm/test/CodeGen/X86/licm-symbol.ll
@@ -6,7 +6,7 @@
 ; CHECK: pushl
 ; CHECK: movl  $176, %esi
 ; CHECK: addl  L___sF$non_lazy_ptr, %esi
-; CHECK: .p2align  4, 0x90
+; CHECK: .p2align  4
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
 target triple = "i386-apple-darwin8"

diff  --git a/llvm/test/CodeGen/X86/loop-search.ll b/llvm/test/CodeGen/X86/loop-search.ll
index 4646452ffdc653..0d5f97d21fb3ad 100644
--- a/llvm/test/CodeGen/X86/loop-search.ll
+++ b/llvm/test/CodeGen/X86/loop-search.ll
@@ -12,7 +12,7 @@ define zeroext i1 @search(i32 %needle, ptr nocapture readonly %haystack, i32 %co
 ; CHECK-NEXT:  ## %bb.1: ## %for.body.preheader
 ; CHECK-NEXT:    movslq %edx, %rax
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_2: ## %for.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpl %edi, (%rsi,%rcx,4)

diff  --git a/llvm/test/CodeGen/X86/loop-strength-reduce5.ll b/llvm/test/CodeGen/X86/loop-strength-reduce5.ll
index 8b512fc94f78ba..ebae51fa2aa468 100644
--- a/llvm/test/CodeGen/X86/loop-strength-reduce5.ll
+++ b/llvm/test/CodeGen/X86/loop-strength-reduce5.ll
@@ -13,7 +13,7 @@ define void @foo(i32 %N) nounwind {
 ; CHECK-NEXT:  # %bb.1: # %bb.preheader
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movw %dx, X

diff  --git a/llvm/test/CodeGen/X86/loop-strength-reduce7.ll b/llvm/test/CodeGen/X86/loop-strength-reduce7.ll
index 3687194972f27f..11473bacc56cf5 100644
--- a/llvm/test/CodeGen/X86/loop-strength-reduce7.ll
+++ b/llvm/test/CodeGen/X86/loop-strength-reduce7.ll
@@ -13,13 +13,13 @@ define fastcc void @outer_loop(ptr nocapture %gfp, ptr nocapture %xr, i32 %targ_
 ; CHECK-NEXT:    pushl %esi
 ; CHECK-NEXT:    movl $88, %eax
 ; CHECK-NEXT:    movl $168, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_2: ## %bb28.i37
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
 ; CHECK-NEXT:    ## Child Loop BB0_3 Depth 2
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    movl %eax, %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_3: ## %bb29.i38
 ; CHECK-NEXT:    ## Parent Loop BB0_2 Depth=1
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/lsr-addrecloops.ll b/llvm/test/CodeGen/X86/lsr-addrecloops.ll
index 963405c8b0b3d3..98c8f587784c2d 100644
--- a/llvm/test/CodeGen/X86/lsr-addrecloops.ll
+++ b/llvm/test/CodeGen/X86/lsr-addrecloops.ll
@@ -16,7 +16,7 @@ define void @in4dob_(ptr nocapture writeonly %0, ptr nocapture readonly %1, ptr
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    jmp .LBB0_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_20: # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    incq %r10
 ; CHECK-NEXT:    addq %r9, %rax
@@ -46,7 +46,7 @@ define void @in4dob_(ptr nocapture writeonly %0, ptr nocapture readonly %1, ptr
 ; CHECK-NEXT:    andq $-8, %r10
 ; CHECK-NEXT:    xorl %r9d, %r9d
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_6: # %vector.body807
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leaq (%rdi,%r9), %r11
@@ -68,7 +68,7 @@ define void @in4dob_(ptr nocapture writeonly %0, ptr nocapture readonly %1, ptr
 ; CHECK-NEXT:    addq %rdi, %r9
 ; CHECK-NEXT:    xorl %r10d, %r10d
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_9: # %vector.body807.epil
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leaq (%r9,%r10), %r11
@@ -95,7 +95,7 @@ define void @in4dob_(ptr nocapture writeonly %0, ptr nocapture readonly %1, ptr
 ; CHECK-NEXT:    andq $-8, %rdx
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_14: # %vector.body847
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leaq (%rdi,%rcx), %r8
@@ -117,7 +117,7 @@ define void @in4dob_(ptr nocapture writeonly %0, ptr nocapture readonly %1, ptr
 ; CHECK-NEXT:    leaq 96(%rcx,%rdi), %rcx
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_17: # %vector.body847.epil
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leaq (%rcx,%rdx), %rdi

diff  --git a/llvm/test/CodeGen/X86/lsr-interesting-step.ll b/llvm/test/CodeGen/X86/lsr-interesting-step.ll
index b15c8c42959984..049d2640d25dbd 100644
--- a/llvm/test/CodeGen/X86/lsr-interesting-step.ll
+++ b/llvm/test/CodeGen/X86/lsr-interesting-step.ll
@@ -10,12 +10,12 @@ define void @foo() nounwind {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $2, %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb7
 ; CHECK-NEXT:    # =>This Loop Header: Depth=1
 ; CHECK-NEXT:    # Child Loop BB0_2 Depth 2
 ; CHECK-NEXT:    movl $2, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %bb11
 ; CHECK-NEXT:    # Parent Loop BB0_1 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
index 14dfc046c029a6..4cd206adc31de5 100644
--- a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
+++ b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
@@ -22,7 +22,7 @@ define void @t(ptr nocapture %in, ptr nocapture %out, ptr nocapture %rk, i32 %r)
 ; GENERIC-NEXT:    movq _Te1@GOTPCREL(%rip), %rax
 ; GENERIC-NEXT:    movq _Te3@GOTPCREL(%rip), %r10
 ; GENERIC-NEXT:    movq %rcx, %r11
-; GENERIC-NEXT:    .p2align 4, 0x90
+; GENERIC-NEXT:    .p2align 4
 ; GENERIC-NEXT:  LBB0_1: ## %bb
 ; GENERIC-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; GENERIC-NEXT:    movzbl %r8b, %r14d
@@ -105,7 +105,7 @@ define void @t(ptr nocapture %in, ptr nocapture %out, ptr nocapture %rk, i32 %r)
 ; ATOM-NEXT:    movq _Te3@GOTPCREL(%rip), %r10
 ; ATOM-NEXT:    decl %ecx
 ; ATOM-NEXT:    movq %rcx, %r11
-; ATOM-NEXT:    .p2align 4, 0x90
+; ATOM-NEXT:    .p2align 4
 ; ATOM-NEXT:  LBB0_1: ## %bb
 ; ATOM-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ATOM-NEXT:    movl %r15d, %ebx
@@ -315,7 +315,7 @@ define i32 @f(i32 %i, ptr nocapture %a) nounwind uwtable readonly ssp {
 ; GENERIC-NEXT:    leaq (%rsi,%rax,4), %rcx
 ; GENERIC-NEXT:    xorl %eax, %eax
 ; GENERIC-NEXT:    xorl %edx, %edx
-; GENERIC-NEXT:    .p2align 4, 0x90
+; GENERIC-NEXT:    .p2align 4
 ; GENERIC-NEXT:  LBB1_2: ## %for.body
 ; GENERIC-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; GENERIC-NEXT:    movl (%rcx), %esi
@@ -339,7 +339,7 @@ define i32 @f(i32 %i, ptr nocapture %a) nounwind uwtable readonly ssp {
 ; ATOM-NEXT:    xorl %edx, %edx
 ; ATOM-NEXT:    leaq (%rsi,%rax,4), %rcx
 ; ATOM-NEXT:    xorl %eax, %eax
-; ATOM-NEXT:    .p2align 4, 0x90
+; ATOM-NEXT:    .p2align 4
 ; ATOM-NEXT:  LBB1_2: ## %for.body
 ; ATOM-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ATOM-NEXT:    movl (%rcx), %esi

diff  --git a/llvm/test/CodeGen/X86/lsr-negative-stride.ll b/llvm/test/CodeGen/X86/lsr-negative-stride.ll
index 26c6128ab48db5..1d5e208f3a3268 100644
--- a/llvm/test/CodeGen/X86/lsr-negative-stride.ll
+++ b/llvm/test/CodeGen/X86/lsr-negative-stride.ll
@@ -23,12 +23,12 @@ define i32 @t(i32 %a, i32 %b) nounwind {
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    movl %edx, %eax
 ; CHECK-NEXT:    retl
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %bb.outer
 ; CHECK-NEXT:    # =>This Loop Header: Depth=1
 ; CHECK-NEXT:    # Child Loop BB0_3 Depth 2
 ; CHECK-NEXT:    movl %edx, %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %bb
 ; CHECK-NEXT:    # Parent Loop BB0_2 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -40,7 +40,7 @@ define i32 @t(i32 %a, i32 %b) nounwind {
 ; CHECK-NEXT:    movl %eax, %edx
 ; CHECK-NEXT:    jne .LBB0_3
 ; CHECK-NEXT:    jmp .LBB0_6
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_5: # %cond_false
 ; CHECK-NEXT:    # in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    subl %edx, %ecx

diff  --git a/llvm/test/CodeGen/X86/lsr-sort.ll b/llvm/test/CodeGen/X86/lsr-sort.ll
index 9c7f5efaf74eb7..37cb6f996d34fe 100644
--- a/llvm/test/CodeGen/X86/lsr-sort.ll
+++ b/llvm/test/CodeGen/X86/lsr-sort.ll
@@ -9,7 +9,7 @@ define dso_local i32 @foo(i32 %N) nounwind {
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jle .LBB0_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movw %ax, X(%rip)

diff  --git a/llvm/test/CodeGen/X86/lsr-static-addr.ll b/llvm/test/CodeGen/X86/lsr-static-addr.ll
index a06ead491bf7df..c67ac7d909fa7d 100644
--- a/llvm/test/CodeGen/X86/lsr-static-addr.ll
+++ b/llvm/test/CodeGen/X86/lsr-static-addr.ll
@@ -12,7 +12,7 @@ define void @foo(i64 %n) nounwind {
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [2.2999999999999998E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
@@ -31,7 +31,7 @@ define void @foo(i64 %n) nounwind {
 ; ATOM-NEXT:  # %bb.1: # %for.body.preheader
 ; ATOM-NEXT:    xorl %eax, %eax
 ; ATOM-NEXT:    movsd {{.*#+}} xmm0 = [2.2999999999999998E+0,0.0E+0]
-; ATOM-NEXT:    .p2align 4, 0x90
+; ATOM-NEXT:    .p2align 4
 ; ATOM-NEXT:  .LBB0_2: # %for.body
 ; ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; ATOM-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero

diff  --git a/llvm/test/CodeGen/X86/machine-cp.ll b/llvm/test/CodeGen/X86/machine-cp.ll
index f84960485840d8..c84a1159ad56a6 100644
--- a/llvm/test/CodeGen/X86/machine-cp.ll
+++ b/llvm/test/CodeGen/X86/machine-cp.ll
@@ -11,7 +11,7 @@ define i32 @t1(i32 %a, i32 %b) nounwind  {
 ; CHECK-NEXT:    je LBB0_4
 ; CHECK-NEXT:  ## %bb.1: ## %while.body.preheader
 ; CHECK-NEXT:    movl %esi, %edx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_2: ## %while.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %edx, %ecx
@@ -62,7 +62,7 @@ define i32 @t3(i64 %a, i64 %b) nounwind  {
 ; CHECK-NEXT:    je LBB2_4
 ; CHECK-NEXT:  ## %bb.1: ## %while.body.preheader
 ; CHECK-NEXT:    movq %rsi, %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB2_2: ## %while.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rdx, %rcx

diff  --git a/llvm/test/CodeGen/X86/machine-cse.ll b/llvm/test/CodeGen/X86/machine-cse.ll
index 431031136e4a42..e953f67651d552 100644
--- a/llvm/test/CodeGen/X86/machine-cse.ll
+++ b/llvm/test/CodeGen/X86/machine-cse.ll
@@ -68,7 +68,7 @@ define void @commute(i32 %test_case, i32 %scale) nounwind ssp {
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    callq printf@PLT
 ; CHECK-NEXT:    addq $8, %rsp
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_3: # %for.body53.us
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB1_3
@@ -141,7 +141,7 @@ define ptr @bsd_memchr(ptr %s, i32 %a, i32 %c, i64 %n) nounwind ssp {
 ; CHECK-NEXT:  # %bb.1: # %preheader
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    movzbl %dl, %edx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_2: # %do.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpl %edx, %esi

diff  --git a/llvm/test/CodeGen/X86/madd.ll b/llvm/test/CodeGen/X86/madd.ll
index f3b117c626c8cd..bdb7c307a57590 100644
--- a/llvm/test/CodeGen/X86/madd.ll
+++ b/llvm/test/CodeGen/X86/madd.ll
@@ -12,7 +12,7 @@ define i32 @_Z10test_shortPsS_i_128(ptr nocapture readonly, ptr nocapture readon
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB0_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
@@ -37,7 +37,7 @@ define i32 @_Z10test_shortPsS_i_128(ptr nocapture readonly, ptr nocapture readon
 ; AVX-NEXT:    movl %edx, %eax
 ; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    xorl %ecx, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB0_1: # %vector.body
 ; AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -91,7 +91,7 @@ define i32 @_Z10test_shortPsS_i_256(ptr nocapture readonly, ptr nocapture readon
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB1_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu (%rdi,%rcx,2), %xmm2
@@ -115,7 +115,7 @@ define i32 @_Z10test_shortPsS_i_256(ptr nocapture readonly, ptr nocapture readon
 ; AVX1-NEXT:    movl %edx, %eax
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    xorl %ecx, %ecx
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB1_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovdqu (%rsi,%rcx,2), %xmm1
@@ -141,7 +141,7 @@ define i32 @_Z10test_shortPsS_i_256(ptr nocapture readonly, ptr nocapture readon
 ; AVX256-NEXT:    movl %edx, %eax
 ; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX256-NEXT:    xorl %ecx, %ecx
-; AVX256-NEXT:    .p2align 4, 0x90
+; AVX256-NEXT:    .p2align 4
 ; AVX256-NEXT:  .LBB1_1: # %vector.body
 ; AVX256-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX256-NEXT:    vmovdqu (%rsi,%rcx,2), %xmm1
@@ -200,7 +200,7 @@ define i32 @_Z10test_shortPsS_i_512(ptr nocapture readonly, ptr nocapture readon
 ; SSE2-NEXT:    xorl %ecx, %ecx
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB2_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu (%rdi,%rcx,2), %xmm3
@@ -231,7 +231,7 @@ define i32 @_Z10test_shortPsS_i_512(ptr nocapture readonly, ptr nocapture readon
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    xorl %ecx, %ecx
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB2_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovdqu (%rsi,%rcx,2), %xmm2
@@ -265,7 +265,7 @@ define i32 @_Z10test_shortPsS_i_512(ptr nocapture readonly, ptr nocapture readon
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    xorl %ecx, %ecx
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB2_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovdqu (%rsi,%rcx,2), %ymm2
@@ -291,7 +291,7 @@ define i32 @_Z10test_shortPsS_i_512(ptr nocapture readonly, ptr nocapture readon
 ; AVX512-NEXT:    movl %edx, %eax
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB2_1: # %vector.body
 ; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vmovdqu (%rsi,%rcx,2), %ymm1
@@ -356,7 +356,7 @@ define i32 @_Z10test_shortPsS_i_1024(ptr nocapture readonly, ptr nocapture reado
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB3_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu (%rdi,%rcx,2), %xmm5
@@ -400,7 +400,7 @@ define i32 @_Z10test_shortPsS_i_1024(ptr nocapture readonly, ptr nocapture reado
 ; AVX1-NEXT:    xorl %ecx, %ecx
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB3_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovdqu (%rsi,%rcx,2), %xmm3
@@ -448,7 +448,7 @@ define i32 @_Z10test_shortPsS_i_1024(ptr nocapture readonly, ptr nocapture reado
 ; AVX2-NEXT:    xorl %ecx, %ecx
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB3_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovdqu (%rsi,%rcx,2), %ymm3
@@ -480,7 +480,7 @@ define i32 @_Z10test_shortPsS_i_1024(ptr nocapture readonly, ptr nocapture reado
 ; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    xorl %ecx, %ecx
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT:    .p2align 4, 0x90
+; AVX512F-NEXT:    .p2align 4
 ; AVX512F-NEXT:  .LBB3_1: # %vector.body
 ; AVX512F-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512F-NEXT:    vmovdqu (%rsi,%rcx,2), %ymm2
@@ -512,7 +512,7 @@ define i32 @_Z10test_shortPsS_i_1024(ptr nocapture readonly, ptr nocapture reado
 ; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    xorl %ecx, %ecx
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT:    .p2align 4, 0x90
+; AVX512BW-NEXT:    .p2align 4
 ; AVX512BW-NEXT:  .LBB3_1: # %vector.body
 ; AVX512BW-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512BW-NEXT:    vmovdqu64 (%rsi,%rcx,2), %zmm2
@@ -576,7 +576,7 @@ define i32 @_Z9test_charPcS_i_128(ptr nocapture readonly, ptr nocapture readonly
 ; SSE2-NEXT:    movl %edx, %eax
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB4_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -605,7 +605,7 @@ define i32 @_Z9test_charPcS_i_128(ptr nocapture readonly, ptr nocapture readonly
 ; AVX-NEXT:    movl %edx, %eax
 ; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    xorl %ecx, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB4_1: # %vector.body
 ; AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    vpmovsxbd (%rdi,%rcx), %xmm1
@@ -659,7 +659,7 @@ define i32 @_Z9test_charPcS_i_256(ptr nocapture readonly, ptr nocapture readonly
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB5_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
@@ -687,7 +687,7 @@ define i32 @_Z9test_charPcS_i_256(ptr nocapture readonly, ptr nocapture readonly
 ; AVX1-NEXT:    movl %edx, %eax
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    xorl %ecx, %ecx
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB5_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovsxbw (%rdi,%rcx), %xmm1
@@ -714,7 +714,7 @@ define i32 @_Z9test_charPcS_i_256(ptr nocapture readonly, ptr nocapture readonly
 ; AVX256-NEXT:    movl %edx, %eax
 ; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX256-NEXT:    xorl %ecx, %ecx
-; AVX256-NEXT:    .p2align 4, 0x90
+; AVX256-NEXT:    .p2align 4
 ; AVX256-NEXT:  .LBB5_1: # %vector.body
 ; AVX256-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX256-NEXT:    vpmovsxbw (%rdi,%rcx), %xmm1
@@ -774,7 +774,7 @@ define i32 @_Z9test_charPcS_i_512(ptr nocapture readonly, ptr nocapture readonly
 ; SSE2-NEXT:    xorl %ecx, %ecx
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB6_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu (%rdi,%rcx), %xmm3
@@ -811,7 +811,7 @@ define i32 @_Z9test_charPcS_i_512(ptr nocapture readonly, ptr nocapture readonly
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    xorl %ecx, %ecx
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB6_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovsxbw 8(%rdi,%rcx), %xmm2
@@ -847,7 +847,7 @@ define i32 @_Z9test_charPcS_i_512(ptr nocapture readonly, ptr nocapture readonly
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    xorl %ecx, %ecx
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB6_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm2
@@ -874,7 +874,7 @@ define i32 @_Z9test_charPcS_i_512(ptr nocapture readonly, ptr nocapture readonly
 ; AVX512-NEXT:    movl %edx, %eax
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB6_1: # %vector.body
 ; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm1
@@ -940,7 +940,7 @@ define i32 @_Z9test_charPcS_i_1024(ptr nocapture readonly, ptr nocapture readonl
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB7_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu (%rdi,%rcx), %xmm7
@@ -996,7 +996,7 @@ define i32 @_Z9test_charPcS_i_1024(ptr nocapture readonly, ptr nocapture readonl
 ; AVX1-NEXT:    xorl %ecx, %ecx
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB7_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovsxbw 24(%rdi,%rcx), %xmm3
@@ -1048,7 +1048,7 @@ define i32 @_Z9test_charPcS_i_1024(ptr nocapture readonly, ptr nocapture readonl
 ; AVX2-NEXT:    xorl %ecx, %ecx
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB7_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovsxbw 16(%rdi,%rcx), %ymm3
@@ -1082,7 +1082,7 @@ define i32 @_Z9test_charPcS_i_1024(ptr nocapture readonly, ptr nocapture readonl
 ; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    xorl %ecx, %ecx
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT:    .p2align 4, 0x90
+; AVX512F-NEXT:    .p2align 4
 ; AVX512F-NEXT:  .LBB7_1: # %vector.body
 ; AVX512F-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512F-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm2
@@ -1116,7 +1116,7 @@ define i32 @_Z9test_charPcS_i_1024(ptr nocapture readonly, ptr nocapture readonl
 ; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    xorl %ecx, %ecx
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT:    .p2align 4, 0x90
+; AVX512BW-NEXT:    .p2align 4
 ; AVX512BW-NEXT:  .LBB7_1: # %vector.body
 ; AVX512BW-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512BW-NEXT:    vpmovsxbw (%rdi,%rcx), %zmm2
@@ -1181,7 +1181,7 @@ define i32 @test_unsigned_short_128(ptr nocapture readonly, ptr nocapture readon
 ; SSE2-NEXT:    movl %edx, %eax
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB8_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
@@ -1207,7 +1207,7 @@ define i32 @test_unsigned_short_128(ptr nocapture readonly, ptr nocapture readon
 ; AVX-NEXT:    movl %edx, %eax
 ; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    xorl %ecx, %ecx
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB8_1: # %vector.body
 ; AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1261,7 +1261,7 @@ define i32 @test_unsigned_short_256(ptr nocapture readonly, ptr nocapture readon
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB9_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu (%rdi,%rcx,2), %xmm2
@@ -1291,7 +1291,7 @@ define i32 @test_unsigned_short_256(ptr nocapture readonly, ptr nocapture readon
 ; AVX1-NEXT:    movl %edx, %eax
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    xorl %ecx, %ecx
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB9_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1323,7 +1323,7 @@ define i32 @test_unsigned_short_256(ptr nocapture readonly, ptr nocapture readon
 ; AVX256-NEXT:    movl %edx, %eax
 ; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX256-NEXT:    xorl %ecx, %ecx
-; AVX256-NEXT:    .p2align 4, 0x90
+; AVX256-NEXT:    .p2align 4
 ; AVX256-NEXT:  .LBB9_1: # %vector.body
 ; AVX256-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX256-NEXT:    vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -1384,7 +1384,7 @@ define i32 @test_unsigned_short_512(ptr nocapture readonly, ptr nocapture readon
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB10_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu (%rdi,%rcx,2), %xmm4
@@ -1427,7 +1427,7 @@ define i32 @test_unsigned_short_512(ptr nocapture readonly, ptr nocapture readon
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    xorl %ecx, %ecx
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB10_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1473,7 +1473,7 @@ define i32 @test_unsigned_short_512(ptr nocapture readonly, ptr nocapture readon
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    xorl %ecx, %ecx
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB10_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -1504,7 +1504,7 @@ define i32 @test_unsigned_short_512(ptr nocapture readonly, ptr nocapture readon
 ; AVX512-NEXT:    movl %edx, %eax
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    xorl %ecx, %ecx
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB10_1: # %vector.body
 ; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -1573,7 +1573,7 @@ define i32 @test_unsigned_short_1024(ptr nocapture readonly, ptr nocapture reado
 ; SSE2-NEXT:    pxor %xmm6, %xmm6
 ; SSE2-NEXT:    pxor %xmm5, %xmm5
 ; SSE2-NEXT:    pxor %xmm7, %xmm7
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB11_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu 48(%rdi,%rcx,2), %xmm8
@@ -1642,7 +1642,7 @@ define i32 @test_unsigned_short_1024(ptr nocapture readonly, ptr nocapture reado
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB11_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1716,7 +1716,7 @@ define i32 @test_unsigned_short_1024(ptr nocapture readonly, ptr nocapture reado
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB11_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -1758,7 +1758,7 @@ define i32 @test_unsigned_short_1024(ptr nocapture readonly, ptr nocapture reado
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    xorl %ecx, %ecx
 ; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB11_1: # %vector.body
 ; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -2742,7 +2742,7 @@ define i64 @sum_and_sum_of_squares(ptr %a, i32 %n) {
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB33_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movq {{.*#+}} xmm4 = mem[0],zero
@@ -2780,7 +2780,7 @@ define i64 @sum_and_sum_of_squares(ptr %a, i32 %n) {
 ; AVX1-NEXT:    movl %esi, %eax
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB33_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -2823,7 +2823,7 @@ define i64 @sum_and_sum_of_squares(ptr %a, i32 %n) {
 ; AVX256-NEXT:    movl %esi, %eax
 ; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX256-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX256-NEXT:    .p2align 4, 0x90
+; AVX256-NEXT:    .p2align 4
 ; AVX256-NEXT:  .LBB33_1: # %vector.body
 ; AVX256-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX256-NEXT:    vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -2900,7 +2900,7 @@ define i32 @sum_of_square_differences(ptr %a, ptr %b, i32 %n) {
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    xorl %ecx, %ecx
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB34_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
@@ -2927,7 +2927,7 @@ define i32 @sum_of_square_differences(ptr %a, ptr %b, i32 %n) {
 ; AVX1-NEXT:    movl %edx, %eax
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    xorl %ecx, %ecx
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB34_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -2955,7 +2955,7 @@ define i32 @sum_of_square_differences(ptr %a, ptr %b, i32 %n) {
 ; AVX256-NEXT:    movl %edx, %eax
 ; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX256-NEXT:    xorl %ecx, %ecx
-; AVX256-NEXT:    .p2align 4, 0x90
+; AVX256-NEXT:    .p2align 4
 ; AVX256-NEXT:  .LBB34_1: # %vector.body
 ; AVX256-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX256-NEXT:    vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -3105,7 +3105,7 @@ define i32 @add_used_by_loop_phi(ptr %a, ptr %b, i64 %offset_a, i64 %offset_b, i
 ; SSE2-NEXT:    xorl %eax, %eax
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB38_1: # %loop
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu (%rdi,%rax), %xmm3
@@ -3143,7 +3143,7 @@ define i32 @add_used_by_loop_phi(ptr %a, ptr %b, i64 %offset_a, i64 %offset_b, i
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    xorl %eax, %eax
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB38_1: # %loop
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovsxbw 8(%rdi,%rax), %xmm2
@@ -3180,7 +3180,7 @@ define i32 @add_used_by_loop_phi(ptr %a, ptr %b, i64 %offset_a, i64 %offset_b, i
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    xorl %eax, %eax
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB38_1: # %loop
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovsxbw (%rdi,%rax), %ymm2
@@ -3208,7 +3208,7 @@ define i32 @add_used_by_loop_phi(ptr %a, ptr %b, i64 %offset_a, i64 %offset_b, i
 ; AVX512-NEXT:    addq %rcx, %rsi
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB38_1: # %loop
 ; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vpmovsxbw (%rdi,%rax), %ymm1

diff --git a/llvm/test/CodeGen/X86/masked-iv-safe.ll b/llvm/test/CodeGen/X86/masked-iv-safe.ll
index 4e4ad3a0161e03..a4f5e52a27d8a7 100644
--- a/llvm/test/CodeGen/X86/masked-iv-safe.ll
+++ b/llvm/test/CodeGen/X86/masked-iv-safe.ll
@@ -11,7 +11,7 @@ define void @count_up(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
@@ -57,7 +57,7 @@ define void @count_down(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
@@ -103,7 +103,7 @@ define void @count_up_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
@@ -151,7 +151,7 @@ define void @count_down_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
@@ -199,7 +199,7 @@ define void @another_count_up(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
@@ -251,7 +251,7 @@ define void @another_count_down(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
 ; CHECK-NEXT:    movq %rdi, %rcx
 ; CHECK-NEXT:    movq %rdi, %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB5_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
@@ -303,7 +303,7 @@ define void @another_count_up_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB6_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
@@ -351,7 +351,7 @@ define void @another_count_down_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero

diff --git a/llvm/test/CodeGen/X86/masked-iv-unsafe.ll b/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
index f10db3424c2ea0..42bd6e9b754477 100644
--- a/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
+++ b/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
@@ -11,7 +11,7 @@ define void @count_up(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl %al, %ecx
@@ -64,7 +64,7 @@ define void @count_down(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl %al, %ecx
@@ -120,7 +120,7 @@ define void @count_up_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rdx, %rsi
@@ -180,7 +180,7 @@ define void @count_down_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rdx, %rsi
@@ -238,7 +238,7 @@ define void @another_count_up(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl %al, %ecx
@@ -291,7 +291,7 @@ define void @another_count_down(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB5_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl %sil, %eax
@@ -347,7 +347,7 @@ define void @another_count_up_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    movq %rdi, %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB6_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rax, %r8
@@ -409,7 +409,7 @@ define void @another_count_down_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB7_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rsi, %rdx
@@ -469,7 +469,7 @@ define void @yet_another_count_down(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
 ; CHECK-NEXT:    movq %rdi, %rcx
 ; CHECK-NEXT:    movq %rdi, %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB8_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
@@ -521,7 +521,7 @@ define void @yet_another_count_up(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB9_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl %al, %ecx
@@ -575,7 +575,7 @@ define void @still_another_count_down(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB10_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl %al, %ecx
@@ -630,7 +630,7 @@ define void @yet_another_count_up_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
 ; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB11_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rcx, %rsi
@@ -690,7 +690,7 @@ define void @yet_another_count_down_signed(ptr %d, i64 %n) nounwind {
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [1.0000000000000001E-1,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [2.2999999999999998E+0,0.0E+0]
 ; CHECK-NEXT:    movsd {{.*#+}} xmm2 = [4.5E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB12_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rdx, %rsi

diff --git a/llvm/test/CodeGen/X86/merge_store.ll b/llvm/test/CodeGen/X86/merge_store.ll
index 1cf3b9a83bac13..afe0ef969a40e7 100644
--- a/llvm/test/CodeGen/X86/merge_store.ll
+++ b/llvm/test/CodeGen/X86/merge_store.ll
@@ -6,7 +6,7 @@ define void @merge_store(ptr nocapture %a) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    movabsq $4294967297, %rcx # imm = 0x100000001
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rcx, (%rdi,%rax,4)

diff --git a/llvm/test/CodeGen/X86/min-legal-vector-width.ll b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
index 1413a19b349357..e3d2ac659d4386 100644
--- a/llvm/test/CodeGen/X86/min-legal-vector-width.ll
+++ b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
@@ -177,7 +177,7 @@ define dso_local i32 @_Z9test_charPcS_i_256(ptr nocapture readonly, ptr nocaptur
 ; CHECK-SKX-NEXT:    xorl %ecx, %ecx
 ; CHECK-SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-SKX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; CHECK-SKX-NEXT:    .p2align 4, 0x90
+; CHECK-SKX-NEXT:    .p2align 4
 ; CHECK-SKX-NEXT:  .LBB8_1: # %vector.body
 ; CHECK-SKX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-SKX-NEXT:    vpmovsxbw 16(%rdi,%rcx), %ymm3
@@ -212,7 +212,7 @@ define dso_local i32 @_Z9test_charPcS_i_256(ptr nocapture readonly, ptr nocaptur
 ; CHECK-AVX512-NEXT:    xorl %ecx, %ecx
 ; CHECK-AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; CHECK-AVX512-NEXT:    .p2align 4, 0x90
+; CHECK-AVX512-NEXT:    .p2align 4
 ; CHECK-AVX512-NEXT:  .LBB8_1: # %vector.body
 ; CHECK-AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-AVX512-NEXT:    vpmovsxbw 16(%rdi,%rcx), %ymm3
@@ -247,7 +247,7 @@ define dso_local i32 @_Z9test_charPcS_i_256(ptr nocapture readonly, ptr nocaptur
 ; CHECK-VBMI-NEXT:    xorl %ecx, %ecx
 ; CHECK-VBMI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-VBMI-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; CHECK-VBMI-NEXT:    .p2align 4, 0x90
+; CHECK-VBMI-NEXT:    .p2align 4
 ; CHECK-VBMI-NEXT:  .LBB8_1: # %vector.body
 ; CHECK-VBMI-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-VBMI-NEXT:    vpmovsxbw 16(%rdi,%rcx), %ymm3
@@ -317,7 +317,7 @@ define dso_local i32 @_Z9test_charPcS_i_512(ptr nocapture readonly, ptr nocaptur
 ; CHECK-SKX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-SKX-NEXT:    xorl %ecx, %ecx
 ; CHECK-SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-SKX-NEXT:    .p2align 4, 0x90
+; CHECK-SKX-NEXT:    .p2align 4
 ; CHECK-SKX-NEXT:  .LBB9_1: # %vector.body
 ; CHECK-SKX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-SKX-NEXT:    vpmovsxbw (%rdi,%rcx), %zmm2
@@ -347,7 +347,7 @@ define dso_local i32 @_Z9test_charPcS_i_512(ptr nocapture readonly, ptr nocaptur
 ; CHECK-AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-AVX512-NEXT:    xorl %ecx, %ecx
 ; CHECK-AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX512-NEXT:    .p2align 4, 0x90
+; CHECK-AVX512-NEXT:    .p2align 4
 ; CHECK-AVX512-NEXT:  .LBB9_1: # %vector.body
 ; CHECK-AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-AVX512-NEXT:    vpmovsxbw (%rdi,%rcx), %zmm2
@@ -377,7 +377,7 @@ define dso_local i32 @_Z9test_charPcS_i_512(ptr nocapture readonly, ptr nocaptur
 ; CHECK-VBMI-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-VBMI-NEXT:    xorl %ecx, %ecx
 ; CHECK-VBMI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-VBMI-NEXT:    .p2align 4, 0x90
+; CHECK-VBMI-NEXT:    .p2align 4
 ; CHECK-VBMI-NEXT:  .LBB9_1: # %vector.body
 ; CHECK-VBMI-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-VBMI-NEXT:    vpmovsxbw (%rdi,%rcx), %zmm2
@@ -445,7 +445,7 @@ define dso_local i32 @sad_16i8_256() "min-legal-vector-width"="256" {
 ; CHECK-SKX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-SKX-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; CHECK-SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-SKX-NEXT:    .p2align 4, 0x90
+; CHECK-SKX-NEXT:    .p2align 4
 ; CHECK-SKX-NEXT:  .LBB10_1: # %vector.body
 ; CHECK-SKX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-SKX-NEXT:    vmovdqu a+1024(%rax), %xmm2
@@ -470,7 +470,7 @@ define dso_local i32 @sad_16i8_256() "min-legal-vector-width"="256" {
 ; CHECK-AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-AVX512-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; CHECK-AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX512-NEXT:    .p2align 4, 0x90
+; CHECK-AVX512-NEXT:    .p2align 4
 ; CHECK-AVX512-NEXT:  .LBB10_1: # %vector.body
 ; CHECK-AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-AVX512-NEXT:    vmovdqu a+1024(%rax), %xmm2
@@ -495,7 +495,7 @@ define dso_local i32 @sad_16i8_256() "min-legal-vector-width"="256" {
 ; CHECK-VBMI-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-VBMI-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; CHECK-VBMI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; CHECK-VBMI-NEXT:    .p2align 4, 0x90
+; CHECK-VBMI-NEXT:    .p2align 4
 ; CHECK-VBMI-NEXT:  .LBB10_1: # %vector.body
 ; CHECK-VBMI-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-VBMI-NEXT:    vmovdqu a+1024(%rax), %xmm2
@@ -555,7 +555,7 @@ define dso_local i32 @sad_16i8_512() "min-legal-vector-width"="512" {
 ; CHECK-SKX:       # %bb.0: # %entry
 ; CHECK-SKX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-SKX-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; CHECK-SKX-NEXT:    .p2align 4, 0x90
+; CHECK-SKX-NEXT:    .p2align 4
 ; CHECK-SKX-NEXT:  .LBB11_1: # %vector.body
 ; CHECK-SKX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-SKX-NEXT:    vmovdqu a+1024(%rax), %xmm1
@@ -580,7 +580,7 @@ define dso_local i32 @sad_16i8_512() "min-legal-vector-width"="512" {
 ; CHECK-AVX512:       # %bb.0: # %entry
 ; CHECK-AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-AVX512-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; CHECK-AVX512-NEXT:    .p2align 4, 0x90
+; CHECK-AVX512-NEXT:    .p2align 4
 ; CHECK-AVX512-NEXT:  .LBB11_1: # %vector.body
 ; CHECK-AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-AVX512-NEXT:    vmovdqu a+1024(%rax), %xmm1
@@ -605,7 +605,7 @@ define dso_local i32 @sad_16i8_512() "min-legal-vector-width"="512" {
 ; CHECK-VBMI:       # %bb.0: # %entry
 ; CHECK-VBMI-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-VBMI-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; CHECK-VBMI-NEXT:    .p2align 4, 0x90
+; CHECK-VBMI-NEXT:    .p2align 4
 ; CHECK-VBMI-NEXT:  .LBB11_1: # %vector.body
 ; CHECK-VBMI-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-VBMI-NEXT:    vmovdqu a+1024(%rax), %xmm1

diff --git a/llvm/test/CodeGen/X86/mmx-arith.ll b/llvm/test/CodeGen/X86/mmx-arith.ll
index 5bb3b17fdf4219..73d459ba770264 100644
--- a/llvm/test/CodeGen/X86/mmx-arith.ll
+++ b/llvm/test/CodeGen/X86/mmx-arith.ll
@@ -409,7 +409,7 @@ define <1 x i64> @test3(ptr %a, ptr %b, i32 %count) nounwind {
 ; X86-NEXT:    xorl %ebx, %ebx
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    xorl %edx, %edx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB3_3: # %bb26
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
@@ -442,7 +442,7 @@ define <1 x i64> @test3(ptr %a, ptr %b, i32 %count) nounwind {
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    testl %edx, %edx
 ; X64-NEXT:    je .LBB3_2
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_1: # %bb26
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movslq %ecx, %rcx

diff --git a/llvm/test/CodeGen/X86/negative-stride-fptosi-user.ll b/llvm/test/CodeGen/X86/negative-stride-fptosi-user.ll
index d0d46b5f118365..96080d2d87cdf7 100644
--- a/llvm/test/CodeGen/X86/negative-stride-fptosi-user.ll
+++ b/llvm/test/CodeGen/X86/negative-stride-fptosi-user.ll
@@ -19,7 +19,7 @@ define void @foo(i32 %N) nounwind {
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    movl %edi, %ebx
 ; CHECK-NEXT:    xorl %ebp, %ebp
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    xorps %xmm0, %xmm0

diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index 81dafdffe31163..283c00e17f21aa 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -41,7 +41,7 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_4: ## %bb6
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl (%eax,%esi,2), %ebx
@@ -91,7 +91,7 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    addl %esi, %ecx
 ; CHECK-NEXT:    xorl %esi, %esi
 ; CHECK-NEXT:    xorl %edi, %edi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_9: ## %bb13
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
 ; CHECK-NEXT:    ## Child Loop BB0_10 Depth 2
@@ -102,7 +102,7 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    imull {{[0-9]+}}(%esp), %edi
 ; CHECK-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %edi ## 4-byte Folded Reload
 ; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_10: ## %bb14
 ; CHECK-NEXT:    ## Parent Loop BB0_9 Depth=1
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=2
@@ -150,7 +150,7 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    leal 15(%ebp), %eax
 ; CHECK-NEXT:    andl $-16, %eax
 ; CHECK-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_17: ## %bb23
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subl $4, %esp
@@ -183,7 +183,7 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    andl $-16, %eax
 ; CHECK-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_21: ## %bb30
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subl $4, %esp
@@ -479,7 +479,7 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    movl %esi, %edx
 ; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB1_4: ## %bb6
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl (%eax,%esi,2), %ebx
@@ -524,7 +524,7 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    addl %edx, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    xorl %ebx, %ebx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB1_9: ## %bb13
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
 ; CHECK-NEXT:    ## Child Loop BB1_10 Depth 2
@@ -535,7 +535,7 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    imull {{[0-9]+}}(%esp), %ebx
 ; CHECK-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Folded Reload
 ; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB1_10: ## %bb14
 ; CHECK-NEXT:    ## Parent Loop BB1_9 Depth=1
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=2
@@ -584,7 +584,7 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax ## 4-byte Reload
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    leal (%edx,%eax), %ebp
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB1_17: ## %bb23
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subl $4, %esp
@@ -622,7 +622,7 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
 ; CHECK-NEXT:    andl $-16, %eax
 ; CHECK-NEXT:    movl %eax, (%esp) ## 4-byte Spill
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ebp
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB1_21: ## %bb30
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subl $4, %esp

diff --git a/llvm/test/CodeGen/X86/optimize-max-1.ll b/llvm/test/CodeGen/X86/optimize-max-1.ll
index 4a49dc70d49958..1ac48586bf900b 100644
--- a/llvm/test/CodeGen/X86/optimize-max-1.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-1.ll
@@ -10,7 +10,7 @@ define void @fs(ptr nocapture %p, i64 %n) nounwind {
 ; CHECK-LABEL: fs:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq $0, (%rdi,%rax,8)
@@ -40,7 +40,7 @@ define void @bs(ptr nocapture %p, i64 %n) nounwind {
 ; CHECK-LABEL: bs:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq $0, (%rdi,%rax,8)
@@ -70,7 +70,7 @@ define void @fu(ptr nocapture %p, i64 %n) nounwind {
 ; CHECK-LABEL: fu:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_1: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq $0, (%rdi,%rax,8)
@@ -100,7 +100,7 @@ define void @bu(ptr nocapture %p, i64 %n) nounwind {
 ; CHECK-LABEL: bu:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_1: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq $0, (%rdi,%rax,8)

diff --git a/llvm/test/CodeGen/X86/optimize-max-2.ll b/llvm/test/CodeGen/X86/optimize-max-2.ll
index c83fe63883aae8..3533bfbc61e6d6 100644
--- a/llvm/test/CodeGen/X86/optimize-max-2.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-2.ll
@@ -14,7 +14,7 @@ define void @foo(ptr nocapture %p, i64 %x, i64 %y) nounwind {
 ; CHECK-NEXT:    cmovneq %rdx, %rax
 ; CHECK-NEXT:    cmpq %rsi, %rax
 ; CHECK-NEXT:    cmovbeq %rsi, %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb4
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero

diff --git a/llvm/test/CodeGen/X86/or-address.ll b/llvm/test/CodeGen/X86/or-address.ll
index ef1ea0ef5de952..6e59d3f70c3320 100644
--- a/llvm/test/CodeGen/X86/or-address.ll
+++ b/llvm/test/CodeGen/X86/or-address.ll
@@ -12,7 +12,7 @@ define void @test(ptr nocapture %array, i32 %r0) nounwind ssp noredzone {
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    xorl %r8d, %r8d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %bb
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpb $4, %r8b
@@ -75,7 +75,7 @@ define void @test1(ptr nocapture %array, i32 %r0, i8 signext %k, i8 signext %i0)
 ; CHECK-NEXT:    ## kill: def $ecx killed $ecx def $rcx
 ; CHECK-NEXT:    movb $32, %al
 ; CHECK-NEXT:    xorl %r8d, %r8d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB1_1: ## %for.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpb $4, %cl

diff --git a/llvm/test/CodeGen/X86/overflowing-iv-codegen.ll b/llvm/test/CodeGen/X86/overflowing-iv-codegen.ll
index 3e4539d333474f..e5dd5b23bea969 100644
--- a/llvm/test/CodeGen/X86/overflowing-iv-codegen.ll
+++ b/llvm/test/CodeGen/X86/overflowing-iv-codegen.ll
@@ -6,7 +6,7 @@ define i32 @test_01(ptr %p, i64 %len, i32 %x) {
 ; CHECK-LABEL: test_01:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq $-4, %rdi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subq $1, %rsi
@@ -48,7 +48,7 @@ define i32 @test_02(ptr %p, i64 %len, i32 %x) {
 ; CHECK-LABEL: test_02:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq $-4, %rdi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subq $1, %rsi
@@ -90,7 +90,7 @@ define i32 @test_02_nopoison(ptr %p, i64 %len, i32 %x) {
 ; CHECK-LABEL: test_02_nopoison:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq $-4, %rdi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB2_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subq $1, %rsi
@@ -134,7 +134,7 @@ define i32 @test_03(ptr %p, i64 %len, i32 %x) {
 ; CHECK-LABEL: test_03:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq $-4, %rdi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB3_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subq $1, %rsi
@@ -176,7 +176,7 @@ define i32 @test_03_nopoison(ptr %p, i64 %len, i32 %x) {
 ; CHECK-LABEL: test_03_nopoison:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addq $-4, %rdi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB4_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subq $1, %rsi

diff --git a/llvm/test/CodeGen/X86/patchable-prologue.ll b/llvm/test/CodeGen/X86/patchable-prologue.ll
index aec76a359d267e..a6a6c0d20a1af7 100644
--- a/llvm/test/CodeGen/X86/patchable-prologue.ll
+++ b/llvm/test/CodeGen/X86/patchable-prologue.ll
@@ -12,7 +12,7 @@ define void @f0() "patchable-function"="prologue-short-redirect" {
 ; CHECK-LABEL: _f0{{>?}}:
 ; CHECK-NEXT:  66 90 	nop
 
-; CHECK-ALIGN: 	.p2align	4, 0x90
+; CHECK-ALIGN: 	.p2align	4
 ; CHECK-ALIGN: _f0:
 
 ; X86: f0:
@@ -35,7 +35,7 @@ define void @f1() "patchable-function"="prologue-short-redirect" "frame-pointer"
 ; CHECK-NEXT: 66 90     nop
 ; CHECK-NEXT: 55		pushq	%rbp
 
-; CHECK-ALIGN: 	.p2align	4, 0x90
+; CHECK-ALIGN: 	.p2align	4
 ; CHECK-ALIGN: _f1:
 
 ; X86: f1:
@@ -58,7 +58,7 @@ define void @f2() "patchable-function"="prologue-short-redirect" {
 ; CHECK-LABEL: _f2
 ; CHECK-NEXT: 48 81 ec a8 00 00 00 	subq	$168, %rsp
 
-; CHECK-ALIGN: 	.p2align	4, 0x90
+; CHECK-ALIGN: 	.p2align	4
 ; CHECK-ALIGN: _f2:
 
 ; X86: f2:
@@ -82,7 +82,7 @@ define void @f3() "patchable-function"="prologue-short-redirect" optsize {
 ; CHECK-LABEL: _f3
 ; CHECK-NEXT: 66 90 	nop
 
-; CHECK-ALIGN: 	.p2align	4, 0x90
+; CHECK-ALIGN: 	.p2align	4
 ; CHECK-ALIGN: _f3:
 
 ; X86: f3:
@@ -140,7 +140,7 @@ bb21:
 ; CHECK-LABEL: _emptyfunc
 ; CHECK-NEXT: 0f 0b 	ud2
 
-; CHECK-ALIGN: 	.p2align	4, 0x90
+; CHECK-ALIGN: 	.p2align	4
 ; CHECK-ALIGN: _emptyfunc:
 
 ; X86: emptyfunc:
@@ -166,7 +166,7 @@ define i32 @emptyfunc() "patchable-function"="prologue-short-redirect" {
 ;   } while ((++(*b++)));
 ; }
 
-; CHECK-ALIGN: 	.p2align	4, 0x90
+; CHECK-ALIGN: 	.p2align	4
 ; CHECK-ALIGN: _jmp_to_start:
 
 ; X86: jmp_to_start:

diff --git a/llvm/test/CodeGen/X86/pcsections-atomics.ll b/llvm/test/CodeGen/X86/pcsections-atomics.ll
index cfc9d50763af4a..672ebc1ec7275a 100644
--- a/llvm/test/CodeGen/X86/pcsections-atomics.ll
+++ b/llvm/test/CodeGen/X86/pcsections-atomics.ll
@@ -706,7 +706,7 @@ define void @atomic8_nand_monotonic(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection16:
 ; O1-NEXT:    movzbl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB16_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -727,7 +727,7 @@ define void @atomic8_nand_monotonic(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection16:
 ; O2-NEXT:    movzbl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB16_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -748,7 +748,7 @@ define void @atomic8_nand_monotonic(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection16:
 ; O3-NEXT:    movzbl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB16_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -1044,7 +1044,7 @@ define void @atomic8_nand_acquire(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection27:
 ; O1-NEXT:    movzbl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB23_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -1065,7 +1065,7 @@ define void @atomic8_nand_acquire(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection27:
 ; O2-NEXT:    movzbl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB23_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -1086,7 +1086,7 @@ define void @atomic8_nand_acquire(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection27:
 ; O3-NEXT:    movzbl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB23_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -1382,7 +1382,7 @@ define void @atomic8_nand_release(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection38:
 ; O1-NEXT:    movzbl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB30_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -1403,7 +1403,7 @@ define void @atomic8_nand_release(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection38:
 ; O2-NEXT:    movzbl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB30_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -1424,7 +1424,7 @@ define void @atomic8_nand_release(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection38:
 ; O3-NEXT:    movzbl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB30_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -1720,7 +1720,7 @@ define void @atomic8_nand_acq_rel(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection49:
 ; O1-NEXT:    movzbl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB37_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -1741,7 +1741,7 @@ define void @atomic8_nand_acq_rel(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection49:
 ; O2-NEXT:    movzbl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB37_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -1762,7 +1762,7 @@ define void @atomic8_nand_acq_rel(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection49:
 ; O3-NEXT:    movzbl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB37_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -2058,7 +2058,7 @@ define void @atomic8_nand_seq_cst(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection60:
 ; O1-NEXT:    movzbl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB44_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -2079,7 +2079,7 @@ define void @atomic8_nand_seq_cst(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection60:
 ; O2-NEXT:    movzbl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB44_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -2100,7 +2100,7 @@ define void @atomic8_nand_seq_cst(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection60:
 ; O3-NEXT:    movzbl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB44_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -3151,7 +3151,7 @@ define void @atomic16_nand_monotonic(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection109:
 ; O1-NEXT:    movzwl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB64_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -3176,7 +3176,7 @@ define void @atomic16_nand_monotonic(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection109:
 ; O2-NEXT:    movzwl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB64_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -3201,7 +3201,7 @@ define void @atomic16_nand_monotonic(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection109:
 ; O3-NEXT:    movzwl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB64_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -3505,7 +3505,7 @@ define void @atomic16_nand_acquire(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection122:
 ; O1-NEXT:    movzwl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB71_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -3530,7 +3530,7 @@ define void @atomic16_nand_acquire(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection122:
 ; O2-NEXT:    movzwl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB71_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -3555,7 +3555,7 @@ define void @atomic16_nand_acquire(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection122:
 ; O3-NEXT:    movzwl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB71_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -3859,7 +3859,7 @@ define void @atomic16_nand_release(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection135:
 ; O1-NEXT:    movzwl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB78_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -3884,7 +3884,7 @@ define void @atomic16_nand_release(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection135:
 ; O2-NEXT:    movzwl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB78_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -3909,7 +3909,7 @@ define void @atomic16_nand_release(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection135:
 ; O3-NEXT:    movzwl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB78_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -4213,7 +4213,7 @@ define void @atomic16_nand_acq_rel(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection148:
 ; O1-NEXT:    movzwl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB85_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -4238,7 +4238,7 @@ define void @atomic16_nand_acq_rel(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection148:
 ; O2-NEXT:    movzwl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB85_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -4263,7 +4263,7 @@ define void @atomic16_nand_acq_rel(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection148:
 ; O3-NEXT:    movzwl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB85_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -4567,7 +4567,7 @@ define void @atomic16_nand_seq_cst(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection161:
 ; O1-NEXT:    movzwl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB92_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -4592,7 +4592,7 @@ define void @atomic16_nand_seq_cst(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection161:
 ; O2-NEXT:    movzwl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB92_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -4617,7 +4617,7 @@ define void @atomic16_nand_seq_cst(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection161:
 ; O3-NEXT:    movzwl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB92_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -5623,7 +5623,7 @@ define void @atomic32_nand_monotonic(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection197:
 ; O1-NEXT:    movl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB112_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -5644,7 +5644,7 @@ define void @atomic32_nand_monotonic(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection197:
 ; O2-NEXT:    movl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB112_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -5665,7 +5665,7 @@ define void @atomic32_nand_monotonic(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection197:
 ; O3-NEXT:    movl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB112_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -5961,7 +5961,7 @@ define void @atomic32_nand_acquire(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection208:
 ; O1-NEXT:    movl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB119_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -5982,7 +5982,7 @@ define void @atomic32_nand_acquire(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection208:
 ; O2-NEXT:    movl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB119_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -6003,7 +6003,7 @@ define void @atomic32_nand_acquire(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection208:
 ; O3-NEXT:    movl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB119_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -6299,7 +6299,7 @@ define void @atomic32_nand_release(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection219:
 ; O1-NEXT:    movl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB126_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -6320,7 +6320,7 @@ define void @atomic32_nand_release(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection219:
 ; O2-NEXT:    movl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB126_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -6341,7 +6341,7 @@ define void @atomic32_nand_release(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection219:
 ; O3-NEXT:    movl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB126_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -6637,7 +6637,7 @@ define void @atomic32_nand_acq_rel(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection230:
 ; O1-NEXT:    movl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB133_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -6658,7 +6658,7 @@ define void @atomic32_nand_acq_rel(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection230:
 ; O2-NEXT:    movl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB133_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -6679,7 +6679,7 @@ define void @atomic32_nand_acq_rel(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection230:
 ; O3-NEXT:    movl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB133_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -6975,7 +6975,7 @@ define void @atomic32_nand_seq_cst(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection241:
 ; O1-NEXT:    movl (%rdi), %eax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB140_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -6996,7 +6996,7 @@ define void @atomic32_nand_seq_cst(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection241:
 ; O2-NEXT:    movl (%rdi), %eax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB140_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -7017,7 +7017,7 @@ define void @atomic32_nand_seq_cst(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection241:
 ; O3-NEXT:    movl (%rdi), %eax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB140_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -8145,7 +8145,7 @@ define void @atomic64_nand_monotonic(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection292:
 ; O1-NEXT:    movq (%rdi), %rax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB162_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -8166,7 +8166,7 @@ define void @atomic64_nand_monotonic(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection292:
 ; O2-NEXT:    movq (%rdi), %rax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB162_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -8187,7 +8187,7 @@ define void @atomic64_nand_monotonic(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection292:
 ; O3-NEXT:    movq (%rdi), %rax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB162_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -8486,7 +8486,7 @@ define void @atomic64_nand_acquire(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection303:
 ; O1-NEXT:    movq (%rdi), %rax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB169_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -8507,7 +8507,7 @@ define void @atomic64_nand_acquire(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection303:
 ; O2-NEXT:    movq (%rdi), %rax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB169_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -8528,7 +8528,7 @@ define void @atomic64_nand_acquire(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection303:
 ; O3-NEXT:    movq (%rdi), %rax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB169_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -8827,7 +8827,7 @@ define void @atomic64_nand_release(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection314:
 ; O1-NEXT:    movq (%rdi), %rax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB176_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -8848,7 +8848,7 @@ define void @atomic64_nand_release(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection314:
 ; O2-NEXT:    movq (%rdi), %rax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB176_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -8869,7 +8869,7 @@ define void @atomic64_nand_release(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection314:
 ; O3-NEXT:    movq (%rdi), %rax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB176_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -9168,7 +9168,7 @@ define void @atomic64_nand_acq_rel(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection325:
 ; O1-NEXT:    movq (%rdi), %rax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB183_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -9189,7 +9189,7 @@ define void @atomic64_nand_acq_rel(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection325:
 ; O2-NEXT:    movq (%rdi), %rax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB183_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -9210,7 +9210,7 @@ define void @atomic64_nand_acq_rel(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection325:
 ; O3-NEXT:    movq (%rdi), %rax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB183_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -9509,7 +9509,7 @@ define void @atomic64_nand_seq_cst(ptr %a) {
 ; O1-NEXT:    movq foo(%rip), %rax
 ; O1-NEXT:  .Lpcsection336:
 ; O1-NEXT:    movq (%rdi), %rax
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB190_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ecx
@@ -9530,7 +9530,7 @@ define void @atomic64_nand_seq_cst(ptr %a) {
 ; O2-NEXT:    movq foo(%rip), %rax
 ; O2-NEXT:  .Lpcsection336:
 ; O2-NEXT:    movq (%rdi), %rax
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB190_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ecx
@@ -9551,7 +9551,7 @@ define void @atomic64_nand_seq_cst(ptr %a) {
 ; O3-NEXT:    movq foo(%rip), %rax
 ; O3-NEXT:  .Lpcsection336:
 ; O3-NEXT:    movq (%rdi), %rax
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB190_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ecx
@@ -10561,7 +10561,7 @@ define void @atomic128_store_unordered(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection396:
 ; O1-NEXT:    movl $42, %ebx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB203_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:  .Lpcsection397:
@@ -10588,7 +10588,7 @@ define void @atomic128_store_unordered(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection396:
 ; O2-NEXT:    movl $42, %ebx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB203_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:  .Lpcsection397:
@@ -10615,7 +10615,7 @@ define void @atomic128_store_unordered(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection396:
 ; O3-NEXT:    movl $42, %ebx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB203_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:  .Lpcsection397:
@@ -10688,7 +10688,7 @@ define void @atomic128_store_monotonic(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection402:
 ; O1-NEXT:    movl $42, %ebx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB204_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:  .Lpcsection403:
@@ -10715,7 +10715,7 @@ define void @atomic128_store_monotonic(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection402:
 ; O2-NEXT:    movl $42, %ebx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB204_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:  .Lpcsection403:
@@ -10742,7 +10742,7 @@ define void @atomic128_store_monotonic(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection402:
 ; O3-NEXT:    movl $42, %ebx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB204_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:  .Lpcsection403:
@@ -10815,7 +10815,7 @@ define void @atomic128_store_release(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection408:
 ; O1-NEXT:    movl $42, %ebx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB205_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:  .Lpcsection409:
@@ -10842,7 +10842,7 @@ define void @atomic128_store_release(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection408:
 ; O2-NEXT:    movl $42, %ebx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB205_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:  .Lpcsection409:
@@ -10869,7 +10869,7 @@ define void @atomic128_store_release(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection408:
 ; O3-NEXT:    movl $42, %ebx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB205_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:  .Lpcsection409:
@@ -10942,7 +10942,7 @@ define void @atomic128_store_seq_cst(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection414:
 ; O1-NEXT:    movl $42, %ebx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB206_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:  .Lpcsection415:
@@ -10969,7 +10969,7 @@ define void @atomic128_store_seq_cst(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection414:
 ; O2-NEXT:    movl $42, %ebx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB206_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:  .Lpcsection415:
@@ -10996,7 +10996,7 @@ define void @atomic128_store_seq_cst(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection414:
 ; O3-NEXT:    movl $42, %ebx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB206_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:  .Lpcsection415:
@@ -11108,7 +11108,7 @@ define void @atomic128_xchg_monotonic(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection421:
 ; O1-NEXT:    movl $42, %ebx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB208_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:  .Lpcsection422:
@@ -11135,7 +11135,7 @@ define void @atomic128_xchg_monotonic(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection421:
 ; O2-NEXT:    movl $42, %ebx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB208_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:  .Lpcsection422:
@@ -11162,7 +11162,7 @@ define void @atomic128_xchg_monotonic(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection421:
 ; O3-NEXT:    movl $42, %ebx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB208_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:  .Lpcsection422:
@@ -11233,7 +11233,7 @@ define void @atomic128_add_monotonic(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection426:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB209_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -11262,7 +11262,7 @@ define void @atomic128_add_monotonic(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection426:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB209_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -11291,7 +11291,7 @@ define void @atomic128_add_monotonic(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection426:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB209_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -11366,7 +11366,7 @@ define void @atomic128_sub_monotonic(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection432:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB210_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -11395,7 +11395,7 @@ define void @atomic128_sub_monotonic(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection432:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB210_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -11424,7 +11424,7 @@ define void @atomic128_sub_monotonic(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection432:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB210_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -11501,7 +11501,7 @@ define void @atomic128_and_monotonic(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection438:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB211_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -11529,7 +11529,7 @@ define void @atomic128_and_monotonic(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection438:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB211_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -11557,7 +11557,7 @@ define void @atomic128_and_monotonic(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection438:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB211_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx
@@ -11629,7 +11629,7 @@ define void @atomic128_or_monotonic(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection444:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB212_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -11656,7 +11656,7 @@ define void @atomic128_or_monotonic(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection444:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB212_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -11683,7 +11683,7 @@ define void @atomic128_or_monotonic(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection444:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB212_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -11754,7 +11754,7 @@ define void @atomic128_xor_monotonic(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection449:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB213_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -11781,7 +11781,7 @@ define void @atomic128_xor_monotonic(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection449:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB213_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -11808,7 +11808,7 @@ define void @atomic128_xor_monotonic(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection449:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB213_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -11887,7 +11887,7 @@ define void @atomic128_nand_monotonic(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection455:
 ; O1-NEXT:    movq $-1, %rcx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB214_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -11917,7 +11917,7 @@ define void @atomic128_nand_monotonic(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection455:
 ; O2-NEXT:    movq $-1, %rcx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB214_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -11947,7 +11947,7 @@ define void @atomic128_nand_monotonic(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection455:
 ; O3-NEXT:    movq $-1, %rcx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB214_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx
@@ -12023,7 +12023,7 @@ define void @atomic128_xchg_acquire(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection462:
 ; O1-NEXT:    movl $42, %ebx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB215_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:  .Lpcsection463:
@@ -12050,7 +12050,7 @@ define void @atomic128_xchg_acquire(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection462:
 ; O2-NEXT:    movl $42, %ebx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB215_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:  .Lpcsection463:
@@ -12077,7 +12077,7 @@ define void @atomic128_xchg_acquire(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection462:
 ; O3-NEXT:    movl $42, %ebx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB215_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:  .Lpcsection463:
@@ -12148,7 +12148,7 @@ define void @atomic128_add_acquire(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection467:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB216_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -12177,7 +12177,7 @@ define void @atomic128_add_acquire(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection467:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB216_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -12206,7 +12206,7 @@ define void @atomic128_add_acquire(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection467:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB216_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -12281,7 +12281,7 @@ define void @atomic128_sub_acquire(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection473:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB217_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -12310,7 +12310,7 @@ define void @atomic128_sub_acquire(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection473:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB217_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -12339,7 +12339,7 @@ define void @atomic128_sub_acquire(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection473:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB217_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -12416,7 +12416,7 @@ define void @atomic128_and_acquire(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection479:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB218_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -12444,7 +12444,7 @@ define void @atomic128_and_acquire(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection479:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB218_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -12472,7 +12472,7 @@ define void @atomic128_and_acquire(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection479:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB218_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx
@@ -12544,7 +12544,7 @@ define void @atomic128_or_acquire(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection485:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB219_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -12571,7 +12571,7 @@ define void @atomic128_or_acquire(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection485:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB219_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -12598,7 +12598,7 @@ define void @atomic128_or_acquire(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection485:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB219_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -12669,7 +12669,7 @@ define void @atomic128_xor_acquire(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection490:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB220_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -12696,7 +12696,7 @@ define void @atomic128_xor_acquire(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection490:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB220_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -12723,7 +12723,7 @@ define void @atomic128_xor_acquire(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection490:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB220_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -12802,7 +12802,7 @@ define void @atomic128_nand_acquire(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection496:
 ; O1-NEXT:    movq $-1, %rcx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB221_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -12832,7 +12832,7 @@ define void @atomic128_nand_acquire(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection496:
 ; O2-NEXT:    movq $-1, %rcx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB221_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -12862,7 +12862,7 @@ define void @atomic128_nand_acquire(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection496:
 ; O3-NEXT:    movq $-1, %rcx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB221_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx
@@ -12938,7 +12938,7 @@ define void @atomic128_xchg_release(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection503:
 ; O1-NEXT:    movl $42, %ebx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB222_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:  .Lpcsection504:
@@ -12965,7 +12965,7 @@ define void @atomic128_xchg_release(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection503:
 ; O2-NEXT:    movl $42, %ebx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB222_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:  .Lpcsection504:
@@ -12992,7 +12992,7 @@ define void @atomic128_xchg_release(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection503:
 ; O3-NEXT:    movl $42, %ebx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB222_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:  .Lpcsection504:
@@ -13062,7 +13062,7 @@ define void @atomic128_add_release(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection508:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB223_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -13091,7 +13091,7 @@ define void @atomic128_add_release(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection508:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB223_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -13120,7 +13120,7 @@ define void @atomic128_add_release(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection508:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB223_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -13195,7 +13195,7 @@ define void @atomic128_sub_release(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection514:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB224_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -13224,7 +13224,7 @@ define void @atomic128_sub_release(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection514:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB224_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -13253,7 +13253,7 @@ define void @atomic128_sub_release(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection514:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB224_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -13330,7 +13330,7 @@ define void @atomic128_and_release(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection520:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB225_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -13358,7 +13358,7 @@ define void @atomic128_and_release(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection520:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB225_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -13386,7 +13386,7 @@ define void @atomic128_and_release(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection520:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB225_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx
@@ -13458,7 +13458,7 @@ define void @atomic128_or_release(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection526:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB226_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -13485,7 +13485,7 @@ define void @atomic128_or_release(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection526:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB226_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -13512,7 +13512,7 @@ define void @atomic128_or_release(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection526:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB226_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -13583,7 +13583,7 @@ define void @atomic128_xor_release(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection531:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB227_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -13610,7 +13610,7 @@ define void @atomic128_xor_release(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection531:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB227_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -13637,7 +13637,7 @@ define void @atomic128_xor_release(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection531:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB227_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -13716,7 +13716,7 @@ define void @atomic128_nand_release(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection537:
 ; O1-NEXT:    movq $-1, %rcx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB228_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -13746,7 +13746,7 @@ define void @atomic128_nand_release(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection537:
 ; O2-NEXT:    movq $-1, %rcx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB228_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -13776,7 +13776,7 @@ define void @atomic128_nand_release(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection537:
 ; O3-NEXT:    movq $-1, %rcx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB228_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx
@@ -13852,7 +13852,7 @@ define void @atomic128_xchg_acq_rel(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection544:
 ; O1-NEXT:    movl $42, %ebx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB229_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:  .Lpcsection545:
@@ -13879,7 +13879,7 @@ define void @atomic128_xchg_acq_rel(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection544:
 ; O2-NEXT:    movl $42, %ebx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB229_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:  .Lpcsection545:
@@ -13906,7 +13906,7 @@ define void @atomic128_xchg_acq_rel(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection544:
 ; O3-NEXT:    movl $42, %ebx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB229_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:  .Lpcsection545:
@@ -13977,7 +13977,7 @@ define void @atomic128_add_acq_rel(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection549:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB230_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -14006,7 +14006,7 @@ define void @atomic128_add_acq_rel(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection549:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB230_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -14035,7 +14035,7 @@ define void @atomic128_add_acq_rel(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection549:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB230_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -14110,7 +14110,7 @@ define void @atomic128_sub_acq_rel(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection555:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB231_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -14139,7 +14139,7 @@ define void @atomic128_sub_acq_rel(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection555:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB231_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -14168,7 +14168,7 @@ define void @atomic128_sub_acq_rel(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection555:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB231_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -14245,7 +14245,7 @@ define void @atomic128_and_acq_rel(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection561:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB232_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -14273,7 +14273,7 @@ define void @atomic128_and_acq_rel(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection561:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB232_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -14301,7 +14301,7 @@ define void @atomic128_and_acq_rel(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection561:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB232_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx
@@ -14373,7 +14373,7 @@ define void @atomic128_or_acq_rel(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection567:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB233_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -14400,7 +14400,7 @@ define void @atomic128_or_acq_rel(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection567:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB233_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -14427,7 +14427,7 @@ define void @atomic128_or_acq_rel(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection567:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB233_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -14498,7 +14498,7 @@ define void @atomic128_xor_acq_rel(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection572:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB234_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -14525,7 +14525,7 @@ define void @atomic128_xor_acq_rel(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection572:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB234_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -14552,7 +14552,7 @@ define void @atomic128_xor_acq_rel(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection572:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB234_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -14631,7 +14631,7 @@ define void @atomic128_nand_acq_rel(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection578:
 ; O1-NEXT:    movq $-1, %rcx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB235_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -14661,7 +14661,7 @@ define void @atomic128_nand_acq_rel(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection578:
 ; O2-NEXT:    movq $-1, %rcx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB235_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -14691,7 +14691,7 @@ define void @atomic128_nand_acq_rel(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection578:
 ; O3-NEXT:    movq $-1, %rcx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB235_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx
@@ -14767,7 +14767,7 @@ define void @atomic128_xchg_seq_cst(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection585:
 ; O1-NEXT:    movl $42, %ebx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB236_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:  .Lpcsection586:
@@ -14794,7 +14794,7 @@ define void @atomic128_xchg_seq_cst(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection585:
 ; O2-NEXT:    movl $42, %ebx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB236_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:  .Lpcsection586:
@@ -14821,7 +14821,7 @@ define void @atomic128_xchg_seq_cst(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection585:
 ; O3-NEXT:    movl $42, %ebx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB236_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:  .Lpcsection586:
@@ -14892,7 +14892,7 @@ define void @atomic128_add_seq_cst(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection590:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB237_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -14921,7 +14921,7 @@ define void @atomic128_add_seq_cst(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection590:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB237_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -14950,7 +14950,7 @@ define void @atomic128_add_seq_cst(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection590:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB237_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -15025,7 +15025,7 @@ define void @atomic128_sub_seq_cst(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection596:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB238_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -15054,7 +15054,7 @@ define void @atomic128_sub_seq_cst(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection596:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB238_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -15083,7 +15083,7 @@ define void @atomic128_sub_seq_cst(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection596:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB238_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -15160,7 +15160,7 @@ define void @atomic128_and_seq_cst(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection602:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB239_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -15188,7 +15188,7 @@ define void @atomic128_and_seq_cst(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection602:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB239_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -15216,7 +15216,7 @@ define void @atomic128_and_seq_cst(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection602:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB239_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx
@@ -15288,7 +15288,7 @@ define void @atomic128_or_seq_cst(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection608:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB240_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -15315,7 +15315,7 @@ define void @atomic128_or_seq_cst(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection608:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB240_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -15342,7 +15342,7 @@ define void @atomic128_or_seq_cst(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection608:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB240_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -15413,7 +15413,7 @@ define void @atomic128_xor_seq_cst(ptr %a) {
 ; O1-NEXT:    movq (%rdi), %rax
 ; O1-NEXT:  .Lpcsection613:
 ; O1-NEXT:    movq 8(%rdi), %rdx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB241_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movq %rax, %rbx
@@ -15440,7 +15440,7 @@ define void @atomic128_xor_seq_cst(ptr %a) {
 ; O2-NEXT:    movq (%rdi), %rax
 ; O2-NEXT:  .Lpcsection613:
 ; O2-NEXT:    movq 8(%rdi), %rdx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB241_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movq %rax, %rbx
@@ -15467,7 +15467,7 @@ define void @atomic128_xor_seq_cst(ptr %a) {
 ; O3-NEXT:    movq (%rdi), %rax
 ; O3-NEXT:  .Lpcsection613:
 ; O3-NEXT:    movq 8(%rdi), %rdx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB241_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movq %rax, %rbx
@@ -15546,7 +15546,7 @@ define void @atomic128_nand_seq_cst(ptr %a) {
 ; O1-NEXT:    movq 8(%rdi), %rdx
 ; O1-NEXT:  .Lpcsection619:
 ; O1-NEXT:    movq $-1, %rcx
-; O1-NEXT:    .p2align 4, 0x90
+; O1-NEXT:    .p2align 4
 ; O1-NEXT:  .LBB242_1: # %atomicrmw.start
 ; O1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O1-NEXT:    movl %eax, %ebx
@@ -15576,7 +15576,7 @@ define void @atomic128_nand_seq_cst(ptr %a) {
 ; O2-NEXT:    movq 8(%rdi), %rdx
 ; O2-NEXT:  .Lpcsection619:
 ; O2-NEXT:    movq $-1, %rcx
-; O2-NEXT:    .p2align 4, 0x90
+; O2-NEXT:    .p2align 4
 ; O2-NEXT:  .LBB242_1: # %atomicrmw.start
 ; O2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O2-NEXT:    movl %eax, %ebx
@@ -15606,7 +15606,7 @@ define void @atomic128_nand_seq_cst(ptr %a) {
 ; O3-NEXT:    movq 8(%rdi), %rdx
 ; O3-NEXT:  .Lpcsection619:
 ; O3-NEXT:    movq $-1, %rcx
-; O3-NEXT:    .p2align 4, 0x90
+; O3-NEXT:    .p2align 4
 ; O3-NEXT:  .LBB242_1: # %atomicrmw.start
 ; O3-NEXT:    # =>This Inner Loop Header: Depth=1
 ; O3-NEXT:    movl %eax, %ebx

diff  --git a/llvm/test/CodeGen/X86/peep-test-0.ll b/llvm/test/CodeGen/X86/peep-test-0.ll
index 71dadf7edea3af..3a0ef86b64ee31 100644
--- a/llvm/test/CodeGen/X86/peep-test-0.ll
+++ b/llvm/test/CodeGen/X86/peep-test-0.ll
@@ -8,7 +8,7 @@ define void @loop(i64 %n, ptr nocapture %d) nounwind {
 ; CHECK-NEXT:    shlq $4, %rax
 ; CHECK-NEXT:    addq %rsi, %rax
 ; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [3.0E+0,0.0E+0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero

diff  --git a/llvm/test/CodeGen/X86/peep-test-1.ll b/llvm/test/CodeGen/X86/peep-test-1.ll
index 0d86bd4ec8c45b..730a7fdccbd7cc 100644
--- a/llvm/test/CodeGen/X86/peep-test-1.ll
+++ b/llvm/test/CodeGen/X86/peep-test-1.ll
@@ -6,7 +6,7 @@ define void @foo(i32 %n, ptr nocapture %p) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    fldl (%eax,%ecx,8)

diff  --git a/llvm/test/CodeGen/X86/peephole-copy.ll b/llvm/test/CodeGen/X86/peephole-copy.ll
index 69112d0a1f471c..e7ca7c5cc63938 100644
--- a/llvm/test/CodeGen/X86/peephole-copy.ll
+++ b/llvm/test/CodeGen/X86/peephole-copy.ll
@@ -17,7 +17,7 @@ define void @foo(ptr %p1, ptr %p2, ptr %p3, ptr %p4) {
 ; CHECK-NEXT:    movl (%rax), %eax
 ; CHECK-NEXT:    shll %cl, %eax
 ; CHECK-NEXT:    movl %r8d, (%rdx)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %loop2.header
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl $5, %edi

diff  --git a/llvm/test/CodeGen/X86/pic-load-remat.ll b/llvm/test/CodeGen/X86/pic-load-remat.ll
index 7737093d2ba076..2587b0379a0f76 100644
--- a/llvm/test/CodeGen/X86/pic-load-remat.ll
+++ b/llvm/test/CodeGen/X86/pic-load-remat.ll
@@ -12,7 +12,7 @@ define void @f() nounwind  {
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [21183,21183,21183,21183,21183,21183,21183,21183]
 ; CHECK-NEXT:    paddsw %xmm0, %xmm0
 ; CHECK-NEXT:    paddw %xmm1, %xmm0
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %bb
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movdqa %xmm0, 0

diff  --git a/llvm/test/CodeGen/X86/postalloc-coalescing.ll b/llvm/test/CodeGen/X86/postalloc-coalescing.ll
index 68266fc4e158a3..7430d7a9baf260 100644
--- a/llvm/test/CodeGen/X86/postalloc-coalescing.ll
+++ b/llvm/test/CodeGen/X86/postalloc-coalescing.ll
@@ -7,7 +7,7 @@ define fastcc i32 @_Z18yy_get_next_bufferv() nounwind {
 ; CHECK-NEXT:    movl $42, %eax
 ; CHECK-NEXT:    cmpl $-1, %eax
 ; CHECK-NEXT:    je .LBB0_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb116
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movb %al, 0

diff  --git a/llvm/test/CodeGen/X86/pr14314.ll b/llvm/test/CodeGen/X86/pr14314.ll
index c7dabf1f661cf3..bb1608e3cd9b29 100644
--- a/llvm/test/CodeGen/X86/pr14314.ll
+++ b/llvm/test/CodeGen/X86/pr14314.ll
@@ -13,7 +13,7 @@ define i64 @atomicSub(ptr %a, i64 %b) nounwind {
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ebp
 ; CHECK-NEXT:    movl (%ebp), %eax
 ; CHECK-NEXT:    movl 4(%ebp), %edx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %eax, %ebx

diff  --git a/llvm/test/CodeGen/X86/pr22338.ll b/llvm/test/CodeGen/X86/pr22338.ll
index 9ab5248a94602d..bf3967ca8fedee 100644
--- a/llvm/test/CodeGen/X86/pr22338.ll
+++ b/llvm/test/CodeGen/X86/pr22338.ll
@@ -21,7 +21,7 @@ define i32 @fn(i32 %a0, i32 %a1) {
 ; X86-NEXT:    addb %dl, %dl
 ; X86-NEXT:    movl %edx, %ecx
 ; X86-NEXT:    shll %cl, %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_1: # %bb1
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    testl %ebx, %ebx
@@ -46,7 +46,7 @@ define i32 @fn(i32 %a0, i32 %a1) {
 ; X64-NEXT:    addb %dl, %dl
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shll %cl, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_1: # %bb1
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    testl %esi, %esi

diff  --git a/llvm/test/CodeGen/X86/pr30562.ll b/llvm/test/CodeGen/X86/pr30562.ll
index 2745514cff8d42..e05a8672b1f811 100644
--- a/llvm/test/CodeGen/X86/pr30562.ll
+++ b/llvm/test/CodeGen/X86/pr30562.ll
@@ -10,7 +10,7 @@ define i32 @foo(ptr nocapture %perm, i32 %n) {
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    andl $1, %ecx
 ; CHECK-NEXT:    movaps {{.*#+}} xmm1 = [2,3]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)

diff  --git a/llvm/test/CodeGen/X86/pr32108.ll b/llvm/test/CodeGen/X86/pr32108.ll
index dc14746440ae5f..32f8a7657a3f11 100644
--- a/llvm/test/CodeGen/X86/pr32108.ll
+++ b/llvm/test/CodeGen/X86/pr32108.ll
@@ -4,7 +4,7 @@
 define void @pr32108() {
 ; CHECK-LABEL: pr32108:
 ; CHECK:       # %bb.0: # %BB
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %CF244
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_1

diff  --git a/llvm/test/CodeGen/X86/pr33290.ll b/llvm/test/CodeGen/X86/pr33290.ll
index 0e6f2e92f14008..3991080d75bc00 100644
--- a/llvm/test/CodeGen/X86/pr33290.ll
+++ b/llvm/test/CodeGen/X86/pr33290.ll
@@ -10,7 +10,7 @@ define dso_local void @e() {
 ; X86-LABEL: e:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl b, %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_1: # %for.cond
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movzbl c, %ecx
@@ -22,7 +22,7 @@ define dso_local void @e() {
 ; X64-LABEL: e:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq b(%rip), %rax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_1: # %for.cond
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movzbl c(%rip), %ecx

diff  --git a/llvm/test/CodeGen/X86/pr33747.ll b/llvm/test/CodeGen/X86/pr33747.ll
index c8ba2b2e3a7909..f6df42450e1879 100644
--- a/llvm/test/CodeGen/X86/pr33747.ll
+++ b/llvm/test/CodeGen/X86/pr33747.ll
@@ -12,10 +12,10 @@ define void @PR33747(ptr nocapture) {
 ; CHECK-NEXT:    setne %al
 ; CHECK-NEXT:    testb %cl, %al
 ; CHECK-NEXT:    je .LBB0_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_2
   %2 = getelementptr inbounds i32, ptr %0, i64 6

diff  --git a/llvm/test/CodeGen/X86/pr37916.ll b/llvm/test/CodeGen/X86/pr37916.ll
index 2f8f0fdae1848c..e6639a11ca5ea4 100644
--- a/llvm/test/CodeGen/X86/pr37916.ll
+++ b/llvm/test/CodeGen/X86/pr37916.ll
@@ -7,7 +7,7 @@
 define void @fn1() local_unnamed_addr {
 ; CHECK-LABEL: fn1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %if.end
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl a+4, %eax

diff  --git a/llvm/test/CodeGen/X86/pr38185.ll b/llvm/test/CodeGen/X86/pr38185.ll
index e062302d4f0b56..d5591c50738fa1 100644
--- a/llvm/test/CodeGen/X86/pr38185.ll
+++ b/llvm/test/CodeGen/X86/pr38185.ll
@@ -5,7 +5,7 @@ define void @foo(ptr %a, ptr %b, ptr noalias %c, i64 %s) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq $0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rax

diff  --git a/llvm/test/CodeGen/X86/pr38217.ll b/llvm/test/CodeGen/X86/pr38217.ll
index f1538f3598aec6..02d2d9e02721a1 100644
--- a/llvm/test/CodeGen/X86/pr38217.ll
+++ b/llvm/test/CodeGen/X86/pr38217.ll
@@ -11,7 +11,7 @@ define dso_local void @_Z12d2s_bufferedmPc(i64 %arg, ptr nocapture %arg1) {
 ; CHECK-NEXT:  # %bb.1: # %bb2.preheader
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    movabsq $3777893186295716171, %r8 # imm = 0x346DC5D63886594B
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rdi, %rax

diff  --git a/llvm/test/CodeGen/X86/pr38539.ll b/llvm/test/CodeGen/X86/pr38539.ll
index fb169a3777fb82..b633c28a214b70 100644
--- a/llvm/test/CodeGen/X86/pr38539.ll
+++ b/llvm/test/CodeGen/X86/pr38539.ll
@@ -168,7 +168,7 @@ define void @f() nounwind {
 ; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_12: # %udiv-do-while
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    movl %ebx, %esi

diff  --git a/llvm/test/CodeGen/X86/pr38795.ll b/llvm/test/CodeGen/X86/pr38795.ll
index f64c70e8fc79a4..c3c96e8228797a 100644
--- a/llvm/test/CodeGen/X86/pr38795.ll
+++ b/llvm/test/CodeGen/X86/pr38795.ll
@@ -30,7 +30,7 @@ define dso_local void @fn() {
 ; CHECK-NEXT:    # kill: killed $al
 ; CHECK-NEXT:    # implicit-def: $ebp
 ; CHECK-NEXT:    jmp .LBB0_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_15: # %for.inc
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
@@ -49,7 +49,7 @@ define dso_local void @fn() {
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    jne .LBB0_10
 ; CHECK-NEXT:    jmp .LBB0_6
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %if.end
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl %ecx, %eax
@@ -73,7 +73,7 @@ define dso_local void @fn() {
 ; CHECK-NEXT:    # implicit-def: $eax
 ; CHECK-NEXT:    movb %dh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
 ; CHECK-NEXT:    jne .LBB0_15
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  # %bb.5: # %for.cond35
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    testl %edi, %edi
@@ -98,16 +98,16 @@ define dso_local void @fn() {
 ; CHECK-NEXT:    # implicit-def: $dl
 ; CHECK-NEXT:    # implicit-def: $ebp
 ; CHECK-NEXT:    jmp .LBB0_19
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_7: # %if.end21
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    # implicit-def: $ebp
 ; CHECK-NEXT:    jmp .LBB0_8
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_6: # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    xorl %edi, %edi
 ; CHECK-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %dh # 1-byte Reload
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_19: # %for.cond47
 ; CHECK-NEXT:    # Parent Loop BB0_1 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -131,7 +131,7 @@ define dso_local void @fn() {
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    xorl %ebp, %ebp
 ; CHECK-NEXT:    jmp .LBB0_15
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_9: # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    # implicit-def: $eax
 ; CHECK-NEXT:    testb %bl, %bl
@@ -264,7 +264,7 @@ define void @verifier_error_reduced_issue38788(i1 %cmp11) {
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    xorl %ebx, %ebx
 ; CHECK-NEXT:    jmp .LBB1_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_7: # %if.end26
 ; CHECK-NEXT:    # in Loop: Header=BB1_1 Depth=1
 ; CHECK-NEXT:    movl %ecx, %edx
@@ -280,7 +280,7 @@ define void @verifier_error_reduced_issue38788(i1 %cmp11) {
 ; CHECK-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    jmp .LBB1_5
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_3: # %if.end
 ; CHECK-NEXT:    # in Loop: Header=BB1_1 Depth=1
 ; CHECK-NEXT:    testb $1, %al
@@ -293,7 +293,7 @@ define void @verifier_error_reduced_issue38788(i1 %cmp11) {
 ; CHECK-NEXT:    movl $0, %ebx
 ; CHECK-NEXT:    jne .LBB1_8
 ; CHECK-NEXT:    jmp .LBB1_5
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_4: # in Loop: Header=BB1_1 Depth=1
 ; CHECK-NEXT:    movl %ebx, %eax
 ; CHECK-NEXT:    xorl %ebx, %ebx

diff  --git a/llvm/test/CodeGen/X86/pr42565.ll b/llvm/test/CodeGen/X86/pr42565.ll
index 33acf02135eab7..10071064057b88 100644
--- a/llvm/test/CodeGen/X86/pr42565.ll
+++ b/llvm/test/CodeGen/X86/pr42565.ll
@@ -7,7 +7,7 @@ define void @HUF_writeCTable_wksp()  {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl $2, %eax
 ; CHECK-NEXT:    movb $-2, %cl
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    leal 1(%rcx), %edx

diff  --git a/llvm/test/CodeGen/X86/pr42909.ll b/llvm/test/CodeGen/X86/pr42909.ll
index 47b85e144919db..8ff7caf4638541 100644
--- a/llvm/test/CodeGen/X86/pr42909.ll
+++ b/llvm/test/CodeGen/X86/pr42909.ll
@@ -4,7 +4,7 @@
 define void @autogen_SD31033(ptr %a0) {
 ; CHECK-LABEL: autogen_SD31033:
 ; CHECK:       # %bb.0: # %BB
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %CF
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_1

diff  --git a/llvm/test/CodeGen/X86/pr43529.ll b/llvm/test/CodeGen/X86/pr43529.ll
index 3e9976c90449e8..ac5f7c84f82c39 100644
--- a/llvm/test/CodeGen/X86/pr43529.ll
+++ b/llvm/test/CodeGen/X86/pr43529.ll
@@ -12,7 +12,7 @@ define i32 @a() nounwind {
 ; CHECK-NEXT:    calll d at PLT
 ; CHECK-NEXT:    cmpl $a, %esi
 ; CHECK-NEXT:    jbe .LBB0_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %for.cond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_1

diff  --git a/llvm/test/CodeGen/X86/pr44140.ll b/llvm/test/CodeGen/X86/pr44140.ll
index 9455895a99b873..02525d73a786dd 100644
--- a/llvm/test/CodeGen/X86/pr44140.ll
+++ b/llvm/test/CodeGen/X86/pr44140.ll
@@ -17,7 +17,7 @@ define i32 @main() {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 592
 ; CHECK-NEXT:    vmovaps {{.*#+}} xmm6 = [1010101010101010101,2020202020202020202]
 ; CHECK-NEXT:    xorl %esi, %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %fake-loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups {{[0-9]+}}(%rsp), %ymm0

diff  --git a/llvm/test/CodeGen/X86/pr44412.ll b/llvm/test/CodeGen/X86/pr44412.ll
index 67579a5bb7c52d..546dbcc1561299 100644
--- a/llvm/test/CodeGen/X86/pr44412.ll
+++ b/llvm/test/CodeGen/X86/pr44412.ll
@@ -10,7 +10,7 @@ define void @bar(i32 %0, i32 %1) nounwind {
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movl %edi, %ebx
 ; CHECK-NEXT:    decl %ebx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %ebx, %edi
 ; CHECK-NEXT:    callq foo at PLT
@@ -43,7 +43,7 @@ define void @baz(i32 %0, i32 %1) nounwind {
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movl %edi, %ebx
 ; CHECK-NEXT:    decl %ebx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_2: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %ebx, %edi
 ; CHECK-NEXT:    callq foo at PLT

diff  --git a/llvm/test/CodeGen/X86/pr47874.ll b/llvm/test/CodeGen/X86/pr47874.ll
index 2da3585357a1cc..ce3aaca59fae8c 100644
--- a/llvm/test/CodeGen/X86/pr47874.ll
+++ b/llvm/test/CodeGen/X86/pr47874.ll
@@ -11,7 +11,7 @@ define void @a(ptr %arg, i32 %arg1) {
 ; SSE2-NEXT:  ## %bb.1: ## %bb2
 ; SSE2-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
 ; SSE2-NEXT:    movl %esi, %eax
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  LBB0_2: ## %bb6
 ; SSE2-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    ## InlineAsm Start
@@ -32,7 +32,7 @@ define void @a(ptr %arg, i32 %arg1) {
 ; AVX-NEXT:  ## %bb.1: ## %bb2
 ; AVX-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
 ; AVX-NEXT:    movl %esi, %eax
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  LBB0_2: ## %bb6
 ; AVX-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    ## InlineAsm Start
@@ -77,7 +77,7 @@ define void @b(ptr %arg, i64 %arg1) {
 ; SSE2-NEXT:    jle LBB1_3
 ; SSE2-NEXT:  ## %bb.1: ## %bb2
 ; SSE2-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  LBB1_2: ## %bb6
 ; SSE2-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    ## InlineAsm Start
@@ -97,7 +97,7 @@ define void @b(ptr %arg, i64 %arg1) {
 ; AVX-NEXT:    jle LBB1_3
 ; AVX-NEXT:  ## %bb.1: ## %bb2
 ; AVX-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  LBB1_2: ## %bb6
 ; AVX-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    ## InlineAsm Start
@@ -142,7 +142,7 @@ define void @c(ptr %arg, ptr %arg1, i32 %arg2) {
 ; SSE2-NEXT:  ## %bb.1: ## %bb4
 ; SSE2-NEXT:    movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
 ; SSE2-NEXT:    movl %edx, %eax
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  LBB2_2: ## %bb8
 ; SSE2-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    ## InlineAsm Start
@@ -163,7 +163,7 @@ define void @c(ptr %arg, ptr %arg1, i32 %arg2) {
 ; AVX-NEXT:  ## %bb.1: ## %bb4
 ; AVX-NEXT:    movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
 ; AVX-NEXT:    movl %edx, %eax
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  LBB2_2: ## %bb8
 ; AVX-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    ## InlineAsm Start
@@ -210,7 +210,7 @@ define void @d(ptr %arg, ptr %arg1, i64 %arg2) {
 ; SSE2-NEXT:    jle LBB3_3
 ; SSE2-NEXT:  ## %bb.1: ## %bb3
 ; SSE2-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  LBB3_2: ## %bb6
 ; SSE2-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    ## InlineAsm Start
@@ -230,7 +230,7 @@ define void @d(ptr %arg, ptr %arg1, i64 %arg2) {
 ; AVX-NEXT:    jle LBB3_3
 ; AVX-NEXT:  ## %bb.1: ## %bb3
 ; AVX-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  LBB3_2: ## %bb6
 ; AVX-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    ## InlineAsm Start

diff  --git a/llvm/test/CodeGen/X86/pr49393.ll b/llvm/test/CodeGen/X86/pr49393.ll
index 3fb6a82f3fbb2c..512177246b5d99 100644
--- a/llvm/test/CodeGen/X86/pr49393.ll
+++ b/llvm/test/CodeGen/X86/pr49393.ll
@@ -9,7 +9,7 @@ define void @f() {
 ; CHECK-NEXT:    movapd %xmm0, %xmm1
 ; CHECK-NEXT:    mulsd %xmm0, %xmm1
 ; CHECK-NEXT:    subsd %xmm0, %xmm1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %for.cond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    imull %eax, %eax

diff  --git a/llvm/test/CodeGen/X86/pr49451.ll b/llvm/test/CodeGen/X86/pr49451.ll
index cc4607c5c55f3c..173c41140ebef1 100644
--- a/llvm/test/CodeGen/X86/pr49451.ll
+++ b/llvm/test/CodeGen/X86/pr49451.ll
@@ -15,7 +15,7 @@ define void @func_6(i8 %uc_8, i64 %uli_10) nounwind {
 ; X86-NEXT:    xorl %edx, %edx
 ; X86-NEXT:    xorl %ebx, %ebx
 ; X86-NEXT:    # implicit-def: $si
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB0_1: # %for.body612
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    testb %dl, %dl
@@ -37,7 +37,7 @@ define void @func_6(i8 %uc_8, i64 %uli_10) nounwind {
 ; X64-NEXT:    movl $23090, %eax # imm = 0x5A32
 ; X64-NEXT:    xorl %ecx, %ecx
 ; X64-NEXT:    # implicit-def: $dx
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_1: # %for.body612
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    testb %cl, %cl

diff  --git a/llvm/test/CodeGen/X86/pr50374.ll b/llvm/test/CodeGen/X86/pr50374.ll
index 0db038f7d8595f..edb52c6e27bb6b 100644
--- a/llvm/test/CodeGen/X86/pr50374.ll
+++ b/llvm/test/CodeGen/X86/pr50374.ll
@@ -4,7 +4,7 @@
 define void @PR50374() {
 ; CHECK-LABEL: PR50374:
 ; CHECK:       # %bb.0: # %while.84.body.preheader
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %vector.body1999
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_1

diff  --git a/llvm/test/CodeGen/X86/pr50782.ll b/llvm/test/CodeGen/X86/pr50782.ll
index c6dab7dc26fd58..591a33446d4e30 100644
--- a/llvm/test/CodeGen/X86/pr50782.ll
+++ b/llvm/test/CodeGen/X86/pr50782.ll
@@ -52,7 +52,7 @@ define void @h(float %i) {
 ; CHECK-NEXT:    fildl 12(%esi)
 ; CHECK-NEXT:    movl _c, %edx
 ; CHECK-NEXT:    jmp LBB0_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_5: # %for.inc
 ; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
 ; CHECK-NEXT:    fxch %st(5)

diff  --git a/llvm/test/CodeGen/X86/pr51371.ll b/llvm/test/CodeGen/X86/pr51371.ll
index 9b63269e962070..8952f0967cde0e 100644
--- a/llvm/test/CodeGen/X86/pr51371.ll
+++ b/llvm/test/CodeGen/X86/pr51371.ll
@@ -9,7 +9,7 @@ define void @pmuldq(ptr nocapture %0, i32 %1, i64 %2) {
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    movd %esi, %xmm0
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movdqa (%rdi), %xmm1
 ; CHECK-NEXT:    pmuldq %xmm0, %xmm1
@@ -51,7 +51,7 @@ define void @pmuludq(ptr nocapture %0, i32 %1, i64 %2) {
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    movd %esi, %xmm0
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_2: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movdqa (%rdi), %xmm1
 ; CHECK-NEXT:    pmuludq %xmm0, %xmm1

diff  --git a/llvm/test/CodeGen/X86/pr5145.ll b/llvm/test/CodeGen/X86/pr5145.ll
index 16398cb57061ff..5cb0ac9bc9821a 100644
--- a/llvm/test/CodeGen/X86/pr5145.ll
+++ b/llvm/test/CodeGen/X86/pr5145.ll
@@ -6,7 +6,7 @@ define void @atomic_maxmin_i8() {
 ; CHECK-LABEL: atomic_maxmin_i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzbl sc8(%rip), %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %atomicrmw.start14
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpb $6, %al
@@ -18,7 +18,7 @@ define void @atomic_maxmin_i8() {
 ; CHECK-NEXT:    jne .LBB0_1
 ; CHECK-NEXT:  # %bb.2: # %atomicrmw.end13
 ; CHECK-NEXT:    movzbl sc8(%rip), %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %atomicrmw.start8
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpb $7, %al
@@ -30,7 +30,7 @@ define void @atomic_maxmin_i8() {
 ; CHECK-NEXT:    jne .LBB0_3
 ; CHECK-NEXT:  # %bb.4: # %atomicrmw.end7
 ; CHECK-NEXT:    movzbl sc8(%rip), %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_5: # %atomicrmw.start2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpb $8, %al
@@ -42,7 +42,7 @@ define void @atomic_maxmin_i8() {
 ; CHECK-NEXT:    jne .LBB0_5
 ; CHECK-NEXT:  # %bb.6: # %atomicrmw.end1
 ; CHECK-NEXT:    movzbl sc8(%rip), %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_7: # %atomicrmw.start
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpb $9, %al

diff  --git a/llvm/test/CodeGen/X86/pr51615.ll b/llvm/test/CodeGen/X86/pr51615.ll
index 8b2cea73fd36c4..a062aa138a1e58 100644
--- a/llvm/test/CodeGen/X86/pr51615.ll
+++ b/llvm/test/CodeGen/X86/pr51615.ll
@@ -83,13 +83,13 @@ define void @elts_from_consecutive_loads(ptr %arg, ptr %arg12, ptr %arg13, float
 ; ALL-LABEL: elts_from_consecutive_loads:
 ; ALL:       # %bb.0: # %bb
 ; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    .p2align 4, 0x90
+; ALL-NEXT:    .p2align 4
 ; ALL-NEXT:  .LBB3_1: # %bb16
 ; ALL-NEXT:    # =>This Loop Header: Depth=1
 ; ALL-NEXT:    # Child Loop BB3_2 Depth 2
 ; ALL-NEXT:    testb $1, %cl
 ; ALL-NEXT:    je .LBB3_1
-; ALL-NEXT:    .p2align 4, 0x90
+; ALL-NEXT:    .p2align 4
 ; ALL-NEXT:  .LBB3_2: # %bb17
 ; ALL-NEXT:    # Parent Loop BB3_1 Depth=1
 ; ALL-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/pr53842.ll b/llvm/test/CodeGen/X86/pr53842.ll
index 26d1a977c84c53..a962a9b3ddf2fb 100644
--- a/llvm/test/CodeGen/X86/pr53842.ll
+++ b/llvm/test/CodeGen/X86/pr53842.ll
@@ -14,7 +14,7 @@ define void @PR53842() {
 ; CHECK-NEXT:    vpcmpeqq %ymm3, %ymm2, %ymm2
 ; CHECK-NEXT:    vpcmpeqq %ymm3, %ymm1, %ymm1
 ; CHECK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vpsubq %zmm1, %zmm0, %zmm0

diff  --git a/llvm/test/CodeGen/X86/pr53990-incorrect-machine-sink.ll b/llvm/test/CodeGen/X86/pr53990-incorrect-machine-sink.ll
index ac5ee4635d5063..f2f6e6934abeb6 100644
--- a/llvm/test/CodeGen/X86/pr53990-incorrect-machine-sink.ll
+++ b/llvm/test/CodeGen/X86/pr53990-incorrect-machine-sink.ll
@@ -16,7 +16,7 @@ define void @test(i1 %c, ptr %p, ptr noalias %p2) nounwind {
 ; CHECK-NEXT:    movq (%rsi), %r14
 ; CHECK-NEXT:    movb $1, %r15b
 ; CHECK-NEXT:    jmp .LBB0_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_4: # %sink
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movq %r14, (%rbx)

diff  --git a/llvm/test/CodeGen/X86/pr55648.ll b/llvm/test/CodeGen/X86/pr55648.ll
index 6e99643aecbd3a..da62b07915c868 100644
--- a/llvm/test/CodeGen/X86/pr55648.ll
+++ b/llvm/test/CodeGen/X86/pr55648.ll
@@ -4,7 +4,7 @@
 define void @PR55648() #0 {
 ; CHECK-LABEL: PR55648:
 ; CHECK:       # %bb.0: # %bb
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb38
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_1

diff  --git a/llvm/test/CodeGen/X86/pr61923.ll b/llvm/test/CodeGen/X86/pr61923.ll
index 576b029cd03297..f9290b9743f47f 100644
--- a/llvm/test/CodeGen/X86/pr61923.ll
+++ b/llvm/test/CodeGen/X86/pr61923.ll
@@ -14,7 +14,7 @@ define void @test_loop(ptr align 1 %src, ptr align 1 %dest, i32 %len) {
 ; CHECK-NEXT:    movl %edx, %eax
 ; CHECK-NEXT:    andl $-32, %eax
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %memcmp.loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovups (%rsi,%rcx), %ymm0

diff  --git a/llvm/test/CodeGen/X86/pr63108.ll b/llvm/test/CodeGen/X86/pr63108.ll
index b1576851ed0236..53f6fa697c452e 100644
--- a/llvm/test/CodeGen/X86/pr63108.ll
+++ b/llvm/test/CodeGen/X86/pr63108.ll
@@ -17,7 +17,7 @@ define i32 @PR63108() {
 ; SSE-NEXT:    pxor %xmm0, %xmm0
 ; SSE-NEXT:    movd {{.*#+}} xmm1 = [57339,0,0,0]
 ; SSE-NEXT:    xorl %eax, %eax
-; SSE-NEXT:    .p2align 4, 0x90
+; SSE-NEXT:    .p2align 4
 ; SSE-NEXT:  .LBB0_3: # %vector.body
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
@@ -52,7 +52,7 @@ define i32 @PR63108() {
 ; AVX1-NEXT:  .LBB0_2: # %vector.body.preheader
 ; AVX1-NEXT:    vmovss {{.*#+}} xmm0 = [57339,0,0,0]
 ; AVX1-NEXT:    xorl %eax, %eax
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB0_3: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovaps %ymm0, %ymm1
@@ -89,7 +89,7 @@ define i32 @PR63108() {
 ; AVX2-NEXT:  .LBB0_2: # %vector.body.preheader
 ; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = [57339,0,0,0]
 ; AVX2-NEXT:    xorl %eax, %eax
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB0_3: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovdqa %ymm0, %ymm1
@@ -126,7 +126,7 @@ define i32 @PR63108() {
 ; AVX512-NEXT:  .LBB0_2: # %vector.body.preheader
 ; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = [57339,0,0,0]
 ; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB0_3: # %vector.body
 ; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vmovdqa %ymm0, %ymm1

diff  --git a/llvm/test/CodeGen/X86/pr63692.ll b/llvm/test/CodeGen/X86/pr63692.ll
index fb3198743a3949..8cbd24240e1d8a 100644
--- a/llvm/test/CodeGen/X86/pr63692.ll
+++ b/llvm/test/CodeGen/X86/pr63692.ll
@@ -6,7 +6,7 @@ define void @prefault(ptr noundef %range_start, ptr noundef readnone %range_end)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    cmpq %rsi, %rdi
 ; CHECK-NEXT:    jae .LBB0_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %while.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lock orb $0, (%rdi)

diff  --git a/llvm/test/CodeGen/X86/pr65895.ll b/llvm/test/CodeGen/X86/pr65895.ll
index ced60ee73cc84a..0990b10fa936de 100644
--- a/llvm/test/CodeGen/X86/pr65895.ll
+++ b/llvm/test/CodeGen/X86/pr65895.ll
@@ -16,7 +16,7 @@ define i32 @PR65895() {
 ; CHECK-NEXT:    je .LBB0_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.lr.ph
 ; CHECK-NEXT:    movb %al, b(%rip)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_2

diff  --git a/llvm/test/CodeGen/X86/pr68539.ll b/llvm/test/CodeGen/X86/pr68539.ll
index 8c7e7792dc5f4c..5e3a7b8dbafb28 100644
--- a/llvm/test/CodeGen/X86/pr68539.ll
+++ b/llvm/test/CodeGen/X86/pr68539.ll
@@ -4,7 +4,7 @@
 define i32 @main(i1 %arg) {
 ; CHECK-LABEL: main:
 ; CHECK:       # %bb.0: # %bb
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB0_1

diff  --git a/llvm/test/CodeGen/X86/pr93000.ll b/llvm/test/CodeGen/X86/pr93000.ll
index 0bd5da48847e84..d55332f81c2ca4 100644
--- a/llvm/test/CodeGen/X86/pr93000.ll
+++ b/llvm/test/CodeGen/X86/pr93000.ll
@@ -6,7 +6,7 @@ define void @PR93000(ptr %a0, ptr %a1, ptr %a2, <32 x i16> %a3) {
 ; CHECK:       # %bb.0: # %Entry
 ; CHECK-NEXT:    movl (%rdi), %eax
 ; CHECK-NEXT:    addq $4, %rdi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %Loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    kmovd %eax, %k1

diff  --git a/llvm/test/CodeGen/X86/promote-sra-by-itself.ll b/llvm/test/CodeGen/X86/promote-sra-by-itself.ll
index e19f30b3863396..d57411f495b697 100644
--- a/llvm/test/CodeGen/X86/promote-sra-by-itself.ll
+++ b/llvm/test/CodeGen/X86/promote-sra-by-itself.ll
@@ -19,7 +19,7 @@ define i16 @basic(ptr %p) {
 define void @crash(i1 %cond, ptr %p) {
 ; CHECK-LABEL: crash:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # %loop
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb $1, %dil

diff  --git a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
index c0bfb71e189cd5..beb42f55b709cc 100644
--- a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
+++ b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
@@ -84,7 +84,7 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:    movq _syCTRO at GOTPCREL(%rip), %rax
 ; CHECK-NEXT:    movl $1, %r13d
 ; CHECK-NEXT:    movb $1, %cl
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_9: ## %do.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl $0, (%rax)
@@ -108,7 +108,7 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:    ## in Loop: Header=BB0_13 Depth=1
 ; CHECK-NEXT:    testb %r12b, %r12b
 ; CHECK-NEXT:    je LBB0_54
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_20: ## %while.cond197.backedge
 ; CHECK-NEXT:    ## in Loop: Header=BB0_13 Depth=1
 ; CHECK-NEXT:    decl %r13d
@@ -141,7 +141,7 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:    ## in Loop: Header=BB0_13 Depth=1
 ; CHECK-NEXT:    ## implicit-def: $rax
 ; CHECK-NEXT:    jmp LBB0_28
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_31: ## %do.body479.backedge
 ; CHECK-NEXT:    ## in Loop: Header=BB0_28 Depth=2
 ; CHECK-NEXT:    leaq 1(%rbx), %rax
@@ -165,7 +165,7 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:    testb %r12b, %r12b
 ; CHECK-NEXT:    jne LBB0_31
 ; CHECK-NEXT:    jmp LBB0_33
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_14: ## %while.body200
 ; CHECK-NEXT:    ## in Loop: Header=BB0_13 Depth=1
 ; CHECK-NEXT:    leal 1(%rbp), %eax
@@ -223,7 +223,7 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:    ## in Loop: Header=BB0_13 Depth=1
 ; CHECK-NEXT:    movq _SyFgets.yank at GOTPCREL(%rip), %rax
 ; CHECK-NEXT:    movb $0, (%rax)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_37: ## %for.cond534
 ; CHECK-NEXT:    ## Parent Loop BB0_13 Depth=1
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=2
@@ -235,15 +235,15 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:    movb $0, (%rbx)
 ; CHECK-NEXT:    leaq LJTI0_0(%rip), %rdx
 ; CHECK-NEXT:    jmp LBB0_20
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_41: ## %while.cond864
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp LBB0_41
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_42: ## %while.cond962
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp LBB0_42
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_24: ## %for.cond357
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp LBB0_24
@@ -266,7 +266,7 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:    subq %rbx, %r14
 ; CHECK-NEXT:    movq _syHistory at GOTPCREL(%rip), %rax
 ; CHECK-NEXT:    leaq 8189(%r14,%rax), %rax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_48: ## %for.body1723
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    decq %rax
@@ -279,11 +279,11 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    je LBB0_40
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_17: ## %for.body643.us
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp LBB0_17
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_40: ## %while.cond661
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp LBB0_40
@@ -300,7 +300,7 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT:  ## %bb.51: ## %while.body1679.preheader
 ; CHECK-NEXT:    incl %ebp
 ; CHECK-NEXT:    xorl %ebx, %ebx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_52: ## %while.body1679
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq (%r14), %rdi

diff  --git a/llvm/test/CodeGen/X86/rdrand.ll b/llvm/test/CodeGen/X86/rdrand.ll
index 164145a789943a..52f767a4a419ce 100644
--- a/llvm/test/CodeGen/X86/rdrand.ll
+++ b/llvm/test/CodeGen/X86/rdrand.ll
@@ -89,7 +89,7 @@ define void @loop(ptr %p, i32 %n) nounwind {
 ; X86-NEXT:  # %bb.1: # %while.body.preheader
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    xorl %edx, %edx
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB3_2: # %while.body
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    rdrandl %esi
@@ -108,7 +108,7 @@ define void @loop(ptr %p, i32 %n) nounwind {
 ; X64-NEXT:  # %bb.1: # %while.body.preheader
 ; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    xorl %ecx, %ecx
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_2: # %while.body
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    rdrandl %edx

diff  --git a/llvm/test/CodeGen/X86/retpoline.ll b/llvm/test/CodeGen/X86/retpoline.ll
index d06ddc64044d04..ea9c9bc4fae22f 100644
--- a/llvm/test/CodeGen/X86/retpoline.ll
+++ b/llvm/test/CodeGen/X86/retpoline.ll
@@ -426,7 +426,7 @@ latch:
 ; X64-NEXT:          pause
 ; X64-NEXT:          lfence
 ; X64-NEXT:          jmp     [[CAPTURE_SPEC]]
-; X64-NEXT:          .p2align        4, 0x90
+; X64-NEXT:          .p2align        4
 ; X64-NEXT:  {{.*}}                                  # Block address taken
 ; X64-NEXT:                                          # %entry
 ; X64-NEXT:  [[CALL_TARGET]]:
@@ -445,7 +445,7 @@ latch:
 ; X86-NEXT:          pause
 ; X86-NEXT:          lfence
 ; X86-NEXT:          jmp     [[CAPTURE_SPEC]]
-; X86-NEXT:          .p2align        4, 0x90
+; X86-NEXT:          .p2align        4
 ; X86-NEXT:  {{.*}}                                  # Block address taken
 ; X86-NEXT:                                          # %entry
 ; X86-NEXT:  [[CALL_TARGET]]:
@@ -464,7 +464,7 @@ latch:
 ; X86-NEXT:          pause
 ; X86-NEXT:          lfence
 ; X86-NEXT:          jmp     [[CAPTURE_SPEC]]
-; X86-NEXT:          .p2align        4, 0x90
+; X86-NEXT:          .p2align        4
 ; X86-NEXT:  {{.*}}                                  # Block address taken
 ; X86-NEXT:                                          # %entry
 ; X86-NEXT:  [[CALL_TARGET]]:
@@ -483,7 +483,7 @@ latch:
 ; X86-NEXT:          pause
 ; X86-NEXT:          lfence
 ; X86-NEXT:          jmp     [[CAPTURE_SPEC]]
-; X86-NEXT:          .p2align        4, 0x90
+; X86-NEXT:          .p2align        4
 ; X86-NEXT:  {{.*}}                                  # Block address taken
 ; X86-NEXT:                                          # %entry
 ; X86-NEXT:  [[CALL_TARGET]]:
@@ -502,7 +502,7 @@ latch:
 ; X86-NEXT:          pause
 ; X86-NEXT:          lfence
 ; X86-NEXT:          jmp     [[CAPTURE_SPEC]]
-; X86-NEXT:          .p2align        4, 0x90
+; X86-NEXT:          .p2align        4
 ; X86-NEXT:  {{.*}}                                  # Block address taken
 ; X86-NEXT:                                          # %entry
 ; X86-NEXT:  [[CALL_TARGET]]:

diff  --git a/llvm/test/CodeGen/X86/reverse_branches.ll b/llvm/test/CodeGen/X86/reverse_branches.ll
index d874a47356e3ce..93c82a4524ef9b 100644
--- a/llvm/test/CodeGen/X86/reverse_branches.ll
+++ b/llvm/test/CodeGen/X86/reverse_branches.ll
@@ -37,7 +37,7 @@ define i32 @test_branches_order() uwtable ssp {
 ; CHECK-NEXT:    leaq -{{[0-9]+}}(%rsp), %r14
 ; CHECK-NEXT:    movq %rsp, %r15
 ; CHECK-NEXT:    jmp LBB0_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_6: ## %for.inc9
 ; CHECK-NEXT:    ## in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    incl %ebx
@@ -51,7 +51,7 @@ define i32 @test_branches_order() uwtable ssp {
 ; CHECK-NEXT:    movl $-1, %ebp
 ; CHECK-NEXT:    movq %r15, %rdi
 ; CHECK-NEXT:    movq %r14, %r12
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_3: ## %for.cond1
 ; CHECK-NEXT:    ## Parent Loop BB0_1 Depth=1
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=2
@@ -75,7 +75,7 @@ define i32 @test_branches_order() uwtable ssp {
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    movq %rsp, %rcx
 ; CHECK-NEXT:    jmp LBB0_8
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_15: ## %for.inc38
 ; CHECK-NEXT:    ## in Loop: Header=BB0_8 Depth=1
 ; CHECK-NEXT:    incl %eax
@@ -91,7 +91,7 @@ define i32 @test_branches_order() uwtable ssp {
 ; CHECK-NEXT:    xorl %esi, %esi
 ; CHECK-NEXT:    xorl %edi, %edi
 ; CHECK-NEXT:    jmp LBB0_10
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_14: ## %exit
 ; CHECK-NEXT:    ## in Loop: Header=BB0_10 Depth=2
 ; CHECK-NEXT:    addq %rsi, %r8
@@ -109,7 +109,7 @@ define i32 @test_branches_order() uwtable ssp {
 ; CHECK-NEXT:  ## %bb.11: ## %for.body20
 ; CHECK-NEXT:    ## in Loop: Header=BB0_10 Depth=2
 ; CHECK-NEXT:    movq $-1000, %r8 ## imm = 0xFC18
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_12: ## %do.body.i
 ; CHECK-NEXT:    ## Parent Loop BB0_8 Depth=1
 ; CHECK-NEXT:    ## Parent Loop BB0_10 Depth=2

diff  --git a/llvm/test/CodeGen/X86/sad.ll b/llvm/test/CodeGen/X86/sad.ll
index ca319687da54d4..245516974d15b5 100644
--- a/llvm/test/CodeGen/X86/sad.ll
+++ b/llvm/test/CodeGen/X86/sad.ll
@@ -14,7 +14,7 @@ define dso_local i32 @sad_16i8() nounwind {
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB0_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqu a+1024(%rax), %xmm2
@@ -39,7 +39,7 @@ define dso_local i32 @sad_16i8() nounwind {
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB0_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovdqu a+1024(%rax), %xmm2
@@ -67,7 +67,7 @@ define dso_local i32 @sad_16i8() nounwind {
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB0_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovdqu a+1024(%rax), %xmm2
@@ -91,7 +91,7 @@ define dso_local i32 @sad_16i8() nounwind {
 ; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB0_1: # %vector.body
 ; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vmovdqu a+1024(%rax), %xmm1
@@ -154,7 +154,7 @@ define dso_local i32 @sad_32i8() nounwind {
 ; SSE2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB1_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqa a+1024(%rax), %xmm3
@@ -184,7 +184,7 @@ define dso_local i32 @sad_32i8() nounwind {
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB1_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovdqa a+1024(%rax), %xmm2
@@ -220,7 +220,7 @@ define dso_local i32 @sad_32i8() nounwind {
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB1_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovdqa a+1024(%rax), %ymm2
@@ -247,7 +247,7 @@ define dso_local i32 @sad_32i8() nounwind {
 ; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    .p2align 4, 0x90
+; AVX512-NEXT:    .p2align 4
 ; AVX512-NEXT:  .LBB1_1: # %vector.body
 ; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vmovdqa a+1024(%rax), %ymm2
@@ -315,7 +315,7 @@ define dso_local i32 @sad_avx64i8() nounwind {
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB2_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqa a+1024(%rax), %xmm5
@@ -359,7 +359,7 @@ define dso_local i32 @sad_avx64i8() nounwind {
 ; AVX1-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB2_1: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovdqa a+1024(%rax), %xmm3
@@ -411,7 +411,7 @@ define dso_local i32 @sad_avx64i8() nounwind {
 ; AVX2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB2_1: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovdqa a+1024(%rax), %ymm3
@@ -444,7 +444,7 @@ define dso_local i32 @sad_avx64i8() nounwind {
 ; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT:    .p2align 4, 0x90
+; AVX512F-NEXT:    .p2align 4
 ; AVX512F-NEXT:  .LBB2_1: # %vector.body
 ; AVX512F-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512F-NEXT:    vmovdqa a+1024(%rax), %ymm2
@@ -476,7 +476,7 @@ define dso_local i32 @sad_avx64i8() nounwind {
 ; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT:    .p2align 4, 0x90
+; AVX512BW-NEXT:    .p2align 4
 ; AVX512BW-NEXT:  .LBB2_1: # %vector.body
 ; AVX512BW-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512BW-NEXT:    vmovdqa64 a+1024(%rax), %zmm2
@@ -545,7 +545,7 @@ define dso_local i32 @sad_2i8() nounwind {
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; SSE2-NEXT:    movd {{.*#+}} xmm1 = [65535,0,0,0]
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB3_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -567,7 +567,7 @@ define dso_local i32 @sad_2i8() nounwind {
 ; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB3_1: # %vector.body
 ; AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -618,7 +618,7 @@ define dso_local i32 @sad_4i8() nounwind {
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB4_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -639,7 +639,7 @@ define dso_local i32 @sad_4i8() nounwind {
 ; AVX:       # %bb.0: # %entry
 ; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB4_1: # %vector.body
 ; AVX-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero

diff  --git a/llvm/test/CodeGen/X86/saddo-redundant-add.ll b/llvm/test/CodeGen/X86/saddo-redundant-add.ll
index e89d81a8423181..f132ef8baab924 100644
--- a/llvm/test/CodeGen/X86/saddo-redundant-add.ll
+++ b/llvm/test/CodeGen/X86/saddo-redundant-add.ll
@@ -6,7 +6,7 @@ define void @redundant_add(i64 %n) {
 ; CHECK-LABEL: redundant_add:
 ; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %exit_check
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpq %rdi, %rax

diff  --git a/llvm/test/CodeGen/X86/same-align-bytes-with-llasm-llobj.ll b/llvm/test/CodeGen/X86/same-align-bytes-with-llasm-llobj.ll
new file mode 100644
index 00000000000000..419bf627b39e6a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/same-align-bytes-with-llasm-llobj.ll
@@ -0,0 +1,46 @@
+; RUN: llc %s -o - -filetype=obj --mcpu=znver2 | llvm-objdump -dr  - | FileCheck %s
+; RUN: llc %s -o - -filetype=asm --mcpu=znver2 | llvm-mc - -o - --mcpu=znver2 -filetype=obj -triple x86_64-unknown-linux-gnu | llvm-objdump -dr  - | FileCheck %s
+; RUN: llc %s -o - -filetype=asm --mcpu=znver2 | FileCheck %s --check-prefix=ASM
+
+;; Check that we produce a push, then an align-to-16-bytes p2align.
+;
+; ASM:        # %bb.0:
+; ASM-NEXT:   pushq   %rax
+; ASM-NEXT:   .cfi_def_cfa_offset 16
+; ASM-NEXT:   .p2align 4{{$}}
+
+;; When we assemble the file, either using the built-in assembler or going
+;; via a textual assembly file, we should get the same padding between the
+;; initial push and the next block for alignment. It's a single 15-byte
+;; nop.
+
+; CHECK:        0:   50
+; CHECK-NEXT:   66 66 66 66 66 66 2e 0f 1f 84 00 00 00 00 00 nopw %cs:(%rax,%rax)
+
+;; Note that we specify a CPU to ensure the same nop patterns are selected
+;; by llvm-mc and llc in case their defaults ever diverge; which CPU is
+;; chosen isn't important.
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: noreturn nounwind uwtable
+define dso_local void @b() local_unnamed_addr {
+entry:
+  br label %for.cond
+
+for.cond:
+  tail call void (...) @a()
+  br label %for.cond
+}
+
+declare void @a(...) local_unnamed_addr
+
+!llvm.module.flags = !{!0, !1, !2, !3}
+!llvm.ident = !{!4}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 8, !"PIC Level", i32 2}
+!2 = !{i32 7, !"PIE Level", i32 2}
+!3 = !{i32 7, !"uwtable", i32 2}
+!4 = !{!"clang"}
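
The behaviour the new test pins down can also be reproduced with a small standalone
assembly file; the snippet below is an illustration only and is not part of the patch.
Assembling it (for example with llvm-mc -triple x86_64-unknown-linux-gnu -filetype=obj
and disassembling the result with llvm-objdump) shows the two padding styles: with an
explicit 0x90 fill value the padding is a run of single-byte 0x90 nops, while with no
fill value the assembler is free to pick a multi-byte nop, whose exact encoding depends
on the target CPU and assembler version.

        .text
        pushq   %rax
        .p2align 4, 0x90   # explicit fill value: fifteen single-byte 0x90 nops to reach 16
        nop

        pushq   %rax
        .p2align 4         # no fill value: the assembler may emit one 15-byte multi-byte nop
        nop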

diff  --git a/llvm/test/CodeGen/X86/scalar_widen_div.ll b/llvm/test/CodeGen/X86/scalar_widen_div.ll
index 23e3d0c5c43213..1d98b4f62069d0 100644
--- a/llvm/test/CodeGen/X86/scalar_widen_div.ll
+++ b/llvm/test/CodeGen/X86/scalar_widen_div.ll
@@ -394,7 +394,7 @@ define void @test_int_div(ptr %dest, ptr %old, i32 %n) {
 ; CHECK-NEXT:  # %bb.1: # %bb.nph
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    xorl %r10d, %r10d
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB12_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl (%rdi,%r10), %r8d

diff  --git a/llvm/test/CodeGen/X86/setcc-lowering.ll b/llvm/test/CodeGen/X86/setcc-lowering.ll
index 5bbc73eacd968e..bf31df20887d03 100644
--- a/llvm/test/CodeGen/X86/setcc-lowering.ll
+++ b/llvm/test/CodeGen/X86/setcc-lowering.ll
@@ -54,7 +54,7 @@ define void @pr26232(i64 %a, <16 x i1> %b) nounwind {
 ; AVX1-LABEL: pr26232:
 ; AVX1:       # %bb.0: # %allocas
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB1_1: # %for_loop599
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    cmpq $65536, %rdi # imm = 0x10000
@@ -71,7 +71,7 @@ define void @pr26232(i64 %a, <16 x i1> %b) nounwind {
 ;
 ; AVX2-LABEL: pr26232:
 ; AVX2:       # %bb.0: # %allocas
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB1_1: # %for_loop599
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    cmpq $65536, %rdi # imm = 0x10000
@@ -93,7 +93,7 @@ define void @pr26232(i64 %a, <16 x i1> %b) nounwind {
 ; KNL-32-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; KNL-32-NEXT:    .p2align 4, 0x90
+; KNL-32-NEXT:    .p2align 4
 ; KNL-32-NEXT:  .LBB1_1: # %for_loop599
 ; KNL-32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; KNL-32-NEXT:    cmpl $65536, %ecx # imm = 0x10000

diff  --git a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
index 76cf2423a254fe..2ac2be5545dfdc 100644
--- a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
+++ b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
@@ -48,14 +48,14 @@ define void @failing(ptr %0, ptr %1) nounwind {
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [1,1]
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm2 = [2,2]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %vector.ph
 ; CHECK-NEXT:    # =>This Loop Header: Depth=1
 ; CHECK-NEXT:    # Child Loop BB0_2 Depth 2
 ; CHECK-NEXT:    xorpd %xmm3, %xmm3
 ; CHECK-NEXT:    movq $-1024, %rsi # imm = 0xFC00
 ; CHECK-NEXT:    movdqa %xmm0, %xmm4
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %vector.body
 ; CHECK-NEXT:    # Parent Loop BB0_1 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
@@ -108,14 +108,14 @@ define void @failing(ptr %0, ptr %1) nounwind {
 ; CHECK-AVX2-NEXT:    vpmovsxbq {{.*#+}} xmm0 = [0,1]
 ; CHECK-AVX2-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [1,1]
 ; CHECK-AVX2-NEXT:    vpmovsxbq {{.*#+}} xmm2 = [2,2]
-; CHECK-AVX2-NEXT:    .p2align 4, 0x90
+; CHECK-AVX2-NEXT:    .p2align 4
 ; CHECK-AVX2-NEXT:  .LBB0_1: # %vector.ph
 ; CHECK-AVX2-NEXT:    # =>This Loop Header: Depth=1
 ; CHECK-AVX2-NEXT:    # Child Loop BB0_2 Depth 2
 ; CHECK-AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; CHECK-AVX2-NEXT:    movq $-1024, %rsi # imm = 0xFC00
 ; CHECK-AVX2-NEXT:    vmovdqa %xmm0, %xmm4
-; CHECK-AVX2-NEXT:    .p2align 4, 0x90
+; CHECK-AVX2-NEXT:    .p2align 4
 ; CHECK-AVX2-NEXT:  .LBB0_2: # %vector.body
 ; CHECK-AVX2-NEXT:    # Parent Loop BB0_1 Depth=1
 ; CHECK-AVX2-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/shift-parts.ll b/llvm/test/CodeGen/X86/shift-parts.ll
index c48f7c5a0c0ba0..62d86909ffca27 100644
--- a/llvm/test/CodeGen/X86/shift-parts.ll
+++ b/llvm/test/CodeGen/X86/shift-parts.ll
@@ -13,7 +13,7 @@ define i32 @int87(i32 %uint64p_8, i1 %cond) nounwind {
 ; CHECK-NEXT:    movq g_144+16(%rip), %rcx
 ; CHECK-NEXT:    movzbl %sil, %edx
 ; CHECK-NEXT:    shll $6, %edx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %for.cond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb $64, %dl

diff  --git a/llvm/test/CodeGen/X86/sink-out-of-loop.ll b/llvm/test/CodeGen/X86/sink-out-of-loop.ll
index 55d8c7bb03697f..4634d34a7536cd 100644
--- a/llvm/test/CodeGen/X86/sink-out-of-loop.ll
+++ b/llvm/test/CodeGen/X86/sink-out-of-loop.ll
@@ -9,19 +9,19 @@ define i32 @sink_succ(i32 %argc, ptr nocapture %argv) nounwind uwtable ssp {
 ; CHECK-LABEL: sink_succ:
 ; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %preheader
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
 ; CHECK-NEXT:    ## Child Loop BB0_2 Depth 2
 ; CHECK-NEXT:    ## Child Loop BB0_3 Depth 3
 ; CHECK-NEXT:    movl $1, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_2: ## %for.body1.lr
 ; CHECK-NEXT:    ## Parent Loop BB0_1 Depth=1
 ; CHECK-NEXT:    ## => This Loop Header: Depth=2
 ; CHECK-NEXT:    ## Child Loop BB0_3 Depth 3
 ; CHECK-NEXT:    movl %ecx, %edx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_3: ## %for.body1
 ; CHECK-NEXT:    ## Parent Loop BB0_1 Depth=1
 ; CHECK-NEXT:    ## Parent Loop BB0_2 Depth=2
@@ -40,7 +40,7 @@ define i32 @sink_succ(i32 %argc, ptr nocapture %argv) nounwind uwtable ssp {
 ; CHECK-NEXT:    jne LBB0_1
 ; CHECK-NEXT:  ## %bb.6: ## %for.body2.preheader
 ; CHECK-NEXT:    movl $2048, %eax ## imm = 0x800
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_7: ## %for.body2
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    decl %eax
@@ -91,7 +91,7 @@ define i32 @sink_out_of_loop(i32 %n, ptr %output) {
 ; CHECK-LABEL: sink_out_of_loop:
 ; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB1_1: ## %loop
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %ecx, %eax

diff  --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
index 45da777ea4e2a2..5fd1f77e166d43 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -217,7 +217,7 @@ define void @test_basic_loop(i32 %a, i32 %b, ptr %ptr1, ptr %ptr2) nounwind spec
 ; X64-NEXT:    movl %esi, %ebp
 ; X64-NEXT:    cmovneq %r15, %rax
 ; X64-NEXT:    xorl %r12d, %r12d
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB2_3: # %l.header
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movslq (%r14), %rcx
@@ -267,7 +267,7 @@ define void @test_basic_loop(i32 %a, i32 %b, ptr %ptr1, ptr %ptr2) nounwind spec
 ; X64-LFENCE-NEXT:    movl %esi, %ebp
 ; X64-LFENCE-NEXT:    lfence
 ; X64-LFENCE-NEXT:    xorl %r15d, %r15d
-; X64-LFENCE-NEXT:    .p2align 4, 0x90
+; X64-LFENCE-NEXT:    .p2align 4
 ; X64-LFENCE-NEXT:  .LBB2_2: # %l.header
 ; X64-LFENCE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-LFENCE-NEXT:    lfence
@@ -331,11 +331,11 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, ptr %ptr1, ptr %ptr2
 ; X64-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
 ; X64-NEXT:    testl %r15d, %r15d
 ; X64-NEXT:    jle .LBB3_4
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_5: # %l2.header.preheader
 ; X64-NEXT:    cmovleq %r12, %rax
 ; X64-NEXT:    xorl %r15d, %r15d
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_6: # %l2.header
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movslq (%r14), %rcx
@@ -358,12 +358,12 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, ptr %ptr1, ptr %ptr2
 ; X64-NEXT:  # %bb.11: # in Loop: Header=BB3_6 Depth=1
 ; X64-NEXT:    cmovgeq %r12, %rax
 ; X64-NEXT:    jmp .LBB3_6
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_7:
 ; X64-NEXT:    cmovlq %r12, %rax
 ; X64-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %r15d # 4-byte Reload
 ; X64-NEXT:    jmp .LBB3_8
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_4:
 ; X64-NEXT:    cmovgq %r12, %rax
 ; X64-NEXT:  .LBB3_8: # %l1.latch
@@ -432,7 +432,7 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, ptr %ptr1, ptr %ptr2
 ; X64-LFENCE-NEXT:    lfence
 ; X64-LFENCE-NEXT:    xorl %r12d, %r12d
 ; X64-LFENCE-NEXT:    jmp .LBB3_2
-; X64-LFENCE-NEXT:    .p2align 4, 0x90
+; X64-LFENCE-NEXT:    .p2align 4
 ; X64-LFENCE-NEXT:  .LBB3_5: # %l1.latch
 ; X64-LFENCE-NEXT:    # in Loop: Header=BB3_2 Depth=1
 ; X64-LFENCE-NEXT:    lfence
@@ -452,7 +452,7 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, ptr %ptr1, ptr %ptr2
 ; X64-LFENCE-NEXT:    # in Loop: Header=BB3_2 Depth=1
 ; X64-LFENCE-NEXT:    lfence
 ; X64-LFENCE-NEXT:    xorl %r13d, %r13d
-; X64-LFENCE-NEXT:    .p2align 4, 0x90
+; X64-LFENCE-NEXT:    .p2align 4
 ; X64-LFENCE-NEXT:  .LBB3_4: # %l2.header
 ; X64-LFENCE-NEXT:    # Parent Loop BB3_2 Depth=1
 ; X64-LFENCE-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/split-extend-vector-inreg.ll b/llvm/test/CodeGen/X86/split-extend-vector-inreg.ll
index 08088496b74287..8a6c2f851a6d69 100644
--- a/llvm/test/CodeGen/X86/split-extend-vector-inreg.ll
+++ b/llvm/test/CodeGen/X86/split-extend-vector-inreg.ll
@@ -9,7 +9,7 @@ define <4 x i64> @autogen_SD88863() {
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[3],ymm1[3]
 ; CHECK-NEXT:    movb $1, %al
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %CF
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %al, %al

diff  --git a/llvm/test/CodeGen/X86/sse-domains.ll b/llvm/test/CodeGen/X86/sse-domains.ll
index 116efa69bb319b..249425ec4dc9e9 100644
--- a/llvm/test/CodeGen/X86/sse-domains.ll
+++ b/llvm/test/CodeGen/X86/sse-domains.ll
@@ -18,7 +18,7 @@ define void @f(ptr nocapture %p, i32 %n) nounwind uwtable ssp {
 ; CHECK-NEXT:    addq $16, %rdi
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    pmovsxbd {{.*#+}} xmm0 = [127,127,127,127]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_1: ## %while.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    pand %xmm0, %xmm1

diff  --git a/llvm/test/CodeGen/X86/stack-coloring-wineh.ll b/llvm/test/CodeGen/X86/stack-coloring-wineh.ll
index 79057e0ea78d39..aa2f9e1cd2b1ed 100644
--- a/llvm/test/CodeGen/X86/stack-coloring-wineh.ll
+++ b/llvm/test/CodeGen/X86/stack-coloring-wineh.ll
@@ -42,7 +42,7 @@ define void @pr66984(ptr %arg) personality ptr @__CxxFrameHandler3 {
 ; I686-NEXT:    .scl 3;
 ; I686-NEXT:    .type 32;
 ; I686-NEXT:    .endef
-; I686-NEXT:    .p2align 4, 0x90
+; I686-NEXT:    .p2align 4
 ; I686-NEXT:  "?catch$2@?0?pr66984 at 4HA":
 ; I686-NEXT:  LBB0_2: # %bb17
 ; I686-NEXT:    pushl %ebp
@@ -58,7 +58,7 @@ define void @pr66984(ptr %arg) personality ptr @__CxxFrameHandler3 {
 ; I686-NEXT:    .scl 3;
 ; I686-NEXT:    .type 32;
 ; I686-NEXT:    .endef
-; I686-NEXT:    .p2align 4, 0x90
+; I686-NEXT:    .p2align 4
 ; I686-NEXT:  "?dtor$5@?0?pr66984 at 4HA":
 ; I686-NEXT:  LBB0_5: # %bb8
 ; I686-NEXT:    pushl %ebp
@@ -101,7 +101,7 @@ define void @pr66984(ptr %arg) personality ptr @__CxxFrameHandler3 {
 ; X86_64-NEXT:    .scl 3;
 ; X86_64-NEXT:    .type 32;
 ; X86_64-NEXT:    .endef
-; X86_64-NEXT:    .p2align 4, 0x90
+; X86_64-NEXT:    .p2align 4
 ; X86_64-NEXT:  "?catch$2@?0?pr66984 at 4HA":
 ; X86_64-NEXT:  .seh_proc "?catch$2@?0?pr66984 at 4HA"
 ; X86_64-NEXT:    .seh_handler __CxxFrameHandler3, @unwind, @except
@@ -127,7 +127,7 @@ define void @pr66984(ptr %arg) personality ptr @__CxxFrameHandler3 {
 ; X86_64-NEXT:    .scl 3;
 ; X86_64-NEXT:    .type 32;
 ; X86_64-NEXT:    .endef
-; X86_64-NEXT:    .p2align 4, 0x90
+; X86_64-NEXT:    .p2align 4
 ; X86_64-NEXT:  "?dtor$4@?0?pr66984 at 4HA":
 ; X86_64-NEXT:  .seh_proc "?dtor$4@?0?pr66984 at 4HA"
 ; X86_64-NEXT:  .LBB0_4: # %bb8

diff  --git a/llvm/test/CodeGen/X86/switch.ll b/llvm/test/CodeGen/X86/switch.ll
index b00044a1e4f795..629ba48fcae6bd 100644
--- a/llvm/test/CodeGen/X86/switch.ll
+++ b/llvm/test/CodeGen/X86/switch.ll
@@ -1304,7 +1304,7 @@ return: ret void
 define void @phi_node_trouble(ptr %s) {
 ; CHECK-LABEL: phi_node_trouble:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB13_1: # %header
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testq %rdi, %rdi

diff  --git a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
index 8d84e887d3f279..72e4fe410e2694 100644
--- a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
@@ -6,7 +6,7 @@ define void @tail_dup_merge_loops(i32 %a, ptr %b, ptr %c) local_unnamed_addr #0
 ; CHECK-LABEL: tail_dup_merge_loops:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    jmp .LBB0_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %inner_loop_exit
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    incq %rsi
@@ -15,7 +15,7 @@ define void @tail_dup_merge_loops(i32 %a, ptr %b, ptr %c) local_unnamed_addr #0
 ; CHECK-NEXT:    # Child Loop BB0_4 Depth 2
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    je .LBB0_5
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  # %bb.2: # %inner_loop_top
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    cmpb $0, (%rsi)
@@ -118,7 +118,7 @@ define i32 @loop_shared_header(ptr %exe, i32 %exesz, i32 %headsize, i32 %min, i3
 ; CHECK-NEXT:    movb $32, %cl
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    jmp .LBB1_4
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_15: # %merge_predecessor_split
 ; CHECK-NEXT:    # in Loop: Header=BB1_4 Depth=1
 ; CHECK-NEXT:    movb $32, %cl
@@ -127,7 +127,7 @@ define i32 @loop_shared_header(ptr %exe, i32 %exesz, i32 %headsize, i32 %min, i3
 ; CHECK-NEXT:    # Child Loop BB1_8 Depth 2
 ; CHECK-NEXT:    testl %r12d, %r12d
 ; CHECK-NEXT:    je .LBB1_5
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_8: # %shared_loop_header
 ; CHECK-NEXT:    # Parent Loop BB1_4 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/tail-dup-multiple-latch-loop.ll b/llvm/test/CodeGen/X86/tail-dup-multiple-latch-loop.ll
index fdcad3c1973e70..862f60c063804f 100644
--- a/llvm/test/CodeGen/X86/tail-dup-multiple-latch-loop.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-multiple-latch-loop.ll
@@ -16,7 +16,7 @@ define ptr @large_loop_switch(ptr %p) {
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    jmp ccc@PLT # TAILCALL
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %sw.bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
@@ -26,7 +26,7 @@ define ptr @large_loop_switch(ptr %p) {
 ; CHECK-NEXT:    decl %ebx
 ; CHECK-NEXT:    movl %ebx, %ecx
 ; CHECK-NEXT:    jmpq *.LJTI0_0(,%rcx,8)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %sw.bb3
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl $532, %edi # imm = 0x214
@@ -35,7 +35,7 @@ define ptr @large_loop_switch(ptr %p) {
 ; CHECK-NEXT:    decl %ebx
 ; CHECK-NEXT:    movl %ebx, %ecx
 ; CHECK-NEXT:    jmpq *.LJTI0_0(,%rcx,8)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_4: # %sw.bb5
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl $533, %edi # imm = 0x215
@@ -44,7 +44,7 @@ define ptr @large_loop_switch(ptr %p) {
 ; CHECK-NEXT:    decl %ebx
 ; CHECK-NEXT:    movl %ebx, %ecx
 ; CHECK-NEXT:    jmpq *.LJTI0_0(,%rcx,8)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_5: # %sw.bb7
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl $535, %edi # imm = 0x217
@@ -53,7 +53,7 @@ define ptr @large_loop_switch(ptr %p) {
 ; CHECK-NEXT:    decl %ebx
 ; CHECK-NEXT:    movl %ebx, %ecx
 ; CHECK-NEXT:    jmpq *.LJTI0_0(,%rcx,8)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_6: # %sw.bb9
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl $536, %edi # imm = 0x218
@@ -62,7 +62,7 @@ define ptr @large_loop_switch(ptr %p) {
 ; CHECK-NEXT:    decl %ebx
 ; CHECK-NEXT:    movl %ebx, %ecx
 ; CHECK-NEXT:    jmpq *.LJTI0_0(,%rcx,8)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_7: # %sw.bb11
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl $658, %edi # imm = 0x292
@@ -136,7 +136,7 @@ define i32 @interp_switch(ptr nocapture readonly %0, i32 %1) {
 ; CHECK-NEXT:  .LBB1_7: # in Loop: Header=BB1_1 Depth=1
 ; CHECK-NEXT:    addl $7, %eax
 ; CHECK-NEXT:    incq %rdi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movzbl (%rdi), %ecx
 ; CHECK-NEXT:    decl %ecx

diff  --git a/llvm/test/CodeGen/X86/tail-dup-partial.ll b/llvm/test/CodeGen/X86/tail-dup-partial.ll
index 3b506fc6fce86d..292d59d3450d30 100644
--- a/llvm/test/CodeGen/X86/tail-dup-partial.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-partial.ll
@@ -8,7 +8,7 @@
 define void @partial_tail_dup(i1 %a1, i1 %a2, ptr %a4, ptr %a5, ptr %a6, i32 %a7) #0 align 2  !prof !1 {
 ; CHECK-LABEL: partial_tail_dup:
 ; CHECK:        # %bb.0: # %entry
-; CHECK-NEXT:   .p2align 4, 0x90
+; CHECK-NEXT:   .p2align 4
 ; CHECK-NEXT:   .LBB0_1: # %for.cond
 ; CHECK-NEXT:   # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:	  testb	$1, %dil
@@ -20,14 +20,14 @@ define void @partial_tail_dup(i1 %a1, i1 %a2, ptr %a4, ptr %a5, ptr %a6, i32 %a7
 ; CHECK-NEXT:	  testl	%r9d, %r9d
 ; CHECK-NEXT:	  je	.LBB0_1
 ; CHECK-NEXT:	  jmp	.LBB0_8
-; CHECK-NEXT:	  .p2align	4, 0x90
+; CHECK-NEXT:	  .p2align	4
 ; CHECK-NEXT:   .LBB0_6: # %dup2
 ; CHECK-NEXT:   # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:	  movl	$2, (%rcx)
 ; CHECK-NEXT:	  testl	%r9d, %r9d
 ; CHECK-NEXT:	  je	.LBB0_1
 ; CHECK-NEXT:	  jmp	.LBB0_8
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_3: # %if.end56
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    testb $1, %sil

diff  --git a/llvm/test/CodeGen/X86/tail-dup-repeat.ll b/llvm/test/CodeGen/X86/tail-dup-repeat.ll
index 5e0096a028d855..7a9eb0c3fb85fe 100644
--- a/llvm/test/CodeGen/X86/tail-dup-repeat.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-repeat.ll
@@ -11,7 +11,7 @@ define void @repeated_tail_dup(i1 %a1, i1 %a2, ptr %a4, ptr %a5, ptr %a6, i32 %a
 ; CHECK-LABEL: repeated_tail_dup:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    jmp .LBB0_1
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %land.lhs.true
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl $10, (%rdx)
@@ -34,7 +34,7 @@ define void @repeated_tail_dup(i1 %a1, i1 %a2, ptr %a4, ptr %a5, ptr %a6, i32 %a
 ; CHECK-NEXT:    testl %r9d, %r9d
 ; CHECK-NEXT:    je .LBB0_1
 ; CHECK-NEXT:    jmp .LBB0_8
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_5: # %if.end70
 ; CHECK-NEXT:    # in Loop: Header=BB0_1 Depth=1
 ; CHECK-NEXT:    movl $12, (%rdx)

diff  --git a/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll b/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
index 8a9ee60f341c2b..f96ba351f53f79 100644
--- a/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
+++ b/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
@@ -32,7 +32,7 @@ define i32 @foo(i32 %x) nounwind ssp {
 ; CHECK-NEXT:  LBB0_8: ## %return
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    retq
-; CHECK-NEXT:    .p2align 2, 0x90
+; CHECK-NEXT:    .p2align 2
 ; CHECK-NEXT:    .data_region jt32
 ; CHECK-NEXT:  .set L0_0_set_2, LBB0_2-LJTI0_0
 ; CHECK-NEXT:  .set L0_0_set_3, LBB0_3-LJTI0_0

diff  --git a/llvm/test/CodeGen/X86/tls-loads-control3.ll b/llvm/test/CodeGen/X86/tls-loads-control3.ll
index 4e521b1c696a4d..92dccee296ad77 100644
--- a/llvm/test/CodeGen/X86/tls-loads-control3.ll
+++ b/llvm/test/CodeGen/X86/tls-loads-control3.ll
@@ -51,7 +51,7 @@ define i32 @_Z2f1i(i32 %c) local_unnamed_addr #0 {
 ; HOIST0-NEXT:    rex64
 ; HOIST0-NEXT:    callq __tls_get_addr@PLT
 ; HOIST0-NEXT:    movq %rax, %r15
-; HOIST0-NEXT:    .p2align 4, 0x90
+; HOIST0-NEXT:    .p2align 4
 ; HOIST0-NEXT:  .LBB0_2: # %while.body
 ; HOIST0-NEXT:    # =>This Inner Loop Header: Depth=1
 ; HOIST0-NEXT:    movl (%r15), %edi
@@ -86,7 +86,7 @@ define i32 @_Z2f1i(i32 %c) local_unnamed_addr #0 {
 ; HOIST2-NEXT:    je .LBB0_4
 ; HOIST2-NEXT:  # %bb.1:
 ; HOIST2-NEXT:    movl %edi, %ebx
-; HOIST2-NEXT:    .p2align 4, 0x90
+; HOIST2-NEXT:    .p2align 4
 ; HOIST2-NEXT:  .LBB0_2: # %while.body
 ; HOIST2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; HOIST2-NEXT:    data16
@@ -185,7 +185,7 @@ define i32 @_Z2f2i(i32 %c) local_unnamed_addr #0 {
 ; HOIST0-NEXT:    callq __tls_get_addr@PLT
 ; HOIST0-NEXT:    leaq _ZZ2f2iE2st.0@DTPOFF(%rax), %r15
 ; HOIST0-NEXT:    leaq _ZZ2f2iE2st.1@DTPOFF(%rax), %r12
-; HOIST0-NEXT:    .p2align 4, 0x90
+; HOIST0-NEXT:    .p2align 4
 ; HOIST0-NEXT:  .LBB1_2: # %while.body
 ; HOIST0-NEXT:    # =>This Inner Loop Header: Depth=1
 ; HOIST0-NEXT:    callq _Z5gfuncv@PLT
@@ -225,7 +225,7 @@ define i32 @_Z2f2i(i32 %c) local_unnamed_addr #0 {
 ; HOIST2-NEXT:    je .LBB1_3
 ; HOIST2-NEXT:  # %bb.1: # %while.body.preheader
 ; HOIST2-NEXT:    movl %edi, %ebx
-; HOIST2-NEXT:    .p2align 4, 0x90
+; HOIST2-NEXT:    .p2align 4
 ; HOIST2-NEXT:  .LBB1_2: # %while.body
 ; HOIST2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; HOIST2-NEXT:    callq _Z5gfuncv@PLT

diff  --git a/llvm/test/CodeGen/X86/trunc-store.ll b/llvm/test/CodeGen/X86/trunc-store.ll
index fbdf966dc76aed..3ce06f7caa28e5 100644
--- a/llvm/test/CodeGen/X86/trunc-store.ll
+++ b/llvm/test/CodeGen/X86/trunc-store.ll
@@ -29,7 +29,7 @@
 define void @fn1(i64 %a0) {
 ; CHECK-LABEL: fn1:
 ; CHECK:       # %bb.0: # %for.cond
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpq $8, %rdi

diff  --git a/llvm/test/CodeGen/X86/twoaddr-coalesce.ll b/llvm/test/CodeGen/X86/twoaddr-coalesce.ll
index a7bdfbc7bb25f1..fbedd92517da68 100644
--- a/llvm/test/CodeGen/X86/twoaddr-coalesce.ll
+++ b/llvm/test/CodeGen/X86/twoaddr-coalesce.ll
@@ -9,7 +9,7 @@ define i32 @foo() nounwind {
 ; CHECK:       # %bb.0: # %bb1.thread
 ; CHECK-NEXT:    pushl %ebx
 ; CHECK-NEXT:    xorl %ebx, %ebx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %ebx, %eax

diff  --git a/llvm/test/CodeGen/X86/twoaddr-lea.ll b/llvm/test/CodeGen/X86/twoaddr-lea.ll
index 2f6b943ca72753..f20b777531c5a4 100644
--- a/llvm/test/CodeGen/X86/twoaddr-lea.ll
+++ b/llvm/test/CodeGen/X86/twoaddr-lea.ll
@@ -71,14 +71,14 @@ define void @ham() {
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testb %cl, %cl
 ; CHECK-NEXT:    je LBB3_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB3_6: ## %bb2
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
 ; CHECK-NEXT:    ## Child Loop BB3_7 Depth 2
 ; CHECK-NEXT:    movl (%rdx), %edi
 ; CHECK-NEXT:    leal (%rdi,%rax), %r8d
 ; CHECK-NEXT:    movslq %r8d, %r8
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB3_7: ## %bb6
 ; CHECK-NEXT:    ## Parent Loop BB3_6 Depth=1
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=2
@@ -94,7 +94,7 @@ define void @ham() {
 ; CHECK-NEXT:    jne LBB3_6
 ; CHECK-NEXT:  LBB3_2: ## %bb3.preheader
 ; CHECK-NEXT:    xorl %ecx, %ecx
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB3_3: ## %bb3
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
 ; CHECK-NEXT:    ## Child Loop BB3_4 Depth 2
@@ -102,7 +102,7 @@ define void @ham() {
 ; CHECK-NEXT:    addq $4, %rcx
 ; CHECK-NEXT:    movl %eax, %esi
 ; CHECK-NEXT:    subl %edx, %esi
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB3_4: ## %bb4
 ; CHECK-NEXT:    ## Parent Loop BB3_3 Depth=1
 ; CHECK-NEXT:    ## => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/X86/unaligned-load.ll b/llvm/test/CodeGen/X86/unaligned-load.ll
index c95cb3fe9d9c2e..2310cb228f0634 100644
--- a/llvm/test/CodeGen/X86/unaligned-load.ll
+++ b/llvm/test/CodeGen/X86/unaligned-load.ll
@@ -12,7 +12,7 @@ define void @func() nounwind ssp {
 ; I386-LABEL: func:
 ; I386:       ## %bb.0: ## %entry
 ; I386-NEXT:    subl $32, %esp
-; I386-NEXT:    .p2align 4, 0x90
+; I386-NEXT:    .p2align 4
 ; I386-NEXT:  LBB0_1: ## %bb
 ; I386-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; I386-NEXT:    movl $4673097, {{[0-9]+}}(%esp) ## imm = 0x474E49
@@ -31,7 +31,7 @@ define void @func() nounwind ssp {
 ; CORE2-NEXT:    movabsq $2325069237881678925, %rcx ## imm = 0x20444E2732202C4D
 ; CORE2-NEXT:    movabsq $4706902966564560965, %rdx ## imm = 0x4152474F52502045
 ; CORE2-NEXT:    movabsq $5642821575076104260, %rsi ## imm = 0x4E4F545359524844
-; CORE2-NEXT:    .p2align 4, 0x90
+; CORE2-NEXT:    .p2align 4
 ; CORE2-NEXT:  LBB0_1: ## %bb
 ; CORE2-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CORE2-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
@@ -44,7 +44,7 @@ define void @func() nounwind ssp {
 ; COREI7:       ## %bb.0: ## %entry
 ; COREI7-NEXT:    movups _.str3+15(%rip), %xmm0
 ; COREI7-NEXT:    movups _.str3(%rip), %xmm1
-; COREI7-NEXT:    .p2align 4, 0x90
+; COREI7-NEXT:    .p2align 4
 ; COREI7-NEXT:  LBB0_1: ## %bb
 ; COREI7-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; COREI7-NEXT:    movups %xmm0, -{{[0-9]+}}(%rsp)
@@ -67,7 +67,7 @@ define void @func_aligned() nounwind ssp {
 ; I386:       ## %bb.0: ## %entry
 ; I386-NEXT:    subl $44, %esp
 ; I386-NEXT:    movaps {{.*#+}} xmm0 = [1498564676,1313821779,1380982853,1095911247]
-; I386-NEXT:    .p2align 4, 0x90
+; I386-NEXT:    .p2align 4
 ; I386-NEXT:  LBB1_1: ## %bb
 ; I386-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; I386-NEXT:    movaps %xmm0, (%esp)
@@ -83,7 +83,7 @@ define void @func_aligned() nounwind ssp {
 ; CORE2-NEXT:    movabsq $2325069237881678925, %rcx ## imm = 0x20444E2732202C4D
 ; CORE2-NEXT:    movabsq $4706902966564560965, %rdx ## imm = 0x4152474F52502045
 ; CORE2-NEXT:    movabsq $5642821575076104260, %rsi ## imm = 0x4E4F545359524844
-; CORE2-NEXT:    .p2align 4, 0x90
+; CORE2-NEXT:    .p2align 4
 ; CORE2-NEXT:  LBB1_1: ## %bb
 ; CORE2-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CORE2-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
@@ -96,7 +96,7 @@ define void @func_aligned() nounwind ssp {
 ; COREI7:       ## %bb.0: ## %entry
 ; COREI7-NEXT:    movups _.str3+15(%rip), %xmm0
 ; COREI7-NEXT:    movups _.str3(%rip), %xmm1
-; COREI7-NEXT:    .p2align 4, 0x90
+; COREI7-NEXT:    .p2align 4
 ; COREI7-NEXT:  LBB1_1: ## %bb
 ; COREI7-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; COREI7-NEXT:    movups %xmm0, -{{[0-9]+}}(%rsp)

diff  --git a/llvm/test/CodeGen/X86/undef-label.ll b/llvm/test/CodeGen/X86/undef-label.ll
index 4cacaec85e7c08..a2d9bc571f5827 100644
--- a/llvm/test/CodeGen/X86/undef-label.ll
+++ b/llvm/test/CodeGen/X86/undef-label.ll
@@ -15,7 +15,7 @@ define dso_local void @xyz() {
 ; CHECK-NEXT:    ucomisd %xmm1, %xmm0
 ; CHECK-NEXT:    jne .LBB0_1
 ; CHECK-NEXT:    jnp .LBB0_2
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %foo
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ucomisd %xmm1, %xmm0

diff  --git a/llvm/test/CodeGen/X86/vec_setcc-2.ll b/llvm/test/CodeGen/X86/vec_setcc-2.ll
index edf726a1e37c4f..5a71878ea45796 100644
--- a/llvm/test/CodeGen/X86/vec_setcc-2.ll
+++ b/llvm/test/CodeGen/X86/vec_setcc-2.ll
@@ -13,7 +13,7 @@ define void @loop_no_const_reload(ptr  %in, ptr %out, i32 %n) {
 ; SSE2-NEXT:    xorl %eax, %eax
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [25,25,25,25,25,25,25,25]
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  LBB0_2: ## %for.body
 ; SSE2-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqa (%rdi,%rax), %xmm2
@@ -33,7 +33,7 @@ define void @loop_no_const_reload(ptr  %in, ptr %out, i32 %n) {
 ; SSE41-NEXT:  ## %bb.1: ## %for.body.preheader
 ; SSE41-NEXT:    xorl %eax, %eax
 ; SSE41-NEXT:    pmovsxbw {{.*#+}} xmm0 = [25,25,25,25,25,25,25,25]
-; SSE41-NEXT:    .p2align 4, 0x90
+; SSE41-NEXT:    .p2align 4
 ; SSE41-NEXT:  LBB0_2: ## %for.body
 ; SSE41-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; SSE41-NEXT:    movdqa (%rdi,%rax), %xmm1
@@ -80,7 +80,7 @@ define void @loop_const_folding_underflow(ptr  %in, ptr %out, i32 %n) {
 ; SSE2-NEXT:    xorl %eax, %eax
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [32768,32768,32768,32768,32768,32768,32768,32768]
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [32768,32794,32794,32794,32794,32794,32794,32794]
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  LBB1_2: ## %for.body
 ; SSE2-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movdqa (%rdi,%rax), %xmm2
@@ -102,7 +102,7 @@ define void @loop_const_folding_underflow(ptr  %in, ptr %out, i32 %n) {
 ; SSE41-NEXT:    xorl %eax, %eax
 ; SSE41-NEXT:    pmovsxbw {{.*#+}} xmm0 = [0,26,26,26,26,26,26,26]
 ; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    .p2align 4, 0x90
+; SSE41-NEXT:    .p2align 4
 ; SSE41-NEXT:  LBB1_2: ## %for.body
 ; SSE41-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; SSE41-NEXT:    movdqa (%rdi,%rax), %xmm2

diff  --git a/llvm/test/CodeGen/X86/vector-fshl-128.ll b/llvm/test/CodeGen/X86/vector-fshl-128.ll
index c7cff092c5a4fe..28b3d7c7d717ec 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-128.ll
@@ -1454,7 +1454,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; SSE-NEXT:    movd %esi, %xmm0
 ; SSE-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE-NEXT:    .p2align 4, 0x90
+; SSE-NEXT:    .p2align 4
 ; SSE-NEXT:  .LBB8_1: # %loop
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-NEXT:    movdqu 1024(%rdi,%rax), %xmm1
@@ -1474,7 +1474,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; AVX1-NEXT:    vmovd %esi, %xmm0
 ; AVX1-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB8_1: # %loop
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovdqu 1024(%rdi,%rax), %xmm1
@@ -1498,7 +1498,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [32,32,32,32]
 ; AVX2-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB8_1: # %loop
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovdqu 1024(%rdi,%rax), %xmm2
@@ -1516,7 +1516,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; AVX512F-NEXT:    vmovd %esi, %xmm0
 ; AVX512F-NEXT:    vpbroadcastd %xmm0, %xmm0
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512F-NEXT:    .p2align 4, 0x90
+; AVX512F-NEXT:    .p2align 4
 ; AVX512F-NEXT:  .LBB8_1: # %loop
 ; AVX512F-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512F-NEXT:    vmovdqu 1024(%rdi,%rax), %xmm1
@@ -1532,7 +1532,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; AVX512VL:       # %bb.0: # %entry
 ; AVX512VL-NEXT:    vpbroadcastd %esi, %xmm0
 ; AVX512VL-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512VL-NEXT:    .p2align 4, 0x90
+; AVX512VL-NEXT:    .p2align 4
 ; AVX512VL-NEXT:  .LBB8_1: # %loop
 ; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VL-NEXT:    vmovdqu 1024(%rdi,%rax), %xmm1
@@ -1548,7 +1548,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; AVX512BW-NEXT:    vmovd %esi, %xmm0
 ; AVX512BW-NEXT:    vpbroadcastd %xmm0, %xmm0
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512BW-NEXT:    .p2align 4, 0x90
+; AVX512BW-NEXT:    .p2align 4
 ; AVX512BW-NEXT:  .LBB8_1: # %loop
 ; AVX512BW-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512BW-NEXT:    vmovdqu 1024(%rdi,%rax), %xmm1
@@ -1565,7 +1565,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; AVX512VBMI2-NEXT:    vmovd %esi, %xmm0
 ; AVX512VBMI2-NEXT:    vpbroadcastd %xmm0, %xmm0
 ; AVX512VBMI2-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512VBMI2-NEXT:    .p2align 4, 0x90
+; AVX512VBMI2-NEXT:    .p2align 4
 ; AVX512VBMI2-NEXT:  .LBB8_1: # %loop
 ; AVX512VBMI2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VBMI2-NEXT:    vmovdqu 1024(%rdi,%rax), %xmm1
@@ -1581,7 +1581,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; AVX512VLBW:       # %bb.0: # %entry
 ; AVX512VLBW-NEXT:    vpbroadcastd %esi, %xmm0
 ; AVX512VLBW-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512VLBW-NEXT:    .p2align 4, 0x90
+; AVX512VLBW-NEXT:    .p2align 4
 ; AVX512VLBW-NEXT:  .LBB8_1: # %loop
 ; AVX512VLBW-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VLBW-NEXT:    vmovdqu 1024(%rdi,%rax), %xmm1
@@ -1596,7 +1596,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; AVX512VLVBMI2:       # %bb.0: # %entry
 ; AVX512VLVBMI2-NEXT:    vpbroadcastd %esi, %xmm0
 ; AVX512VLVBMI2-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512VLVBMI2-NEXT:    .p2align 4, 0x90
+; AVX512VLVBMI2-NEXT:    .p2align 4
 ; AVX512VLVBMI2-NEXT:  .LBB8_1: # %loop
 ; AVX512VLVBMI2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VLVBMI2-NEXT:    vmovdqu 1024(%rdi,%rax), %xmm1
@@ -1612,7 +1612,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; XOPAVX1-NEXT:    vmovd %esi, %xmm0
 ; XOPAVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; XOPAVX1-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; XOPAVX1-NEXT:    .p2align 4, 0x90
+; XOPAVX1-NEXT:    .p2align 4
 ; XOPAVX1-NEXT:  .LBB8_1: # %loop
 ; XOPAVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; XOPAVX1-NEXT:    vprotd %xmm0, 1024(%rdi,%rax), %xmm1
@@ -1627,7 +1627,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; XOPAVX2-NEXT:    vmovd %esi, %xmm0
 ; XOPAVX2-NEXT:    vpbroadcastd %xmm0, %xmm0
 ; XOPAVX2-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; XOPAVX2-NEXT:    .p2align 4, 0x90
+; XOPAVX2-NEXT:    .p2align 4
 ; XOPAVX2-NEXT:  .LBB8_1: # %loop
 ; XOPAVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; XOPAVX2-NEXT:    vprotd %xmm0, 1024(%rdi,%rax), %xmm1
@@ -1647,7 +1647,7 @@ define void @sink_splatvar(ptr %p, i32 %shift_amt) {
 ; X86-SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    xorl %edx, %edx
-; X86-SSE2-NEXT:    .p2align 4, 0x90
+; X86-SSE2-NEXT:    .p2align 4
 ; X86-SSE2-NEXT:  .LBB8_1: # %loop
 ; X86-SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-SSE2-NEXT:    movdqu (%eax,%ecx,4), %xmm1

diff  --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
index b26580541fe401..fca72cad474cd1 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -1279,7 +1279,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; AVX1-NEXT:    vpmovsxbq {{.*#+}} xmm3 = [31,0]
 ; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB8_1: # %loop
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
@@ -1325,7 +1325,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [31,31,31,31,31,31,31,31]
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm4 = [32,32,32,32,32,32,32,32]
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB8_1: # %loop
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -1352,7 +1352,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; AVX512F-NEXT:    vpbroadcastd %xmm1, %ymm1
 ; AVX512F-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT:    .p2align 4, 0x90
+; AVX512F-NEXT:    .p2align 4
 ; AVX512F-NEXT:  .LBB8_1: # %loop
 ; AVX512F-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512F-NEXT:    vpmovzxbd {{.*#+}} ymm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -1372,7 +1372,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; AVX512VL-NEXT:    vpbroadcastd %edx, %ymm0
 ; AVX512VL-NEXT:    vpbroadcastd %ecx, %ymm1
 ; AVX512VL-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512VL-NEXT:    .p2align 4, 0x90
+; AVX512VL-NEXT:    .p2align 4
 ; AVX512VL-NEXT:  .LBB8_1: # %loop
 ; AVX512VL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -1395,7 +1395,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; AVX512BW-NEXT:    vpbroadcastd %xmm1, %ymm1
 ; AVX512BW-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512BW-NEXT:    .p2align 4, 0x90
+; AVX512BW-NEXT:    .p2align 4
 ; AVX512BW-NEXT:  .LBB8_1: # %loop
 ; AVX512BW-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512BW-NEXT:    vpmovzxbd {{.*#+}} ymm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -1418,7 +1418,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; AVX512VBMI2-NEXT:    vpbroadcastd %xmm1, %ymm1
 ; AVX512VBMI2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; AVX512VBMI2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512VBMI2-NEXT:    .p2align 4, 0x90
+; AVX512VBMI2-NEXT:    .p2align 4
 ; AVX512VBMI2-NEXT:  .LBB8_1: # %loop
 ; AVX512VBMI2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VBMI2-NEXT:    vpmovzxbd {{.*#+}} ymm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -1438,7 +1438,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; AVX512VLBW-NEXT:    vpbroadcastd %edx, %ymm0
 ; AVX512VLBW-NEXT:    vpbroadcastd %ecx, %ymm1
 ; AVX512VLBW-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX512VLBW-NEXT:    .p2align 4, 0x90
+; AVX512VLBW-NEXT:    .p2align 4
 ; AVX512VLBW-NEXT:  .LBB8_1: # %loop
 ; AVX512VLBW-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512VLBW-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
@@ -1458,7 +1458,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; AVX10-NEXT:    vpbroadcastd %edx, %ymm0
 ; AVX10-NEXT:    vpbroadcastd %ecx, %ymm1
 ; AVX10-NEXT:    movq $-1024, %rax # imm = 0xFC00
-; AVX10-NEXT:    .p2align 4, 0x90
+; AVX10-NEXT:    .p2align 4
 ; AVX10-NEXT:  .LBB8_1: # %loop
 ; AVX10-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX10-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
@@ -1485,7 +1485,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; XOPAVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; XOPAVX1-NEXT:    .p2align 4, 0x90
+; XOPAVX1-NEXT:    .p2align 4
 ; XOPAVX1-NEXT:  .LBB8_1: # %loop
 ; XOPAVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; XOPAVX1-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
@@ -1513,7 +1513,7 @@ define void @fancierRotate2(ptr %arr, ptr %control, i32 %rot0, i32 %rot1) {
 ; XOPAVX2-NEXT:    vpbroadcastd %xmm1, %ymm1
 ; XOPAVX2-NEXT:    movq $-1024, %rax # imm = 0xFC00
 ; XOPAVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX2-NEXT:    .p2align 4, 0x90
+; XOPAVX2-NEXT:    .p2align 4
 ; XOPAVX2-NEXT:  .LBB8_1: # %loop
 ; XOPAVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; XOPAVX2-NEXT:    vpmovzxbd {{.*#+}} ymm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero

diff  --git a/llvm/test/CodeGen/X86/vector-pack-128.ll b/llvm/test/CodeGen/X86/vector-pack-128.ll
index d11e1a70b684cd..f58f19ecd24819 100644
--- a/llvm/test/CodeGen/X86/vector-pack-128.ll
+++ b/llvm/test/CodeGen/X86/vector-pack-128.ll
@@ -327,7 +327,7 @@ define <16 x i8> @concat_trunc_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) nounwi
 define void @autogen_SD10339(<1 x i32> %I49) {
 ; CHECK-LABEL: autogen_SD10339:
 ; CHECK:       # %bb.0: # %BB
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB8_1: # %CF
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movw $1, 0

diff  --git a/llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll b/llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll
index e33c99be0ed093..177d8f78b5a8d4 100644
--- a/llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll
@@ -33,7 +33,7 @@ define void @vector_variable_shift_left_loop(ptr nocapture %arr, ptr nocapture r
 ; SSE-NEXT:    pxor %xmm8, %xmm8
 ; SSE-NEXT:    pmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
 ; SSE-NEXT:    pmovzxdq {{.*#+}} xmm10 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT:    .p2align 4, 0x90
+; SSE-NEXT:    .p2align 4
 ; SSE-NEXT:  .LBB0_4: # %vector.body
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
@@ -119,7 +119,7 @@ define void @vector_variable_shift_left_loop(ptr nocapture %arr, ptr nocapture r
 ; SSE-NEXT:    jne .LBB0_6
 ; SSE-NEXT:  .LBB0_9: # %for.cond.cleanup
 ; SSE-NEXT:    retq
-; SSE-NEXT:    .p2align 4, 0x90
+; SSE-NEXT:    .p2align 4
 ; SSE-NEXT:  .LBB0_8: # %for.body
 ; SSE-NEXT:    # in Loop: Header=BB0_6 Depth=1
 ; SSE-NEXT:    # kill: def $cl killed $cl killed $ecx
@@ -167,7 +167,7 @@ define void @vector_variable_shift_left_loop(ptr nocapture %arr, ptr nocapture r
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm8[0],zero,xmm8[1],zero
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm8 = xmm8[0],zero,xmm8[1],zero
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB0_4: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vmovq {{.*#+}} xmm9 = mem[0],zero
@@ -244,7 +244,7 @@ define void @vector_variable_shift_left_loop(ptr nocapture %arr, ptr nocapture r
 ; AVX1-NEXT:  .LBB0_9: # %for.cond.cleanup
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB0_8: # %for.body
 ; AVX1-NEXT:    # in Loop: Header=BB0_6 Depth=1
 ; AVX1-NEXT:    # kill: def $cl killed $cl killed $ecx
@@ -283,7 +283,7 @@ define void @vector_variable_shift_left_loop(ptr nocapture %arr, ptr nocapture r
 ; AVX2-NEXT:    vpbroadcastd %xmm1, %ymm1
 ; AVX2-NEXT:    xorl %ecx, %ecx
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB0_4: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -319,7 +319,7 @@ define void @vector_variable_shift_left_loop(ptr nocapture %arr, ptr nocapture r
 ; AVX2-NEXT:  .LBB0_9: # %for.cond.cleanup
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB0_8: # %for.body
 ; AVX2-NEXT:    # in Loop: Header=BB0_6 Depth=1
 ; AVX2-NEXT:    # kill: def $cl killed $cl killed $ecx
@@ -362,7 +362,7 @@ define void @vector_variable_shift_left_loop(ptr nocapture %arr, ptr nocapture r
 ; XOP-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; XOP-NEXT:    .p2align 4, 0x90
+; XOP-NEXT:    .p2align 4
 ; XOP-NEXT:  .LBB0_4: # %vector.body
 ; XOP-NEXT:    # =>This Inner Loop Header: Depth=1
 ; XOP-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
@@ -418,7 +418,7 @@ define void @vector_variable_shift_left_loop(ptr nocapture %arr, ptr nocapture r
 ; XOP-NEXT:  .LBB0_9: # %for.cond.cleanup
 ; XOP-NEXT:    vzeroupper
 ; XOP-NEXT:    retq
-; XOP-NEXT:    .p2align 4, 0x90
+; XOP-NEXT:    .p2align 4
 ; XOP-NEXT:  .LBB0_8: # %for.body
 ; XOP-NEXT:    # in Loop: Header=BB0_6 Depth=1
 ; XOP-NEXT:    # kill: def $cl killed $cl killed $ecx
@@ -547,7 +547,7 @@ define void @vector_variable_shift_left_loop_simpler(ptr nocapture %arr, ptr noc
 ; SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero
 ; SSE-NEXT:    pslld %xmm0, %xmm1
 ; SSE-NEXT:    pxor %xmm3, %xmm3
-; SSE-NEXT:    .p2align 4, 0x90
+; SSE-NEXT:    .p2align 4
 ; SSE-NEXT:  .LBB1_2: # %vector.body
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -578,7 +578,7 @@ define void @vector_variable_shift_left_loop_simpler(ptr nocapture %arr, ptr noc
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
 ; AVX1-NEXT:    vpslld %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    .p2align 4, 0x90
+; AVX1-NEXT:    .p2align 4
 ; AVX1-NEXT:  .LBB1_2: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -606,7 +606,7 @@ define void @vector_variable_shift_left_loop_simpler(ptr nocapture %arr, ptr noc
 ; AVX2-NEXT:    vpbroadcastd %xmm2, %xmm2
 ; AVX2-NEXT:    xorl %ecx, %ecx
 ; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT:    .p2align 4, 0x90
+; AVX2-NEXT:    .p2align 4
 ; AVX2-NEXT:  .LBB1_2: # %vector.body
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -635,7 +635,7 @@ define void @vector_variable_shift_left_loop_simpler(ptr nocapture %arr, ptr noc
 ; XOP-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
 ; XOP-NEXT:    xorl %ecx, %ecx
 ; XOP-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; XOP-NEXT:    .p2align 4, 0x90
+; XOP-NEXT:    .p2align 4
 ; XOP-NEXT:  .LBB1_2: # %vector.body
 ; XOP-NEXT:    # =>This Inner Loop Header: Depth=1
 ; XOP-NEXT:    vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index 36c4be5f1939ec..07c770abc65d6c 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -3656,7 +3656,7 @@ entry:
 define void @autogen_SD25931() {
 ; CHECK-LABEL: autogen_SD25931:
 ; CHECK:       # %bb.0: # %BB
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB142_1: # %CF242
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    jmp .LBB142_1

diff  --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index 364390a4a60e56..a2c3613ecca12d 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -187,7 +187,7 @@ define <32 x i8> @PR22706(<32 x i1> %x) {
 define void @PR59003(<2 x float> %0, <2 x float> %1, <8 x i1> %shuffle108) {
 ; AVX-LABEL: PR59003:
 ; AVX:       ## %bb.0: ## %entry
-; AVX-NEXT:    .p2align 4, 0x90
+; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  LBB4_1: ## %for.body.i
 ; AVX-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; AVX-NEXT:    jmp LBB4_1

diff  --git a/llvm/test/CodeGen/X86/widen_arith-1.ll b/llvm/test/CodeGen/X86/widen_arith-1.ll
index f6be700645fb33..e08df8f76d1a61 100644
--- a/llvm/test/CodeGen/X86/widen_arith-1.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-1.ll
@@ -7,7 +7,7 @@ define void @update(ptr %dst, ptr %src, i32 %n) nounwind {
 ; CHECK-NEXT:    pushl %eax
 ; CHECK-NEXT:    movl $0, (%esp)
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %forcond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl (%esp), %eax

diff  --git a/llvm/test/CodeGen/X86/widen_arith-2.ll b/llvm/test/CodeGen/X86/widen_arith-2.ll
index a0f78db8746f45..260b87cc461a98 100644
--- a/llvm/test/CodeGen/X86/widen_arith-2.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-2.ll
@@ -10,7 +10,7 @@ define void @update(ptr %dst_i, ptr %src_i, i32 %n) nounwind {
 ; CHECK-NEXT:    movl $0, (%esp)
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,u,u,u,u,u,u,u,u]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %forcond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl (%esp), %eax

diff  --git a/llvm/test/CodeGen/X86/widen_arith-3.ll b/llvm/test/CodeGen/X86/widen_arith-3.ll
index 85640a0af597a3..9031588f2690d5 100644
--- a/llvm/test/CodeGen/X86/widen_arith-3.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-3.ll
@@ -17,7 +17,7 @@ define void @update(ptr %dst, ptr %src, i32 %n) nounwind {
 ; CHECK-NEXT:    movw $1, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    movl $65537, {{[0-9]+}}(%esp) # imm = 0x10001
 ; CHECK-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %forcond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax

diff  --git a/llvm/test/CodeGen/X86/widen_arith-4.ll b/llvm/test/CodeGen/X86/widen_arith-4.ll
index 8bb716374b4822..c49882ffe0b389 100644
--- a/llvm/test/CodeGen/X86/widen_arith-4.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-4.ll
@@ -16,7 +16,7 @@ define void @update(ptr %dst, ptr %src, i32 %n) nounwind {
 ; SSE2-NEXT:    movl $0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [271,271,271,271,271,u,u,u]
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,2,2,2,u,u,u]
-; SSE2-NEXT:    .p2align 4, 0x90
+; SSE2-NEXT:    .p2align 4
 ; SSE2-NEXT:  .LBB0_1: # %forcond
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE2-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
@@ -49,7 +49,7 @@ define void @update(ptr %dst, ptr %src, i32 %n) nounwind {
 ; SSE41-NEXT:    movw $0, -{{[0-9]+}}(%rsp)
 ; SSE41-NEXT:    movl $0, -{{[0-9]+}}(%rsp)
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [271,271,271,271,271,u,u,u]
-; SSE41-NEXT:    .p2align 4, 0x90
+; SSE41-NEXT:    .p2align 4
 ; SSE41-NEXT:  .LBB0_1: # %forcond
 ; SSE41-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE41-NEXT:    movl -{{[0-9]+}}(%rsp), %eax

diff  --git a/llvm/test/CodeGen/X86/widen_arith-5.ll b/llvm/test/CodeGen/X86/widen_arith-5.ll
index dd75a4ea2cf229..466249d1bf1d42 100644
--- a/llvm/test/CodeGen/X86/widen_arith-5.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-5.ll
@@ -14,7 +14,7 @@ define void @update(ptr %dst, ptr %src, i32 %n) nounwind {
 ; CHECK-NEXT:    movl $1, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movl $0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    pmovsxbd {{.*#+}} xmm0 = [3,3,3,3]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %forcond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax

diff  --git a/llvm/test/CodeGen/X86/widen_arith-6.ll b/llvm/test/CodeGen/X86/widen_arith-6.ll
index 239fe25b0c4a9a..6fa232f4d3227d 100644
--- a/llvm/test/CodeGen/X86/widen_arith-6.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-6.ll
@@ -15,7 +15,7 @@ define void @update(ptr %dst, ptr %src, i32 %n) nounwind {
 ; CHECK-NEXT:    movl $1065353216, {{[0-9]+}}(%esp) # imm = 0x3F800000
 ; CHECK-NEXT:    movl $0, {{[0-9]+}}(%esp)
 ; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [1.97604004E+3,1.97604004E+3,1.97604004E+3,u]
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %forcond
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax

diff  --git a/llvm/test/CodeGen/X86/widen_cast-1.ll b/llvm/test/CodeGen/X86/widen_cast-1.ll
index 58f0d2c2da5749..566dde0ca13d3c 100644
--- a/llvm/test/CodeGen/X86/widen_cast-1.ll
+++ b/llvm/test/CodeGen/X86/widen_cast-1.ll
@@ -14,7 +14,7 @@ define void @convert(ptr %dst, ptr %src) nounwind {
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT:    cmpl $3, (%esp)
 ; CHECK-NEXT:    jg .LBB0_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %forbody
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl (%esp), %eax
@@ -37,7 +37,7 @@ define void @convert(ptr %dst, ptr %src) nounwind {
 ; ATOM-NEXT:    movl $0, (%esp)
 ; ATOM-NEXT:    cmpl $3, (%esp)
 ; ATOM-NEXT:    jg .LBB0_3
-; ATOM-NEXT:    .p2align 4, 0x90
+; ATOM-NEXT:    .p2align 4
 ; ATOM-NEXT:  .LBB0_2: # %forbody
 ; ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; ATOM-NEXT:    movl (%esp), %eax

diff  --git a/llvm/test/CodeGen/X86/widen_cast-2.ll b/llvm/test/CodeGen/X86/widen_cast-2.ll
index 6e174180d04a7e..cd06f27dcc55c2 100644
--- a/llvm/test/CodeGen/X86/widen_cast-2.ll
+++ b/llvm/test/CodeGen/X86/widen_cast-2.ll
@@ -10,7 +10,7 @@ define void @convert(ptr %dst, ptr %src) nounwind {
 ; CHECK-NEXT:    pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT:    cmpl $3, (%esp)
 ; CHECK-NEXT:    jg .LBB0_3
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_2: # %forbody
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl (%esp), %eax

diff  --git a/llvm/test/CodeGen/X86/widen_cast-4.ll b/llvm/test/CodeGen/X86/widen_cast-4.ll
index 7dd92ce874aefe..7468e229e3a841 100644
--- a/llvm/test/CodeGen/X86/widen_cast-4.ll
+++ b/llvm/test/CodeGen/X86/widen_cast-4.ll
@@ -11,7 +11,7 @@ define void @update(ptr %dst_i, ptr %src_i, i32 %n) nounwind {
 ; WIDE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; WIDE-NEXT:    movdqa {{.*#+}} xmm1 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
 ; WIDE-NEXT:    movdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
-; WIDE-NEXT:    .p2align 4, 0x90
+; WIDE-NEXT:    .p2align 4
 ; WIDE-NEXT:  .LBB0_1: # %forcond
 ; WIDE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; WIDE-NEXT:    movl (%esp), %eax

diff  --git a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
index fab3847b3a2c51..fe7459ea45e141 100644
--- a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
@@ -80,7 +80,7 @@ define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:    ## InlineAsm End
 ; ENABLE-NEXT:    xorl %eax, %eax
 ; ENABLE-NEXT:    movl $10, %ecx
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB1_2: ## %for.body
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    ## InlineAsm Start
@@ -111,7 +111,7 @@ define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
 ; DISABLE-NEXT:    ## InlineAsm End
 ; DISABLE-NEXT:    xorl %eax, %eax
 ; DISABLE-NEXT:    movl $10, %ecx
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB1_2: ## %for.body
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    ## InlineAsm Start
@@ -174,7 +174,7 @@ define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
 ; ENABLE-NEXT:    ## InlineAsm End
 ; ENABLE-NEXT:    xorl %eax, %eax
 ; ENABLE-NEXT:    movl $10, %ecx
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB2_1: ## %for.body
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    ## InlineAsm Start
@@ -200,7 +200,7 @@ define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
 ; DISABLE-NEXT:    ## InlineAsm End
 ; DISABLE-NEXT:    xorl %eax, %eax
 ; DISABLE-NEXT:    movl $10, %ecx
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB2_1: ## %for.body
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    ## InlineAsm Start
@@ -255,7 +255,7 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:    ## InlineAsm End
 ; ENABLE-NEXT:    xorl %eax, %eax
 ; ENABLE-NEXT:    movl $10, %ecx
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB3_2: ## %for.body
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    ## InlineAsm Start
@@ -289,7 +289,7 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
 ; DISABLE-NEXT:    ## InlineAsm End
 ; DISABLE-NEXT:    xorl %eax, %eax
 ; DISABLE-NEXT:    movl $10, %ecx
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB3_2: ## %for.body
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    ## InlineAsm Start
@@ -355,7 +355,7 @@ define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
 ; ENABLE-NEXT:    ## InlineAsm End
 ; ENABLE-NEXT:    xorl %eax, %eax
 ; ENABLE-NEXT:    movl $10, %ecx
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB4_2: ## %for.body
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    ## InlineAsm Start
@@ -384,7 +384,7 @@ define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
 ; DISABLE-NEXT:    ## InlineAsm End
 ; DISABLE-NEXT:    xorl %eax, %eax
 ; DISABLE-NEXT:    movl $10, %ecx
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB4_2: ## %for.body
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    ## InlineAsm Start
@@ -461,7 +461,7 @@ define i32 @inlineAsm(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:    nop
 ; ENABLE-NEXT:    ## InlineAsm End
 ; ENABLE-NEXT:    movl $10, %eax
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB6_2: ## %for.body
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    ## InlineAsm Start
@@ -493,7 +493,7 @@ define i32 @inlineAsm(i32 %cond, i32 %N) {
 ; DISABLE-NEXT:    nop
 ; DISABLE-NEXT:    ## InlineAsm End
 ; DISABLE-NEXT:    movl $10, %eax
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB6_2: ## %for.body
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    ## InlineAsm Start
@@ -824,7 +824,7 @@ define void @infiniteloop() {
 ; ENABLE-NEXT:    ## InlineAsm Start
 ; ENABLE-NEXT:    movl $1, %edx
 ; ENABLE-NEXT:    ## InlineAsm End
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB10_2: ## %for.body
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    addl %edx, %eax
@@ -856,7 +856,7 @@ define void @infiniteloop() {
 ; DISABLE-NEXT:    ## InlineAsm Start
 ; DISABLE-NEXT:    movl $1, %edx
 ; DISABLE-NEXT:    ## InlineAsm End
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB10_2: ## %for.body
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    addl %edx, %eax
@@ -906,7 +906,7 @@ define void @infiniteloop2() {
 ; ENABLE-NEXT:    movq %rcx, %rsp
 ; ENABLE-NEXT:    xorl %edx, %edx
 ; ENABLE-NEXT:    jmp LBB11_2
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB11_4: ## %body2
 ; ENABLE-NEXT:    ## in Loop: Header=BB11_2 Depth=1
 ; ENABLE-NEXT:    ## InlineAsm Start
@@ -954,7 +954,7 @@ define void @infiniteloop2() {
 ; DISABLE-NEXT:    movq %rcx, %rsp
 ; DISABLE-NEXT:    xorl %edx, %edx
 ; DISABLE-NEXT:    jmp LBB11_2
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB11_4: ## %body2
 ; DISABLE-NEXT:    ## in Loop: Header=BB11_2 Depth=1
 ; DISABLE-NEXT:    ## InlineAsm Start
@@ -1024,7 +1024,7 @@ define void @infiniteloop3() {
 ; ENABLE-NEXT:    xorl %ecx, %ecx
 ; ENABLE-NEXT:    movq %rax, %rsi
 ; ENABLE-NEXT:    jmp LBB12_4
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB12_3: ## %loop2b
 ; ENABLE-NEXT:    ## in Loop: Header=BB12_4 Depth=1
 ; ENABLE-NEXT:    movq %rdx, (%rsi)
@@ -1056,7 +1056,7 @@ define void @infiniteloop3() {
 ; DISABLE-NEXT:    xorl %ecx, %ecx
 ; DISABLE-NEXT:    movq %rax, %rsi
 ; DISABLE-NEXT:    jmp LBB12_4
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB12_3: ## %loop2b
 ; DISABLE-NEXT:    ## in Loop: Header=BB12_4 Depth=1
 ; DISABLE-NEXT:    movq %rdx, (%rsi)
@@ -1188,7 +1188,7 @@ define i32 @useLEAForPrologue(i32 %d, i32 %a, i8 %c) #3 {
 ; ENABLE-NEXT:    ## InlineAsm Start
 ; ENABLE-NEXT:    nop
 ; ENABLE-NEXT:    ## InlineAsm End
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB14_2: ## %for.body
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    cmpl %esi, %edi
@@ -1223,7 +1223,7 @@ define i32 @useLEAForPrologue(i32 %d, i32 %a, i8 %c) #3 {
 ; DISABLE-NEXT:    ## InlineAsm Start
 ; DISABLE-NEXT:    nop
 ; DISABLE-NEXT:    ## InlineAsm End
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB14_2: ## %for.body
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    cmpl %esi, %edi
@@ -1375,7 +1375,7 @@ define i32 @irreducibleCFG() #4 {
 ; ENABLE-NEXT:    movq _irreducibleCFGf@GOTPCREL(%rip), %rax
 ; ENABLE-NEXT:    cmpb $0, (%rax)
 ; ENABLE-NEXT:    je LBB16_2
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB16_1: ## %preheader
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    jmp LBB16_1
@@ -1392,7 +1392,7 @@ define i32 @irreducibleCFG() #4 {
 ; ENABLE-NEXT:    jmp LBB16_5
 ; ENABLE-NEXT:  LBB16_3:
 ; ENABLE-NEXT:    xorl %ebx, %ebx
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB16_5: ## %for.inc
 ; ENABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    incl %ebx
@@ -1418,7 +1418,7 @@ define i32 @irreducibleCFG() #4 {
 ; DISABLE-NEXT:    movq _irreducibleCFGf@GOTPCREL(%rip), %rax
 ; DISABLE-NEXT:    cmpb $0, (%rax)
 ; DISABLE-NEXT:    je LBB16_2
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB16_1: ## %preheader
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    jmp LBB16_1
@@ -1435,7 +1435,7 @@ define i32 @irreducibleCFG() #4 {
 ; DISABLE-NEXT:    jmp LBB16_5
 ; DISABLE-NEXT:  LBB16_3:
 ; DISABLE-NEXT:    xorl %ebx, %ebx
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB16_5: ## %for.inc
 ; DISABLE-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    incl %ebx
@@ -1515,7 +1515,7 @@ define void @infiniteLoopNoSuccessor() #5 {
 ; ENABLE-NEXT:  LBB17_3:
 ; ENABLE-NEXT:    xorl %eax, %eax
 ; ENABLE-NEXT:    callq _something
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  LBB17_4: ## =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    xorl %eax, %eax
 ; ENABLE-NEXT:    callq _somethingElse
@@ -1542,7 +1542,7 @@ define void @infiniteLoopNoSuccessor() #5 {
 ; DISABLE-NEXT:  LBB17_3:
 ; DISABLE-NEXT:    xorl %eax, %eax
 ; DISABLE-NEXT:    callq _something
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  LBB17_4: ## =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    xorl %eax, %eax
 ; DISABLE-NEXT:    callq _somethingElse

diff --git a/llvm/test/CodeGen/X86/x86-win64-shrink-wrapping.ll b/llvm/test/CodeGen/X86/x86-win64-shrink-wrapping.ll
index 6ed23b0d770cd4..d52990e753d3eb 100644
--- a/llvm/test/CodeGen/X86/x86-win64-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/X86/x86-win64-shrink-wrapping.ll
@@ -27,7 +27,7 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) #0 {
 ; ENABLE-NEXT:    #APP
 ; ENABLE-NEXT:    movl $1, %edx
 ; ENABLE-NEXT:    #NO_APP
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  .LBB0_2: # %for.body
 ; ENABLE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    addl %edx, %eax
@@ -63,7 +63,7 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) #0 {
 ; DISABLE-NEXT:    #APP
 ; DISABLE-NEXT:    movl $1, %edx
 ; DISABLE-NEXT:    #NO_APP
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  .LBB0_2: # %for.body
 ; DISABLE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    addl %edx, %eax
@@ -129,7 +129,7 @@ define i32 @loopInfoSaveOutsideLoop2(i32 %cond, i32 %N) #0 {
 ; ENABLE-NEXT:    #NO_APP
 ; ENABLE-NEXT:    xorl %eax, %eax
 ; ENABLE-NEXT:    movl $10, %ecx
-; ENABLE-NEXT:    .p2align 4, 0x90
+; ENABLE-NEXT:    .p2align 4
 ; ENABLE-NEXT:  .LBB1_2: # %for.body
 ; ENABLE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    #APP
@@ -164,7 +164,7 @@ define i32 @loopInfoSaveOutsideLoop2(i32 %cond, i32 %N) #0 {
 ; DISABLE-NEXT:    #NO_APP
 ; DISABLE-NEXT:    xorl %eax, %eax
 ; DISABLE-NEXT:    movl $10, %ecx
-; DISABLE-NEXT:    .p2align 4, 0x90
+; DISABLE-NEXT:    .p2align 4
 ; DISABLE-NEXT:  .LBB1_2: # %for.body
 ; DISABLE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    #APP

diff --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll
index 7f4006792ef1a8..2bef66825d8c02 100644
--- a/llvm/test/CodeGen/X86/xor.ll
+++ b/llvm/test/CodeGen/X86/xor.ll
@@ -59,7 +59,7 @@ define i32 @test4(i32 %a, i32 %b) nounwind  {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB3_1: # %bb
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    xorl %ecx, %eax
@@ -75,7 +75,7 @@ define i32 @test4(i32 %a, i32 %b) nounwind  {
 ; X64-LIN-LABEL: test4:
 ; X64-LIN:       # %bb.0: # %entry
 ; X64-LIN-NEXT:    movl %edi, %eax
-; X64-LIN-NEXT:    .p2align 4, 0x90
+; X64-LIN-NEXT:    .p2align 4
 ; X64-LIN-NEXT:  .LBB3_1: # %bb
 ; X64-LIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-LIN-NEXT:    xorl %esi, %eax
@@ -91,7 +91,7 @@ define i32 @test4(i32 %a, i32 %b) nounwind  {
 ; X64-WIN-LABEL: test4:
 ; X64-WIN:       # %bb.0: # %entry
 ; X64-WIN-NEXT:    movl %ecx, %eax
-; X64-WIN-NEXT:    .p2align 4, 0x90
+; X64-WIN-NEXT:    .p2align 4
 ; X64-WIN-NEXT:  .LBB3_1: # %bb
 ; X64-WIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-WIN-NEXT:    xorl %edx, %eax
@@ -123,7 +123,7 @@ define i16 @test5(i16 %a, i16 %b) nounwind  {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB4_1: # %bb
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    xorl %ecx, %eax
@@ -141,7 +141,7 @@ define i16 @test5(i16 %a, i16 %b) nounwind  {
 ; X64-LIN-LABEL: test5:
 ; X64-LIN:       # %bb.0: # %entry
 ; X64-LIN-NEXT:    movl %edi, %eax
-; X64-LIN-NEXT:    .p2align 4, 0x90
+; X64-LIN-NEXT:    .p2align 4
 ; X64-LIN-NEXT:  .LBB4_1: # %bb
 ; X64-LIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-LIN-NEXT:    xorl %esi, %eax
@@ -160,7 +160,7 @@ define i16 @test5(i16 %a, i16 %b) nounwind  {
 ; X64-WIN:       # %bb.0: # %entry
 ; X64-WIN-NEXT:    # kill: def $dx killed $dx def $edx
 ; X64-WIN-NEXT:    movl %ecx, %eax
-; X64-WIN-NEXT:    .p2align 4, 0x90
+; X64-WIN-NEXT:    .p2align 4
 ; X64-WIN-NEXT:  .LBB4_1: # %bb
 ; X64-WIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-WIN-NEXT:    xorl %edx, %eax
@@ -194,7 +194,7 @@ define i8 @test6(i8 %a, i8 %b) nounwind  {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB5_1: # %bb
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    xorb %cl, %al
@@ -210,7 +210,7 @@ define i8 @test6(i8 %a, i8 %b) nounwind  {
 ; X64-LIN-LABEL: test6:
 ; X64-LIN:       # %bb.0: # %entry
 ; X64-LIN-NEXT:    movl %edi, %eax
-; X64-LIN-NEXT:    .p2align 4, 0x90
+; X64-LIN-NEXT:    .p2align 4
 ; X64-LIN-NEXT:  .LBB5_1: # %bb
 ; X64-LIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-LIN-NEXT:    xorb %sil, %al
@@ -227,7 +227,7 @@ define i8 @test6(i8 %a, i8 %b) nounwind  {
 ; X64-WIN-LABEL: test6:
 ; X64-WIN:       # %bb.0: # %entry
 ; X64-WIN-NEXT:    movl %ecx, %eax
-; X64-WIN-NEXT:    .p2align 4, 0x90
+; X64-WIN-NEXT:    .p2align 4
 ; X64-WIN-NEXT:  .LBB5_1: # %bb
 ; X64-WIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-WIN-NEXT:    xorb %dl, %al
@@ -259,7 +259,7 @@ define i32 @test7(i32 %a, i32 %b) nounwind  {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    .p2align 4, 0x90
+; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB6_1: # %bb
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X86-NEXT:    xorl %ecx, %eax
@@ -275,7 +275,7 @@ define i32 @test7(i32 %a, i32 %b) nounwind  {
 ; X64-LIN-LABEL: test7:
 ; X64-LIN:       # %bb.0: # %entry
 ; X64-LIN-NEXT:    movl %edi, %eax
-; X64-LIN-NEXT:    .p2align 4, 0x90
+; X64-LIN-NEXT:    .p2align 4
 ; X64-LIN-NEXT:  .LBB6_1: # %bb
 ; X64-LIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-LIN-NEXT:    xorl %esi, %eax
@@ -291,7 +291,7 @@ define i32 @test7(i32 %a, i32 %b) nounwind  {
 ; X64-WIN-LABEL: test7:
 ; X64-WIN:       # %bb.0: # %entry
 ; X64-WIN-NEXT:    movl %ecx, %eax
-; X64-WIN-NEXT:    .p2align 4, 0x90
+; X64-WIN-NEXT:    .p2align 4
 ; X64-WIN-NEXT:  .LBB6_1: # %bb
 ; X64-WIN-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-WIN-NEXT:    xorl %edx, %eax

diff --git a/llvm/test/CodeGen/X86/xray-attribute-instrumentation.ll b/llvm/test/CodeGen/X86/xray-attribute-instrumentation.ll
index 9cebffc3abe9ab..fc1f3352ce8183 100644
--- a/llvm/test/CodeGen/X86/xray-attribute-instrumentation.ll
+++ b/llvm/test/CodeGen/X86/xray-attribute-instrumentation.ll
@@ -3,12 +3,12 @@
 ; RUN: llc -mtriple=x86_64-darwin-unknown                          < %s | FileCheck %s --check-prefixes=CHECK,CHECK-MACOS
 
 define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" {
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_0:
 ; CHECK:       .ascii "\353\t"
 ; CHECK-NEXT:  nopw 512(%rax,%rax)
   ret i32 0
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_1:
 ; CHECK:       retq
 ; CHECK-NEXT:  nopw %cs:512(%rax,%rax)
@@ -38,7 +38,7 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always"
 ; We test multiple returns in a single function to make sure we're getting all
 ; of them with XRay instrumentation.
 define i32 @bar(i32 %i) nounwind noinline uwtable "function-instrument"="xray-always" {
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_2:
 ; CHECK:       .ascii "\353\t"
 ; CHECK-NEXT:  nopw 512(%rax,%rax)
@@ -47,13 +47,13 @@ Test:
   br i1 %cond, label %IsEqual, label %NotEqual
 IsEqual:
   ret i32 0
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_3:
 ; CHECK:       retq
 ; CHECK-NEXT:  nopw %cs:512(%rax,%rax)
 NotEqual:
   ret i32 1
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_4:
 ; CHECK:       retq
 ; CHECK-NEXT:  nopw %cs:512(%rax,%rax)

diff --git a/llvm/test/CodeGen/X86/xray-custom-log.ll b/llvm/test/CodeGen/X86/xray-custom-log.ll
index 9ccda8ecb04809..8f23055aca97c3 100644
--- a/llvm/test/CodeGen/X86/xray-custom-log.ll
+++ b/llvm/test/CodeGen/X86/xray-custom-log.ll
@@ -84,7 +84,7 @@ define void @leaf_func() "function-instrument"="xray-always" "frame-pointer"="no
   ; CHECK:         pushq %rax
   ; CHECK:         movl $leaf_func.event_id, %eax
   ; CHECK-NEXT:    movl $4, %ecx
-  ; CHECK-NEXT:    .p2align 1, 0x90
+  ; CHECK-NEXT:    .p2align 1
   ; CHECK-NEXT:  .Lxray_event_sled_1:
   call void @llvm.xray.customevent(ptr @leaf_func.event_id, i64 4)
   ret void

diff --git a/llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-entry.ll b/llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-entry.ll
index f58d743d465ddd..51b047c5a0e635 100644
--- a/llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-entry.ll
+++ b/llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-entry.ll
@@ -5,7 +5,7 @@
 define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" "xray-skip-entry" {
 ; CHECK-NOT: Lxray_sled_0:
   ret i32 0
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_0:
 ; CHECK:       retq
 ; CHECK-NEXT:  nopw %cs:512(%rax,%rax)
@@ -39,13 +39,13 @@ Test:
   br i1 %cond, label %IsEqual, label %NotEqual
 IsEqual:
   ret i32 0
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_1:
 ; CHECK:       retq
 ; CHECK-NEXT:  nopw %cs:512(%rax,%rax)
 NotEqual:
   ret i32 1
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_2:
 ; CHECK:       retq
 ; CHECK-NEXT:  nopw %cs:512(%rax,%rax)

diff --git a/llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-exit.ll b/llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-exit.ll
index 3d631d73b68c50..ab841d734a23ed 100644
--- a/llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-exit.ll
+++ b/llvm/test/CodeGen/X86/xray-partial-instrumentation-skip-exit.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=x86_64-darwin-unknown    < %s | FileCheck %s
 
 define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always" "xray-skip-exit" {
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_0:
 ; CHECK:       .ascii "\353\t"
 ; CHECK-NEXT:  nopw 512(%rax,%rax)
@@ -35,7 +35,7 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always"
 ; We test multiple returns in a single function to make sure we're skipping all
 ; of them with XRay instrumentation.
 define i32 @bar(i32 %i) nounwind noinline uwtable "function-instrument"="xray-always" "xray-skip-exit" {
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_1:
 ; CHECK:       .ascii "\353\t"
 ; CHECK-NEXT:  nopw 512(%rax,%rax)

diff --git a/llvm/test/CodeGen/X86/xray-selective-instrumentation.ll b/llvm/test/CodeGen/X86/xray-selective-instrumentation.ll
index 7bf47ea2894c0f..868c35363f6e96 100644
--- a/llvm/test/CodeGen/X86/xray-selective-instrumentation.ll
+++ b/llvm/test/CodeGen/X86/xray-selective-instrumentation.ll
@@ -7,12 +7,12 @@ target triple = "x86_64-apple-darwin8"
 define i32 @foo() nounwind uwtable "xray-instruction-threshold"="1" {
 ; CHECK-LABEL: foo:
 ; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    .p2align 1, 0x90
+; CHECK-NEXT:    .p2align 1
 ; CHECK-NEXT:  Lxray_sled_0:
 ; CHECK-NEXT:    .ascii "\353\t"
 ; CHECK-NEXT:    nopw 512(%rax,%rax)
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    .p2align 1, 0x90
+; CHECK-NEXT:    .p2align 1
 ; CHECK-NEXT:  Lxray_sled_1:
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:    nopw %cs:512(%rax,%rax)

diff --git a/llvm/test/CodeGen/X86/xray-tail-call-sled.ll b/llvm/test/CodeGen/X86/xray-tail-call-sled.ll
index 126e5db52a5b73..91b91f5360f928 100644
--- a/llvm/test/CodeGen/X86/xray-tail-call-sled.ll
+++ b/llvm/test/CodeGen/X86/xray-tail-call-sled.ll
@@ -2,12 +2,12 @@
 ; RUN: llc -mtriple=x86_64-darwin-unknown    < %s | FileCheck %s --check-prefixes=CHECK,CHECK-MACOS
 
 define dso_local i32 @callee() nounwind noinline uwtable "function-instrument"="xray-always" {
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_0:
 ; CHECK:       .ascii "\353\t"
 ; CHECK-NEXT:  nopw 512(%rax,%rax)
   ret i32 0
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_1:
 ; CHECK:       retq
 ; CHECK-NEXT:  nopw %cs:512(%rax,%rax)
@@ -34,11 +34,11 @@ define dso_local i32 @callee() nounwind noinline uwtable "function-instrument"="
 ; CHECK-MACOS-NEXT:    .quad 2
 
 define dso_local i32 @caller() nounwind noinline uwtable "function-instrument"="xray-always" {
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_2:
 ; CHECK:       .ascii "\353\t"
 ; CHECK-NEXT:  nopw 512(%rax,%rax)
-; CHECK:       .p2align 1, 0x90
+; CHECK:       .p2align 1
 ; CHECK-LABEL: Lxray_sled_3:
 ; CHECK-NEXT:  .ascii "\353\t"
 ; CHECK-NEXT:  nopw 512(%rax,%rax)
@@ -69,13 +69,13 @@ define dso_local i32 @caller() nounwind noinline uwtable "function-instrument"="
 
 define dso_local i32 @conditional_tail_call(i32 %cond) nounwind noinline uwtable "function-instrument"="xray-always" {
 ; CHECK-LABEL: conditional_tail_call:
-; CHECK:         .p2align 1, 0x90
+; CHECK:         .p2align 1
 ; CHECK-LABEL: Lxray_sled_4:
 ; CHECK:         .ascii "\353\t"
 ; CHECK-NEXT:    nopw 512(%rax,%rax)
 ; CHECK-NEXT:    testl %edi, %edi
 ; CHECK-NEXT:    je {{\.?Ltmp5}}
-; CHECK:         .p2align 1, 0x90
+; CHECK:         .p2align 1
 ; CHECK-LABEL: Lxray_sled_5:
 ; CHECK-NEXT:    .ascii "\353\t"
 ; CHECK-NEXT:    nopw 512(%rax,%rax)
@@ -83,7 +83,7 @@ define dso_local i32 @conditional_tail_call(i32 %cond) nounwind noinline uwtable
 ; CHECK-NEXT:    jmp {{.*}}callee {{.*}}# TAILCALL
 ; CHECK-LABEL: Ltmp5:
 ; CHECK:         xorl %eax, %eax
-; CHECK-NEXT:   .p2align  1, 0x90
+; CHECK-NEXT:   .p2align  1
 ; CHECK-LABEL: Lxray_sled_6:
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:    nopw %cs:512(%rax,%rax)

diff --git a/llvm/test/DebugInfo/COFF/pieces.ll b/llvm/test/DebugInfo/COFF/pieces.ll
index 8f5b6b2d7c1afd..8e62ad0093aa3b 100644
--- a/llvm/test/DebugInfo/COFF/pieces.ll
+++ b/llvm/test/DebugInfo/COFF/pieces.ll
@@ -41,7 +41,7 @@
 ; ASM:         xorl    %edi, %edi
 ; ASM:         xorl    %esi, %esi
 ; ASM: [[oy_ox_start:\.Ltmp[0-9]+]]:
-; ASM:         .p2align        4, 0x90
+; ASM:         .p2align        4
 ; ASM: .LBB0_3:                                # %for.body
 ; ASM:        #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] $edi
 ; ASM:        #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] $esi

diff --git a/llvm/test/DebugInfo/X86/header.ll b/llvm/test/DebugInfo/X86/header.ll
index 3f3d9f95419c1d..f90aa3b50f4671 100644
--- a/llvm/test/DebugInfo/X86/header.ll
+++ b/llvm/test/DebugInfo/X86/header.ll
@@ -5,7 +5,7 @@
 ; CHECK:       .text
 ; CHECK-NEXT: .file	"<stdin>"
 ; CHECK-NEXT: .globl	f
-; CHECK-NEXT: .p2align	4, 0x90
+; CHECK-NEXT: .p2align	4
 ; CHECK-NEXT: .type	f,@function
 ; CHECK-NEXT: f:                                      # @f
 

diff --git a/llvm/test/DebugInfo/X86/loop-align-debug.ll b/llvm/test/DebugInfo/X86/loop-align-debug.ll
index a0302d08faa0c3..d6f6395b26544a 100644
--- a/llvm/test/DebugInfo/X86/loop-align-debug.ll
+++ b/llvm/test/DebugInfo/X86/loop-align-debug.ll
@@ -13,7 +13,7 @@
 
 ; ASM:      .loc    0 0 0 is_stmt 0
 ; ASM-NEXT: .L{{.*}}:
-; ASM-NEXT: .p2align        4, 0x90
+; ASM-NEXT: .p2align        4
 
 ;; $ cat test.cpp
 ;; void g();

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/2011-11-29-postincphi.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/2011-11-29-postincphi.ll
index 841836c7d2dd86..7ae78ae6a1fd40 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/2011-11-29-postincphi.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/2011-11-29-postincphi.ll
@@ -17,7 +17,7 @@ define i64 @sqlite3DropTriggerPtr() nounwind {
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movl $1, %ebx
 ; CHECK-NEXT:    callq check@PLT
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %bb1
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    testb %al, %al

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index 39e2d6f1acca45..93203ef6e17f53 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -20,7 +20,7 @@ define i32 @simple(ptr %a, ptr %b, i32 %x) nounwind {
 ; X64-NEXT:    shlq $2, %rcx
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    leaq (%rcx,%rcx), %rdx
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB0_1: # %loop
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    addl (%rdi), %eax
@@ -47,7 +47,7 @@ define i32 @simple(ptr %a, ptr %b, i32 %x) nounwind {
 ; X32-NEXT:    shll $2, %edx
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    leal (%edx,%edx), %esi
-; X32-NEXT:    .p2align 4, 0x90
+; X32-NEXT:    .p2align 4
 ; X32-NEXT:  .LBB0_1: # %loop
 ; X32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X32-NEXT:    addl (%edi), %eax
@@ -101,7 +101,7 @@ define i32 @user(ptr %a, ptr %b, i32 %x) nounwind {
 ; X64-NEXT:    leaq (,%rcx,4), %rax
 ; X64-NEXT:    leaq (%rax,%rax,2), %r8
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB1_1: # %loop
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    addl (%rdi), %eax
@@ -128,7 +128,7 @@ define i32 @user(ptr %a, ptr %b, i32 %x) nounwind {
 ; X32-NEXT:    leal (,%ecx,4), %eax
 ; X32-NEXT:    leal (%eax,%eax,2), %ebx
 ; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    .p2align 4, 0x90
+; X32-NEXT:    .p2align 4
 ; X32-NEXT:  .LBB1_1: # %loop
 ; X32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X32-NEXT:    addl (%esi), %eax
@@ -200,7 +200,7 @@ define void @extrastride(ptr nocapture %main, i32 %main_stride, ptr nocapture %r
 ; X64-NEXT:    movslq %r8d, %r8
 ; X64-NEXT:    shlq $2, %r8
 ; X64-NEXT:    movslq %ebx, %r11
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB2_2: # %for.body
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl (%rdi,%rsi), %ebx
@@ -235,7 +235,7 @@ define void @extrastride(ptr nocapture %main, i32 %main_stride, ptr nocapture %r
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT:    addl %esi, %edi
 ; X32-NEXT:    shll $2, %ecx
-; X32-NEXT:    .p2align 4, 0x90
+; X32-NEXT:    .p2align 4
 ; X32-NEXT:  .LBB2_2: # %for.body
 ; X32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X32-NEXT:    movl (%ebx,%esi), %ebp
@@ -309,7 +309,7 @@ define void @foldedidx(ptr nocapture %a, ptr nocapture %b, ptr nocapture %c) nou
 ; X64-LABEL: foldedidx:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movl $3, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB3_1: # %for.body
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movzbl -3(%rdi,%rax), %ecx
@@ -343,7 +343,7 @@ define void @foldedidx(ptr nocapture %a, ptr nocapture %b, ptr nocapture %c) nou
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    .p2align 4, 0x90
+; X32-NEXT:    .p2align 4
 ; X32-NEXT:  .LBB3_1: # %for.body
 ; X32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X32-NEXT:    movzbl -3(%esi,%eax), %edi
@@ -433,7 +433,7 @@ define void @multioper(ptr %a, i32 %n) nounwind {
 ; X64-LABEL: multioper:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB4_1: # %for.body
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movl %eax, (%rdi,%rax,4)
@@ -455,7 +455,7 @@ define void @multioper(ptr %a, i32 %n) nounwind {
 ; X32-NEXT:    xorl %eax, %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    .p2align 4, 0x90
+; X32-NEXT:    .p2align 4
 ; X32-NEXT:  .LBB4_1: # %for.body
 ; X32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X32-NEXT:    movl %eax, (%edx,%eax,4)
@@ -511,7 +511,7 @@ define void @testCmpZero(ptr %src, ptr %dst, i32 %srcidx, i32 %dstidx, i32 %len)
 ; X64-NEXT:    movslq %r8d, %rcx
 ; X64-NEXT:    subq %rdx, %rcx
 ; X64-NEXT:    xorl %edx, %edx
-; X64-NEXT:    .p2align 4, 0x90
+; X64-NEXT:    .p2align 4
 ; X64-NEXT:  .LBB5_1: # %for.body82.us
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X64-NEXT:    movzbl (%rax,%rdx,4), %esi
@@ -532,7 +532,7 @@ define void @testCmpZero(ptr %src, ptr %dst, i32 %srcidx, i32 %dstidx, i32 %len)
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    addl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    xorl %esi, %esi
-; X32-NEXT:    .p2align 4, 0x90
+; X32-NEXT:    .p2align 4
 ; X32-NEXT:  .LBB5_1: # %for.body82.us
 ; X32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; X32-NEXT:    movzbl (%edx,%esi,4), %ebx

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
index 753aaed7d77c0b..1c953515a31083 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
@@ -65,7 +65,7 @@ define void @foo(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr nocap
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
-; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  .LBB0_1: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl 4096(%rsi,%rax), %ecx

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/macro-fuse-cmp.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/macro-fuse-cmp.ll
index feff010300c1e1..ef2ed1f0e87e20 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/macro-fuse-cmp.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/macro-fuse-cmp.ll
@@ -79,7 +79,7 @@ define void @maxArray(ptr noalias nocapture %x, ptr noalias nocapture readonly %
 ; BASE-LABEL: maxArray:
 ; BASE:       # %bb.0: # %entry
 ; BASE-NEXT:    movq $-524288, %rax # imm = 0xFFF80000
-; BASE-NEXT:    .p2align 4, 0x90
+; BASE-NEXT:    .p2align 4
 ; BASE-NEXT:  .LBB0_1: # %vector.body
 ; BASE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; BASE-NEXT:    movupd 524288(%rdi,%rax), %xmm0
@@ -93,7 +93,7 @@ define void @maxArray(ptr noalias nocapture %x, ptr noalias nocapture readonly %
 ; FUSE-LABEL: maxArray:
 ; FUSE:       # %bb.0: # %entry
 ; FUSE-NEXT:    xorl %eax, %eax
-; FUSE-NEXT:    .p2align 4, 0x90
+; FUSE-NEXT:    .p2align 4
 ; FUSE-NEXT:  .LBB0_1: # %vector.body
 ; FUSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; FUSE-NEXT:    movupd (%rdi,%rax,8), %xmm0


        

