r297189 - fix test to not check optimized IR; NFCI

Sanjay Patel via cfe-commits <cfe-commits at lists.llvm.org>
Tue Mar 7 11:24:55 PST 2017


Author: spatel
Date: Tue Mar  7 13:24:54 2017
New Revision: 297189

URL: http://llvm.org/viewvc/llvm-project?rev=297189&view=rev
Log:
fix test to not check optimized IR; NFCI

This test broke with an LLVM instcombine patch (r297166).
I changed the RUN line to run only -mem2reg instead of the full -O pipeline (to save time
checking this large batch of tests) and regenerated the checks using the script attached
to D17999:
https://reviews.llvm.org/D17999

The goal is to make this test immune to optimizer changes. If anything in these tests
was checking for an IR optimization, that should be tested in LLVM, not Clang.
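
For readers unfamiliar with the pattern, here is a minimal sketch of what the regenerated
test looks like (a hypothetical file with made-up names a/b/copy, not part of this commit):
Clang emits unoptimized IR, opt runs only mem2reg to promote allocas, and FileCheck matches
the result, so the expectations no longer depend on instcombine or any other optimizer pass.

// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

volatile vector signed char a, b;

// CHECK-LABEL: define void @copy() #0 {
// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @b, align 8
// CHECK:   store volatile <16 x i8> [[TMP0]], <16 x i8>* @a, align 8
// CHECK:   ret void
void copy(void) {
  a = b;
}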

Modified:
    cfe/trunk/test/CodeGen/zvector.c

Modified: cfe/trunk/test/CodeGen/zvector.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/zvector.c?rev=297189&r1=297188&r2=297189&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/zvector.c (original)
+++ cfe/trunk/test/CodeGen/zvector.c Tue Mar  7 13:24:54 2017
@@ -1,5 +1,4 @@
-// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector \
-// RUN:  -O -emit-llvm -o - -W -Wall -Werror %s | FileCheck %s
+// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector -emit-llvm -o - -W -Wall -Werror %s | opt -S -mem2reg | FileCheck %s
 
 volatile vector signed char sc, sc2;
 volatile vector unsigned char uc, uc2;
@@ -21,2778 +20,3349 @@ volatile vector double fd, fd2;
 
 volatile int cnt;
 
-void test_assign (void)
-{
-// CHECK-LABEL: test_assign
+// CHECK-LABEL: define void @test_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   store volatile <16 x i8> [[TMP0]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   store volatile <16 x i8> [[TMP1]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   store volatile <8 x i16> [[TMP2]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   store volatile <8 x i16> [[TMP3]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   store volatile <4 x i32> [[TMP4]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   store volatile <4 x i32> [[TMP5]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   store volatile <2 x i64> [[TMP6]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   store volatile <2 x i64> [[TMP7]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   store volatile <2 x double> [[TMP8]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_assign(void) {
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
   sc = sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
   uc = uc2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
   ss = ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
   us = us2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
   si = si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
   ui = ui2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
   sl = sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
   ul = ul2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
   fd = fd2;
 }
 
-void test_pos (void)
-{
-// CHECK-LABEL: test_pos
+// CHECK-LABEL: define void @test_pos() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   store volatile <16 x i8> [[TMP0]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   store volatile <16 x i8> [[TMP1]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   store volatile <8 x i16> [[TMP2]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   store volatile <8 x i16> [[TMP3]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   store volatile <4 x i32> [[TMP4]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   store volatile <4 x i32> [[TMP5]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   store volatile <2 x i64> [[TMP6]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   store volatile <2 x i64> [[TMP7]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   store volatile <2 x double> [[TMP8]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_pos(void) {
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc
   sc = +sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc
   uc = +uc2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss
   ss = +ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us
   us = +us2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si
   si = +si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui
   ui = +ui2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl
   sl = +sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul
   ul = +ul2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd
   fd = +fd2;
 }
 
-void test_neg (void)
-{
-// CHECK-LABEL: test_neg
+// CHECK-LABEL: define void @test_neg() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[SUB:%.*]] = sub <16 x i8> zeroinitializer, [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[SUB]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[SUB1:%.*]] = sub <8 x i16> zeroinitializer, [[TMP1]]
+// CHECK:   store volatile <8 x i16> [[SUB1]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[SUB2:%.*]] = sub <4 x i32> zeroinitializer, [[TMP2]]
+// CHECK:   store volatile <4 x i32> [[SUB2]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[SUB3:%.*]] = sub <2 x i64> zeroinitializer, [[TMP3]]
+// CHECK:   store volatile <2 x i64> [[SUB3]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[SUB4:%.*]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[TMP4]]
+// CHECK:   store volatile <2 x double> [[SUB4]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_neg(void) {
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = sub <16 x i8> zeroinitializer, [[VAL]]
   sc = -sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = sub <8 x i16> zeroinitializer, [[VAL]]
   ss = -ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = sub <4 x i32> zeroinitializer, [[VAL]]
   si = -si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = sub <2 x i64> zeroinitializer, [[VAL]]
   sl = -sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[VAL]]
   fd = -fd2;
 }
 
-void test_preinc (void)
-{
-// CHECK-LABEL: test_preinc
+// CHECK-LABEL: define void @test_preinc() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[INC:%.*]] = add <16 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK:   store volatile <16 x i8> [[INC]], <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[INC1:%.*]] = add <16 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK:   store volatile <16 x i8> [[INC1]], <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[INC2:%.*]] = add <8 x i16> [[TMP2]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK:   store volatile <8 x i16> [[INC2]], <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[INC3:%.*]] = add <8 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK:   store volatile <8 x i16> [[INC3]], <8 x i16>* @us2, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[INC4:%.*]] = add <4 x i32> [[TMP4]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK:   store volatile <4 x i32> [[INC4]], <4 x i32>* @si2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[INC5:%.*]] = add <4 x i32> [[TMP5]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK:   store volatile <4 x i32> [[INC5]], <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[INC6:%.*]] = add <2 x i64> [[TMP6]], <i64 1, i64 1>
+// CHECK:   store volatile <2 x i64> [[INC6]], <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[INC7:%.*]] = add <2 x i64> [[TMP7]], <i64 1, i64 1>
+// CHECK:   store volatile <2 x i64> [[INC7]], <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[INC8:%.*]] = fadd <2 x double> [[TMP8]], <double 1.000000e+00, double 1.000000e+00>
+// CHECK:   store volatile <2 x double> [[INC8]], <2 x double>* @fd2, align 8
+// CHECK:   ret void
+void test_preinc(void) {
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ++sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ++uc2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ++ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ++us2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
   ++si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
   ++ui2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
   ++sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
   ++ul2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
   ++fd2;
 }
 
-void test_postinc (void)
-{
-// CHECK-LABEL: test_postinc
+// CHECK-LABEL: define void @test_postinc() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[INC:%.*]] = add <16 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK:   store volatile <16 x i8> [[INC]], <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[INC1:%.*]] = add <16 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+// CHECK:   store volatile <16 x i8> [[INC1]], <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[INC2:%.*]] = add <8 x i16> [[TMP2]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK:   store volatile <8 x i16> [[INC2]], <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[INC3:%.*]] = add <8 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+// CHECK:   store volatile <8 x i16> [[INC3]], <8 x i16>* @us2, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[INC4:%.*]] = add <4 x i32> [[TMP4]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK:   store volatile <4 x i32> [[INC4]], <4 x i32>* @si2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[INC5:%.*]] = add <4 x i32> [[TMP5]], <i32 1, i32 1, i32 1, i32 1>
+// CHECK:   store volatile <4 x i32> [[INC5]], <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[INC6:%.*]] = add <2 x i64> [[TMP6]], <i64 1, i64 1>
+// CHECK:   store volatile <2 x i64> [[INC6]], <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[INC7:%.*]] = add <2 x i64> [[TMP7]], <i64 1, i64 1>
+// CHECK:   store volatile <2 x i64> [[INC7]], <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[INC8:%.*]] = fadd <2 x double> [[TMP8]], <double 1.000000e+00, double 1.000000e+00>
+// CHECK:   store volatile <2 x double> [[INC8]], <2 x double>* @fd2, align 8
+// CHECK:   ret void
+void test_postinc(void) {
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   sc2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   uc2++;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ss2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   us2++;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
   si2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 1, i32 1, i32 1, i32 1>
   ui2++;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
   sl2++;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 1, i64 1>
   ul2++;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double 1.000000e+00, double 1.000000e+00>
   fd2++;
 }
 
-void test_predec (void)
-{
-// CHECK-LABEL: test_predec
+// CHECK-LABEL: define void @test_predec() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[DEC:%.*]] = add <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[DEC]], <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[DEC1:%.*]] = add <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[DEC1]], <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[DEC2:%.*]] = add <8 x i16> [[TMP2]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[DEC2]], <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[DEC3:%.*]] = add <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[DEC3]], <8 x i16>* @us2, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[DEC4:%.*]] = add <4 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[DEC4]], <4 x i32>* @si2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[DEC5:%.*]] = add <4 x i32> [[TMP5]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[DEC5]], <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[DEC6:%.*]] = add <2 x i64> [[TMP6]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[DEC6]], <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[DEC7:%.*]] = add <2 x i64> [[TMP7]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[DEC7]], <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[DEC8:%.*]] = fadd <2 x double> [[TMP8]], <double -1.000000e+00, double -1.000000e+00>
+// CHECK:   store volatile <2 x double> [[DEC8]], <2 x double>* @fd2, align 8
+// CHECK:   ret void
+void test_predec(void) {
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   --sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   --uc2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   --ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   --us2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
   --si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
   --ui2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
   --sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
   --ul2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
   --fd2;
 }
 
-void test_postdec (void)
-{
-// CHECK-LABEL: test_postdec
+// CHECK-LABEL: define void @test_postdec() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[DEC:%.*]] = add <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[DEC]], <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[DEC1:%.*]] = add <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[DEC1]], <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[DEC2:%.*]] = add <8 x i16> [[TMP2]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[DEC2]], <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[DEC3:%.*]] = add <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[DEC3]], <8 x i16>* @us2, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[DEC4:%.*]] = add <4 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[DEC4]], <4 x i32>* @si2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[DEC5:%.*]] = add <4 x i32> [[TMP5]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[DEC5]], <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[DEC6:%.*]] = add <2 x i64> [[TMP6]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[DEC6]], <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[DEC7:%.*]] = add <2 x i64> [[TMP7]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[DEC7]], <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[DEC8:%.*]] = fadd <2 x double> [[TMP8]], <double -1.000000e+00, double -1.000000e+00>
+// CHECK:   store volatile <2 x double> [[DEC8]], <2 x double>* @fd2, align 8
+// CHECK:   ret void
+void test_postdec(void) {
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   sc2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   uc2--;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   ss2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   us2--;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
   si2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
   ui2--;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
   sl2--;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL]], <i64 -1, i64 -1>
   ul2--;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], <double -1.000000e+00, double -1.000000e+00>
   fd2--;
 }
 
-void test_add (void)
-{
-// CHECK-LABEL: test_add
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
+// CHECK-LABEL: define void @test_add() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[ADD:%.*]] = add <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[ADD]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[ADD1:%.*]] = add <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[ADD1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[ADD2:%.*]] = add <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   store volatile <16 x i8> [[ADD2]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[ADD3:%.*]] = add <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK:   store volatile <16 x i8> [[ADD3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[ADD4:%.*]] = add <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK:   store volatile <16 x i8> [[ADD4]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[ADD5:%.*]] = add <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK:   store volatile <16 x i8> [[ADD5]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[ADD6:%.*]] = add <8 x i16> [[TMP12]], [[TMP13]]
+// CHECK:   store volatile <8 x i16> [[ADD6]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[ADD7:%.*]] = add <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <8 x i16> [[ADD7]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[ADD8:%.*]] = add <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK:   store volatile <8 x i16> [[ADD8]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[ADD9:%.*]] = add <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK:   store volatile <8 x i16> [[ADD9]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[ADD10:%.*]] = add <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK:   store volatile <8 x i16> [[ADD10]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[ADD11:%.*]] = add <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK:   store volatile <8 x i16> [[ADD11]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[ADD12:%.*]] = add <4 x i32> [[TMP24]], [[TMP25]]
+// CHECK:   store volatile <4 x i32> [[ADD12]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[ADD13:%.*]] = add <4 x i32> [[TMP26]], [[TMP27]]
+// CHECK:   store volatile <4 x i32> [[ADD13]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[ADD14:%.*]] = add <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK:   store volatile <4 x i32> [[ADD14]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[ADD15:%.*]] = add <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK:   store volatile <4 x i32> [[ADD15]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[ADD16:%.*]] = add <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK:   store volatile <4 x i32> [[ADD16]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[ADD17:%.*]] = add <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK:   store volatile <4 x i32> [[ADD17]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[ADD18:%.*]] = add <2 x i64> [[TMP36]], [[TMP37]]
+// CHECK:   store volatile <2 x i64> [[ADD18]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[ADD19:%.*]] = add <2 x i64> [[TMP38]], [[TMP39]]
+// CHECK:   store volatile <2 x i64> [[ADD19]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP40:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[ADD20:%.*]] = add <2 x i64> [[TMP40]], [[TMP41]]
+// CHECK:   store volatile <2 x i64> [[ADD20]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[ADD21:%.*]] = add <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK:   store volatile <2 x i64> [[ADD21]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[ADD22:%.*]] = add <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK:   store volatile <2 x i64> [[ADD22]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[ADD23:%.*]] = add <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK:   store volatile <2 x i64> [[ADD23]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[ADD24:%.*]] = fadd <2 x double> [[TMP48]], [[TMP49]]
+// CHECK:   store volatile <2 x double> [[ADD24]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_add(void) {
+
   sc = sc + sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
   sc = sc + bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
   sc = bc + sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
   uc = uc + uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
   uc = uc + bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]]
   uc = bc + uc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
   ss = ss + ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
   ss = ss + bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
   ss = bs + ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
   us = us + us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
   us = us + bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]]
   us = bs + us2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
   si = si + si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
   si = si + bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
   si = bi + si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
   ui = ui + ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
   ui = ui + bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]]
   ui = bi + ui2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
   sl = sl + sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
   sl = sl + bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
   sl = bl + sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
   ul = ul + ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
   ul = ul + bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]]
   ul = bl + ul2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL1]], [[VAL2]]
   fd = fd + fd2;
 }
 
-void test_add_assign (void)
-{
-// CHECK-LABEL: test_add_assign
-
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_add_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[ADD:%.*]] = add <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[ADD]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[ADD1:%.*]] = add <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[ADD1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[ADD2:%.*]] = add <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK:   store volatile <16 x i8> [[ADD2]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[ADD3:%.*]] = add <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK:   store volatile <16 x i8> [[ADD3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[ADD4:%.*]] = add <8 x i16> [[TMP9]], [[TMP8]]
+// CHECK:   store volatile <8 x i16> [[ADD4]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[ADD5:%.*]] = add <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK:   store volatile <8 x i16> [[ADD5]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[ADD6:%.*]] = add <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK:   store volatile <8 x i16> [[ADD6]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[ADD7:%.*]] = add <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <8 x i16> [[ADD7]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[ADD8:%.*]] = add <4 x i32> [[TMP17]], [[TMP16]]
+// CHECK:   store volatile <4 x i32> [[ADD8]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[ADD9:%.*]] = add <4 x i32> [[TMP19]], [[TMP18]]
+// CHECK:   store volatile <4 x i32> [[ADD9]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[ADD10:%.*]] = add <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK:   store volatile <4 x i32> [[ADD10]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[ADD11:%.*]] = add <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK:   store volatile <4 x i32> [[ADD11]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[ADD12:%.*]] = add <2 x i64> [[TMP25]], [[TMP24]]
+// CHECK:   store volatile <2 x i64> [[ADD12]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[ADD13:%.*]] = add <2 x i64> [[TMP27]], [[TMP26]]
+// CHECK:   store volatile <2 x i64> [[ADD13]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[ADD14:%.*]] = add <2 x i64> [[TMP29]], [[TMP28]]
+// CHECK:   store volatile <2 x i64> [[ADD14]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[ADD15:%.*]] = add <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK:   store volatile <2 x i64> [[ADD15]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[ADD16:%.*]] = fadd <2 x double> [[TMP33]], [[TMP32]]
+// CHECK:   store volatile <2 x double> [[ADD16]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_add_assign(void) {
+
   sc += sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
   sc += bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
   uc += uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]]
   uc += bc2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
   ss += ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
   ss += bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
   us += us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]]
   us += bs2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
   si += si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
   si += bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
   ui += ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]]
   ui += bi2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
   sl += sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
   sl += bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
   ul += ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]]
   ul += bl2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: %{{.*}} = fadd <2 x double> [[VAL2]], [[VAL1]]
   fd += fd2;
 }
 
-void test_sub (void)
-{
-// CHECK-LABEL: test_sub
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_sub() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[SUB:%.*]] = sub <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[SUB]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[SUB1:%.*]] = sub <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[SUB1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[SUB2:%.*]] = sub <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   store volatile <16 x i8> [[SUB2]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[SUB3:%.*]] = sub <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK:   store volatile <16 x i8> [[SUB3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[SUB4:%.*]] = sub <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK:   store volatile <16 x i8> [[SUB4]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[SUB5:%.*]] = sub <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK:   store volatile <16 x i8> [[SUB5]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[SUB6:%.*]] = sub <8 x i16> [[TMP12]], [[TMP13]]
+// CHECK:   store volatile <8 x i16> [[SUB6]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[SUB7:%.*]] = sub <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <8 x i16> [[SUB7]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[SUB8:%.*]] = sub <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK:   store volatile <8 x i16> [[SUB8]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[SUB9:%.*]] = sub <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK:   store volatile <8 x i16> [[SUB9]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[SUB10:%.*]] = sub <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK:   store volatile <8 x i16> [[SUB10]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[SUB11:%.*]] = sub <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK:   store volatile <8 x i16> [[SUB11]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[SUB12:%.*]] = sub <4 x i32> [[TMP24]], [[TMP25]]
+// CHECK:   store volatile <4 x i32> [[SUB12]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[SUB13:%.*]] = sub <4 x i32> [[TMP26]], [[TMP27]]
+// CHECK:   store volatile <4 x i32> [[SUB13]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[SUB14:%.*]] = sub <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK:   store volatile <4 x i32> [[SUB14]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[SUB15:%.*]] = sub <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK:   store volatile <4 x i32> [[SUB15]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[SUB16:%.*]] = sub <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK:   store volatile <4 x i32> [[SUB16]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[SUB17:%.*]] = sub <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK:   store volatile <4 x i32> [[SUB17]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[SUB18:%.*]] = sub <2 x i64> [[TMP36]], [[TMP37]]
+// CHECK:   store volatile <2 x i64> [[SUB18]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[SUB19:%.*]] = sub <2 x i64> [[TMP38]], [[TMP39]]
+// CHECK:   store volatile <2 x i64> [[SUB19]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP40:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[SUB20:%.*]] = sub <2 x i64> [[TMP40]], [[TMP41]]
+// CHECK:   store volatile <2 x i64> [[SUB20]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[SUB21:%.*]] = sub <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK:   store volatile <2 x i64> [[SUB21]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[SUB22:%.*]] = sub <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK:   store volatile <2 x i64> [[SUB22]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[SUB23:%.*]] = sub <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK:   store volatile <2 x i64> [[SUB23]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[SUB24:%.*]] = fsub <2 x double> [[TMP48]], [[TMP49]]
+// CHECK:   store volatile <2 x double> [[SUB24]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_sub(void) {
+
   sc = sc - sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
   sc = sc - bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
   sc = bc - sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
   uc = uc - uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
   uc = uc - bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
   uc = bc - uc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   ss = ss - ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   ss = ss - bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   ss = bs - ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   us = us - us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   us = us - bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   us = bs - us2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   si = si - si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   si = si - bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   si = bi - si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   ui = ui - ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   ui = ui - bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   ui = bi - ui2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   sl = sl - sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   sl = sl - bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   sl = bl - sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   ul = ul - ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   ul = ul - bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   ul = bl - ul2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
   fd = fd - fd2;
 }
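
[The new checks above pin down the unoptimized shape of each subtraction: both operands are loaded in source order and feed an uncommuted sub/fsub, including the mixed bool/signed cases such as sc - bc2. A minimal standalone reduction of the pattern — file, variable, and function names here are hypothetical, not part of this patch — would look like:

    volatile vector signed char a, b;

    // CHECK-LABEL: define void @sub_demo()
    // CHECK: [[A:%.*]] = load volatile <16 x i8>, <16 x i8>* @a, align 8
    // CHECK: [[B:%.*]] = load volatile <16 x i8>, <16 x i8>* @b, align 8
    // CHECK: [[S:%.*]] = sub <16 x i8> [[A]], [[B]]
    // CHECK: store volatile <16 x i8> [[S]], <16 x i8>* @a, align 8
    void sub_demo(void) { a = a - b; }

Because nothing beyond alloca promotion has run, there is no folding or operand reordering for FileCheck to accidentally depend on.]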
 
-void test_sub_assign (void)
-{
-// CHECK-LABEL: test_sub_assign
-
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_sub_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SUB:%.*]] = sub <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[SUB]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SUB1:%.*]] = sub <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[SUB1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SUB2:%.*]] = sub <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK:   store volatile <16 x i8> [[SUB2]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SUB3:%.*]] = sub <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK:   store volatile <16 x i8> [[SUB3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SUB4:%.*]] = sub <8 x i16> [[TMP9]], [[TMP8]]
+// CHECK:   store volatile <8 x i16> [[SUB4]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SUB5:%.*]] = sub <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK:   store volatile <8 x i16> [[SUB5]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SUB6:%.*]] = sub <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK:   store volatile <8 x i16> [[SUB6]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SUB7:%.*]] = sub <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <8 x i16> [[SUB7]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SUB8:%.*]] = sub <4 x i32> [[TMP17]], [[TMP16]]
+// CHECK:   store volatile <4 x i32> [[SUB8]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SUB9:%.*]] = sub <4 x i32> [[TMP19]], [[TMP18]]
+// CHECK:   store volatile <4 x i32> [[SUB9]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SUB10:%.*]] = sub <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK:   store volatile <4 x i32> [[SUB10]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SUB11:%.*]] = sub <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK:   store volatile <4 x i32> [[SUB11]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SUB12:%.*]] = sub <2 x i64> [[TMP25]], [[TMP24]]
+// CHECK:   store volatile <2 x i64> [[SUB12]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SUB13:%.*]] = sub <2 x i64> [[TMP27]], [[TMP26]]
+// CHECK:   store volatile <2 x i64> [[SUB13]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SUB14:%.*]] = sub <2 x i64> [[TMP29]], [[TMP28]]
+// CHECK:   store volatile <2 x i64> [[SUB14]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SUB15:%.*]] = sub <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK:   store volatile <2 x i64> [[SUB15]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[SUB16:%.*]] = fsub <2 x double> [[TMP33]], [[TMP32]]
+// CHECK:   store volatile <2 x double> [[SUB16]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_sub_assign(void) {
+
   sc -= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
   sc -= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
   uc -= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]]
   uc -= bc2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   ss -= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   ss -= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   us -= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]]
   us -= bs2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   si -= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   si -= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   ui -= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]]
   ui -= bi2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   sl -= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   sl -= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   ul -= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]]
   ul -= bl2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]]
   fd -= fd2;
 }
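
[Note the load order in the checks above: for a compound assignment the right-hand side is evaluated first, so each *2 operand is loaded before the destination it is subtracted from. A hypothetical one-statement reduction showing the same ordering:

    volatile vector signed char a, b;

    // CHECK: [[B:%.*]] = load volatile <16 x i8>, <16 x i8>* @b, align 8
    // CHECK: [[A:%.*]] = load volatile <16 x i8>, <16 x i8>* @a, align 8
    // CHECK: [[S:%.*]] = sub <16 x i8> [[A]], [[B]]
    // CHECK: store volatile <16 x i8> [[S]], <16 x i8>* @a, align 8
    void sub_assign_demo(void) { a -= b; }
]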
 
-void test_mul (void)
-{
-// CHECK-LABEL: test_mul
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
+// CHECK-LABEL: define void @test_mul() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[MUL:%.*]] = mul <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[MUL]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[MUL1:%.*]] = mul <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[MUL1]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[MUL2:%.*]] = mul <8 x i16> [[TMP4]], [[TMP5]]
+// CHECK:   store volatile <8 x i16> [[MUL2]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[MUL3:%.*]] = mul <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK:   store volatile <8 x i16> [[MUL3]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[MUL4:%.*]] = mul <4 x i32> [[TMP8]], [[TMP9]]
+// CHECK:   store volatile <4 x i32> [[MUL4]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[MUL5:%.*]] = mul <4 x i32> [[TMP10]], [[TMP11]]
+// CHECK:   store volatile <4 x i32> [[MUL5]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[MUL6:%.*]] = mul <2 x i64> [[TMP12]], [[TMP13]]
+// CHECK:   store volatile <2 x i64> [[MUL6]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[MUL7:%.*]] = mul <2 x i64> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <2 x i64> [[MUL7]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[MUL8:%.*]] = fmul <2 x double> [[TMP16]], [[TMP17]]
+// CHECK:   store volatile <2 x double> [[MUL8]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_mul(void) {
+
   sc = sc * sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]]
   uc = uc * uc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
   ss = ss * ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]]
   us = us * us2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
   si = si * si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]]
   ui = ui * ui2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
   sl = sl * sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]]
   ul = ul * ul2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fmul <2 x double> [[VAL1]], [[VAL2]]
   fd = fd * fd2;
 }
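
[mul and fmul are commutative, so an optimizer is free to swap their operands; the operand order of optimized output is a canonicalization detail the front-end test should not encode. Checking the unoptimized form keeps the operands in source order, as a hypothetical reduction shows:

    volatile vector signed short a, b;

    // CHECK: [[A:%.*]] = load volatile <8 x i16>, <8 x i16>* @a, align 8
    // CHECK: [[B:%.*]] = load volatile <8 x i16>, <8 x i16>* @b, align 8
    // CHECK: mul <8 x i16> [[A]], [[B]]
    void mul_demo(void) { a = a * b; }
]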
 
-void test_mul_assign (void)
-{
-// CHECK-LABEL: test_mul_assign
-
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_mul_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[MUL:%.*]] = mul <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[MUL]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[MUL1:%.*]] = mul <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[MUL1]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[MUL2:%.*]] = mul <8 x i16> [[TMP5]], [[TMP4]]
+// CHECK:   store volatile <8 x i16> [[MUL2]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[MUL3:%.*]] = mul <8 x i16> [[TMP7]], [[TMP6]]
+// CHECK:   store volatile <8 x i16> [[MUL3]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[MUL4:%.*]] = mul <4 x i32> [[TMP9]], [[TMP8]]
+// CHECK:   store volatile <4 x i32> [[MUL4]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[MUL5:%.*]] = mul <4 x i32> [[TMP11]], [[TMP10]]
+// CHECK:   store volatile <4 x i32> [[MUL5]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[MUL6:%.*]] = mul <2 x i64> [[TMP13]], [[TMP12]]
+// CHECK:   store volatile <2 x i64> [[MUL6]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[MUL7:%.*]] = mul <2 x i64> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <2 x i64> [[MUL7]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[MUL8:%.*]] = fmul <2 x double> [[TMP17]], [[TMP16]]
+// CHECK:   store volatile <2 x double> [[MUL8]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_mul_assign(void) {
+
   sc *= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]]
   uc *= uc2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
   ss *= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]]
   us *= us2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
   si *= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]]
   ui *= ui2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
   sl *= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]]
   ul *= ul2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: %{{.*}} = fmul <2 x double> [[VAL2]], [[VAL1]]
   fd *= fd2;
 }
 
-void test_div (void)
-{
-// CHECK-LABEL: test_div
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_div() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[DIV:%.*]] = sdiv <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[DIV]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[DIV1:%.*]] = udiv <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[DIV1]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[DIV2:%.*]] = sdiv <8 x i16> [[TMP4]], [[TMP5]]
+// CHECK:   store volatile <8 x i16> [[DIV2]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[DIV3:%.*]] = udiv <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK:   store volatile <8 x i16> [[DIV3]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[DIV4:%.*]] = sdiv <4 x i32> [[TMP8]], [[TMP9]]
+// CHECK:   store volatile <4 x i32> [[DIV4]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[DIV5:%.*]] = udiv <4 x i32> [[TMP10]], [[TMP11]]
+// CHECK:   store volatile <4 x i32> [[DIV5]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[DIV6:%.*]] = sdiv <2 x i64> [[TMP12]], [[TMP13]]
+// CHECK:   store volatile <2 x i64> [[DIV6]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[DIV7:%.*]] = udiv <2 x i64> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <2 x i64> [[DIV7]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[DIV8:%.*]] = fdiv <2 x double> [[TMP16]], [[TMP17]]
+// CHECK:   store volatile <2 x double> [[DIV8]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_div(void) {
+
   sc = sc / sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
   uc = uc / uc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
   ss = ss / ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
   us = us / us2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
   si = si / si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
   ui = ui / ui2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
   sl = sl / sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
   ul = ul / ul2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
   fd = fd / fd2;
 }
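
[Division is where element signedness first changes the opcode: signed element types lower to sdiv, unsigned to udiv, and double to fdiv. A hypothetical reduction of the signed/unsigned pair:

    volatile vector signed int a, a2;
    volatile vector unsigned int b, b2;

    // CHECK-LABEL: define void @div_demo()
    // CHECK: sdiv <4 x i32>
    // CHECK: udiv <4 x i32>
    void div_demo(void) {
      a = a / a2;
      b = b / b2;
    }
]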
 
-void test_div_assign (void)
-{
-// CHECK-LABEL: test_div_assign
-
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_div_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[DIV:%.*]] = sdiv <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[DIV]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[DIV1:%.*]] = udiv <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[DIV1]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[DIV2:%.*]] = sdiv <8 x i16> [[TMP5]], [[TMP4]]
+// CHECK:   store volatile <8 x i16> [[DIV2]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[DIV3:%.*]] = udiv <8 x i16> [[TMP7]], [[TMP6]]
+// CHECK:   store volatile <8 x i16> [[DIV3]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[DIV4:%.*]] = sdiv <4 x i32> [[TMP9]], [[TMP8]]
+// CHECK:   store volatile <4 x i32> [[DIV4]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[DIV5:%.*]] = udiv <4 x i32> [[TMP11]], [[TMP10]]
+// CHECK:   store volatile <4 x i32> [[DIV5]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[DIV6:%.*]] = sdiv <2 x i64> [[TMP13]], [[TMP12]]
+// CHECK:   store volatile <2 x i64> [[DIV6]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[DIV7:%.*]] = udiv <2 x i64> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <2 x i64> [[DIV7]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[DIV8:%.*]] = fdiv <2 x double> [[TMP17]], [[TMP16]]
+// CHECK:   store volatile <2 x double> [[DIV8]], <2 x double>* @fd, align 8
+// CHECK:   ret void
+void test_div_assign(void) {
+
   sc /= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]]
   uc /= uc2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]]
   ss /= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]]
   us /= us2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]]
   si /= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]]
   ui /= ui2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]]
   sl /= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]]
   ul /= ul2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]]
   fd /= fd2;
 }
 
-void test_rem (void)
-{
-// CHECK-LABEL: test_rem
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_rem() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[REM:%.*]] = srem <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[REM]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[REM1:%.*]] = urem <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[REM1]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[REM2:%.*]] = srem <8 x i16> [[TMP4]], [[TMP5]]
+// CHECK:   store volatile <8 x i16> [[REM2]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[REM3:%.*]] = urem <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK:   store volatile <8 x i16> [[REM3]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[REM4:%.*]] = srem <4 x i32> [[TMP8]], [[TMP9]]
+// CHECK:   store volatile <4 x i32> [[REM4]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[REM5:%.*]] = urem <4 x i32> [[TMP10]], [[TMP11]]
+// CHECK:   store volatile <4 x i32> [[REM5]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[REM6:%.*]] = srem <2 x i64> [[TMP12]], [[TMP13]]
+// CHECK:   store volatile <2 x i64> [[REM6]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[REM7:%.*]] = urem <2 x i64> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <2 x i64> [[REM7]], <2 x i64>* @ul, align 8
+// CHECK:   ret void
+void test_rem(void) {
+
   sc = sc % sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
   uc = uc % uc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
   ss = ss % ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
   us = us % us2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
   si = si % si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
   ui = ui % ui2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
   sl = sl % sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
   ul = ul % ul2;
 }
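
[Remainder follows the same signedness split (srem vs. urem). There is deliberately no fd case above: C's % operator is not defined for floating types, so no frem is ever generated. Hypothetical reduction:

    volatile vector signed long long a, a2;
    volatile vector unsigned long long b, b2;

    // CHECK: srem <2 x i64>
    // CHECK: urem <2 x i64>
    void rem_demo(void) {
      a = a % a2;
      b = b % b2;
    }
]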
 
-void test_rem_assign (void)
-{
-// CHECK-LABEL: test_rem_assign
-
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_rem_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[REM:%.*]] = srem <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[REM]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[REM1:%.*]] = urem <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[REM1]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[REM2:%.*]] = srem <8 x i16> [[TMP5]], [[TMP4]]
+// CHECK:   store volatile <8 x i16> [[REM2]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[REM3:%.*]] = urem <8 x i16> [[TMP7]], [[TMP6]]
+// CHECK:   store volatile <8 x i16> [[REM3]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[REM4:%.*]] = srem <4 x i32> [[TMP9]], [[TMP8]]
+// CHECK:   store volatile <4 x i32> [[REM4]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[REM5:%.*]] = urem <4 x i32> [[TMP11]], [[TMP10]]
+// CHECK:   store volatile <4 x i32> [[REM5]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[REM6:%.*]] = srem <2 x i64> [[TMP13]], [[TMP12]]
+// CHECK:   store volatile <2 x i64> [[REM6]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[REM7:%.*]] = urem <2 x i64> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <2 x i64> [[REM7]], <2 x i64>* @ul, align 8
+// CHECK:   ret void
+void test_rem_assign(void) {
+
   sc %= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]]
   uc %= uc2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]]
   ss %= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]]
   us %= us2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]]
   si %= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]]
   ui %= ui2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]]
   sl %= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]]
   ul %= ul2;
 }
 
-void test_not (void)
-{
-// CHECK-LABEL: test_not
+// CHECK-LABEL: define void @test_not() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[NEG:%.*]] = xor <16 x i8> [[TMP0]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[NEG]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[NEG1:%.*]] = xor <16 x i8> [[TMP1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[NEG1]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[NEG2:%.*]] = xor <16 x i8> [[TMP2]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK:   store volatile <16 x i8> [[NEG2]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[NEG3:%.*]] = xor <8 x i16> [[TMP3]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[NEG3]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[NEG4:%.*]] = xor <8 x i16> [[TMP4]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[NEG4]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[NEG5:%.*]] = xor <8 x i16> [[TMP5]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK:   store volatile <8 x i16> [[NEG5]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[NEG6:%.*]] = xor <4 x i32> [[TMP6]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[NEG6]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[NEG7:%.*]] = xor <4 x i32> [[TMP7]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[NEG7]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[NEG8:%.*]] = xor <4 x i32> [[TMP8]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK:   store volatile <4 x i32> [[NEG8]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[NEG9:%.*]] = xor <2 x i64> [[TMP9]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[NEG9]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[NEG10:%.*]] = xor <2 x i64> [[TMP10]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[NEG10]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[NEG11:%.*]] = xor <2 x i64> [[TMP11]], <i64 -1, i64 -1>
+// CHECK:   store volatile <2 x i64> [[NEG11]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_not(void) {
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   sc = ~sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   uc = ~uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   bc = ~bc2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   ss = ~ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   us = ~us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   bs = ~bs2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
   si = ~si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
   ui = ~ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], <i32 -1, i32 -1, i32 -1, i32 -1>
   bi = ~bi2;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
   sl = ~sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
   ul = ~ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], <i64 -1, i64 -1>
   bl = ~bl2;
 }
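
[Bitwise complement has no dedicated IR instruction; as the checks above show, ~x is lowered to an xor with an all-ones splat of the element type. Hypothetical reduction:

    volatile vector unsigned int a, a2;

    // CHECK: [[T:%.*]] = load volatile <4 x i32>, <4 x i32>* @a2, align 8
    // CHECK: xor <4 x i32> [[T]], <i32 -1, i32 -1, i32 -1, i32 -1>
    void not_demo(void) { a = ~a2; }
]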
 
-void test_and (void)
-{
-// CHECK-LABEL: test_and
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
+// CHECK-LABEL: define void @test_and() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[AND:%.*]] = and <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[AND]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[AND1:%.*]] = and <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[AND1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[AND2:%.*]] = and <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   store volatile <16 x i8> [[AND2]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[AND3:%.*]] = and <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK:   store volatile <16 x i8> [[AND3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[AND4:%.*]] = and <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK:   store volatile <16 x i8> [[AND4]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[AND5:%.*]] = and <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK:   store volatile <16 x i8> [[AND5]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[AND6:%.*]] = and <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK:   store volatile <16 x i8> [[AND6]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[AND7:%.*]] = and <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <8 x i16> [[AND7]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[AND8:%.*]] = and <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK:   store volatile <8 x i16> [[AND8]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[AND9:%.*]] = and <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK:   store volatile <8 x i16> [[AND9]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[AND10:%.*]] = and <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK:   store volatile <8 x i16> [[AND10]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[AND11:%.*]] = and <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK:   store volatile <8 x i16> [[AND11]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[AND12:%.*]] = and <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK:   store volatile <8 x i16> [[AND12]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[AND13:%.*]] = and <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK:   store volatile <8 x i16> [[AND13]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[AND14:%.*]] = and <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK:   store volatile <4 x i32> [[AND14]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[AND15:%.*]] = and <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK:   store volatile <4 x i32> [[AND15]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[AND16:%.*]] = and <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK:   store volatile <4 x i32> [[AND16]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[AND17:%.*]] = and <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK:   store volatile <4 x i32> [[AND17]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[AND18:%.*]] = and <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK:   store volatile <4 x i32> [[AND18]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[AND19:%.*]] = and <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK:   store volatile <4 x i32> [[AND19]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[AND20:%.*]] = and <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK:   store volatile <4 x i32> [[AND20]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[AND21:%.*]] = and <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK:   store volatile <2 x i64> [[AND21]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[AND22:%.*]] = and <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK:   store volatile <2 x i64> [[AND22]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[AND23:%.*]] = and <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK:   store volatile <2 x i64> [[AND23]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[AND24:%.*]] = and <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK:   store volatile <2 x i64> [[AND24]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[AND25:%.*]] = and <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK:   store volatile <2 x i64> [[AND25]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[AND26:%.*]] = and <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK:   store volatile <2 x i64> [[AND26]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[AND27:%.*]] = and <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK:   store volatile <2 x i64> [[AND27]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_and(void) {
+
   sc = sc & sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
   sc = sc & bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
   sc = bc & sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
   uc = uc & uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
   uc = uc & bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
   uc = bc & uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]]
   bc = bc & bc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
   ss = ss & ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
   ss = ss & bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
   ss = bs & ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
   us = us & us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
   us = us & bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
   us = bs & us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]]
   bs = bs & bs2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
   si = si & si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
   si = si & bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
   si = bi & si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
   ui = ui & ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
   ui = ui & bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
   ui = bi & ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]]
   bi = bi & bi2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
   sl = sl & sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
   sl = sl & bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
   sl = bl & sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
   ul = ul & ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
   ul = ul & bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
   ul = bl & ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]]
   bl = bl & bl2;
 }
 
-void test_and_assign (void)
-{
-// CHECK-LABEL: test_and_assign
-
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_and_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[AND:%.*]] = and <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[AND]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[AND1:%.*]] = and <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[AND1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[AND2:%.*]] = and <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK:   store volatile <16 x i8> [[AND2]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[AND3:%.*]] = and <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK:   store volatile <16 x i8> [[AND3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[AND4:%.*]] = and <16 x i8> [[TMP9]], [[TMP8]]
+// CHECK:   store volatile <16 x i8> [[AND4]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[AND5:%.*]] = and <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK:   store volatile <8 x i16> [[AND5]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[AND6:%.*]] = and <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK:   store volatile <8 x i16> [[AND6]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[AND7:%.*]] = and <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <8 x i16> [[AND7]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[AND8:%.*]] = and <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK:   store volatile <8 x i16> [[AND8]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[AND9:%.*]] = and <8 x i16> [[TMP19]], [[TMP18]]
+// CHECK:   store volatile <8 x i16> [[AND9]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[AND10:%.*]] = and <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK:   store volatile <4 x i32> [[AND10]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[AND11:%.*]] = and <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK:   store volatile <4 x i32> [[AND11]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[AND12:%.*]] = and <4 x i32> [[TMP25]], [[TMP24]]
+// CHECK:   store volatile <4 x i32> [[AND12]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[AND13:%.*]] = and <4 x i32> [[TMP27]], [[TMP26]]
+// CHECK:   store volatile <4 x i32> [[AND13]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[AND14:%.*]] = and <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK:   store volatile <4 x i32> [[AND14]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[AND15:%.*]] = and <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK:   store volatile <2 x i64> [[AND15]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[AND16:%.*]] = and <2 x i64> [[TMP33]], [[TMP32]]
+// CHECK:   store volatile <2 x i64> [[AND16]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[AND17:%.*]] = and <2 x i64> [[TMP35]], [[TMP34]]
+// CHECK:   store volatile <2 x i64> [[AND17]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[AND18:%.*]] = and <2 x i64> [[TMP37]], [[TMP36]]
+// CHECK:   store volatile <2 x i64> [[AND18]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[AND19:%.*]] = and <2 x i64> [[TMP39]], [[TMP38]]
+// CHECK:   store volatile <2 x i64> [[AND19]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_and_assign(void) {
+
   sc &= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
   sc &= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
   uc &= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
   uc &= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]]
   bc &= bc2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
   ss &= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
   ss &= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
   us &= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
   us &= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]]
   bs &= bs2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
   si &= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
   si &= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
   ui &= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
   ui &= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]]
   bi &= bi2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
   sl &= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
   sl &= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
   ul &= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
   ul &= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]]
   bl &= bl2;
 }
 
-void test_or (void)
-{
-// CHECK-LABEL: test_or
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
+// CHECK-LABEL: define void @test_or() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[OR:%.*]] = or <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[OR]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[OR1:%.*]] = or <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[OR1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[OR2:%.*]] = or <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   store volatile <16 x i8> [[OR2]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[OR3:%.*]] = or <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK:   store volatile <16 x i8> [[OR3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[OR4:%.*]] = or <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK:   store volatile <16 x i8> [[OR4]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[OR5:%.*]] = or <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK:   store volatile <16 x i8> [[OR5]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[OR6:%.*]] = or <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK:   store volatile <16 x i8> [[OR6]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[OR7:%.*]] = or <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <8 x i16> [[OR7]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[OR8:%.*]] = or <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK:   store volatile <8 x i16> [[OR8]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[OR9:%.*]] = or <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK:   store volatile <8 x i16> [[OR9]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[OR10:%.*]] = or <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK:   store volatile <8 x i16> [[OR10]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[OR11:%.*]] = or <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK:   store volatile <8 x i16> [[OR11]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[OR12:%.*]] = or <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK:   store volatile <8 x i16> [[OR12]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[OR13:%.*]] = or <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK:   store volatile <8 x i16> [[OR13]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[OR14:%.*]] = or <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK:   store volatile <4 x i32> [[OR14]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[OR15:%.*]] = or <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK:   store volatile <4 x i32> [[OR15]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[OR16:%.*]] = or <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK:   store volatile <4 x i32> [[OR16]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[OR17:%.*]] = or <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK:   store volatile <4 x i32> [[OR17]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[OR18:%.*]] = or <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK:   store volatile <4 x i32> [[OR18]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[OR19:%.*]] = or <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK:   store volatile <4 x i32> [[OR19]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[OR20:%.*]] = or <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK:   store volatile <4 x i32> [[OR20]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[OR21:%.*]] = or <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK:   store volatile <2 x i64> [[OR21]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[OR22:%.*]] = or <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK:   store volatile <2 x i64> [[OR22]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[OR23:%.*]] = or <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK:   store volatile <2 x i64> [[OR23]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[OR24:%.*]] = or <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK:   store volatile <2 x i64> [[OR24]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[OR25:%.*]] = or <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK:   store volatile <2 x i64> [[OR25]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[OR26:%.*]] = or <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK:   store volatile <2 x i64> [[OR26]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[OR27:%.*]] = or <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK:   store volatile <2 x i64> [[OR27]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_or(void) {
+
   sc = sc | sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
   sc = sc | bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
   sc = bc | sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
   uc = uc | uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
   uc = uc | bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
   uc = bc | uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]]
   bc = bc | bc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
   ss = ss | ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
   ss = ss | bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
   ss = bs | ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
   us = us | us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
   us = us | bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
   us = bs | us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]]
   bs = bs | bs2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
   si = si | si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
   si = si | bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
   si = bi | si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
   ui = ui | ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
   ui = ui | bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
   ui = bi | ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]]
   bi = bi | bi2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
   sl = sl | sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
   sl = sl | bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
   sl = bl | sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
   ul = ul | ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
   ul = ul | bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
   ul = bl | ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]]
   bl = bl | bl2;
 }
 
-void test_or_assign (void)
-{
-// CHECK-LABEL: test_or_assign
-
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_or_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[OR:%.*]] = or <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[OR]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[OR1:%.*]] = or <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[OR1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[OR2:%.*]] = or <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK:   store volatile <16 x i8> [[OR2]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[OR3:%.*]] = or <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK:   store volatile <16 x i8> [[OR3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[OR4:%.*]] = or <16 x i8> [[TMP9]], [[TMP8]]
+// CHECK:   store volatile <16 x i8> [[OR4]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[OR5:%.*]] = or <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK:   store volatile <8 x i16> [[OR5]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[OR6:%.*]] = or <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK:   store volatile <8 x i16> [[OR6]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[OR7:%.*]] = or <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <8 x i16> [[OR7]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[OR8:%.*]] = or <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK:   store volatile <8 x i16> [[OR8]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[OR9:%.*]] = or <8 x i16> [[TMP19]], [[TMP18]]
+// CHECK:   store volatile <8 x i16> [[OR9]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[OR10:%.*]] = or <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK:   store volatile <4 x i32> [[OR10]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[OR11:%.*]] = or <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK:   store volatile <4 x i32> [[OR11]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[OR12:%.*]] = or <4 x i32> [[TMP25]], [[TMP24]]
+// CHECK:   store volatile <4 x i32> [[OR12]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[OR13:%.*]] = or <4 x i32> [[TMP27]], [[TMP26]]
+// CHECK:   store volatile <4 x i32> [[OR13]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[OR14:%.*]] = or <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK:   store volatile <4 x i32> [[OR14]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[OR15:%.*]] = or <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK:   store volatile <2 x i64> [[OR15]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[OR16:%.*]] = or <2 x i64> [[TMP33]], [[TMP32]]
+// CHECK:   store volatile <2 x i64> [[OR16]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[OR17:%.*]] = or <2 x i64> [[TMP35]], [[TMP34]]
+// CHECK:   store volatile <2 x i64> [[OR17]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[OR18:%.*]] = or <2 x i64> [[TMP37]], [[TMP36]]
+// CHECK:   store volatile <2 x i64> [[OR18]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[OR19:%.*]] = or <2 x i64> [[TMP39]], [[TMP38]]
+// CHECK:   store volatile <2 x i64> [[OR19]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_or_assign(void) {
+
   sc |= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
   sc |= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
   uc |= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
   uc |= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]]
   bc |= bc2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
   ss |= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
   ss |= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
   us |= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
   us |= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]]
   bs |= bs2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
   si |= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
   si |= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
   ui |= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
   ui |= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]]
   bi |= bi2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
   sl |= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
   sl |= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
   ul |= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
   ul |= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]]
   bl |= bl2;
 }
 
-void test_xor (void)
-{
-// CHECK-LABEL: test_xor
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
+// CHECK-LABEL: define void @test_xor() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[XOR:%.*]] = xor <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[XOR]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[XOR1:%.*]] = xor <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[XOR1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[XOR2:%.*]] = xor <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   store volatile <16 x i8> [[XOR2]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[XOR3:%.*]] = xor <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK:   store volatile <16 x i8> [[XOR3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[XOR4:%.*]] = xor <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK:   store volatile <16 x i8> [[XOR4]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[XOR5:%.*]] = xor <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK:   store volatile <16 x i8> [[XOR5]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[XOR6:%.*]] = xor <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK:   store volatile <16 x i8> [[XOR6]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[XOR7:%.*]] = xor <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <8 x i16> [[XOR7]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[XOR8:%.*]] = xor <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK:   store volatile <8 x i16> [[XOR8]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[XOR9:%.*]] = xor <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK:   store volatile <8 x i16> [[XOR9]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[XOR10:%.*]] = xor <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK:   store volatile <8 x i16> [[XOR10]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[XOR11:%.*]] = xor <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK:   store volatile <8 x i16> [[XOR11]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[XOR12:%.*]] = xor <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK:   store volatile <8 x i16> [[XOR12]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[XOR13:%.*]] = xor <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK:   store volatile <8 x i16> [[XOR13]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[XOR14:%.*]] = xor <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK:   store volatile <4 x i32> [[XOR14]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[XOR15:%.*]] = xor <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK:   store volatile <4 x i32> [[XOR15]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[XOR16:%.*]] = xor <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK:   store volatile <4 x i32> [[XOR16]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[XOR17:%.*]] = xor <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK:   store volatile <4 x i32> [[XOR17]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[XOR18:%.*]] = xor <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK:   store volatile <4 x i32> [[XOR18]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[XOR19:%.*]] = xor <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK:   store volatile <4 x i32> [[XOR19]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[XOR20:%.*]] = xor <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK:   store volatile <4 x i32> [[XOR20]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[XOR21:%.*]] = xor <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK:   store volatile <2 x i64> [[XOR21]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[XOR22:%.*]] = xor <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK:   store volatile <2 x i64> [[XOR22]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[XOR23:%.*]] = xor <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK:   store volatile <2 x i64> [[XOR23]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[XOR24:%.*]] = xor <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK:   store volatile <2 x i64> [[XOR24]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[XOR25:%.*]] = xor <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK:   store volatile <2 x i64> [[XOR25]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[XOR26:%.*]] = xor <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK:   store volatile <2 x i64> [[XOR26]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[XOR27:%.*]] = xor <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK:   store volatile <2 x i64> [[XOR27]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_xor(void) {
+
   sc = sc ^ sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
   sc = sc ^ bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
   sc = bc ^ sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
   uc = uc ^ uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
   uc = uc ^ bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
   uc = bc ^ uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]]
   bc = bc ^ bc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
   ss = ss ^ ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
   ss = ss ^ bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
   ss = bs ^ ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
   us = us ^ us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
   us = us ^ bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
   us = bs ^ us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]]
   bs = bs ^ bs2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
   si = si ^ si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
   si = si ^ bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
   si = bi ^ si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
   ui = ui ^ ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
   ui = ui ^ bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
   ui = bi ^ ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]]
   bi = bi ^ bi2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
   sl = sl ^ sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
   sl = sl ^ bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
   sl = bl ^ sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
   ul = ul ^ ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
   ul = ul ^ bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
   ul = bl ^ ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]]
   bl = bl ^ bl2;
 }
 
-void test_xor_assign (void)
-{
-// CHECK-LABEL: test_xor_assign
-
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
+// CHECK-LABEL: define void @test_xor_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[XOR:%.*]] = xor <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[XOR]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[XOR1:%.*]] = xor <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[XOR1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[XOR2:%.*]] = xor <16 x i8> [[TMP5]], [[TMP4]]
+// CHECK:   store volatile <16 x i8> [[XOR2]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[XOR3:%.*]] = xor <16 x i8> [[TMP7]], [[TMP6]]
+// CHECK:   store volatile <16 x i8> [[XOR3]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[XOR4:%.*]] = xor <16 x i8> [[TMP9]], [[TMP8]]
+// CHECK:   store volatile <16 x i8> [[XOR4]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[XOR5:%.*]] = xor <8 x i16> [[TMP11]], [[TMP10]]
+// CHECK:   store volatile <8 x i16> [[XOR5]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[XOR6:%.*]] = xor <8 x i16> [[TMP13]], [[TMP12]]
+// CHECK:   store volatile <8 x i16> [[XOR6]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[XOR7:%.*]] = xor <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <8 x i16> [[XOR7]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[XOR8:%.*]] = xor <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK:   store volatile <8 x i16> [[XOR8]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[XOR9:%.*]] = xor <8 x i16> [[TMP19]], [[TMP18]]
+// CHECK:   store volatile <8 x i16> [[XOR9]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[XOR10:%.*]] = xor <4 x i32> [[TMP21]], [[TMP20]]
+// CHECK:   store volatile <4 x i32> [[XOR10]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[XOR11:%.*]] = xor <4 x i32> [[TMP23]], [[TMP22]]
+// CHECK:   store volatile <4 x i32> [[XOR11]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[XOR12:%.*]] = xor <4 x i32> [[TMP25]], [[TMP24]]
+// CHECK:   store volatile <4 x i32> [[XOR12]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[XOR13:%.*]] = xor <4 x i32> [[TMP27]], [[TMP26]]
+// CHECK:   store volatile <4 x i32> [[XOR13]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[XOR14:%.*]] = xor <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK:   store volatile <4 x i32> [[XOR14]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[XOR15:%.*]] = xor <2 x i64> [[TMP31]], [[TMP30]]
+// CHECK:   store volatile <2 x i64> [[XOR15]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[XOR16:%.*]] = xor <2 x i64> [[TMP33]], [[TMP32]]
+// CHECK:   store volatile <2 x i64> [[XOR16]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[XOR17:%.*]] = xor <2 x i64> [[TMP35]], [[TMP34]]
+// CHECK:   store volatile <2 x i64> [[XOR17]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[XOR18:%.*]] = xor <2 x i64> [[TMP37]], [[TMP36]]
+// CHECK:   store volatile <2 x i64> [[XOR18]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[XOR19:%.*]] = xor <2 x i64> [[TMP39]], [[TMP38]]
+// CHECK:   store volatile <2 x i64> [[XOR19]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_xor_assign(void) {
+
   sc ^= sc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
   sc ^= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
   uc ^= uc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
   uc ^= bc2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]]
   bc ^= bc2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
   ss ^= ss2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
   ss ^= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
   us ^= us2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
   us ^= bs2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]]
   bs ^= bs2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
   si ^= si2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
   si ^= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
   ui ^= ui2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
   ui ^= bi2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]]
   bi ^= bi2;
 
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
   sl ^= sl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
   sl ^= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
   ul ^= ul2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
   ul ^= bl2;
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]]
   bl ^= bl2;
 }
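
For reference, each vector compound assignment above lowers to a fixed
load/load/xor/store shape: the right-hand side is loaded first, then the
destination, and the result is stored back.  With only -mem2reg run over
the IR, nothing folds that away.  A minimal sketch of the same pattern,
assuming the generic vector_size extension as a stand-in for the
-fzvector types (not the spelling the test itself uses):

    /* Hypothetical stand-in for "vector signed char". */
    typedef signed char vsc __attribute__((vector_size(16)));

    volatile vsc a, b;

    void xor_assign_sketch(void) {
      a ^= b; /* load @b, load @a, xor, store @a */
    }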
 
-void test_sl (void)
-{
-// CHECK-LABEL: test_sl
-
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+// CHECK-LABEL: define void @test_sl() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[SHL:%.*]] = shl <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[SHL]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[SHL1:%.*]] = shl <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[SHL1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0
+// CHECK:   [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK:   [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
+// CHECK:   [[SHL2:%.*]] = shl <16 x i8> [[TMP4]], [[SH_PROM]]
+// CHECK:   store volatile <16 x i8> [[SHL2]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SHL3:%.*]] = shl <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK:   store volatile <16 x i8> [[SHL3]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[SHL4:%.*]] = shl <16 x i8> [[TMP7]], [[TMP8]]
+// CHECK:   store volatile <16 x i8> [[SHL4]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[SHL5:%.*]] = shl <16 x i8> [[TMP9]], [[TMP10]]
+// CHECK:   store volatile <16 x i8> [[SHL5]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP12]], i32 0
+// CHECK:   [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK:   [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8>
+// CHECK:   [[SHL9:%.*]] = shl <16 x i8> [[TMP11]], [[SH_PROM8]]
+// CHECK:   store volatile <16 x i8> [[SHL9]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SHL10:%.*]] = shl <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK:   store volatile <16 x i8> [[SHL10]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[SHL11:%.*]] = shl <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <8 x i16> [[SHL11]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[SHL12:%.*]] = shl <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK:   store volatile <8 x i16> [[SHL12]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP19]], i32 0
+// CHECK:   [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK:   [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16>
+// CHECK:   [[SHL16:%.*]] = shl <8 x i16> [[TMP18]], [[SH_PROM15]]
+// CHECK:   store volatile <8 x i16> [[SHL16]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SHL17:%.*]] = shl <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK:   store volatile <8 x i16> [[SHL17]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[SHL18:%.*]] = shl <8 x i16> [[TMP21]], [[TMP22]]
+// CHECK:   store volatile <8 x i16> [[SHL18]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[SHL19:%.*]] = shl <8 x i16> [[TMP23]], [[TMP24]]
+// CHECK:   store volatile <8 x i16> [[SHL19]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP26]], i32 0
+// CHECK:   [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK:   [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16>
+// CHECK:   [[SHL23:%.*]] = shl <8 x i16> [[TMP25]], [[SH_PROM22]]
+// CHECK:   store volatile <8 x i16> [[SHL23]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SHL24:%.*]] = shl <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK:   store volatile <8 x i16> [[SHL24]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[SHL25:%.*]] = shl <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK:   store volatile <4 x i32> [[SHL25]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[SHL26:%.*]] = shl <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK:   store volatile <4 x i32> [[SHL26]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP33]], i32 0
+// CHECK:   [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK:   [[SHL29:%.*]] = shl <4 x i32> [[TMP32]], [[SPLAT_SPLAT28]]
+// CHECK:   store volatile <4 x i32> [[SHL29]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHL30:%.*]] = shl <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK:   store volatile <4 x i32> [[SHL30]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[SHL31:%.*]] = shl <4 x i32> [[TMP35]], [[TMP36]]
+// CHECK:   store volatile <4 x i32> [[SHL31]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[SHL32:%.*]] = shl <4 x i32> [[TMP37]], [[TMP38]]
+// CHECK:   store volatile <4 x i32> [[SHL32]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP40:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP40]], i32 0
+// CHECK:   [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK:   [[SHL35:%.*]] = shl <4 x i32> [[TMP39]], [[SPLAT_SPLAT34]]
+// CHECK:   store volatile <4 x i32> [[SHL35]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHL36:%.*]] = shl <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK:   store volatile <4 x i32> [[SHL36]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[SHL37:%.*]] = shl <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK:   store volatile <2 x i64> [[SHL37]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[SHL38:%.*]] = shl <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK:   store volatile <2 x i64> [[SHL38]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP47:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP47]], i32 0
+// CHECK:   [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK:   [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64>
+// CHECK:   [[SHL42:%.*]] = shl <2 x i64> [[TMP46]], [[SH_PROM41]]
+// CHECK:   store volatile <2 x i64> [[SHL42]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SHL43:%.*]] = shl <2 x i64> [[TMP48]], <i64 5, i64 5>
+// CHECK:   store volatile <2 x i64> [[SHL43]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[SHL44:%.*]] = shl <2 x i64> [[TMP49]], [[TMP50]]
+// CHECK:   store volatile <2 x i64> [[SHL44]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[SHL45:%.*]] = shl <2 x i64> [[TMP51]], [[TMP52]]
+// CHECK:   store volatile <2 x i64> [[SHL45]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP54:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP54]], i32 0
+// CHECK:   [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK:   [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64>
+// CHECK:   [[SHL49:%.*]] = shl <2 x i64> [[TMP53]], [[SH_PROM48]]
+// CHECK:   store volatile <2 x i64> [[SHL49]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SHL50:%.*]] = shl <2 x i64> [[TMP55]], <i64 5, i64 5>
+// CHECK:   store volatile <2 x i64> [[SHL50]], <2 x i64>* @ul, align 8
+// CHECK:   ret void
+void test_sl(void) {
+
   sc = sc << sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   sc = sc << uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   sc = sc << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
   sc = sc << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   uc = uc << sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   uc = uc << uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   uc = uc << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
   uc = uc << 5;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   ss = ss << ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   ss = ss << us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   ss = ss << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
   ss = ss << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   us = us << ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   us = us << us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   us = us << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
   us = us << 5;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   si = si << si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   si = si << ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   si = si << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
   si = si << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   ui = ui << si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   ui = ui << ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   ui = ui << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
   ui = ui << 5;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   sl = sl << sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   sl = sl << ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   sl = sl << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
   sl = sl << 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   ul = ul << sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   ul = ul << ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   ul = ul << cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
   ul = ul << 5;
 }
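
The cases shifting by the scalar "cnt" are the ones these checks really
pin down: the i32 count is splatted across a vector (insertelement plus
shufflevector) and then converted to the element width, so byte and
halfword vectors get a trunc, word vectors shift directly, and
doubleword vectors get a zext.  A compilable sketch of those three
cases, again assuming generic vector_size types rather than the
-fzvector spellings:

    typedef signed char vsc  __attribute__((vector_size(16)));
    typedef int         vsi  __attribute__((vector_size(16)));
    typedef long long   vsll __attribute__((vector_size(16)));

    volatile vsc  vc;
    volatile vsi  vi;
    volatile vsll vl;
    volatile int  count;

    void shift_by_scalar_sketch(void) {
      vc = vc << count; /* splat to <16 x i32>, trunc to <16 x i8>, shl */
      vi = vi << count; /* splat to <4 x i32>, shl directly             */
      vl = vl << count; /* splat to <2 x i32>, zext to <2 x i64>, shl   */
    }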
 
-void test_sl_assign (void)
-{
-// CHECK-LABEL: test_sl_assign
-
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
+// CHECK-LABEL: define void @test_sl_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SHL:%.*]] = shl <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[SHL]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SHL1:%.*]] = shl <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[SHL1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP4]], i32 0
+// CHECK:   [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
+// CHECK:   [[SHL2:%.*]] = shl <16 x i8> [[TMP5]], [[SH_PROM]]
+// CHECK:   store volatile <16 x i8> [[SHL2]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SHL3:%.*]] = shl <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK:   store volatile <16 x i8> [[SHL3]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SHL4:%.*]] = shl <16 x i8> [[TMP8]], [[TMP7]]
+// CHECK:   store volatile <16 x i8> [[SHL4]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SHL5:%.*]] = shl <16 x i8> [[TMP10]], [[TMP9]]
+// CHECK:   store volatile <16 x i8> [[SHL5]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP11]], i32 0
+// CHECK:   [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK:   [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8>
+// CHECK:   [[SHL9:%.*]] = shl <16 x i8> [[TMP12]], [[SH_PROM8]]
+// CHECK:   store volatile <16 x i8> [[SHL9]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SHL10:%.*]] = shl <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK:   store volatile <16 x i8> [[SHL10]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SHL11:%.*]] = shl <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <8 x i16> [[SHL11]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SHL12:%.*]] = shl <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK:   store volatile <8 x i16> [[SHL12]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP18]], i32 0
+// CHECK:   [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16>
+// CHECK:   [[SHL16:%.*]] = shl <8 x i16> [[TMP19]], [[SH_PROM15]]
+// CHECK:   store volatile <8 x i16> [[SHL16]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SHL17:%.*]] = shl <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK:   store volatile <8 x i16> [[SHL17]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SHL18:%.*]] = shl <8 x i16> [[TMP22]], [[TMP21]]
+// CHECK:   store volatile <8 x i16> [[SHL18]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SHL19:%.*]] = shl <8 x i16> [[TMP24]], [[TMP23]]
+// CHECK:   store volatile <8 x i16> [[SHL19]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP25]], i32 0
+// CHECK:   [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK:   [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16>
+// CHECK:   [[SHL23:%.*]] = shl <8 x i16> [[TMP26]], [[SH_PROM22]]
+// CHECK:   store volatile <8 x i16> [[SHL23]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SHL24:%.*]] = shl <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK:   store volatile <8 x i16> [[SHL24]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHL25:%.*]] = shl <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK:   store volatile <4 x i32> [[SHL25]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHL26:%.*]] = shl <4 x i32> [[TMP31]], [[TMP30]]
+// CHECK:   store volatile <4 x i32> [[SHL26]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP32]], i32 0
+// CHECK:   [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK:   [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHL29:%.*]] = shl <4 x i32> [[TMP33]], [[SPLAT_SPLAT28]]
+// CHECK:   store volatile <4 x i32> [[SHL29]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHL30:%.*]] = shl <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK:   store volatile <4 x i32> [[SHL30]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHL31:%.*]] = shl <4 x i32> [[TMP36]], [[TMP35]]
+// CHECK:   store volatile <4 x i32> [[SHL31]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHL32:%.*]] = shl <4 x i32> [[TMP38]], [[TMP37]]
+// CHECK:   store volatile <4 x i32> [[SHL32]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP39]], i32 0
+// CHECK:   [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK:   [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHL35:%.*]] = shl <4 x i32> [[TMP40]], [[SPLAT_SPLAT34]]
+// CHECK:   store volatile <4 x i32> [[SHL35]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHL36:%.*]] = shl <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK:   store volatile <4 x i32> [[SHL36]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SHL37:%.*]] = shl <2 x i64> [[TMP43]], [[TMP42]]
+// CHECK:   store volatile <2 x i64> [[SHL37]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SHL38:%.*]] = shl <2 x i64> [[TMP45]], [[TMP44]]
+// CHECK:   store volatile <2 x i64> [[SHL38]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP46]], i32 0
+// CHECK:   [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK:   [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64>
+// CHECK:   [[SHL42:%.*]] = shl <2 x i64> [[TMP47]], [[SH_PROM41]]
+// CHECK:   store volatile <2 x i64> [[SHL42]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SHL43:%.*]] = shl <2 x i64> [[TMP48]], <i64 5, i64 5>
+// CHECK:   store volatile <2 x i64> [[SHL43]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SHL44:%.*]] = shl <2 x i64> [[TMP50]], [[TMP49]]
+// CHECK:   store volatile <2 x i64> [[SHL44]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SHL45:%.*]] = shl <2 x i64> [[TMP52]], [[TMP51]]
+// CHECK:   store volatile <2 x i64> [[SHL45]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP53:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP53]], i32 0
+// CHECK:   [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK:   [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64>
+// CHECK:   [[SHL49:%.*]] = shl <2 x i64> [[TMP54]], [[SH_PROM48]]
+// CHECK:   store volatile <2 x i64> [[SHL49]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SHL50:%.*]] = shl <2 x i64> [[TMP55]], <i64 5, i64 5>
+// CHECK:   store volatile <2 x i64> [[SHL50]], <2 x i64>* @ul, align 8
+// CHECK:   ret void
+void test_sl_assign(void) {
+
   sc <<= sc2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   sc <<= uc2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   sc <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
   sc <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   uc <<= sc2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   uc <<= uc2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]]
   uc <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
   uc <<= 5;
 
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   ss <<= ss2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   ss <<= us2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   ss <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
   ss <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   us <<= ss2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   us <<= us2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]]
   us <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
   us <<= 5;
 
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   si <<= si2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   si <<= ui2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   si <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
   si <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   ui <<= si2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   ui <<= ui2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]]
   ui <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
   ui <<= 5;
 
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   sl <<= sl2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   sl <<= ul2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   sl <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
   sl <<= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   ul <<= sl2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   ul <<= ul2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]]
   ul <<= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], <i64 5, i64 5>
   ul <<= 5;
 }
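
One ordering detail the test_sl_assign checks encode: for "<<=" the
shift count is evaluated before the destination is loaded, the reverse
of the plain "x = x << y" form.  A small sketch of the two orders, with
the same generic-vector assumption as above:

    typedef signed char vsc __attribute__((vector_size(16)));

    volatile vsc v, n;

    void sl_assign_sketch(void) {
      v <<= n;    /* load @n (the count) first, then @v, shl, store @v */
      v = v << n; /* load @v first, then @n                            */
    }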
 
-void test_sr (void)
-{
-// CHECK-LABEL: test_sr
-
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+// CHECK-LABEL: define void @test_sr() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[SHR:%.*]] = ashr <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   store volatile <16 x i8> [[SHR]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[SHR1:%.*]] = ashr <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   store volatile <16 x i8> [[SHR1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP5]], i32 0
+// CHECK:   [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK:   [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
+// CHECK:   [[SHR2:%.*]] = ashr <16 x i8> [[TMP4]], [[SH_PROM]]
+// CHECK:   store volatile <16 x i8> [[SHR2]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SHR3:%.*]] = ashr <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK:   store volatile <16 x i8> [[SHR3]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[SHR4:%.*]] = lshr <16 x i8> [[TMP7]], [[TMP8]]
+// CHECK:   store volatile <16 x i8> [[SHR4]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[SHR5:%.*]] = lshr <16 x i8> [[TMP9]], [[TMP10]]
+// CHECK:   store volatile <16 x i8> [[SHR5]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP12]], i32 0
+// CHECK:   [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK:   [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8>
+// CHECK:   [[SHR9:%.*]] = lshr <16 x i8> [[TMP11]], [[SH_PROM8]]
+// CHECK:   store volatile <16 x i8> [[SHR9]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SHR10:%.*]] = lshr <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK:   store volatile <16 x i8> [[SHR10]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[SHR11:%.*]] = ashr <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK:   store volatile <8 x i16> [[SHR11]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[SHR12:%.*]] = ashr <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK:   store volatile <8 x i16> [[SHR12]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP19]], i32 0
+// CHECK:   [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK:   [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16>
+// CHECK:   [[SHR16:%.*]] = ashr <8 x i16> [[TMP18]], [[SH_PROM15]]
+// CHECK:   store volatile <8 x i16> [[SHR16]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SHR17:%.*]] = ashr <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK:   store volatile <8 x i16> [[SHR17]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[SHR18:%.*]] = lshr <8 x i16> [[TMP21]], [[TMP22]]
+// CHECK:   store volatile <8 x i16> [[SHR18]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[SHR19:%.*]] = lshr <8 x i16> [[TMP23]], [[TMP24]]
+// CHECK:   store volatile <8 x i16> [[SHR19]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP26]], i32 0
+// CHECK:   [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK:   [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16>
+// CHECK:   [[SHR23:%.*]] = lshr <8 x i16> [[TMP25]], [[SH_PROM22]]
+// CHECK:   store volatile <8 x i16> [[SHR23]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SHR24:%.*]] = lshr <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK:   store volatile <8 x i16> [[SHR24]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[SHR25:%.*]] = ashr <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK:   store volatile <4 x i32> [[SHR25]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[SHR26:%.*]] = ashr <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK:   store volatile <4 x i32> [[SHR26]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP33]], i32 0
+// CHECK:   [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK:   [[SHR29:%.*]] = ashr <4 x i32> [[TMP32]], [[SPLAT_SPLAT28]]
+// CHECK:   store volatile <4 x i32> [[SHR29]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHR30:%.*]] = ashr <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK:   store volatile <4 x i32> [[SHR30]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[SHR31:%.*]] = lshr <4 x i32> [[TMP35]], [[TMP36]]
+// CHECK:   store volatile <4 x i32> [[SHR31]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[SHR32:%.*]] = lshr <4 x i32> [[TMP37]], [[TMP38]]
+// CHECK:   store volatile <4 x i32> [[SHR32]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP40:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP40]], i32 0
+// CHECK:   [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK:   [[SHR35:%.*]] = lshr <4 x i32> [[TMP39]], [[SPLAT_SPLAT34]]
+// CHECK:   store volatile <4 x i32> [[SHR35]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHR36:%.*]] = lshr <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK:   store volatile <4 x i32> [[SHR36]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[SHR37:%.*]] = ashr <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK:   store volatile <2 x i64> [[SHR37]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[SHR38:%.*]] = ashr <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK:   store volatile <2 x i64> [[SHR38]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP47:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP47]], i32 0
+// CHECK:   [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK:   [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64>
+// CHECK:   [[SHR42:%.*]] = ashr <2 x i64> [[TMP46]], [[SH_PROM41]]
+// CHECK:   store volatile <2 x i64> [[SHR42]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SHR43:%.*]] = ashr <2 x i64> [[TMP48]], <i64 5, i64 5>
+// CHECK:   store volatile <2 x i64> [[SHR43]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[SHR44:%.*]] = lshr <2 x i64> [[TMP49]], [[TMP50]]
+// CHECK:   store volatile <2 x i64> [[SHR44]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[SHR45:%.*]] = lshr <2 x i64> [[TMP51]], [[TMP52]]
+// CHECK:   store volatile <2 x i64> [[SHR45]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP54:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP54]], i32 0
+// CHECK:   [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK:   [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64>
+// CHECK:   [[SHR49:%.*]] = lshr <2 x i64> [[TMP53]], [[SH_PROM48]]
+// CHECK:   store volatile <2 x i64> [[SHR49]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SHR50:%.*]] = lshr <2 x i64> [[TMP55]], <i64 5, i64 5>
+// CHECK:   store volatile <2 x i64> [[SHR50]], <2 x i64>* @ul, align 8
+// CHECK:   ret void
+void test_sr(void) {
+
   sc = sc >> sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
   sc = sc >> uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
   sc = sc >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
   sc = sc >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
   uc = uc >> sc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
   uc = uc >> uc2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
   uc = uc >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
   uc = uc >> 5;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
   ss = ss >> ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
   ss = ss >> us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
   ss = ss >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
   ss = ss >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
   us = us >> ss2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
   us = us >> us2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
   us = us >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
   us = us >> 5;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
   si = si >> si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
   si = si >> ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
   si = si >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
   si = si >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
   ui = ui >> si2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
   ui = ui >> ui2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
   ui = ui >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
   ui = ui >> 5;
 
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
   sl = sl >> sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
   sl = sl >> ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
   sl = sl >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
   sl = sl >> 5;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
   ul = ul >> sl2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
   ul = ul >> ul2;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
   ul = ul >> cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
   ul = ul >> 5;
 }
 
-void test_sr_assign (void)
-{
-// CHECK-LABEL: test_sr_assign
-
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
+// CHECK-LABEL: define void @test_sr_assign() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SHR:%.*]] = ashr <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   store volatile <16 x i8> [[SHR]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SHR1:%.*]] = ashr <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   store volatile <16 x i8> [[SHR1]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> undef, i32 [[TMP4]], i32 0
+// CHECK:   [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
+// CHECK:   [[SHR2:%.*]] = ashr <16 x i8> [[TMP5]], [[SH_PROM]]
+// CHECK:   store volatile <16 x i8> [[SHR2]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[SHR3:%.*]] = ashr <16 x i8> [[TMP6]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK:   store volatile <16 x i8> [[SHR3]], <16 x i8>* @sc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SHR4:%.*]] = lshr <16 x i8> [[TMP8]], [[TMP7]]
+// CHECK:   store volatile <16 x i8> [[SHR4]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SHR5:%.*]] = lshr <16 x i8> [[TMP10]], [[TMP9]]
+// CHECK:   store volatile <16 x i8> [[SHR5]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> undef, i32 [[TMP11]], i32 0
+// CHECK:   [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> undef, <16 x i32> zeroinitializer
+// CHECK:   [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8>
+// CHECK:   [[SHR9:%.*]] = lshr <16 x i8> [[TMP12]], [[SH_PROM8]]
+// CHECK:   store volatile <16 x i8> [[SHR9]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[SHR10:%.*]] = lshr <16 x i8> [[TMP13]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
+// CHECK:   store volatile <16 x i8> [[SHR10]], <16 x i8>* @uc, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SHR11:%.*]] = ashr <8 x i16> [[TMP15]], [[TMP14]]
+// CHECK:   store volatile <8 x i16> [[SHR11]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SHR12:%.*]] = ashr <8 x i16> [[TMP17]], [[TMP16]]
+// CHECK:   store volatile <8 x i16> [[SHR12]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> undef, i32 [[TMP18]], i32 0
+// CHECK:   [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16>
+// CHECK:   [[SHR16:%.*]] = ashr <8 x i16> [[TMP19]], [[SH_PROM15]]
+// CHECK:   store volatile <8 x i16> [[SHR16]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[SHR17:%.*]] = ashr <8 x i16> [[TMP20]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK:   store volatile <8 x i16> [[SHR17]], <8 x i16>* @ss, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SHR18:%.*]] = lshr <8 x i16> [[TMP22]], [[TMP21]]
+// CHECK:   store volatile <8 x i16> [[SHR18]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SHR19:%.*]] = lshr <8 x i16> [[TMP24]], [[TMP23]]
+// CHECK:   store volatile <8 x i16> [[SHR19]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> undef, i32 [[TMP25]], i32 0
+// CHECK:   [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> undef, <8 x i32> zeroinitializer
+// CHECK:   [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16>
+// CHECK:   [[SHR23:%.*]] = lshr <8 x i16> [[TMP26]], [[SH_PROM22]]
+// CHECK:   store volatile <8 x i16> [[SHR23]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[SHR24:%.*]] = lshr <8 x i16> [[TMP27]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
+// CHECK:   store volatile <8 x i16> [[SHR24]], <8 x i16>* @us, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHR25:%.*]] = ashr <4 x i32> [[TMP29]], [[TMP28]]
+// CHECK:   store volatile <4 x i32> [[SHR25]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHR26:%.*]] = ashr <4 x i32> [[TMP31]], [[TMP30]]
+// CHECK:   store volatile <4 x i32> [[SHR26]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TMP32]], i32 0
+// CHECK:   [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK:   [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHR29:%.*]] = ashr <4 x i32> [[TMP33]], [[SPLAT_SPLAT28]]
+// CHECK:   store volatile <4 x i32> [[SHR29]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[SHR30:%.*]] = ashr <4 x i32> [[TMP34]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK:   store volatile <4 x i32> [[SHR30]], <4 x i32>* @si, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHR31:%.*]] = lshr <4 x i32> [[TMP36]], [[TMP35]]
+// CHECK:   store volatile <4 x i32> [[SHR31]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHR32:%.*]] = lshr <4 x i32> [[TMP38]], [[TMP37]]
+// CHECK:   store volatile <4 x i32> [[SHR32]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> undef, i32 [[TMP39]], i32 0
+// CHECK:   [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> undef, <4 x i32> zeroinitializer
+// CHECK:   [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHR35:%.*]] = lshr <4 x i32> [[TMP40]], [[SPLAT_SPLAT34]]
+// CHECK:   store volatile <4 x i32> [[SHR35]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[SHR36:%.*]] = lshr <4 x i32> [[TMP41]], <i32 5, i32 5, i32 5, i32 5>
+// CHECK:   store volatile <4 x i32> [[SHR36]], <4 x i32>* @ui, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SHR37:%.*]] = ashr <2 x i64> [[TMP43]], [[TMP42]]
+// CHECK:   store volatile <2 x i64> [[SHR37]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SHR38:%.*]] = ashr <2 x i64> [[TMP45]], [[TMP44]]
+// CHECK:   store volatile <2 x i64> [[SHR38]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> undef, i32 [[TMP46]], i32 0
+// CHECK:   [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK:   [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64>
+// CHECK:   [[SHR42:%.*]] = ashr <2 x i64> [[TMP47]], [[SH_PROM41]]
+// CHECK:   store volatile <2 x i64> [[SHR42]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[SHR43:%.*]] = ashr <2 x i64> [[TMP48]], <i64 5, i64 5>
+// CHECK:   store volatile <2 x i64> [[SHR43]], <2 x i64>* @sl, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SHR44:%.*]] = lshr <2 x i64> [[TMP50]], [[TMP49]]
+// CHECK:   store volatile <2 x i64> [[SHR44]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SHR45:%.*]] = lshr <2 x i64> [[TMP52]], [[TMP51]]
+// CHECK:   store volatile <2 x i64> [[SHR45]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP53:%.*]] = load volatile i32, i32* @cnt, align 4
+// CHECK:   [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> undef, i32 [[TMP53]], i32 0
+// CHECK:   [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> undef, <2 x i32> zeroinitializer
+// CHECK:   [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64>
+// CHECK:   [[SHR49:%.*]] = lshr <2 x i64> [[TMP54]], [[SH_PROM48]]
+// CHECK:   store volatile <2 x i64> [[SHR49]], <2 x i64>* @ul, align 8
+// CHECK:   [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[SHR50:%.*]] = lshr <2 x i64> [[TMP55]], <i64 5, i64 5>
+// CHECK:   store volatile <2 x i64> [[SHR50]], <2 x i64>* @ul, align 8
+// CHECK:   ret void
+void test_sr_assign(void) {
+
   sc >>= sc2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
   sc >>= uc2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]]
   sc >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
   sc >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
   uc >>= sc2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
   uc >>= uc2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8>
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]]
   uc >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
   uc >>= 5;
 
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
   ss >>= ss2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
   ss >>= us2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]]
   ss >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
   ss >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
   us >>= ss2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
   us >>= us2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16>
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]]
   us >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
   us >>= 5;
 
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
   si >>= si2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
   si >>= ui2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]]
   si >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
   si >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
   ui >>= si2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
   ui >>= ui2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]]
   ui >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], <i32 5, i32 5, i32 5, i32 5>
   ui >>= 5;
 
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
   sl >>= sl2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
   sl >>= ul2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]]
   sl >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], <i64 5, i64 5>
   sl >>= 5;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
   ul >>= sl2;
-// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
   ul >>= ul2;
-// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt
-// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0
-// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64>
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]]
   ul >>= cnt;
-// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], <i64 5, i64 5>
   ul >>= 5;
 }
 
 
-void test_cmpeq (void)
-{
-// CHECK-LABEL: test_cmpeq
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK-LABEL: define void @test_cmpeq() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[CMP:%.*]] = icmp eq <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP1:%.*]] = icmp eq <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[CMP3:%.*]] = icmp eq <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[CMP5:%.*]] = icmp eq <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK:   [[SEXT6:%.*]] = sext <16 x i1> [[CMP5]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT6]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP7:%.*]] = icmp eq <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK:   [[SEXT8:%.*]] = sext <16 x i1> [[CMP7]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT8]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[CMP9:%.*]] = icmp eq <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK:   [[SEXT10:%.*]] = sext <16 x i1> [[CMP9]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT10]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP11:%.*]] = icmp eq <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK:   [[SEXT12:%.*]] = sext <16 x i1> [[CMP11]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT12]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[CMP13:%.*]] = icmp eq <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK:   [[SEXT14:%.*]] = sext <8 x i1> [[CMP13]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT14]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP15:%.*]] = icmp eq <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK:   [[SEXT16:%.*]] = sext <8 x i1> [[CMP15]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT16]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[CMP17:%.*]] = icmp eq <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK:   [[SEXT18:%.*]] = sext <8 x i1> [[CMP17]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT18]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[CMP19:%.*]] = icmp eq <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK:   [[SEXT20:%.*]] = sext <8 x i1> [[CMP19]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT20]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP21:%.*]] = icmp eq <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK:   [[SEXT22:%.*]] = sext <8 x i1> [[CMP21]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT22]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[CMP23:%.*]] = icmp eq <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK:   [[SEXT24:%.*]] = sext <8 x i1> [[CMP23]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT24]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP25:%.*]] = icmp eq <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK:   [[SEXT26:%.*]] = sext <8 x i1> [[CMP25]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT26]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[CMP27:%.*]] = icmp eq <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK:   [[SEXT28:%.*]] = sext <4 x i1> [[CMP27]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT28]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP29:%.*]] = icmp eq <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK:   [[SEXT30:%.*]] = sext <4 x i1> [[CMP29]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT30]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[CMP31:%.*]] = icmp eq <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK:   [[SEXT32:%.*]] = sext <4 x i1> [[CMP31]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT32]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[CMP33:%.*]] = icmp eq <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK:   [[SEXT34:%.*]] = sext <4 x i1> [[CMP33]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT34]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP35:%.*]] = icmp eq <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK:   [[SEXT36:%.*]] = sext <4 x i1> [[CMP35]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT36]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[CMP37:%.*]] = icmp eq <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK:   [[SEXT38:%.*]] = sext <4 x i1> [[CMP37]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT38]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP39:%.*]] = icmp eq <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK:   [[SEXT40:%.*]] = sext <4 x i1> [[CMP39]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT40]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[CMP41:%.*]] = icmp eq <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK:   [[SEXT42:%.*]] = sext <2 x i1> [[CMP41]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT42]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP43:%.*]] = icmp eq <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK:   [[SEXT44:%.*]] = sext <2 x i1> [[CMP43]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT44]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[CMP45:%.*]] = icmp eq <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK:   [[SEXT46:%.*]] = sext <2 x i1> [[CMP45]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT46]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[CMP47:%.*]] = icmp eq <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK:   [[SEXT48:%.*]] = sext <2 x i1> [[CMP47]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT48]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP49:%.*]] = icmp eq <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK:   [[SEXT50:%.*]] = sext <2 x i1> [[CMP49]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT50]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[CMP51:%.*]] = icmp eq <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK:   [[SEXT52:%.*]] = sext <2 x i1> [[CMP51]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT52]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP53:%.*]] = icmp eq <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK:   [[SEXT54:%.*]] = sext <2 x i1> [[CMP53]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT54]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP56:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP57:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[CMP55:%.*]] = fcmp oeq <2 x double> [[TMP56]], [[TMP57]]
+// CHECK:   [[SEXT56:%.*]] = sext <2 x i1> [[CMP55]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT56]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_cmpeq(void) {
+
   bc = sc == sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = sc == bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc == sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = uc == uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = uc == bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc == uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc == bc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = ss == ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = ss == bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs == ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = us == us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = us == bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs == us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs == bs2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = si == si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = si == bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi == si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = ui == ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = ui == bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi == ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi == bi2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = sl == sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = sl == bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl == sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = ul == ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = ul == bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl == ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl == bl2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp oeq <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = fd == fd2;
 }
 
-void test_cmpne (void)
-{
-// CHECK-LABEL: test_cmpne
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK-LABEL: define void @test_cmpne() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[CMP:%.*]] = icmp ne <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP1:%.*]] = icmp ne <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[CMP3:%.*]] = icmp ne <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[CMP5:%.*]] = icmp ne <16 x i8> [[TMP6]], [[TMP7]]
+// CHECK:   [[SEXT6:%.*]] = sext <16 x i1> [[CMP5]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT6]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP7:%.*]] = icmp ne <16 x i8> [[TMP8]], [[TMP9]]
+// CHECK:   [[SEXT8:%.*]] = sext <16 x i1> [[CMP7]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT8]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[CMP9:%.*]] = icmp ne <16 x i8> [[TMP10]], [[TMP11]]
+// CHECK:   [[SEXT10:%.*]] = sext <16 x i1> [[CMP9]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT10]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP11:%.*]] = icmp ne <16 x i8> [[TMP12]], [[TMP13]]
+// CHECK:   [[SEXT12:%.*]] = sext <16 x i1> [[CMP11]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT12]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[CMP13:%.*]] = icmp ne <8 x i16> [[TMP14]], [[TMP15]]
+// CHECK:   [[SEXT14:%.*]] = sext <8 x i1> [[CMP13]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT14]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP15:%.*]] = icmp ne <8 x i16> [[TMP16]], [[TMP17]]
+// CHECK:   [[SEXT16:%.*]] = sext <8 x i1> [[CMP15]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT16]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[CMP17:%.*]] = icmp ne <8 x i16> [[TMP18]], [[TMP19]]
+// CHECK:   [[SEXT18:%.*]] = sext <8 x i1> [[CMP17]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT18]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[CMP19:%.*]] = icmp ne <8 x i16> [[TMP20]], [[TMP21]]
+// CHECK:   [[SEXT20:%.*]] = sext <8 x i1> [[CMP19]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT20]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP21:%.*]] = icmp ne <8 x i16> [[TMP22]], [[TMP23]]
+// CHECK:   [[SEXT22:%.*]] = sext <8 x i1> [[CMP21]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT22]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[CMP23:%.*]] = icmp ne <8 x i16> [[TMP24]], [[TMP25]]
+// CHECK:   [[SEXT24:%.*]] = sext <8 x i1> [[CMP23]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT24]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP26:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP27:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP25:%.*]] = icmp ne <8 x i16> [[TMP26]], [[TMP27]]
+// CHECK:   [[SEXT26:%.*]] = sext <8 x i1> [[CMP25]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT26]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP28:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP29:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[CMP27:%.*]] = icmp ne <4 x i32> [[TMP28]], [[TMP29]]
+// CHECK:   [[SEXT28:%.*]] = sext <4 x i1> [[CMP27]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT28]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP30:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP31:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP29:%.*]] = icmp ne <4 x i32> [[TMP30]], [[TMP31]]
+// CHECK:   [[SEXT30:%.*]] = sext <4 x i1> [[CMP29]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT30]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP32:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP33:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[CMP31:%.*]] = icmp ne <4 x i32> [[TMP32]], [[TMP33]]
+// CHECK:   [[SEXT32:%.*]] = sext <4 x i1> [[CMP31]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT32]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP34:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP35:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[CMP33:%.*]] = icmp ne <4 x i32> [[TMP34]], [[TMP35]]
+// CHECK:   [[SEXT34:%.*]] = sext <4 x i1> [[CMP33]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT34]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP36:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP37:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP35:%.*]] = icmp ne <4 x i32> [[TMP36]], [[TMP37]]
+// CHECK:   [[SEXT36:%.*]] = sext <4 x i1> [[CMP35]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT36]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP38:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP39:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[CMP37:%.*]] = icmp ne <4 x i32> [[TMP38]], [[TMP39]]
+// CHECK:   [[SEXT38:%.*]] = sext <4 x i1> [[CMP37]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT38]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP40:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP41:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP39:%.*]] = icmp ne <4 x i32> [[TMP40]], [[TMP41]]
+// CHECK:   [[SEXT40:%.*]] = sext <4 x i1> [[CMP39]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT40]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP42:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP43:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[CMP41:%.*]] = icmp ne <2 x i64> [[TMP42]], [[TMP43]]
+// CHECK:   [[SEXT42:%.*]] = sext <2 x i1> [[CMP41]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT42]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP44:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP45:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP43:%.*]] = icmp ne <2 x i64> [[TMP44]], [[TMP45]]
+// CHECK:   [[SEXT44:%.*]] = sext <2 x i1> [[CMP43]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT44]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP46:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP47:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[CMP45:%.*]] = icmp ne <2 x i64> [[TMP46]], [[TMP47]]
+// CHECK:   [[SEXT46:%.*]] = sext <2 x i1> [[CMP45]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT46]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP48:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP49:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[CMP47:%.*]] = icmp ne <2 x i64> [[TMP48]], [[TMP49]]
+// CHECK:   [[SEXT48:%.*]] = sext <2 x i1> [[CMP47]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT48]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP50:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP51:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP49:%.*]] = icmp ne <2 x i64> [[TMP50]], [[TMP51]]
+// CHECK:   [[SEXT50:%.*]] = sext <2 x i1> [[CMP49]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT50]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP52:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP53:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[CMP51:%.*]] = icmp ne <2 x i64> [[TMP52]], [[TMP53]]
+// CHECK:   [[SEXT52:%.*]] = sext <2 x i1> [[CMP51]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT52]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP54:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP55:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP53:%.*]] = icmp ne <2 x i64> [[TMP54]], [[TMP55]]
+// CHECK:   [[SEXT54:%.*]] = sext <2 x i1> [[CMP53]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT54]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP56:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP57:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[CMP55:%.*]] = fcmp une <2 x double> [[TMP56]], [[TMP57]]
+// CHECK:   [[SEXT56:%.*]] = sext <2 x i1> [[CMP55]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT56]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_cmpne(void) {
+
   bc = sc != sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = sc != bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc != sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = uc != uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = uc != bc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc != uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc != bc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = ss != ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = ss != bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs != ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = us != us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = us != bs2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs != us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs != bs2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = si != si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = si != bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi != si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = ui != ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = ui != bi2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi != ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi != bi2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = sl != sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = sl != bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl != sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = ul != ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = ul != bl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl != ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl != bl2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp une <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = fd != fd2;
 }
 
-void test_cmpge (void)
-{
-// CHECK-LABEL: test_cmpge
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp sge <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK-LABEL: define void @test_cmpge() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[CMP:%.*]] = icmp sge <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[CMP1:%.*]] = icmp uge <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP3:%.*]] = icmp uge <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[CMP5:%.*]] = icmp sge <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK:   [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[CMP7:%.*]] = icmp uge <8 x i16> [[TMP8]], [[TMP9]]
+// CHECK:   [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP9:%.*]] = icmp uge <8 x i16> [[TMP10]], [[TMP11]]
+// CHECK:   [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[CMP11:%.*]] = icmp sge <4 x i32> [[TMP12]], [[TMP13]]
+// CHECK:   [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[CMP13:%.*]] = icmp uge <4 x i32> [[TMP14]], [[TMP15]]
+// CHECK:   [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP15:%.*]] = icmp uge <4 x i32> [[TMP16]], [[TMP17]]
+// CHECK:   [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[CMP17:%.*]] = icmp sge <2 x i64> [[TMP18]], [[TMP19]]
+// CHECK:   [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[CMP19:%.*]] = icmp uge <2 x i64> [[TMP20]], [[TMP21]]
+// CHECK:   [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP21:%.*]] = icmp uge <2 x i64> [[TMP22]], [[TMP23]]
+// CHECK:   [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[CMP23:%.*]] = fcmp oge <2 x double> [[TMP24]], [[TMP25]]
+// CHECK:   [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_cmpge(void) {
+
   bc = sc >= sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = uc >= uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc >= bc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp sge <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = ss >= ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = us >= us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs >= bs2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp sge <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = si >= si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = ui >= ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi >= bi2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp sge <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = sl >= sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = ul >= ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl >= bl2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp oge <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = fd >= fd2;
 }
 
-void test_cmpgt (void)
-{
-// CHECK-LABEL: test_cmpgt
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp sgt <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK-LABEL: define void @test_cmpgt() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[CMP:%.*]] = icmp sgt <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[CMP1:%.*]] = icmp ugt <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP3:%.*]] = icmp ugt <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[CMP5:%.*]] = icmp sgt <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK:   [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[CMP7:%.*]] = icmp ugt <8 x i16> [[TMP8]], [[TMP9]]
+// CHECK:   [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP9:%.*]] = icmp ugt <8 x i16> [[TMP10]], [[TMP11]]
+// CHECK:   [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[CMP11:%.*]] = icmp sgt <4 x i32> [[TMP12]], [[TMP13]]
+// CHECK:   [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[CMP13:%.*]] = icmp ugt <4 x i32> [[TMP14]], [[TMP15]]
+// CHECK:   [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP15:%.*]] = icmp ugt <4 x i32> [[TMP16]], [[TMP17]]
+// CHECK:   [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[CMP17:%.*]] = icmp sgt <2 x i64> [[TMP18]], [[TMP19]]
+// CHECK:   [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[CMP19:%.*]] = icmp ugt <2 x i64> [[TMP20]], [[TMP21]]
+// CHECK:   [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP21:%.*]] = icmp ugt <2 x i64> [[TMP22]], [[TMP23]]
+// CHECK:   [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[CMP23:%.*]] = fcmp ogt <2 x double> [[TMP24]], [[TMP25]]
+// CHECK:   [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_cmpgt(void) {
+
   bc = sc > sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = uc > uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc > bc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp sgt <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = ss > ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = us > us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs > bs2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp sgt <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = si > si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = ui > ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi > bi2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp sgt <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = sl > sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = ul > ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl > bl2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp ogt <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = fd > fd2;
 }
 
-void test_cmple (void)
-{
-// CHECK-LABEL: test_cmple
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp sle <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK-LABEL: define void @test_cmple() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[CMP:%.*]] = icmp sle <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[CMP1:%.*]] = icmp ule <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP3:%.*]] = icmp ule <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[CMP5:%.*]] = icmp sle <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK:   [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[CMP7:%.*]] = icmp ule <8 x i16> [[TMP8]], [[TMP9]]
+// CHECK:   [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP9:%.*]] = icmp ule <8 x i16> [[TMP10]], [[TMP11]]
+// CHECK:   [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[CMP11:%.*]] = icmp sle <4 x i32> [[TMP12]], [[TMP13]]
+// CHECK:   [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[CMP13:%.*]] = icmp ule <4 x i32> [[TMP14]], [[TMP15]]
+// CHECK:   [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP15:%.*]] = icmp ule <4 x i32> [[TMP16]], [[TMP17]]
+// CHECK:   [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[CMP17:%.*]] = icmp sle <2 x i64> [[TMP18]], [[TMP19]]
+// CHECK:   [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[CMP19:%.*]] = icmp ule <2 x i64> [[TMP20]], [[TMP21]]
+// CHECK:   [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP21:%.*]] = icmp ule <2 x i64> [[TMP22]], [[TMP23]]
+// CHECK:   [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[CMP23:%.*]] = fcmp ole <2 x double> [[TMP24]], [[TMP25]]
+// CHECK:   [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_cmple(void) {
+
   bc = sc <= sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = uc <= uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc <= bc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp sle <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = ss <= ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = us <= us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs <= bs2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp sle <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = si <= si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = ui <= ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi <= bi2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp sle <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = sl <= sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = ul <= ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl <= bl2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp ole <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = fd <= fd2;
 }
 
-void test_cmplt (void)
-{
-// CHECK-LABEL: test_cmplt
-
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2
-// CHECK: [[CMP:%[^ ]+]] = icmp slt <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK-LABEL: define void @test_cmplt() #0 {
+// CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
+// CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
+// CHECK:   [[CMP:%.*]] = icmp slt <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
+// CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
+// CHECK:   [[CMP1:%.*]] = icmp ult <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT2]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP4:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc, align 8
+// CHECK:   [[TMP5:%.*]] = load volatile <16 x i8>, <16 x i8>* @bc2, align 8
+// CHECK:   [[CMP3:%.*]] = icmp ult <16 x i8> [[TMP4]], [[TMP5]]
+// CHECK:   [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
+// CHECK:   store volatile <16 x i8> [[SEXT4]], <16 x i8>* @bc, align 8
+// CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
+// CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
+// CHECK:   [[CMP5:%.*]] = icmp slt <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK:   [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT6]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP8:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
+// CHECK:   [[TMP9:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
+// CHECK:   [[CMP7:%.*]] = icmp ult <8 x i16> [[TMP8]], [[TMP9]]
+// CHECK:   [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT8]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP10:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs, align 8
+// CHECK:   [[TMP11:%.*]] = load volatile <8 x i16>, <8 x i16>* @bs2, align 8
+// CHECK:   [[CMP9:%.*]] = icmp ult <8 x i16> [[TMP10]], [[TMP11]]
+// CHECK:   [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
+// CHECK:   store volatile <8 x i16> [[SEXT10]], <8 x i16>* @bs, align 8
+// CHECK:   [[TMP12:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
+// CHECK:   [[TMP13:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
+// CHECK:   [[CMP11:%.*]] = icmp slt <4 x i32> [[TMP12]], [[TMP13]]
+// CHECK:   [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT12]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP14:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
+// CHECK:   [[TMP15:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
+// CHECK:   [[CMP13:%.*]] = icmp ult <4 x i32> [[TMP14]], [[TMP15]]
+// CHECK:   [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT14]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP16:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi, align 8
+// CHECK:   [[TMP17:%.*]] = load volatile <4 x i32>, <4 x i32>* @bi2, align 8
+// CHECK:   [[CMP15:%.*]] = icmp ult <4 x i32> [[TMP16]], [[TMP17]]
+// CHECK:   [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
+// CHECK:   store volatile <4 x i32> [[SEXT16]], <4 x i32>* @bi, align 8
+// CHECK:   [[TMP18:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
+// CHECK:   [[TMP19:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
+// CHECK:   [[CMP17:%.*]] = icmp slt <2 x i64> [[TMP18]], [[TMP19]]
+// CHECK:   [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT18]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP20:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
+// CHECK:   [[TMP21:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
+// CHECK:   [[CMP19:%.*]] = icmp ult <2 x i64> [[TMP20]], [[TMP21]]
+// CHECK:   [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT20]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP22:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl, align 8
+// CHECK:   [[TMP23:%.*]] = load volatile <2 x i64>, <2 x i64>* @bl2, align 8
+// CHECK:   [[CMP21:%.*]] = icmp ult <2 x i64> [[TMP22]], [[TMP23]]
+// CHECK:   [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT22]], <2 x i64>* @bl, align 8
+// CHECK:   [[TMP24:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
+// CHECK:   [[TMP25:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
+// CHECK:   [[CMP23:%.*]] = fcmp olt <2 x double> [[TMP24]], [[TMP25]]
+// CHECK:   [[SEXT24:%.*]] = sext <2 x i1> [[CMP23]] to <2 x i64>
+// CHECK:   store volatile <2 x i64> [[SEXT24]], <2 x i64>* @bl, align 8
+// CHECK:   ret void
+void test_cmplt(void) {
+
   bc = sc < sc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = uc < uc2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8>
   bc = bc < bc2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2
-// CHECK: [[CMP:%[^ ]+]] = icmp slt <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = ss < ss2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = us < us2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16>
   bs = bs < bs2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2
-// CHECK: [[CMP:%[^ ]+]] = icmp slt <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = si < si2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = ui < ui2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32>
   bi = bi < bi2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2
-// CHECK: [[CMP:%[^ ]+]] = icmp slt <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = sl < sl2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = ul < ul2;
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2
-// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = bl < bl2;
 
-// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd
-// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2
-// CHECK: [[CMP:%[^ ]+]] = fcmp olt <2 x double> [[VAL1]], [[VAL2]]
-// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64>
   bl = fd < fd2;
 }
 
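A note for readers skimming the regenerated checks: in unoptimized IR, every vector
comparison above lowers to an element-wise icmp/fcmp that yields an <N x i1> result,
which is then sign-extended to the lane width of the matching vector bool type
(all-ones for true, all-zeros for false) before the volatile store. A minimal
standalone sketch of that pattern follows; the file name and build invocation are
illustrative (mirroring the test's RUN line), not part of the commit:

/* sketch.c -- assumed build:
 *   clang -cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector -emit-llvm -o - sketch.c
 */
volatile vector signed char a, b;     /* <16 x i8> lanes under -fzvector      */
volatile vector bool char r;          /* each lane becomes all-ones/all-zeros */
volatile vector double x, y;
volatile vector bool long long br;

void sketch(void) {
  /* Lowers (before optimization) to:
   *   %cmp  = icmp slt <16 x i8> %a, %b
   *   %sext = sext <16 x i1> %cmp to <16 x i8>
   *   store volatile <16 x i8> %sext, <16 x i8>* @r
   */
  r = a < b;

  /* Floating point is asymmetric: != uses the unordered predicate
   * (fcmp une), so a NaN lane compares "not equal", while <, <=, >, >=
   * use the ordered predicates (olt, ole, ogt, oge), which are false
   * whenever a lane holds a NaN.
   */
  br = x != y;                        /* fcmp une <2 x double>, sext to <2 x i64> */
  br = x < y;                         /* fcmp olt <2 x double>, sext to <2 x i64> */
}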



