[flang-commits] [flang] [flang] Fixing PPC lit failure due to alloca reordering. (PR #95621)

Vijay Kandiah via flang-commits flang-commits at lists.llvm.org
Fri Jun 14 16:05:07 PDT 2024


https://github.com/VijayKandiah created https://github.com/llvm/llvm-project/pull/95621

This change fixes the PowerPC lit tests that are failing due to the recent change that hoists constant-sized allocas during flang codegen. Three of the affected lit tests are rewritten entirely to use FileCheck pattern variables instead of numbered LLVM IR values, so they no longer depend on SSA numbering.
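For illustration, a minimal sketch of the test rewrite (the check lines and variable names below are made up, not copied from any of the tests): checks written against numbered LLVM IR hard-code SSA numbers, so hoisting the allocas renumbers every value and the checks fail, while FileCheck pattern variables capture whatever name each alloca actually gets and stay order-tolerant.

  ! Brittle: relies on the exact SSA numbering of the old output.
  ! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
  ! LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
  ! LLVMIR:  store <512 x i1> %10, ptr %1, align 64

  ! Robust: binds a variable to each alloca and reuses it, so the checks
  ! keep passing after the constant-sized allocas are reordered.
  ! LLVMIR:  %[[VEC:.*]] = alloca <16 x i8>, i64 1, align 16
  ! LLVMIR:  %[[ACC:.*]] = alloca <512 x i1>, i64 1, align 64
  ! LLVMIR:  store <512 x i1> %{{.*}}, ptr %[[ACC]], align 64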

>From b9d36f33b66d301b004a5337ff7e57fdde9cbb82 Mon Sep 17 00:00:00 2001
From: Vijay Kandiah <vkandiah at sky6.pgi.net>
Date: Wed, 12 Jun 2024 13:19:39 -0700
Subject: [PATCH 1/5] [Flang] Hoisting constant-sized allocas at flang codegen.

---
 .../flang/Optimizer/CodeGen/FIROpPatterns.h   | 11 +++--
 flang/lib/Optimizer/CodeGen/CodeGen.cpp       | 18 ++++++--
 flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp | 43 ++++++++++---------
 3 files changed, 44 insertions(+), 28 deletions(-)

diff --git a/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h b/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
index 211acdc8a38e6..6ace73e2d16af 100644
--- a/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
+++ b/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
@@ -51,7 +51,9 @@ class ConvertFIRToLLVMPattern : public mlir::ConvertToLLVMPattern {
   /// appropriate reified structures.
   mlir::Value integerCast(mlir::Location loc,
                           mlir::ConversionPatternRewriter &rewriter,
-                          mlir::Type ty, mlir::Value val) const;
+                          mlir::Type ty, mlir::Value val,
+                          bool fold = false) const;
+  
   struct TypePair {
     mlir::Type fir;
     mlir::Type llvm;
@@ -144,9 +146,10 @@ class ConvertFIRToLLVMPattern : public mlir::ConvertToLLVMPattern {
   // Find the Block in which the alloca should be inserted.
   // The order to recursively find the proper block:
   // 1. An OpenMP Op that will be outlined.
-  // 2. A LLVMFuncOp
-  // 3. The first ancestor that is an OpenMP Op or a LLVMFuncOp
-  mlir::Block *getBlockForAllocaInsert(mlir::Operation *op) const;
+  // 2. An OpenMP or OpenACC Op with one or more regions holding executable code.
+  // 3. A LLVMFuncOp
+  // 4. The first ancestor that is one of the above.
+  mlir::Block *getBlockForAllocaInsert(mlir::Operation *op, mlir::Region *parentRegion) const;
 
   // Generate an alloca of size 1 for an object of type \p llvmObjectTy in the
   // allocation address space provided for the architecture in the DataLayout
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 9f21c6b0cf097..d078a000ccd65 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -218,7 +218,7 @@ struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
             chrTy.getContext(), chrTy.getFKind());
         llvmObjectType = convertType(rawCharTy);
         assert(end == 1);
-        size = integerCast(loc, rewriter, ity, lenParams[0]);
+        size = integerCast(loc, rewriter, ity, lenParams[0], /*fold=*/true);
       } else if (auto recTy = mlir::dyn_cast<fir::RecordType>(scalarType)) {
         mlir::LLVM::LLVMFuncOp memSizeFn =
             getDependentTypeMemSizeFn(recTy, alloc, rewriter);
@@ -236,17 +236,27 @@ struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
       }
     }
     if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
-      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
+      size = rewriter.createOrFold<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
     if (alloc.hasShapeOperands()) {
       unsigned end = operands.size();
       for (; i < end; ++i)
-        size = rewriter.create<mlir::LLVM::MulOp>(
-            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
+        size = rewriter.createOrFold<mlir::LLVM::MulOp>(
+            loc, ity, size,
+            integerCast(loc, rewriter, ity, operands[i], /*fold=*/true));
     }
 
     unsigned allocaAs = getAllocaAddressSpace(rewriter);
     unsigned programAs = getProgramAddressSpace(rewriter);
 
+    if (mlir::isa<mlir::LLVM::ConstantOp>(size.getDefiningOp())) {
+      // Set the Block in which the llvm alloca should be inserted.
+      mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
+      mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
+      mlir::Block *insertBlock = getBlockForAllocaInsert(parentOp, parentRegion);
+      size.getDefiningOp()->moveAfter(insertBlock, insertBlock->begin());
+      rewriter.setInsertionPointAfter(size.getDefiningOp());
+    }
+
     // NOTE: we used to pass alloc->getAttrs() in the builder for non opaque
     // pointers! Only propagate pinned and bindc_name to help debugging, but
     // this should have no functional purpose (and passing the operand segment
diff --git a/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
index 72e072db37432..6d86879cd3219 100644
--- a/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
+++ b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
@@ -65,7 +65,7 @@ mlir::LLVM::ConstantOp ConvertFIRToLLVMPattern::genConstantOffset(
 mlir::Value
 ConvertFIRToLLVMPattern::integerCast(mlir::Location loc,
                                      mlir::ConversionPatternRewriter &rewriter,
-                                     mlir::Type ty, mlir::Value val) const {
+                                     mlir::Type ty, mlir::Value val, bool fold) const {
   auto valTy = val.getType();
   // If the value was not yet lowered, lower its type so that it can
   // be used in getPrimitiveTypeSizeInBits.
@@ -73,10 +73,17 @@ ConvertFIRToLLVMPattern::integerCast(mlir::Location loc,
     valTy = convertType(valTy);
   auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
   auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
-  if (toSize < fromSize)
-    return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
-  if (toSize > fromSize)
-    return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
+  if (fold) {
+    if (toSize < fromSize)
+      return rewriter.createOrFold<mlir::LLVM::TruncOp>(loc, ty, val);
+    if (toSize > fromSize)
+      return rewriter.createOrFold<mlir::LLVM::SExtOp>(loc, ty, val);
+  } else {
+    if (toSize < fromSize)
+      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
+    if (toSize > fromSize)
+      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
+  }
   return val;
 }
 
@@ -274,16 +281,19 @@ mlir::Value ConvertFIRToLLVMPattern::computeBoxSize(
 // Find the Block in which the alloca should be inserted.
 // The order to recursively find the proper block:
 // 1. An OpenMP Op that will be outlined.
-// 2. A LLVMFuncOp
-// 3. The first ancestor that is an OpenMP Op or a LLVMFuncOp
-mlir::Block *
-ConvertFIRToLLVMPattern::getBlockForAllocaInsert(mlir::Operation *op) const {
+// 2. An OpenMP or OpenACC Op with one or more regions holding executable code.
+// 3. A LLVMFuncOp
+// 4. The first ancestor that is one of the above.
+mlir::Block *ConvertFIRToLLVMPattern::getBlockForAllocaInsert(
+    mlir::Operation *op, mlir::Region *parentRegion) const {
   if (auto iface = mlir::dyn_cast<mlir::omp::OutlineableOpenMPOpInterface>(op))
     return iface.getAllocaBlock();
+  if (auto recipeIface = mlir::dyn_cast<mlir::accomp::RecipeInterface>(op))
+    return recipeIface.getAllocaBlock(*parentRegion);
   if (auto llvmFuncOp = mlir::dyn_cast<mlir::LLVM::LLVMFuncOp>(op))
     return &llvmFuncOp.front();
 
-  return getBlockForAllocaInsert(op->getParentOp());
+  return getBlockForAllocaInsert(op->getParentOp(), parentRegion);
 }
 
 // Generate an alloca of size 1 for an object of type \p llvmObjectTy in the
@@ -297,16 +307,9 @@ mlir::Value ConvertFIRToLLVMPattern::genAllocaAndAddrCastWithType(
     mlir::ConversionPatternRewriter &rewriter) const {
   auto thisPt = rewriter.saveInsertionPoint();
   mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
-  if (mlir::isa<mlir::omp::DeclareReductionOp>(parentOp) ||
-      mlir::isa<mlir::omp::PrivateClauseOp>(parentOp)) {
-    // DeclareReductionOp & PrivateClauseOp have multiple child regions. We want
-    // to get the first block of whichever of those regions we are currently in
-    mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
-    rewriter.setInsertionPointToStart(&parentRegion->front());
-  } else {
-    mlir::Block *insertBlock = getBlockForAllocaInsert(parentOp);
-    rewriter.setInsertionPointToStart(insertBlock);
-  }
+  mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
+  mlir::Block *insertBlock = getBlockForAllocaInsert(parentOp, parentRegion);
+  rewriter.setInsertionPointToStart(insertBlock);
   auto size = genI32Constant(loc, rewriter, 1);
   unsigned allocaAs = getAllocaAddressSpace(rewriter);
   unsigned programAs = getProgramAddressSpace(rewriter);

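For context on why so many CHECK lines move, here is a hedged before/after sketch of the LLVM IR this first patch produces (the functions and values are hypothetical, not taken from the tests). Constant-sized allocas that used to be emitted wherever their fir.alloca appeared are now grouped near function entry, and the relative order of the hoisted allocas changes (in the updated tests it comes out reversed), so checks that depended on the old ordering or SSA numbering need updating.

  ; Before: each alloca emitted at the point of its fir.alloca.
  define void @sketch_before() {
    %1 = alloca i32, i64 1, align 4
    store i32 1, ptr %1, align 4
    %2 = alloca [32 x i8], i64 1, align 1    ; created late, just before use
    call void @use(ptr %1, ptr %2)
    ret void
  }

  ; After: constant-sized allocas hoisted to the start of the entry block,
  ; with the later-converted alloca ending up above the earlier one.
  define void @sketch_after() {
    %1 = alloca [32 x i8], i64 1, align 1
    %2 = alloca i32, i64 1, align 4
    store i32 1, ptr %2, align 4
    call void @use(ptr %2, ptr %1)
    ret void
  }

  declare void @use(ptr, ptr)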
>From 52e0a94fe2c51915dfbc2f67463aa831427ccce0 Mon Sep 17 00:00:00 2001
From: Vijay Kandiah <vkandiah at sky6.pgi.net>
Date: Wed, 12 Jun 2024 14:12:40 -0700
Subject: [PATCH 2/5] [flang] code-formatting fixes for hoisting alloca pass.

---
 flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h | 8 +++++---
 flang/lib/Optimizer/CodeGen/CodeGen.cpp               | 6 ++++--
 flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp         | 7 +++----
 3 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h b/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
index 6ace73e2d16af..ac095664f6188 100644
--- a/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
+++ b/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
@@ -53,7 +53,7 @@ class ConvertFIRToLLVMPattern : public mlir::ConvertToLLVMPattern {
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Type ty, mlir::Value val,
                           bool fold = false) const;
-  
+
   struct TypePair {
     mlir::Type fir;
     mlir::Type llvm;
@@ -146,10 +146,12 @@ class ConvertFIRToLLVMPattern : public mlir::ConvertToLLVMPattern {
   // Find the Block in which the alloca should be inserted.
   // The order to recursively find the proper block:
   // 1. An OpenMP Op that will be outlined.
-  // 2. An OpenMP or OpenACC Op with one or more regions holding executable code.
+  // 2. An OpenMP or OpenACC Op with one or more regions holding executable
+  // code.
   // 3. A LLVMFuncOp
   // 4. The first ancestor that is one of the above.
-  mlir::Block *getBlockForAllocaInsert(mlir::Operation *op, mlir::Region *parentRegion) const;
+  mlir::Block *getBlockForAllocaInsert(mlir::Operation *op,
+                                       mlir::Region *parentRegion) const;
 
   // Generate an alloca of size 1 for an object of type \p llvmObjectTy in the
   // allocation address space provided for the architecture in the DataLayout
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index d078a000ccd65..4448224024f20 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -236,7 +236,8 @@ struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
       }
     }
     if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
-      size = rewriter.createOrFold<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
+      size =
+          rewriter.createOrFold<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
     if (alloc.hasShapeOperands()) {
       unsigned end = operands.size();
       for (; i < end; ++i)
@@ -252,7 +253,8 @@ struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
       // Set the Block in which the llvm alloca should be inserted.
       mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
       mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
-      mlir::Block *insertBlock = getBlockForAllocaInsert(parentOp, parentRegion);
+      mlir::Block *insertBlock =
+          getBlockForAllocaInsert(parentOp, parentRegion);
       size.getDefiningOp()->moveAfter(insertBlock, insertBlock->begin());
       rewriter.setInsertionPointAfter(size.getDefiningOp());
     }
diff --git a/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
index 6d86879cd3219..b9a28b89d9a55 100644
--- a/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
+++ b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
@@ -62,10 +62,9 @@ mlir::LLVM::ConstantOp ConvertFIRToLLVMPattern::genConstantOffset(
 /// to the specific target may involve some sign-extending or truncation of
 /// values, particularly to fit them from abstract box types to the
 /// appropriate reified structures.
-mlir::Value
-ConvertFIRToLLVMPattern::integerCast(mlir::Location loc,
-                                     mlir::ConversionPatternRewriter &rewriter,
-                                     mlir::Type ty, mlir::Value val, bool fold) const {
+mlir::Value ConvertFIRToLLVMPattern::integerCast(
+    mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
+    mlir::Type ty, mlir::Value val, bool fold) const {
   auto valTy = val.getType();
   // If the value was not yet lowered, lower its type so that it can
   // be used in getPrimitiveTypeSizeInBits.

>From 457b34fcfeddee71c98b98cfa8884165444831c5 Mon Sep 17 00:00:00 2001
From: Vijay Kandiah <vkandiah at sky6.pgi.net>
Date: Thu, 13 Jun 2024 11:51:57 -0700
Subject: [PATCH 3/5] [flang] lit test fixes for hoisting alloca change.

---
 flang/test/Fir/alloc.fir                      | 12 +++---
 flang/test/Fir/boxproc.fir                    | 10 ++---
 .../Fir/convert-to-llvm-openmp-and-fir.fir    | 39 ++++++++++---------
 flang/test/Fir/convert-to-llvm.fir            | 22 +++++------
 flang/test/Integration/OpenMP/copyprivate.f90 |  2 +-
 flang/test/Transforms/debug-local-var-2.f90   | 10 ++---
 6 files changed, 48 insertions(+), 47 deletions(-)

diff --git a/flang/test/Fir/alloc.fir b/flang/test/Fir/alloc.fir
index ca624c0d1f9d6..e00fc9d6649c4 100644
--- a/flang/test/Fir/alloc.fir
+++ b/flang/test/Fir/alloc.fir
@@ -156,7 +156,7 @@ func.func @allocmem_array_of_dynchar(%l: i32) -> !fir.heap<!fir.array<3x3x!fir.c
 
 // CHECK-LABEL: define ptr @alloca_dynarray_of_nonchar(
 // CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 1, %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 %[[extent]], 1
 // CHECK: alloca [3 x i32], i64 %[[prod1]]
 func.func @alloca_dynarray_of_nonchar(%e: index) -> !fir.ref<!fir.array<3x?xi32>> {
   %1 = fir.alloca !fir.array<3x?xi32>, %e
@@ -165,7 +165,7 @@ func.func @alloca_dynarray_of_nonchar(%e: index) -> !fir.ref<!fir.array<3x?xi32>
 
 // CHECK-LABEL: define ptr @alloca_dynarray_of_nonchar2(
 // CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 1, %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 %[[extent]], 1
 // CHECK: %[[prod2:.*]] = mul i64 %[[prod1]], %[[extent]]
 // CHECK: alloca i32, i64 %[[prod2]]
 func.func @alloca_dynarray_of_nonchar2(%e: index) -> !fir.ref<!fir.array<?x?xi32>> {
@@ -194,7 +194,7 @@ func.func @allocmem_dynarray_of_nonchar2(%e: index) -> !fir.heap<!fir.array<?x?x
 
 // CHECK-LABEL: define ptr @alloca_dynarray_of_char(
 // CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 1, %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 %[[extent]], 1
 // CHECK: alloca [3 x [10 x i16]], i64 %[[prod1]]
 func.func @alloca_dynarray_of_char(%e : index) -> !fir.ref<!fir.array<3x?x!fir.char<2,10>>> {
   %1 = fir.alloca !fir.array<3x?x!fir.char<2,10>>, %e
@@ -203,7 +203,7 @@ func.func @alloca_dynarray_of_char(%e : index) -> !fir.ref<!fir.array<3x?x!fir.c
 
 // CHECK-LABEL: define ptr @alloca_dynarray_of_char2(
 // CHECK-SAME: i64 %[[extent:.*]])
-// CHECK: %[[prod1:.*]] = mul i64 1, %[[extent]]
+// CHECK: %[[prod1:.*]] = mul i64 %[[extent]], 1
 // CHECK: %[[prod2:.*]] = mul i64 %[[prod1]], %[[extent]]
 // CHECK: alloca [10 x i16], i64 %[[prod2]]
 func.func @alloca_dynarray_of_char2(%e : index) -> !fir.ref<!fir.array<?x?x!fir.char<2,10>>> {
@@ -334,10 +334,10 @@ func.func @allocmem_array_with_holes_dynchar(%arg0: index, %arg1: index) -> !fir
 }
 
 // CHECK-LABEL: define void @alloca_unlimited_polymorphic_box
-// CHECK:    %[[VAL_0:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, i64 1
 // CHECK:    %[[VAL_1:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, i64 1
-// CHECK:    %[[VAL_2:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, i64 1
+// CHECK:    %[[VAL_0:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, i64 1
 // CHECK:    %[[VAL_3:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, i64 1
+// CHECK:    %[[VAL_2:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, i64 1
 
 func.func @alloca_unlimited_polymorphic_box() {
   %0 = fir.alloca !fir.class<none>
diff --git a/flang/test/Fir/boxproc.fir b/flang/test/Fir/boxproc.fir
index 1fed16a808af0..834017bff71aa 100644
--- a/flang/test/Fir/boxproc.fir
+++ b/flang/test/Fir/boxproc.fir
@@ -1,12 +1,12 @@
 // RUN: tco %s | FileCheck %s
 
 // CHECK-LABEL: define void @_QPtest_proc_dummy()
-// CHECK:         %[[VAL_0:.*]] = alloca i32, i64 1, align 4
+// CHECK:         %[[VAL_3:.*]] = alloca [32 x i8], i64 1, align 1
 // CHECK:         %[[VAL_1:.*]] = alloca { ptr }, i64 1, align 8
+// CHECK:         %[[VAL_0:.*]] = alloca i32, i64 1, align 4
 // CHECK:         %[[VAL_2:.*]] = getelementptr { ptr }, ptr %[[VAL_1]], i32 0, i32 0
 // CHECK:         store ptr %[[VAL_0]], ptr %[[VAL_2]], align 8
 // CHECK:         store i32 1, ptr %[[VAL_0]], align 4
-// CHECK:         %[[VAL_3:.*]] = alloca [32 x i8], i64 1, align 1
 // CHECK:         call void @llvm.init.trampoline(ptr %[[VAL_3]], ptr @_QFtest_proc_dummyPtest_proc_dummy_a, ptr %[[VAL_1]])
 // CHECK:         %[[VAL_6:.*]] = call ptr @llvm.adjust.trampoline(ptr %[[VAL_3]])
 // CHECK:         call void @_QPtest_proc_dummy_other(ptr %[[VAL_6]])
@@ -61,9 +61,10 @@ func.func @_QPtest_proc_dummy_other(%arg0: !fir.boxproc<() -> ()>) {
 }
 
 // CHECK-LABEL: define void @_QPtest_proc_dummy_char()
-// CHECK:         %[[VAL_0:.*]] = alloca [40 x i8], i64 1, align 1
-// CHECK:         %[[VAL_1:.*]] = alloca [10 x i8], i64 1, align 1
+// CHECK:         %[[VAL_20:.*]] = alloca [32 x i8], i64 1, align 1
 // CHECK:         %[[VAL_2:.*]] = alloca { { ptr, i64 } }, i64 1, align 8
+// CHECK:         %[[VAL_1:.*]] = alloca [10 x i8], i64 1, align 1
+// CHECK:         %[[VAL_0:.*]] = alloca [40 x i8], i64 1, align 1
 // CHECK:         %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i32 0, i32 0
 // CHECK:         %[[VAL_5:.*]] = insertvalue { ptr, i64 } undef, ptr %[[VAL_1]], 0
 // CHECK:         %[[VAL_6:.*]] = insertvalue { ptr, i64 } %[[VAL_5]], i64 10, 1
@@ -75,7 +76,6 @@ func.func @_QPtest_proc_dummy_other(%arg0: !fir.boxproc<() -> ()>) {
 // CHECK:         %[[VAL_15:.*]] = icmp sgt i64 %[[VAL_13]], 0
 // CHECK:         %[[VAL_18:.*]] = getelementptr [10 x [1 x i8]], ptr %[[VAL_1]], i32 0, i64 %[[VAL_11]]
 // CHECK:         store [1 x i8] c" ", ptr %[[VAL_18]], align 1
-// CHECK:         %[[VAL_20:.*]] = alloca [32 x i8], i64 1, align 1
 // CHECK:         call void @llvm.init.trampoline(ptr %[[VAL_20]], ptr @_QFtest_proc_dummy_charPgen_message, ptr %[[VAL_2]])
 // CHECK:         %[[VAL_23:.*]] = call ptr @llvm.adjust.trampoline(ptr %[[VAL_20]])
 // CHECK:         %[[VAL_25:.*]] = insertvalue { ptr, i64 } undef, ptr %[[VAL_23]], 0
diff --git a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
index 72cd0a763e71a..db5d0af98a692 100644
--- a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
+++ b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
@@ -282,17 +282,17 @@ func.func @_QPomp_target_data() {
 
  // CHECK-LABEL:  llvm.func @_QPomp_target_data() {
  // CHECK:    %0 = llvm.mlir.constant(1024 : index) : i64
- // CHECK:    %[[VAL_0:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK:    %[[VAL_1:.*]] = llvm.alloca %[[VAL_0]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
- // CHECK:    %3 = llvm.mlir.constant(1024 : index) : i64
- // CHECK:    %[[VAL_2:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK:    %[[VAL_3:.*]] = llvm.alloca %[[VAL_2]] x !llvm.array<1024 x i32> {bindc_name = "b"} : (i64) -> !llvm.ptr
- // CHECK:    %6 = llvm.mlir.constant(1024 : index) : i64
+ // CHECK:    %[[VAL_6:.*]] = llvm.mlir.constant(1 : i64) : i64
+ // CHECK:    %[[VAL_7:.*]] = llvm.alloca %[[VAL_6]] x !llvm.array<1024 x i32> {bindc_name = "d"} : (i64) -> !llvm.ptr
  // CHECK:    %[[VAL_4:.*]] = llvm.mlir.constant(1 : i64) : i64
  // CHECK:    %[[VAL_5:.*]] = llvm.alloca %[[VAL_4]] x !llvm.array<1024 x i32> {bindc_name = "c"} : (i64) -> !llvm.ptr
+ // CHECK:    %[[VAL_2:.*]] = llvm.mlir.constant(1 : i64) : i64
+ // CHECK:    %[[VAL_3:.*]] = llvm.alloca %[[VAL_2]] x !llvm.array<1024 x i32> {bindc_name = "b"} : (i64) -> !llvm.ptr
+ // CHECK:    %[[VAL_0:.*]] = llvm.mlir.constant(1 : i64) : i64
+ // CHECK:    %[[VAL_1:.*]] = llvm.alloca %[[VAL_0]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
  // CHECK:    %9 = llvm.mlir.constant(1024 : index) : i64
- // CHECK:    %[[VAL_6:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK:    %[[VAL_7:.*]] = llvm.alloca %[[VAL_6]] x !llvm.array<1024 x i32> {bindc_name = "d"} : (i64) -> !llvm.ptr
+ // CHECK:    %10 = llvm.mlir.constant(1024 : index) : i64
+ // CHECK:    %11 = llvm.mlir.constant(1024 : index) : i64
  // CHECK:    %12 = llvm.mlir.constant(1 : index) : i64
  // CHECK:    %13 = llvm.mlir.constant(0 : index) : i64
  // CHECK:    %14 = llvm.mlir.constant(1023 : index) : i64
@@ -301,12 +301,12 @@ func.func @_QPomp_target_data() {
  // CHECK:    %17 = llvm.mlir.constant(1 : index) : i64
  // CHECK:    %18 = llvm.mlir.constant(0 : index) : i64
  // CHECK:    %19 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %20 = omp.map.bounds   lower_bound(%18 : i64) upper_bound(%19 : i64) extent(%3 : i64) stride(%17 : i64) start_idx(%17 : i64)
+ // CHECK:    %20 = omp.map.bounds   lower_bound(%18 : i64) upper_bound(%19 : i64) extent(%9 : i64) stride(%17 : i64) start_idx(%17 : i64)
  // CHECK:    %21 = omp.map.info var_ptr(%[[VAL_3]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(to) capture(ByRef) bounds(%20) -> !llvm.ptr {name = "b"}
  // CHECK:    %22 = llvm.mlir.constant(1 : index) : i64
  // CHECK:    %23 = llvm.mlir.constant(0 : index) : i64
  // CHECK:    %24 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %25 = omp.map.bounds   lower_bound(%23 : i64) upper_bound(%24 : i64) extent(%6 : i64) stride(%22 : i64) start_idx(%22 : i64)
+ // CHECK:    %25 = omp.map.bounds   lower_bound(%23 : i64) upper_bound(%24 : i64) extent(%10 : i64) stride(%22 : i64) start_idx(%22 : i64)
  // CHECK:    %26 = omp.map.info var_ptr(%[[VAL_5]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(always, exit_release_or_enter_alloc) capture(ByRef) bounds(%25) -> !llvm.ptr {name = "c"}
  // CHECK:    omp.target_enter_data   map_entries(%16, %21, %26 : !llvm.ptr, !llvm.ptr, !llvm.ptr)
  // CHECK:    %27 = llvm.mlir.constant(1 : index) : i64
@@ -317,17 +317,17 @@ func.func @_QPomp_target_data() {
  // CHECK:    %32 = llvm.mlir.constant(1 : index) : i64
  // CHECK:    %33 = llvm.mlir.constant(0 : index) : i64
  // CHECK:    %34 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %35 = omp.map.bounds   lower_bound(%33 : i64) upper_bound(%34 : i64) extent(%3 : i64) stride(%32 : i64) start_idx(%32 : i64)
+ // CHECK:    %35 = omp.map.bounds   lower_bound(%33 : i64) upper_bound(%34 : i64) extent(%9 : i64) stride(%32 : i64) start_idx(%32 : i64)
  // CHECK:    %36 = omp.map.info var_ptr(%[[VAL_3]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(from) capture(ByRef) bounds(%35) -> !llvm.ptr {name = "b"}
  // CHECK:    %37 = llvm.mlir.constant(1 : index) : i64
  // CHECK:    %38 = llvm.mlir.constant(0 : index) : i64
  // CHECK:    %39 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %40 = omp.map.bounds   lower_bound(%38 : i64) upper_bound(%39 : i64) extent(%6 : i64) stride(%37 : i64) start_idx(%37 : i64)
+ // CHECK:    %40 = omp.map.bounds   lower_bound(%38 : i64) upper_bound(%39 : i64) extent(%10 : i64) stride(%37 : i64) start_idx(%37 : i64)
  // CHECK:    %41 = omp.map.info var_ptr(%[[VAL_5]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) bounds(%40) -> !llvm.ptr {name = "c"}
  // CHECK:    %42 = llvm.mlir.constant(1 : index) : i64
  // CHECK:    %43 = llvm.mlir.constant(0 : index) : i64
  // CHECK:    %44 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %45 = omp.map.bounds   lower_bound(%43 : i64) upper_bound(%44 : i64) extent(%9 : i64) stride(%42 : i64) start_idx(%42 : i64)
+ // CHECK:    %45 = omp.map.bounds   lower_bound(%43 : i64) upper_bound(%44 : i64) extent(%11 : i64) stride(%42 : i64) start_idx(%42 : i64)
  // CHECK:    %46 = omp.map.info var_ptr(%[[VAL_7]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(always, delete) capture(ByRef) bounds(%45) -> !llvm.ptr {name = "d"}
  // CHECK:    omp.target_exit_data   map_entries(%31, %36, %41, %46 : !llvm.ptr, !llvm.ptr, !llvm.ptr, !llvm.ptr)
  // CHECK:    llvm.return
@@ -374,9 +374,9 @@ func.func @_QPopenmp_target_data_region() {
 
 // CHECK-LABEL:   llvm.func @_QPopenmp_target_data_region() {
 // CHECK:           %[[VAL_0:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK:           %[[VAL_1:.*]] = llvm.alloca %[[VAL_0]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
 // CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i64) : i64
 // CHECK:           %[[VAL_3:.*]] = llvm.alloca %[[VAL_2]] x i32 {bindc_name = "i"} : (i64) -> !llvm.ptr
+// CHECK:           %[[VAL_1:.*]] = llvm.alloca %[[VAL_0]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
 // CHECK:           %[[VAL_MAX:.*]] = llvm.mlir.constant(1024 : index) : i64
 // CHECK:           %[[VAL_ONE:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK:           %[[VAL_ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -675,9 +675,9 @@ func.func @_QPsb() {
 }
 
 // CHECK:  llvm.func @_QPsb() {
-// CHECK:    %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
 // CHECK:    %[[SIZE:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK:    %[[LI_REF:.*]] = llvm.alloca %6 x i32 {bindc_name = "li"} : (i64) -> !llvm.ptr
+// CHECK:    %[[LI_REF:.*]] = llvm.alloca %[[SIZE]] x i32 {bindc_name = "li"} : (i64) -> !llvm.ptr
+// CHECK:    %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
 // CHECK:    omp.sections   {
 // CHECK:      omp.section {
 // CHECK:        llvm.br ^[[BB_ENTRY:.*]]({{.*}})
@@ -715,7 +715,7 @@ func.func @_QPsb() {
 // CHECK:  }
 // CHECK-LABEL:  @_QPsimple_reduction
 // CHECK-SAME: %[[ARRAY_REF:.*]]: !llvm.ptr
-// CHECK:    %[[RED_ACCUMULATOR:.*]] = llvm.alloca %2 x i32 {bindc_name = "x"} : (i64) -> !llvm.ptr
+// CHECK:    %[[RED_ACCUMULATOR:.*]] = llvm.alloca %1 x i32 {bindc_name = "x"} : (i64) -> !llvm.ptr
 // CHECK:    omp.parallel   {
 // CHECK:      omp.wsloop reduction(@[[EQV_REDUCTION]] %[[RED_ACCUMULATOR]] -> %[[PRV:.+]] : !llvm.ptr) {
 // CHECK-NEXT:   omp.loop_nest
@@ -797,6 +797,7 @@ func.func @_QPs(%arg0: !fir.ref<!fir.complex<4>> {fir.bindc_name = "x"}) {
 
 // Test if llvm.alloca is properly inserted in the omp section
 
+//CHECK:  %[[CONST0:.*]] = llvm.mlir.constant(1 : i64) : i64
 //CHECK:  %[[CONST:.*]] = llvm.mlir.constant(1 : i64) : i64
 //CHECK:  %[[ALLOCA:.*]] = llvm.alloca %[[CONST]] x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> {bindc_name = "iattr"} : (i64) -> !llvm.ptr
 //CHECK:  omp.parallel   {
@@ -907,9 +908,9 @@ omp.critical.declare @help hint(contended)
 
 // CHECK: llvm.func @omp_critical_() {
 func.func @omp_critical_() {
-// CHECK: %[[X_REF:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "x"} : (i64) -> !llvm.ptr
-  %0 = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFomp_criticalEx"}
 // CHECK: %[[Y_REF:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "y"} : (i64) -> !llvm.ptr
+  %0 = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFomp_criticalEx"}
+// CHECK: %[[X_REF:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "x"} : (i64) -> !llvm.ptr
   %1 = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFomp_criticalEy"}
 // CHECK: omp.critical(@help)
   omp.critical(@help) {
diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir
index ee116e998c22f..d7059671d3a88 100644
--- a/flang/test/Fir/convert-to-llvm.fir
+++ b/flang/test/Fir/convert-to-llvm.fir
@@ -1178,7 +1178,7 @@ func.func @alloca_fixed_char_array(%e : index) -> !fir.ref<!fir.array<?x?x!fir.c
 // CHECK-LABEL: llvm.func @alloca_fixed_char_array
 // CHECK-SAME: ([[E:%.*]]: i64) -> !llvm.ptr
 // CHECK-DAG: [[ONE:%.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: [[PROD1:%.*]] = llvm.mul [[ONE]], [[E]] : i64
+// CHECK: [[PROD1:%.*]] = llvm.mul [[E]], [[ONE]] : i64
 // CHECK: [[PROD2:%.*]] = llvm.mul [[PROD1]], [[E]] : i64
 // GENERIC: [[A:%.*]] = llvm.alloca [[PROD2]] x !llvm.array<8 x i8>
 // AMDGPU: [[AA:%.*]] = llvm.alloca [[PROD2]] x !llvm.array<8 x i8> : (i64) -> !llvm.ptr<5>
@@ -1225,7 +1225,7 @@ func.func @alloca_multidim_array(%0 : index) -> !fir.ref<!fir.array<8x16x32xf32>
 // CHECK-SAME: ([[OP1:%.*]]: i64) -> !llvm.ptr
 // CHECK: [[OP2:%.*]] = llvm.mlir.constant(24 : index) : i64
 // CHECK: [[ONE:%.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: [[MUL1:%.*]] = llvm.mul [[ONE]], [[OP1]] : i64
+// CHECK: [[MUL1:%.*]] = llvm.mul [[OP1]], [[ONE]] : i64
 // CHECK: [[TOTAL:%.*]] = llvm.mul [[MUL1]], [[OP2]] : i64
 // GENERIC: [[A:%.*]] = llvm.alloca [[TOTAL]] x !llvm.array<32 x array<16 x array<8 x f32>>>
 // AMDGPU: [[AA:%.*]] = llvm.alloca [[TOTAL]] x !llvm.array<32 x array<16 x array<8 x f32>>> : (i64) -> !llvm.ptr<5>
@@ -1246,7 +1246,7 @@ func.func @alloca_const_interior_array(%0 : index) -> !fir.ref<!fir.array<8x9x?x
 // CHECK-SAME: ([[OP1:%.*]]: i64) -> !llvm.ptr
 // CHECK: [[OP2:%.*]] = llvm.mlir.constant(64 : index) : i64
 // CHECK: [[ONE:%.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: [[MUL1:%.*]] = llvm.mul [[ONE]], [[OP1]] : i64
+// CHECK: [[MUL1:%.*]] = llvm.mul [[OP1]], [[ONE]] : i64
 // CHECK: [[TOTAL:%.*]] = llvm.mul [[MUL1]], [[OP2]] : i64
 // GENERIC: [[A:%.*]] = llvm.alloca [[TOTAL]] x !llvm.array<9 x array<8 x f32>>
 // AMDGPU: [[AA:%.*]] = llvm.alloca [[TOTAL]] x !llvm.array<9 x array<8 x f32>> : (i64) -> !llvm.ptr<5>
@@ -1937,7 +1937,7 @@ func.func private @_QPxb(!fir.box<!fir.array<?x?xf64>>)
 // CHECK:         %[[N2_TMP:.*]] = llvm.sub %[[N]], %[[SH2]]  : i64
 // CHECK:         %[[N2:.*]] = llvm.add %[[N2_TMP]], %[[C1]]  : i64
 // CHECK:         %[[C1_0:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK:         %[[ARR_SIZE_TMP1:.*]] = llvm.mul %[[C1_0]], %[[N1]]  : i64
+// CHECK:         %[[ARR_SIZE_TMP1:.*]] = llvm.mul %[[N1]], %[[C1_0]]  : i64
 // CHECK:         %[[ARR_SIZE:.*]] = llvm.mul %[[ARR_SIZE_TMP1]], %[[N2]]  : i64
 // GENERIC:       %[[ARR:.*]] = llvm.alloca %[[ARR_SIZE]] x f64 {bindc_name = "arr"} : (i64) -> !llvm.ptr
 // AMDGPU:        %[[AR:.*]] = llvm.alloca %[[ARR_SIZE]] x f64 {bindc_name = "arr"} : (i64) -> !llvm.ptr<5>
@@ -2015,17 +2015,17 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box<!fir.array<?xi32>>)
 // AMDGPU:        %[[AA:.*]] = llvm.alloca %[[ALLOCA_SIZE]] x !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<5>
 // AMDGPU:        %[[ALLOCA:.*]] = llvm.addrspacecast %[[AA]] : !llvm.ptr<5> to !llvm.ptr
 // CHECK:         %[[C20:.*]] = llvm.mlir.constant(20 : index) : i64
-// CHECK:         %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK:         %[[C10:.*]] = llvm.mlir.constant(10 : i64) : i64
-// CHECK:         %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
-// CHECK:         %[[ALLOCA_SIZE_V:.*]] = llvm.mlir.constant(1 : i64) : i64
-// GENERIC:       %[[V:.*]] = llvm.alloca %[[ALLOCA_SIZE_V]] x i32 {bindc_name = "v"} : (i64) -> !llvm.ptr
-// AMDGPU:        %[[AB:.*]] = llvm.alloca %[[ALLOCA_SIZE_V]] x i32 {bindc_name = "v"} : (i64) -> !llvm.ptr<5>
-// AMDGPU:        %[[V:.*]] = llvm.addrspacecast %[[AB]] : !llvm.ptr<5> to !llvm.ptr
 // CHECK:         %[[ALLOCA_SIZE_X:.*]] = llvm.mlir.constant(1 : i64) : i64
 // GENERIC:       %[[X:.*]] = llvm.alloca %[[ALLOCA_SIZE_X]] x !llvm.array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>> {bindc_name = "x"} : (i64) -> !llvm.ptr
 // AMDGPU:        %[[AC:.*]] = llvm.alloca %[[ALLOCA_SIZE_X]] x !llvm.array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>> {bindc_name = "x"} : (i64) -> !llvm.ptr<5>
 // AMDGPU:        %[[X:.*]] = llvm.addrspacecast %[[AC]] : !llvm.ptr<5> to !llvm.ptr
+// CHECK:         %[[ALLOCA_SIZE_V:.*]] = llvm.mlir.constant(1 : i64) : i64
+// GENERIC:       %[[V:.*]] = llvm.alloca %[[ALLOCA_SIZE_V]] x i32 {bindc_name = "v"} : (i64) -> !llvm.ptr
+// AMDGPU:        %[[AB:.*]] = llvm.alloca %[[ALLOCA_SIZE_V]] x i32 {bindc_name = "v"} : (i64) -> !llvm.ptr<5>
+// AMDGPU:        %[[V:.*]] = llvm.addrspacecast %[[AB]] : !llvm.ptr<5> to !llvm.ptr
+// CHECK:         %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK:         %[[C10:.*]] = llvm.mlir.constant(10 : i64) : i64
+// CHECK:         %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
 // CHECK:         %[[TYPE_CODE:.*]] = llvm.mlir.constant(9 : i32) : i32
 // CHECK:         %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
 // CHECK:         %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
diff --git a/flang/test/Integration/OpenMP/copyprivate.f90 b/flang/test/Integration/OpenMP/copyprivate.f90
index d32319a18c28b..dd69ebdb881a1 100644
--- a/flang/test/Integration/OpenMP/copyprivate.f90
+++ b/flang/test/Integration/OpenMP/copyprivate.f90
@@ -33,8 +33,8 @@
 !CHECK-NEXT:  }
 
 !CHECK-LABEL: define internal void @test_scalar_..omp_par({{.*}})
-!CHECK:         %[[I:.*]] = alloca i32, i64 1
 !CHECK:         %[[J:.*]] = alloca i32, i64 1
+!CHECK:         %[[I:.*]] = alloca i32, i64 1
 !CHECK:         %[[DID_IT:.*]] = alloca i32
 !CHECK:         store i32 0, ptr %[[DID_IT]]
 !CHECK:         %[[THREAD_NUM1:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[LOC:.*]])
diff --git a/flang/test/Transforms/debug-local-var-2.f90 b/flang/test/Transforms/debug-local-var-2.f90
index 3b2873a1edaaf..24814fc4f456a 100644
--- a/flang/test/Transforms/debug-local-var-2.f90
+++ b/flang/test/Transforms/debug-local-var-2.f90
@@ -6,12 +6,12 @@
 ! This tests checks the debug information for local variables in llvm IR.
 
 ! BOTH-LABEL: define void @_QQmain
-! BOTH-DAG: %[[AL11:.*]] = alloca i32
-! BOTH-DAG: %[[AL12:.*]] = alloca i64
-! BOTH-DAG: %[[AL13:.*]] = alloca i8
-! BOTH-DAG: %[[AL14:.*]] = alloca i32
-! BOTH-DAG: %[[AL15:.*]] = alloca float
 ! BOTH-DAG: %[[AL16:.*]] = alloca double
+! BOTH-DAG: %[[AL15:.*]] = alloca float
+! BOTH-DAG: %[[AL14:.*]] = alloca i32
+! BOTH-DAG: %[[AL13:.*]] = alloca i8
+! BOTH-DAG: %[[AL12:.*]] = alloca i64
+! BOTH-DAG: %[[AL11:.*]] = alloca i32
 ! INTRINSICS-DAG: call void @llvm.dbg.declare(metadata ptr %[[AL11]], metadata ![[I4:.*]], metadata !DIExpression())
 ! INTRINSICS-DAG: call void @llvm.dbg.declare(metadata ptr %[[AL12]], metadata ![[I8:.*]], metadata !DIExpression())
 ! INTRINSICS-DAG: call void @llvm.dbg.declare(metadata ptr %[[AL13]], metadata ![[L1:.*]], metadata !DIExpression())

>From b851f58a3cba189627ca61ab253463b7723300df Mon Sep 17 00:00:00 2001
From: Vijay Kandiah <vkandiah at sky6.pgi.net>
Date: Thu, 13 Jun 2024 12:35:53 -0700
Subject: [PATCH 4/5] [flang] fixing lit test
 convert-to-llvm-openmp-and-fir.fir

---
 .../Fir/convert-to-llvm-openmp-and-fir.fir    | 104 +++++++++---------
 1 file changed, 52 insertions(+), 52 deletions(-)

diff --git a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
index db5d0af98a692..45ff89bc40943 100644
--- a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
+++ b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
@@ -280,58 +280,58 @@ func.func @_QPomp_target_data() {
   return
 }
 
- // CHECK-LABEL:  llvm.func @_QPomp_target_data() {
- // CHECK:    %0 = llvm.mlir.constant(1024 : index) : i64
- // CHECK:    %[[VAL_6:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK:    %[[VAL_7:.*]] = llvm.alloca %[[VAL_6]] x !llvm.array<1024 x i32> {bindc_name = "d"} : (i64) -> !llvm.ptr
- // CHECK:    %[[VAL_4:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK:    %[[VAL_5:.*]] = llvm.alloca %[[VAL_4]] x !llvm.array<1024 x i32> {bindc_name = "c"} : (i64) -> !llvm.ptr
- // CHECK:    %[[VAL_2:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK:    %[[VAL_3:.*]] = llvm.alloca %[[VAL_2]] x !llvm.array<1024 x i32> {bindc_name = "b"} : (i64) -> !llvm.ptr
- // CHECK:    %[[VAL_0:.*]] = llvm.mlir.constant(1 : i64) : i64
- // CHECK:    %[[VAL_1:.*]] = llvm.alloca %[[VAL_0]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
- // CHECK:    %9 = llvm.mlir.constant(1024 : index) : i64
- // CHECK:    %10 = llvm.mlir.constant(1024 : index) : i64
- // CHECK:    %11 = llvm.mlir.constant(1024 : index) : i64
- // CHECK:    %12 = llvm.mlir.constant(1 : index) : i64
- // CHECK:    %13 = llvm.mlir.constant(0 : index) : i64
- // CHECK:    %14 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %15 = omp.map.bounds   lower_bound(%13 : i64) upper_bound(%14 : i64) extent(%0 : i64) stride(%12 : i64) start_idx(%12 : i64)
- // CHECK:    %16 = omp.map.info var_ptr(%[[VAL_1]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(to) capture(ByRef) bounds(%15) -> !llvm.ptr {name = "a"}
- // CHECK:    %17 = llvm.mlir.constant(1 : index) : i64
- // CHECK:    %18 = llvm.mlir.constant(0 : index) : i64
- // CHECK:    %19 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %20 = omp.map.bounds   lower_bound(%18 : i64) upper_bound(%19 : i64) extent(%9 : i64) stride(%17 : i64) start_idx(%17 : i64)
- // CHECK:    %21 = omp.map.info var_ptr(%[[VAL_3]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(to) capture(ByRef) bounds(%20) -> !llvm.ptr {name = "b"}
- // CHECK:    %22 = llvm.mlir.constant(1 : index) : i64
- // CHECK:    %23 = llvm.mlir.constant(0 : index) : i64
- // CHECK:    %24 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %25 = omp.map.bounds   lower_bound(%23 : i64) upper_bound(%24 : i64) extent(%10 : i64) stride(%22 : i64) start_idx(%22 : i64)
- // CHECK:    %26 = omp.map.info var_ptr(%[[VAL_5]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(always, exit_release_or_enter_alloc) capture(ByRef) bounds(%25) -> !llvm.ptr {name = "c"}
- // CHECK:    omp.target_enter_data   map_entries(%16, %21, %26 : !llvm.ptr, !llvm.ptr, !llvm.ptr)
- // CHECK:    %27 = llvm.mlir.constant(1 : index) : i64
- // CHECK:    %28 = llvm.mlir.constant(0 : index) : i64
- // CHECK:    %29 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %30 = omp.map.bounds   lower_bound(%28 : i64) upper_bound(%29 : i64) extent(%0 : i64) stride(%27 : i64) start_idx(%27 : i64)
- // CHECK:    %31 = omp.map.info var_ptr(%[[VAL_1]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(from) capture(ByRef) bounds(%30) -> !llvm.ptr {name = "a"}
- // CHECK:    %32 = llvm.mlir.constant(1 : index) : i64
- // CHECK:    %33 = llvm.mlir.constant(0 : index) : i64
- // CHECK:    %34 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %35 = omp.map.bounds   lower_bound(%33 : i64) upper_bound(%34 : i64) extent(%9 : i64) stride(%32 : i64) start_idx(%32 : i64)
- // CHECK:    %36 = omp.map.info var_ptr(%[[VAL_3]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(from) capture(ByRef) bounds(%35) -> !llvm.ptr {name = "b"}
- // CHECK:    %37 = llvm.mlir.constant(1 : index) : i64
- // CHECK:    %38 = llvm.mlir.constant(0 : index) : i64
- // CHECK:    %39 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %40 = omp.map.bounds   lower_bound(%38 : i64) upper_bound(%39 : i64) extent(%10 : i64) stride(%37 : i64) start_idx(%37 : i64)
- // CHECK:    %41 = omp.map.info var_ptr(%[[VAL_5]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(exit_release_or_enter_alloc) capture(ByRef) bounds(%40) -> !llvm.ptr {name = "c"}
- // CHECK:    %42 = llvm.mlir.constant(1 : index) : i64
- // CHECK:    %43 = llvm.mlir.constant(0 : index) : i64
- // CHECK:    %44 = llvm.mlir.constant(1023 : index) : i64
- // CHECK:    %45 = omp.map.bounds   lower_bound(%43 : i64) upper_bound(%44 : i64) extent(%11 : i64) stride(%42 : i64) start_idx(%42 : i64)
- // CHECK:    %46 = omp.map.info var_ptr(%[[VAL_7]] : !llvm.ptr, !llvm.array<1024 x i32>)   map_clauses(always, delete) capture(ByRef) bounds(%45) -> !llvm.ptr {name = "d"}
- // CHECK:    omp.target_exit_data   map_entries(%31, %36, %41, %46 : !llvm.ptr, !llvm.ptr, !llvm.ptr, !llvm.ptr)
- // CHECK:    llvm.return
- // CHECK: }
+// CHECK-LABEL:   llvm.func @_QPomp_target_data() {
+// CHECK:           %[[VAL_0:.*]] = llvm.mlir.constant(1024 : index) : i64
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = llvm.alloca %[[VAL_1]] x !llvm.array<1024 x i32> {bindc_name = "d"} : (i64) -> !llvm.ptr
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK:           %[[VAL_4:.*]] = llvm.alloca %[[VAL_3]] x !llvm.array<1024 x i32> {bindc_name = "c"} : (i64) -> !llvm.ptr
+// CHECK:           %[[VAL_5:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK:           %[[VAL_6:.*]] = llvm.alloca %[[VAL_5]] x !llvm.array<1024 x i32> {bindc_name = "b"} : (i64) -> !llvm.ptr
+// CHECK:           %[[VAL_7:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK:           %[[VAL_8:.*]] = llvm.alloca %[[VAL_7]] x !llvm.array<1024 x i32> {bindc_name = "a"} : (i64) -> !llvm.ptr
+// CHECK:           %[[VAL_9:.*]] = llvm.mlir.constant(1024 : index) : i64
+// CHECK:           %[[VAL_10:.*]] = llvm.mlir.constant(1024 : index) : i64
+// CHECK:           %[[VAL_11:.*]] = llvm.mlir.constant(1024 : index) : i64
+// CHECK:           %[[VAL_12:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK:           %[[VAL_13:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK:           %[[VAL_14:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK:           %[[VAL_15:.*]] = omp.map.bounds lower_bound(%[[VAL_13]] : i64) upper_bound(%[[VAL_14]] : i64) extent(%[[VAL_0]] : i64) stride(%[[VAL_12]] : i64) start_idx(%[[VAL_12]] : i64)
+// CHECK:           %[[VAL_16:.*]] = omp.map.info var_ptr(%[[VAL_8]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(to) capture(ByRef) bounds(%[[VAL_15]]) -> !llvm.ptr {name = "a"}
+// CHECK:           %[[VAL_17:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK:           %[[VAL_18:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK:           %[[VAL_19:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK:           %[[VAL_20:.*]] = omp.map.bounds lower_bound(%[[VAL_18]] : i64) upper_bound(%[[VAL_19]] : i64) extent(%[[VAL_9]] : i64) stride(%[[VAL_17]] : i64) start_idx(%[[VAL_17]] : i64)
+// CHECK:           %[[VAL_21:.*]] = omp.map.info var_ptr(%[[VAL_6]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(to) capture(ByRef) bounds(%[[VAL_20]]) -> !llvm.ptr {name = "b"}
+// CHECK:           %[[VAL_22:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK:           %[[VAL_23:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK:           %[[VAL_24:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK:           %[[VAL_25:.*]] = omp.map.bounds lower_bound(%[[VAL_23]] : i64) upper_bound(%[[VAL_24]] : i64) extent(%[[VAL_10]] : i64) stride(%[[VAL_22]] : i64) start_idx(%[[VAL_22]] : i64)
+// CHECK:           %[[VAL_26:.*]] = omp.map.info var_ptr(%[[VAL_4]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(always, exit_release_or_enter_alloc) capture(ByRef) bounds(%[[VAL_25]]) -> !llvm.ptr {name = "c"}
+// CHECK:           omp.target_enter_data map_entries(%[[VAL_16]], %[[VAL_21]], %[[VAL_26]] : !llvm.ptr, !llvm.ptr, !llvm.ptr)
+// CHECK:           %[[VAL_27:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK:           %[[VAL_28:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK:           %[[VAL_29:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK:           %[[VAL_30:.*]] = omp.map.bounds lower_bound(%[[VAL_28]] : i64) upper_bound(%[[VAL_29]] : i64) extent(%[[VAL_0]] : i64) stride(%[[VAL_27]] : i64) start_idx(%[[VAL_27]] : i64)
+// CHECK:           %[[VAL_31:.*]] = omp.map.info var_ptr(%[[VAL_8]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(from) capture(ByRef) bounds(%[[VAL_30]]) -> !llvm.ptr {name = "a"}
+// CHECK:           %[[VAL_32:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK:           %[[VAL_33:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK:           %[[VAL_34:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK:           %[[VAL_35:.*]] = omp.map.bounds lower_bound(%[[VAL_33]] : i64) upper_bound(%[[VAL_34]] : i64) extent(%[[VAL_9]] : i64) stride(%[[VAL_32]] : i64) start_idx(%[[VAL_32]] : i64)
+// CHECK:           %[[VAL_36:.*]] = omp.map.info var_ptr(%[[VAL_6]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(from) capture(ByRef) bounds(%[[VAL_35]]) -> !llvm.ptr {name = "b"}
+// CHECK:           %[[VAL_37:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK:           %[[VAL_38:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK:           %[[VAL_39:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK:           %[[VAL_40:.*]] = omp.map.bounds lower_bound(%[[VAL_38]] : i64) upper_bound(%[[VAL_39]] : i64) extent(%[[VAL_10]] : i64) stride(%[[VAL_37]] : i64) start_idx(%[[VAL_37]] : i64)
+// CHECK:           %[[VAL_41:.*]] = omp.map.info var_ptr(%[[VAL_4]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) bounds(%[[VAL_40]]) -> !llvm.ptr {name = "c"}
+// CHECK:           %[[VAL_42:.*]] = llvm.mlir.constant(1 : index) : i64
+// CHECK:           %[[VAL_43:.*]] = llvm.mlir.constant(0 : index) : i64
+// CHECK:           %[[VAL_44:.*]] = llvm.mlir.constant(1023 : index) : i64
+// CHECK:           %[[VAL_45:.*]] = omp.map.bounds lower_bound(%[[VAL_43]] : i64) upper_bound(%[[VAL_44]] : i64) extent(%[[VAL_11]] : i64) stride(%[[VAL_42]] : i64) start_idx(%[[VAL_42]] : i64)
+// CHECK:           %[[VAL_46:.*]] = omp.map.info var_ptr(%[[VAL_2]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(always, delete) capture(ByRef) bounds(%[[VAL_45]]) -> !llvm.ptr {name = "d"}
+// CHECK:           omp.target_exit_data map_entries(%[[VAL_31]], %[[VAL_36]], %[[VAL_41]], %[[VAL_46]] : !llvm.ptr, !llvm.ptr, !llvm.ptr, !llvm.ptr)
+// CHECK:           llvm.return
+// CHECK:         }
 
 // -----
 

>From 1647f3c783eab64ad2f453f96564c027a33306d5 Mon Sep 17 00:00:00 2001
From: Vijay Kandiah <vkandiah at sky6.pgi.net>
Date: Fri, 14 Jun 2024 16:00:21 -0700
Subject: [PATCH 5/5] [flang] Fixing PPC lit test failures due to recent alloca
 ordering change.

---
 .../PowerPC/ppc-mma-assemble-disassemble.f90  |  761 ++++-----
 .../Lower/PowerPC/ppc-mma-outer-product-1.f90 | 1512 ++++++++---------
 .../Lower/PowerPC/ppc-mma-outer-product-2.f90 |  754 ++++----
 .../PowerPC/ppc-pwr10-vec-intrinsics.f90      |    8 +-
 flang/test/Lower/PowerPC/ppc-vector-types.f90 |    8 +-
 5 files changed, 1522 insertions(+), 1521 deletions(-)

diff --git a/flang/test/Lower/PowerPC/ppc-mma-assemble-disassemble.f90 b/flang/test/Lower/PowerPC/ppc-mma-assemble-disassemble.f90
index d3872891853d4..17603535760bb 100644
--- a/flang/test/Lower/PowerPC/ppc-mma-assemble-disassemble.f90
+++ b/flang/test/Lower/PowerPC/ppc-mma-assemble-disassemble.f90
@@ -12,17 +12,18 @@ subroutine test_assemble_acc_i1()
       end subroutine test_assemble_acc_i1
 
 ! CHECK-LABEL: @test_assemble_acc_i1
-! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-! LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-! LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-! LLVMIR:  %4 = alloca <16 x i8>, i64 1, align 16
-! LLVMIR:  %5 = alloca <16 x i8>, i64 1, align 16
-! LLVMIR:  %6 = load <16 x i8>, ptr %2, align 16
-! LLVMIR:  %7 = load <16 x i8>, ptr %3, align 16
-! LLVMIR:  %8 = load <16 x i8>, ptr %4, align 16
-! LLVMIR:  %9 = load <16 x i8>, ptr %5, align 16
-! LLVMIR:  %10 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %6, <16 x i8> %7, <16 x i8> %8, <16 x i8> %9)
-! LLVMIR:  store <512 x i1> %10, ptr %1, align 64
+
+! LLVMIR:         %[[VAL_0:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_1:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_2:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_3:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_4:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_5:.*]] = load <16 x i8>, ptr %[[VAL_3]], align 16
+! LLVMIR:         %[[VAL_6:.*]] = load <16 x i8>, ptr %[[VAL_2]], align 16
+! LLVMIR:         %[[VAL_7:.*]] = load <16 x i8>, ptr %[[VAL_1]], align 16
+! LLVMIR:         %[[VAL_8:.*]] = load <16 x i8>, ptr %[[VAL_0]], align 16
+! LLVMIR:         %[[VAL_9:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_5]], <16 x i8> %[[VAL_6]], <16 x i8> %[[VAL_7]], <16 x i8> %[[VAL_8]])
+! LLVMIR:         store <512 x i1> %[[VAL_9]], ptr %[[VAL_4]], align 64
 
       subroutine test_assemble_acc_i2()
       use, intrinsic :: mma
@@ -33,21 +34,21 @@ subroutine test_assemble_acc_i2()
       end subroutine test_assemble_acc_i2
 
 ! CHECK-LABEL: @test_assemble_acc_i2
-! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-! LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-! LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-! LLVMIR:  %4 = alloca <8 x i16>, i64 1, align 16
-! LLVMIR:  %5 = alloca <8 x i16>, i64 1, align 16
-! LLVMIR:  %6 = load <8 x i16>, ptr %2, align 16
-! LLVMIR:  %7 = load <8 x i16>, ptr %3, align 16
-! LLVMIR:  %8 = load <8 x i16>, ptr %4, align 16
-! LLVMIR:  %9 = load <8 x i16>, ptr %5, align 16
-! LLVMIR:  %10 = bitcast <8 x i16> %6 to <16 x i8>
-! LLVMIR:  %11 = bitcast <8 x i16> %7 to <16 x i8>
-! LLVMIR:  %12 = bitcast <8 x i16> %8 to <16 x i8>
-! LLVMIR:  %13 = bitcast <8 x i16> %9 to <16 x i8>
-! LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-! LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_10:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_11:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_12:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_13:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_14:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_15:.*]] = load <8 x i16>, ptr %[[VAL_13]], align 16
+! LLVMIR:         %[[VAL_16:.*]] = load <8 x i16>, ptr %[[VAL_12]], align 16
+! LLVMIR:         %[[VAL_17:.*]] = load <8 x i16>, ptr %[[VAL_11]], align 16
+! LLVMIR:         %[[VAL_18:.*]] = load <8 x i16>, ptr %[[VAL_10]], align 16
+! LLVMIR:         %[[VAL_19:.*]] = bitcast <8 x i16> %[[VAL_15]] to <16 x i8>
+! LLVMIR:         %[[VAL_20:.*]] = bitcast <8 x i16> %[[VAL_16]] to <16 x i8>
+! LLVMIR:         %[[VAL_21:.*]] = bitcast <8 x i16> %[[VAL_17]] to <16 x i8>
+! LLVMIR:         %[[VAL_22:.*]] = bitcast <8 x i16> %[[VAL_18]] to <16 x i8>
+! LLVMIR:         %[[VAL_23:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_19]], <16 x i8> %[[VAL_20]], <16 x i8> %[[VAL_21]], <16 x i8> %[[VAL_22]])
+! LLVMIR:         store <512 x i1> %[[VAL_23]], ptr %[[VAL_14]], align 64
 
 
       subroutine test_assemble_acc_i4()
@@ -59,21 +60,21 @@ subroutine test_assemble_acc_i4()
       end subroutine test_assemble_acc_i4
 
 ! CHECK-LABEL: @test_assemble_acc_i4
-! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-! LLVMIR:  %2 = alloca <4 x i32>, i64 1, align 16
-! LLVMIR:  %3 = alloca <4 x i32>, i64 1, align 16
-! LLVMIR:  %4 = alloca <4 x i32>, i64 1, align 16
-! LLVMIR:  %5 = alloca <4 x i32>, i64 1, align 16
-! LLVMIR:  %6 = load <4 x i32>, ptr %2, align 16
-! LLVMIR:  %7 = load <4 x i32>, ptr %3, align 16
-! LLVMIR:  %8 = load <4 x i32>, ptr %4, align 16
-! LLVMIR:  %9 = load <4 x i32>, ptr %5, align 16
-! LLVMIR:  %10 = bitcast <4 x i32> %6 to <16 x i8>
-! LLVMIR:  %11 = bitcast <4 x i32> %7 to <16 x i8>
-! LLVMIR:  %12 = bitcast <4 x i32> %8 to <16 x i8>
-! LLVMIR:  %13 = bitcast <4 x i32> %9 to <16 x i8>
-! LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-! LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_24:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_25:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_26:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_27:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_28:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_29:.*]] = load <4 x i32>, ptr %[[VAL_27]], align 16
+! LLVMIR:         %[[VAL_30:.*]] = load <4 x i32>, ptr %[[VAL_26]], align 16
+! LLVMIR:         %[[VAL_31:.*]] = load <4 x i32>, ptr %[[VAL_25]], align 16
+! LLVMIR:         %[[VAL_32:.*]] = load <4 x i32>, ptr %[[VAL_24]], align 16
+! LLVMIR:         %[[VAL_33:.*]] = bitcast <4 x i32> %[[VAL_29]] to <16 x i8>
+! LLVMIR:         %[[VAL_34:.*]] = bitcast <4 x i32> %[[VAL_30]] to <16 x i8>
+! LLVMIR:         %[[VAL_35:.*]] = bitcast <4 x i32> %[[VAL_31]] to <16 x i8>
+! LLVMIR:         %[[VAL_36:.*]] = bitcast <4 x i32> %[[VAL_32]] to <16 x i8>
+! LLVMIR:         %[[VAL_37:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_33]], <16 x i8> %[[VAL_34]], <16 x i8> %[[VAL_35]], <16 x i8> %[[VAL_36]])
+! LLVMIR:         store <512 x i1> %[[VAL_37]], ptr %[[VAL_28]], align 64
 
       subroutine test_assemble_acc_i8()
       use, intrinsic :: mma
@@ -84,21 +85,21 @@ subroutine test_assemble_acc_i8()
       end subroutine test_assemble_acc_i8
 
 ! CHECK-LABEL: @test_assemble_acc_i8
-! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-! LLVMIR:  %2 = alloca <2 x i64>, i64 1, align 16
-! LLVMIR:  %3 = alloca <2 x i64>, i64 1, align 16
-! LLVMIR:  %4 = alloca <2 x i64>, i64 1, align 16
-! LLVMIR:  %5 = alloca <2 x i64>, i64 1, align 16
-! LLVMIR:  %6 = load <2 x i64>, ptr %2, align 16
-! LLVMIR:  %7 = load <2 x i64>, ptr %3, align 16
-! LLVMIR:  %8 = load <2 x i64>, ptr %4, align 16
-! LLVMIR:  %9 = load <2 x i64>, ptr %5, align 16
-! LLVMIR:  %10 = bitcast <2 x i64> %6 to <16 x i8>
-! LLVMIR:  %11 = bitcast <2 x i64> %7 to <16 x i8>
-! LLVMIR:  %12 = bitcast <2 x i64> %8 to <16 x i8>
-! LLVMIR:  %13 = bitcast <2 x i64> %9 to <16 x i8>
-! LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-! LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_38:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_39:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_40:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_41:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_42:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_43:.*]] = load <2 x i64>, ptr %[[VAL_41]], align 16
+! LLVMIR:         %[[VAL_44:.*]] = load <2 x i64>, ptr %[[VAL_40]], align 16
+! LLVMIR:         %[[VAL_45:.*]] = load <2 x i64>, ptr %[[VAL_39]], align 16
+! LLVMIR:         %[[VAL_46:.*]] = load <2 x i64>, ptr %[[VAL_38]], align 16
+! LLVMIR:         %[[VAL_47:.*]] = bitcast <2 x i64> %[[VAL_43]] to <16 x i8>
+! LLVMIR:         %[[VAL_48:.*]] = bitcast <2 x i64> %[[VAL_44]] to <16 x i8>
+! LLVMIR:         %[[VAL_49:.*]] = bitcast <2 x i64> %[[VAL_45]] to <16 x i8>
+! LLVMIR:         %[[VAL_50:.*]] = bitcast <2 x i64> %[[VAL_46]] to <16 x i8>
+! LLVMIR:         %[[VAL_51:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_47]], <16 x i8> %[[VAL_48]], <16 x i8> %[[VAL_49]], <16 x i8> %[[VAL_50]])
+! LLVMIR:         store <512 x i1> %[[VAL_51]], ptr %[[VAL_42]], align 64
 
 
       subroutine test_assemble_acc_u1()
@@ -110,17 +111,17 @@ subroutine test_assemble_acc_u1()
       end subroutine test_assemble_acc_u1
 
 ! CHECK-LABEL: @test_assemble_acc_u1
-! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-! LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-! LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-! LLVMIR:  %4 = alloca <16 x i8>, i64 1, align 16
-! LLVMIR:  %5 = alloca <16 x i8>, i64 1, align 16
-! LLVMIR:  %6 = load <16 x i8>, ptr %2, align 16
-! LLVMIR:  %7 = load <16 x i8>, ptr %3, align 16
-! LLVMIR:  %8 = load <16 x i8>, ptr %4, align 16
-! LLVMIR:  %9 = load <16 x i8>, ptr %5, align 16
-! LLVMIR:  %10 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %6, <16 x i8> %7, <16 x i8> %8, <16 x i8> %9)
-! LLVMIR:  store <512 x i1> %10, ptr %1, align 64
+! LLVMIR:         %[[VAL_52:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_53:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_54:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_55:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_56:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_57:.*]] = load <16 x i8>, ptr %[[VAL_55]], align 16
+! LLVMIR:         %[[VAL_58:.*]] = load <16 x i8>, ptr %[[VAL_54]], align 16
+! LLVMIR:         %[[VAL_59:.*]] = load <16 x i8>, ptr %[[VAL_53]], align 16
+! LLVMIR:         %[[VAL_60:.*]] = load <16 x i8>, ptr %[[VAL_52]], align 16
+! LLVMIR:         %[[VAL_61:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_57]], <16 x i8> %[[VAL_58]], <16 x i8> %[[VAL_59]], <16 x i8> %[[VAL_60]])
+! LLVMIR:         store <512 x i1> %[[VAL_61]], ptr %[[VAL_56]], align 64
 
       subroutine test_assemble_acc_u2()
       use, intrinsic :: mma
@@ -131,21 +132,21 @@ subroutine test_assemble_acc_u2()
       end subroutine test_assemble_acc_u2
 
 ! CHECK-LABEL: @test_assemble_acc_u2
-! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-! LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-! LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-! LLVMIR:  %4 = alloca <8 x i16>, i64 1, align 16
-! LLVMIR:  %5 = alloca <8 x i16>, i64 1, align 16
-! LLVMIR:  %6 = load <8 x i16>, ptr %2, align 16
-! LLVMIR:  %7 = load <8 x i16>, ptr %3, align 16
-! LLVMIR:  %8 = load <8 x i16>, ptr %4, align 16
-! LLVMIR:  %9 = load <8 x i16>, ptr %5, align 16
-! LLVMIR:  %10 = bitcast <8 x i16> %6 to <16 x i8>
-! LLVMIR:  %11 = bitcast <8 x i16> %7 to <16 x i8>
-! LLVMIR:  %12 = bitcast <8 x i16> %8 to <16 x i8>
-! LLVMIR:  %13 = bitcast <8 x i16> %9 to <16 x i8>
-! LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-! LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_62:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_63:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_64:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_65:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_66:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_67:.*]] = load <8 x i16>, ptr %[[VAL_65]], align 16
+! LLVMIR:         %[[VAL_68:.*]] = load <8 x i16>, ptr %[[VAL_64]], align 16
+! LLVMIR:         %[[VAL_69:.*]] = load <8 x i16>, ptr %[[VAL_63]], align 16
+! LLVMIR:         %[[VAL_70:.*]] = load <8 x i16>, ptr %[[VAL_62]], align 16
+! LLVMIR:         %[[VAL_71:.*]] = bitcast <8 x i16> %[[VAL_67]] to <16 x i8>
+! LLVMIR:         %[[VAL_72:.*]] = bitcast <8 x i16> %[[VAL_68]] to <16 x i8>
+! LLVMIR:         %[[VAL_73:.*]] = bitcast <8 x i16> %[[VAL_69]] to <16 x i8>
+! LLVMIR:         %[[VAL_74:.*]] = bitcast <8 x i16> %[[VAL_70]] to <16 x i8>
+! LLVMIR:         %[[VAL_75:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_71]], <16 x i8> %[[VAL_72]], <16 x i8> %[[VAL_73]], <16 x i8> %[[VAL_74]])
+! LLVMIR:         store <512 x i1> %[[VAL_75]], ptr %[[VAL_66]], align 64
 
       subroutine test_assemble_acc_u4()
       use, intrinsic :: mma
@@ -156,21 +157,21 @@ subroutine test_assemble_acc_u4()
       end subroutine test_assemble_acc_u4
 
 ! CHECK-LABEL: @test_assemble_acc_u4
-! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-! LLVMIR:  %2 = alloca <4 x i32>, i64 1, align 16
-! LLVMIR:  %3 = alloca <4 x i32>, i64 1, align 16
-! LLVMIR:  %4 = alloca <4 x i32>, i64 1, align 16
-! LLVMIR:  %5 = alloca <4 x i32>, i64 1, align 16
-! LLVMIR:  %6 = load <4 x i32>, ptr %2, align 16
-! LLVMIR:  %7 = load <4 x i32>, ptr %3, align 16
-! LLVMIR:  %8 = load <4 x i32>, ptr %4, align 16
-! LLVMIR:  %9 = load <4 x i32>, ptr %5, align 16
-! LLVMIR:  %10 = bitcast <4 x i32> %6 to <16 x i8>
-! LLVMIR:  %11 = bitcast <4 x i32> %7 to <16 x i8>
-! LLVMIR:  %12 = bitcast <4 x i32> %8 to <16 x i8>
-! LLVMIR:  %13 = bitcast <4 x i32> %9 to <16 x i8>
-! LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-! LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_76:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_77:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_78:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_79:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_80:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_81:.*]] = load <4 x i32>, ptr %[[VAL_79]], align 16
+! LLVMIR:         %[[VAL_82:.*]] = load <4 x i32>, ptr %[[VAL_78]], align 16
+! LLVMIR:         %[[VAL_83:.*]] = load <4 x i32>, ptr %[[VAL_77]], align 16
+! LLVMIR:         %[[VAL_84:.*]] = load <4 x i32>, ptr %[[VAL_76]], align 16
+! LLVMIR:         %[[VAL_85:.*]] = bitcast <4 x i32> %[[VAL_81]] to <16 x i8>
+! LLVMIR:         %[[VAL_86:.*]] = bitcast <4 x i32> %[[VAL_82]] to <16 x i8>
+! LLVMIR:         %[[VAL_87:.*]] = bitcast <4 x i32> %[[VAL_83]] to <16 x i8>
+! LLVMIR:         %[[VAL_88:.*]] = bitcast <4 x i32> %[[VAL_84]] to <16 x i8>
+! LLVMIR:         %[[VAL_89:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_85]], <16 x i8> %[[VAL_86]], <16 x i8> %[[VAL_87]], <16 x i8> %[[VAL_88]])
+! LLVMIR:         store <512 x i1> %[[VAL_89]], ptr %[[VAL_80]], align 64
 
       subroutine test_assemble_acc_u8()
       use, intrinsic :: mma
@@ -181,21 +182,21 @@ subroutine test_assemble_acc_u8()
       end subroutine test_assemble_acc_u8
 
 ! CHECK-LABEL: @test_assemble_acc_u8
-! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-! LLVMIR:  %2 = alloca <2 x i64>, i64 1, align 16
-! LLVMIR:  %3 = alloca <2 x i64>, i64 1, align 16
-! LLVMIR:  %4 = alloca <2 x i64>, i64 1, align 16
-! LLVMIR:  %5 = alloca <2 x i64>, i64 1, align 16
-! LLVMIR:  %6 = load <2 x i64>, ptr %2, align 16
-! LLVMIR:  %7 = load <2 x i64>, ptr %3, align 16
-! LLVMIR:  %8 = load <2 x i64>, ptr %4, align 16
-! LLVMIR:  %9 = load <2 x i64>, ptr %5, align 16
-! LLVMIR:  %10 = bitcast <2 x i64> %6 to <16 x i8>
-! LLVMIR:  %11 = bitcast <2 x i64> %7 to <16 x i8>
-! LLVMIR:  %12 = bitcast <2 x i64> %8 to <16 x i8>
-! LLVMIR:  %13 = bitcast <2 x i64> %9 to <16 x i8>
-! LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-! LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_90:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_91:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_92:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_93:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_94:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_95:.*]] = load <2 x i64>, ptr %[[VAL_93]], align 16
+! LLVMIR:         %[[VAL_96:.*]] = load <2 x i64>, ptr %[[VAL_92]], align 16
+! LLVMIR:         %[[VAL_97:.*]] = load <2 x i64>, ptr %[[VAL_91]], align 16
+! LLVMIR:         %[[VAL_98:.*]] = load <2 x i64>, ptr %[[VAL_90]], align 16
+! LLVMIR:         %[[VAL_99:.*]] = bitcast <2 x i64> %[[VAL_95]] to <16 x i8>
+! LLVMIR:         %[[VAL_100:.*]] = bitcast <2 x i64> %[[VAL_96]] to <16 x i8>
+! LLVMIR:         %[[VAL_101:.*]] = bitcast <2 x i64> %[[VAL_97]] to <16 x i8>
+! LLVMIR:         %[[VAL_102:.*]] = bitcast <2 x i64> %[[VAL_98]] to <16 x i8>
+! LLVMIR:         %[[VAL_103:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_99]], <16 x i8> %[[VAL_100]], <16 x i8> %[[VAL_101]], <16 x i8> %[[VAL_102]])
+! LLVMIR:         store <512 x i1> %[[VAL_103]], ptr %[[VAL_94]], align 64
 
       subroutine test_assemble_acc_r4()
       use, intrinsic :: mma
@@ -206,21 +207,21 @@ subroutine test_assemble_acc_r4()
       end subroutine test_assemble_acc_r4
 
 ! CHECK-LABEL: @test_assemble_acc_r4
-! LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-! LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-! LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-! LLVMIR:  %4 = alloca <4 x float>, i64 1, align 16
-! LLVMIR:  %5 = alloca <4 x float>, i64 1, align 16
-! LLVMIR:  %6 = load <4 x float>, ptr %2, align 16
-! LLVMIR:  %7 = load <4 x float>, ptr %3, align 16
-! LLVMIR:  %8 = load <4 x float>, ptr %4, align 16
-! LLVMIR:  %9 = load <4 x float>, ptr %5, align 16
-! LLVMIR:  %10 = bitcast <4 x float> %6 to <16 x i8>
-! LLVMIR:  %11 = bitcast <4 x float> %7 to <16 x i8>
-! LLVMIR:  %12 = bitcast <4 x float> %8 to <16 x i8>
-! LLVMIR:  %13 = bitcast <4 x float> %9 to <16 x i8>
-! LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-! LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_104:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_105:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_106:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_107:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_108:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_109:.*]] = load <4 x float>, ptr %[[VAL_107]], align 16
+! LLVMIR:         %[[VAL_110:.*]] = load <4 x float>, ptr %[[VAL_106]], align 16
+! LLVMIR:         %[[VAL_111:.*]] = load <4 x float>, ptr %[[VAL_105]], align 16
+! LLVMIR:         %[[VAL_112:.*]] = load <4 x float>, ptr %[[VAL_104]], align 16
+! LLVMIR:         %[[VAL_113:.*]] = bitcast <4 x float> %[[VAL_109]] to <16 x i8>
+! LLVMIR:         %[[VAL_114:.*]] = bitcast <4 x float> %[[VAL_110]] to <16 x i8>
+! LLVMIR:         %[[VAL_115:.*]] = bitcast <4 x float> %[[VAL_111]] to <16 x i8>
+! LLVMIR:         %[[VAL_116:.*]] = bitcast <4 x float> %[[VAL_112]] to <16 x i8>
+! LLVMIR:         %[[VAL_117:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_113]], <16 x i8> %[[VAL_114]], <16 x i8> %[[VAL_115]], <16 x i8> %[[VAL_116]])
+! LLVMIR:         store <512 x i1> %[[VAL_117]], ptr %[[VAL_108]], align 64
 
       subroutine test_assemble_acc_r8()
       use, intrinsic :: mma
@@ -231,21 +232,21 @@ subroutine test_assemble_acc_r8()
       end subroutine test_assemble_acc_r8
 
 !CHECK-LABEL: @test_assemble_acc_r8
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:   %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:   %4 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:   %5 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:   %6 = load <2 x double>, ptr %2, align 16
-!LLVMIR:   %7 = load <2 x double>, ptr %3, align 16
-!LLVMIR:   %8 = load <2 x double>, ptr %4, align 16
-!LLVMIR:   %9 = load <2 x double>, ptr %5, align 16
-!LLVMIR:   %10 = bitcast <2 x double> %6 to <16 x i8>
-!LLVMIR:   %11 = bitcast <2 x double> %7 to <16 x i8>
-!LLVMIR:   %12 = bitcast <2 x double> %8 to <16 x i8>
-!LLVMIR:   %13 = bitcast <2 x double> %9 to <16 x i8>
-!LLVMIR:   %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-!LLVMIR:   store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_118:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_119:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_120:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_121:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_122:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_123:.*]] = load <2 x double>, ptr %[[VAL_121]], align 16
+! LLVMIR:         %[[VAL_124:.*]] = load <2 x double>, ptr %[[VAL_120]], align 16
+! LLVMIR:         %[[VAL_125:.*]] = load <2 x double>, ptr %[[VAL_119]], align 16
+! LLVMIR:         %[[VAL_126:.*]] = load <2 x double>, ptr %[[VAL_118]], align 16
+! LLVMIR:         %[[VAL_127:.*]] = bitcast <2 x double> %[[VAL_123]] to <16 x i8>
+! LLVMIR:         %[[VAL_128:.*]] = bitcast <2 x double> %[[VAL_124]] to <16 x i8>
+! LLVMIR:         %[[VAL_129:.*]] = bitcast <2 x double> %[[VAL_125]] to <16 x i8>
+! LLVMIR:         %[[VAL_130:.*]] = bitcast <2 x double> %[[VAL_126]] to <16 x i8>
+! LLVMIR:         %[[VAL_131:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_127]], <16 x i8> %[[VAL_128]], <16 x i8> %[[VAL_129]], <16 x i8> %[[VAL_130]])
+! LLVMIR:         store <512 x i1> %[[VAL_131]], ptr %[[VAL_122]], align 64
 
 ! mma_assemble_pair
 
@@ -258,13 +259,13 @@ subroutine test_mma_assemble_pair_i1()
       end subroutine test_mma_assemble_pair_i1
 
 !LLVMIR: @test_mma_assemble_pair_i1_
-!LLVMIR:  %1 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <16 x i8>, ptr %1, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %6 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <256 x i1> %6, ptr %3, align 32
+! LLVMIR:         %[[VAL_132:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_133:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_134:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_135:.*]] = load <16 x i8>, ptr %[[VAL_134]], align 16
+! LLVMIR:         %[[VAL_136:.*]] = load <16 x i8>, ptr %[[VAL_133]], align 16
+! LLVMIR:         %[[VAL_137:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_135]], <16 x i8> %[[VAL_136]])
+! LLVMIR:         store <256 x i1> %[[VAL_137]], ptr %[[VAL_132]], align 32
 
       subroutine test_mma_assemble_pair_i2()
       use, intrinsic :: mma
@@ -275,15 +276,15 @@ subroutine test_mma_assemble_pair_i2()
       end subroutine test_mma_assemble_pair_i2
 
 !LLVMIR: @test_mma_assemble_pair_i2_
-!LLVMIR:  %1 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <8 x i16>, ptr %1, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %6 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %8 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <256 x i1> %8, ptr %3, align 32
+! LLVMIR:         %[[VAL_138:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_139:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_140:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_141:.*]] = load <8 x i16>, ptr %[[VAL_140]], align 16
+! LLVMIR:         %[[VAL_142:.*]] = load <8 x i16>, ptr %[[VAL_139]], align 16
+! LLVMIR:         %[[VAL_143:.*]] = bitcast <8 x i16> %[[VAL_141]] to <16 x i8>
+! LLVMIR:         %[[VAL_144:.*]] = bitcast <8 x i16> %[[VAL_142]] to <16 x i8>
+! LLVMIR:         %[[VAL_145:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_143]], <16 x i8> %[[VAL_144]])
+! LLVMIR:         store <256 x i1> %[[VAL_145]], ptr %[[VAL_138]], align 32
 
       subroutine test_mma_assemble_pair_i4()
       use, intrinsic :: mma
@@ -294,15 +295,15 @@ subroutine test_mma_assemble_pair_i4()
       end subroutine test_mma_assemble_pair_i4
 
 !LLVMIR: @test_mma_assemble_pair_i4_
-!LLVMIR:  %1 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %2 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <4 x i32>, ptr %1, align 16
-!LLVMIR:  %5 = load <4 x i32>, ptr %2, align 16
-!LLVMIR:  %6 = bitcast <4 x i32> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <4 x i32> %5 to <16 x i8>
-!LLVMIR:  %8 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <256 x i1> %8, ptr %3, align 32
+! LLVMIR:         %[[VAL_146:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_147:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_148:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_149:.*]] = load <4 x i32>, ptr %[[VAL_148]], align 16
+! LLVMIR:         %[[VAL_150:.*]] = load <4 x i32>, ptr %[[VAL_147]], align 16
+! LLVMIR:         %[[VAL_151:.*]] = bitcast <4 x i32> %[[VAL_149]] to <16 x i8>
+! LLVMIR:         %[[VAL_152:.*]] = bitcast <4 x i32> %[[VAL_150]] to <16 x i8>
+! LLVMIR:         %[[VAL_153:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_151]], <16 x i8> %[[VAL_152]])
+! LLVMIR:         store <256 x i1> %[[VAL_153]], ptr %[[VAL_146]], align 32
 
       subroutine test_mma_assemble_pair_i8()
       use, intrinsic :: mma
@@ -313,15 +314,15 @@ subroutine test_mma_assemble_pair_i8()
       end subroutine test_mma_assemble_pair_i8
 
 !LLVMIR: @test_mma_assemble_pair_i8_
-!LLVMIR:  %1 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %2 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <2 x i64>, ptr %1, align 16
-!LLVMIR:  %5 = load <2 x i64>, ptr %2, align 16
-!LLVMIR:  %6 = bitcast <2 x i64> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <2 x i64> %5 to <16 x i8>
-!LLVMIR:  %8 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <256 x i1> %8, ptr %3, align 32
+! LLVMIR:         %[[VAL_154:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_155:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_156:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_157:.*]] = load <2 x i64>, ptr %[[VAL_156]], align 16
+! LLVMIR:         %[[VAL_158:.*]] = load <2 x i64>, ptr %[[VAL_155]], align 16
+! LLVMIR:         %[[VAL_159:.*]] = bitcast <2 x i64> %[[VAL_157]] to <16 x i8>
+! LLVMIR:         %[[VAL_160:.*]] = bitcast <2 x i64> %[[VAL_158]] to <16 x i8>
+! LLVMIR:         %[[VAL_161:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_159]], <16 x i8> %[[VAL_160]])
+! LLVMIR:         store <256 x i1> %[[VAL_161]], ptr %[[VAL_154]], align 32
 
       subroutine test_mma_assemble_pair_u1()
       use, intrinsic :: mma
@@ -332,13 +333,13 @@ subroutine test_mma_assemble_pair_u1()
       end subroutine test_mma_assemble_pair_u1
 
 !LLVMIR: @test_mma_assemble_pair_u1_
-!LLVMIR:  %1 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <16 x i8>, ptr %1, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %6 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <256 x i1> %6, ptr %3, align 32
+! LLVMIR:         %[[VAL_162:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_163:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_164:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_165:.*]] = load <16 x i8>, ptr %[[VAL_164]], align 16
+! LLVMIR:         %[[VAL_166:.*]] = load <16 x i8>, ptr %[[VAL_163]], align 16
+! LLVMIR:         %[[VAL_167:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_165]], <16 x i8> %[[VAL_166]])
+! LLVMIR:         store <256 x i1> %[[VAL_167]], ptr %[[VAL_162]], align 32
 
       subroutine test_mma_assemble_pair_u2()
       use, intrinsic :: mma
@@ -349,15 +350,15 @@ subroutine test_mma_assemble_pair_u2()
       end subroutine test_mma_assemble_pair_u2
 
 !LLVMIR: @test_mma_assemble_pair_u2_
-!LLVMIR:  %1 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <8 x i16>, ptr %1, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %6 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %8 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <256 x i1> %8, ptr %3, align 32
+! LLVMIR:         %[[VAL_168:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_169:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_170:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_171:.*]] = load <8 x i16>, ptr %[[VAL_170]], align 16
+! LLVMIR:         %[[VAL_172:.*]] = load <8 x i16>, ptr %[[VAL_169]], align 16
+! LLVMIR:         %[[VAL_173:.*]] = bitcast <8 x i16> %[[VAL_171]] to <16 x i8>
+! LLVMIR:         %[[VAL_174:.*]] = bitcast <8 x i16> %[[VAL_172]] to <16 x i8>
+! LLVMIR:         %[[VAL_175:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_173]], <16 x i8> %[[VAL_174]])
+! LLVMIR:         store <256 x i1> %[[VAL_175]], ptr %[[VAL_168]], align 32
 
       subroutine test_mma_assemble_pair_u4()
       use, intrinsic :: mma
@@ -368,15 +369,15 @@ subroutine test_mma_assemble_pair_u4()
       end subroutine test_mma_assemble_pair_u4
 
 !LLVMIR: @test_mma_assemble_pair_u4_
-!LLVMIR:  %1 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %2 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <4 x i32>, ptr %1, align 16
-!LLVMIR:  %5 = load <4 x i32>, ptr %2, align 16
-!LLVMIR:  %6 = bitcast <4 x i32> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <4 x i32> %5 to <16 x i8>
-!LLVMIR:  %8 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <256 x i1> %8, ptr %3, align 32
+! LLVMIR:         %[[VAL_176:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_177:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_178:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_179:.*]] = load <4 x i32>, ptr %[[VAL_178]], align 16
+! LLVMIR:         %[[VAL_180:.*]] = load <4 x i32>, ptr %[[VAL_177]], align 16
+! LLVMIR:         %[[VAL_181:.*]] = bitcast <4 x i32> %[[VAL_179]] to <16 x i8>
+! LLVMIR:         %[[VAL_182:.*]] = bitcast <4 x i32> %[[VAL_180]] to <16 x i8>
+! LLVMIR:         %[[VAL_183:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_181]], <16 x i8> %[[VAL_182]])
+! LLVMIR:         store <256 x i1> %[[VAL_183]], ptr %[[VAL_176]], align 32
 
       subroutine test_mma_assemble_pair_u8()
       use, intrinsic :: mma
@@ -387,15 +388,15 @@ subroutine test_mma_assemble_pair_u8()
       end subroutine test_mma_assemble_pair_u8
 
 !LLVMIR: @test_mma_assemble_pair_u8_
-!LLVMIR:  %1 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %2 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <2 x i64>, ptr %1, align 16
-!LLVMIR:  %5 = load <2 x i64>, ptr %2, align 16
-!LLVMIR:  %6 = bitcast <2 x i64> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <2 x i64> %5 to <16 x i8>
-!LLVMIR:  %8 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <256 x i1> %8, ptr %3, align 32
+! LLVMIR:         %[[VAL_184:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_185:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_186:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_187:.*]] = load <2 x i64>, ptr %[[VAL_186]], align 16
+! LLVMIR:         %[[VAL_188:.*]] = load <2 x i64>, ptr %[[VAL_185]], align 16
+! LLVMIR:         %[[VAL_189:.*]] = bitcast <2 x i64> %[[VAL_187]] to <16 x i8>
+! LLVMIR:         %[[VAL_190:.*]] = bitcast <2 x i64> %[[VAL_188]] to <16 x i8>
+! LLVMIR:         %[[VAL_191:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_189]], <16 x i8> %[[VAL_190]])
+! LLVMIR:         store <256 x i1> %[[VAL_191]], ptr %[[VAL_184]], align 32
 
       subroutine test_mma_assemble_pair_r4()
       use, intrinsic :: mma
@@ -406,15 +407,15 @@ subroutine test_mma_assemble_pair_r4()
       end subroutine test_mma_assemble_pair_r4
 
 !LLVMIR: @test_mma_assemble_pair_r4_
-!LLVMIR:  %1 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <4 x float>, ptr %1, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %6 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %8 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <256 x i1> %8, ptr %3, align 32
+! LLVMIR:         %[[VAL_192:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_193:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_194:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_195:.*]] = load <4 x float>, ptr %[[VAL_194]], align 16
+! LLVMIR:         %[[VAL_196:.*]] = load <4 x float>, ptr %[[VAL_193]], align 16
+! LLVMIR:         %[[VAL_197:.*]] = bitcast <4 x float> %[[VAL_195]] to <16 x i8>
+! LLVMIR:         %[[VAL_198:.*]] = bitcast <4 x float> %[[VAL_196]] to <16 x i8>
+! LLVMIR:         %[[VAL_199:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_197]], <16 x i8> %[[VAL_198]])
+! LLVMIR:         store <256 x i1> %[[VAL_199]], ptr %[[VAL_192]], align 32
 
       subroutine test_mma_assemble_pair_r8()
       use, intrinsic :: mma
@@ -425,15 +426,15 @@ subroutine test_mma_assemble_pair_r8()
       end subroutine test_mma_assemble_pair_r8
 
 !LLVMIR: @test_mma_assemble_pair_r8_
-!LLVMIR:  %1 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %2 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %3 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %4 = load <2 x double>, ptr %1, align 16
-!LLVMIR:  %5 = load <2 x double>, ptr %2, align 16
-!LLVMIR:  %6 = bitcast <2 x double> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <256 x i1> %8, ptr %3, align 32
+! LLVMIR:         %[[VAL_200:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_201:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_202:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_203:.*]] = load <2 x double>, ptr %[[VAL_202]], align 16
+! LLVMIR:         %[[VAL_204:.*]] = load <2 x double>, ptr %[[VAL_201]], align 16
+! LLVMIR:         %[[VAL_205:.*]] = bitcast <2 x double> %[[VAL_203]] to <16 x i8>
+! LLVMIR:         %[[VAL_206:.*]] = bitcast <2 x double> %[[VAL_204]] to <16 x i8>
+! LLVMIR:         %[[VAL_207:.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> %[[VAL_205]], <16 x i8> %[[VAL_206]])
+! LLVMIR:         store <256 x i1> %[[VAL_207]], ptr %[[VAL_200]], align 32
 
 ! mma_disassemble_acc
 
@@ -446,17 +447,17 @@ subroutine test_mma_build_acc_i1()
       end subroutine test_mma_build_acc_i1
 
 !CHECK-LABEL: @test_mma_build_acc_i1
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %5 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %6 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %7 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %8 = load <16 x i8>, ptr %4, align 16
-!LLVMIR:  %9 = load <16 x i8>, ptr %5, align 16
-!LLVMIR:  %10 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %9, <16 x i8> %8, <16 x i8> %7, <16 x i8> %6)
-!LLVMIR:  store <512 x i1> %10, ptr %1, align 64
+! LLVMIR:         %[[VAL_208:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_209:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_210:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_211:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_212:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_213:.*]] = load <16 x i8>, ptr %[[VAL_211]], align 16
+! LLVMIR:         %[[VAL_214:.*]] = load <16 x i8>, ptr %[[VAL_210]], align 16
+! LLVMIR:         %[[VAL_215:.*]] = load <16 x i8>, ptr %[[VAL_209]], align 16
+! LLVMIR:         %[[VAL_216:.*]] = load <16 x i8>, ptr %[[VAL_208]], align 16
+! LLVMIR:         %[[VAL_217:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_216]], <16 x i8> %[[VAL_215]], <16 x i8> %[[VAL_214]], <16 x i8> %[[VAL_213]])
+! LLVMIR:         store <512 x i1> %[[VAL_217]], ptr %[[VAL_212]], align 64
 
       subroutine test_mma_build_acc_i2()
       use, intrinsic :: mma
@@ -467,21 +468,21 @@ subroutine test_mma_build_acc_i2()
       end subroutine test_mma_build_acc_i2
 
 !CHECK-LABEL: @test_mma_build_acc_i2
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %5 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %6 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %7 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %8 = load <8 x i16>, ptr %4, align 16
-!LLVMIR:  %9 = load <8 x i16>, ptr %5, align 16
-!LLVMIR:  %10 = bitcast <8 x i16> %9 to <16 x i8>
-!LLVMIR:  %11 = bitcast <8 x i16> %8 to <16 x i8>
-!LLVMIR:  %12 = bitcast <8 x i16> %7 to <16 x i8>
-!LLVMIR:  %13 = bitcast <8 x i16> %6 to <16 x i8>
-!LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-!LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_218:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_219:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_220:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_221:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_222:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_223:.*]] = load <8 x i16>, ptr %[[VAL_221]], align 16
+! LLVMIR:         %[[VAL_224:.*]] = load <8 x i16>, ptr %[[VAL_220]], align 16
+! LLVMIR:         %[[VAL_225:.*]] = load <8 x i16>, ptr %[[VAL_219]], align 16
+! LLVMIR:         %[[VAL_226:.*]] = load <8 x i16>, ptr %[[VAL_218]], align 16
+! LLVMIR:         %[[VAL_227:.*]] = bitcast <8 x i16> %[[VAL_226]] to <16 x i8>
+! LLVMIR:         %[[VAL_228:.*]] = bitcast <8 x i16> %[[VAL_225]] to <16 x i8>
+! LLVMIR:         %[[VAL_229:.*]] = bitcast <8 x i16> %[[VAL_224]] to <16 x i8>
+! LLVMIR:         %[[VAL_230:.*]] = bitcast <8 x i16> %[[VAL_223]] to <16 x i8>
+! LLVMIR:         %[[VAL_231:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_227]], <16 x i8> %[[VAL_228]], <16 x i8> %[[VAL_229]], <16 x i8> %[[VAL_230]])
+! LLVMIR:         store <512 x i1> %[[VAL_231]], ptr %[[VAL_222]], align 64
 
       subroutine test_mma_build_acc_i4()
       use, intrinsic :: mma
@@ -492,21 +493,21 @@ subroutine test_mma_build_acc_i4()
       end subroutine test_mma_build_acc_i4
 
 !CHECK-LABEL: @test_mma_build_acc_i4
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %4 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %5 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %6 = load <4 x i32>, ptr %2, align 16
-!LLVMIR:  %7 = load <4 x i32>, ptr %3, align 16
-!LLVMIR:  %8 = load <4 x i32>, ptr %4, align 16
-!LLVMIR:  %9 = load <4 x i32>, ptr %5, align 16
-!LLVMIR:  %10 = bitcast <4 x i32> %9 to <16 x i8>
-!LLVMIR:  %11 = bitcast <4 x i32> %8 to <16 x i8>
-!LLVMIR:  %12 = bitcast <4 x i32> %7 to <16 x i8>
-!LLVMIR:  %13 = bitcast <4 x i32> %6 to <16 x i8>
-!LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-!LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_232:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_233:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_234:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_235:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_236:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_237:.*]] = load <4 x i32>, ptr %[[VAL_235]], align 16
+! LLVMIR:         %[[VAL_238:.*]] = load <4 x i32>, ptr %[[VAL_234]], align 16
+! LLVMIR:         %[[VAL_239:.*]] = load <4 x i32>, ptr %[[VAL_233]], align 16
+! LLVMIR:         %[[VAL_240:.*]] = load <4 x i32>, ptr %[[VAL_232]], align 16
+! LLVMIR:         %[[VAL_241:.*]] = bitcast <4 x i32> %[[VAL_240]] to <16 x i8>
+! LLVMIR:         %[[VAL_242:.*]] = bitcast <4 x i32> %[[VAL_239]] to <16 x i8>
+! LLVMIR:         %[[VAL_243:.*]] = bitcast <4 x i32> %[[VAL_238]] to <16 x i8>
+! LLVMIR:         %[[VAL_244:.*]] = bitcast <4 x i32> %[[VAL_237]] to <16 x i8>
+! LLVMIR:         %[[VAL_245:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_241]], <16 x i8> %[[VAL_242]], <16 x i8> %[[VAL_243]], <16 x i8> %[[VAL_244]])
+! LLVMIR:         store <512 x i1> %[[VAL_245]], ptr %[[VAL_236]], align 64
 
       subroutine test_mma_build_acc_i8()
       use, intrinsic :: mma
@@ -517,21 +518,21 @@ subroutine test_mma_build_acc_i8()
       end subroutine test_mma_build_acc_i8
 
 !CHECK-LABEL: @test_mma_build_acc_i8
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %3 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %4 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %5 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %6 = load <2 x i64>, ptr %2, align 16
-!LLVMIR:  %7 = load <2 x i64>, ptr %3, align 16
-!LLVMIR:  %8 = load <2 x i64>, ptr %4, align 16
-!LLVMIR:  %9 = load <2 x i64>, ptr %5, align 16
-!LLVMIR:  %10 = bitcast <2 x i64> %9 to <16 x i8>
-!LLVMIR:  %11 = bitcast <2 x i64> %8 to <16 x i8>
-!LLVMIR:  %12 = bitcast <2 x i64> %7 to <16 x i8>
-!LLVMIR:  %13 = bitcast <2 x i64> %6 to <16 x i8>
-!LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-!LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_246:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_247:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_248:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_249:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_250:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_251:.*]] = load <2 x i64>, ptr %[[VAL_249]], align 16
+! LLVMIR:         %[[VAL_252:.*]] = load <2 x i64>, ptr %[[VAL_248]], align 16
+! LLVMIR:         %[[VAL_253:.*]] = load <2 x i64>, ptr %[[VAL_247]], align 16
+! LLVMIR:         %[[VAL_254:.*]] = load <2 x i64>, ptr %[[VAL_246]], align 16
+! LLVMIR:         %[[VAL_255:.*]] = bitcast <2 x i64> %[[VAL_254]] to <16 x i8>
+! LLVMIR:         %[[VAL_256:.*]] = bitcast <2 x i64> %[[VAL_253]] to <16 x i8>
+! LLVMIR:         %[[VAL_257:.*]] = bitcast <2 x i64> %[[VAL_252]] to <16 x i8>
+! LLVMIR:         %[[VAL_258:.*]] = bitcast <2 x i64> %[[VAL_251]] to <16 x i8>
+! LLVMIR:         %[[VAL_259:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_255]], <16 x i8> %[[VAL_256]], <16 x i8> %[[VAL_257]], <16 x i8> %[[VAL_258]])
+! LLVMIR:         store <512 x i1> %[[VAL_259]], ptr %[[VAL_250]], align 64
 
       subroutine test_mma_build_acc_u1()
       use, intrinsic :: mma
@@ -542,17 +543,17 @@ subroutine test_mma_build_acc_u1()
       end subroutine test_mma_build_acc_u1
 
 !CHECK-LABEL: @test_mma_build_acc_u1
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %5 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %6 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %7 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %8 = load <16 x i8>, ptr %4, align 16
-!LLVMIR:  %9 = load <16 x i8>, ptr %5, align 16
-!LLVMIR:  %10 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %9, <16 x i8> %8, <16 x i8> %7, <16 x i8> %6)
-!LLVMIR:  store <512 x i1> %10, ptr %1, align 64
+! LLVMIR:         %[[VAL_260:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_261:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_262:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_263:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_264:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_265:.*]] = load <16 x i8>, ptr %[[VAL_263]], align 16
+! LLVMIR:         %[[VAL_266:.*]] = load <16 x i8>, ptr %[[VAL_262]], align 16
+! LLVMIR:         %[[VAL_267:.*]] = load <16 x i8>, ptr %[[VAL_261]], align 16
+! LLVMIR:         %[[VAL_268:.*]] = load <16 x i8>, ptr %[[VAL_260]], align 16
+! LLVMIR:         %[[VAL_269:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_268]], <16 x i8> %[[VAL_267]], <16 x i8> %[[VAL_266]], <16 x i8> %[[VAL_265]])
+! LLVMIR:         store <512 x i1> %[[VAL_269]], ptr %[[VAL_264]], align 64
 
       subroutine test_mma_build_acc_u2()
       use, intrinsic :: mma
@@ -563,21 +564,21 @@ subroutine test_mma_build_acc_u2()
       end subroutine test_mma_build_acc_u2
 
 !CHECK-LABEL: @test_mma_build_acc_u2
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %5 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %6 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %7 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %8 = load <8 x i16>, ptr %4, align 16
-!LLVMIR:  %9 = load <8 x i16>, ptr %5, align 16
-!LLVMIR:  %10 = bitcast <8 x i16> %9 to <16 x i8>
-!LLVMIR:  %11 = bitcast <8 x i16> %8 to <16 x i8>
-!LLVMIR:  %12 = bitcast <8 x i16> %7 to <16 x i8>
-!LLVMIR:  %13 = bitcast <8 x i16> %6 to <16 x i8>
-!LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-!LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_270:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_271:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_272:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_273:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_274:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_275:.*]] = load <8 x i16>, ptr %[[VAL_273]], align 16
+! LLVMIR:         %[[VAL_276:.*]] = load <8 x i16>, ptr %[[VAL_272]], align 16
+! LLVMIR:         %[[VAL_277:.*]] = load <8 x i16>, ptr %[[VAL_271]], align 16
+! LLVMIR:         %[[VAL_278:.*]] = load <8 x i16>, ptr %[[VAL_270]], align 16
+! LLVMIR:         %[[VAL_279:.*]] = bitcast <8 x i16> %[[VAL_278]] to <16 x i8>
+! LLVMIR:         %[[VAL_280:.*]] = bitcast <8 x i16> %[[VAL_277]] to <16 x i8>
+! LLVMIR:         %[[VAL_281:.*]] = bitcast <8 x i16> %[[VAL_276]] to <16 x i8>
+! LLVMIR:         %[[VAL_282:.*]] = bitcast <8 x i16> %[[VAL_275]] to <16 x i8>
+! LLVMIR:         %[[VAL_283:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_279]], <16 x i8> %[[VAL_280]], <16 x i8> %[[VAL_281]], <16 x i8> %[[VAL_282]])
+! LLVMIR:         store <512 x i1> %[[VAL_283]], ptr %[[VAL_274]], align 64
 
       subroutine test_mma_build_acc_u4()
       use, intrinsic :: mma
@@ -588,21 +589,21 @@ subroutine test_mma_build_acc_u4()
       end subroutine test_mma_build_acc_u4
 
 !CHECK-LABEL: @test_mma_build_acc_u4
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %4 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %5 = alloca <4 x i32>, i64 1, align 16
-!LLVMIR:  %6 = load <4 x i32>, ptr %2, align 16
-!LLVMIR:  %7 = load <4 x i32>, ptr %3, align 16
-!LLVMIR:  %8 = load <4 x i32>, ptr %4, align 16
-!LLVMIR:  %9 = load <4 x i32>, ptr %5, align 16
-!LLVMIR:  %10 = bitcast <4 x i32> %9 to <16 x i8>
-!LLVMIR:  %11 = bitcast <4 x i32> %8 to <16 x i8>
-!LLVMIR:  %12 = bitcast <4 x i32> %7 to <16 x i8>
-!LLVMIR:  %13 = bitcast <4 x i32> %6 to <16 x i8>
-!LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-!LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_284:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_285:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_286:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_287:.*]] = alloca <4 x i32>, i64 1, align 16
+! LLVMIR:         %[[VAL_288:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_289:.*]] = load <4 x i32>, ptr %[[VAL_287]], align 16
+! LLVMIR:         %[[VAL_290:.*]] = load <4 x i32>, ptr %[[VAL_286]], align 16
+! LLVMIR:         %[[VAL_291:.*]] = load <4 x i32>, ptr %[[VAL_285]], align 16
+! LLVMIR:         %[[VAL_292:.*]] = load <4 x i32>, ptr %[[VAL_284]], align 16
+! LLVMIR:         %[[VAL_293:.*]] = bitcast <4 x i32> %[[VAL_292]] to <16 x i8>
+! LLVMIR:         %[[VAL_294:.*]] = bitcast <4 x i32> %[[VAL_291]] to <16 x i8>
+! LLVMIR:         %[[VAL_295:.*]] = bitcast <4 x i32> %[[VAL_290]] to <16 x i8>
+! LLVMIR:         %[[VAL_296:.*]] = bitcast <4 x i32> %[[VAL_289]] to <16 x i8>
+! LLVMIR:         %[[VAL_297:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_293]], <16 x i8> %[[VAL_294]], <16 x i8> %[[VAL_295]], <16 x i8> %[[VAL_296]])
+! LLVMIR:         store <512 x i1> %[[VAL_297]], ptr %[[VAL_288]], align 64
 
       subroutine test_mma_build_acc_u8()
       use, intrinsic :: mma
@@ -613,21 +614,21 @@ subroutine test_mma_build_acc_u8()
       end subroutine test_mma_build_acc_u8
 
 !CHECK-LABEL: @test_mma_build_acc_u8
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %3 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %4 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %5 = alloca <2 x i64>, i64 1, align 16
-!LLVMIR:  %6 = load <2 x i64>, ptr %2, align 16
-!LLVMIR:  %7 = load <2 x i64>, ptr %3, align 16
-!LLVMIR:  %8 = load <2 x i64>, ptr %4, align 16
-!LLVMIR:  %9 = load <2 x i64>, ptr %5, align 16
-!LLVMIR:  %10 = bitcast <2 x i64> %9 to <16 x i8>
-!LLVMIR:  %11 = bitcast <2 x i64> %8 to <16 x i8>
-!LLVMIR:  %12 = bitcast <2 x i64> %7 to <16 x i8>
-!LLVMIR:  %13 = bitcast <2 x i64> %6 to <16 x i8>
-!LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-!LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_298:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_299:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_300:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_301:.*]] = alloca <2 x i64>, i64 1, align 16
+! LLVMIR:         %[[VAL_302:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_303:.*]] = load <2 x i64>, ptr %[[VAL_301]], align 16
+! LLVMIR:         %[[VAL_304:.*]] = load <2 x i64>, ptr %[[VAL_300]], align 16
+! LLVMIR:         %[[VAL_305:.*]] = load <2 x i64>, ptr %[[VAL_299]], align 16
+! LLVMIR:         %[[VAL_306:.*]] = load <2 x i64>, ptr %[[VAL_298]], align 16
+! LLVMIR:         %[[VAL_307:.*]] = bitcast <2 x i64> %[[VAL_306]] to <16 x i8>
+! LLVMIR:         %[[VAL_308:.*]] = bitcast <2 x i64> %[[VAL_305]] to <16 x i8>
+! LLVMIR:         %[[VAL_309:.*]] = bitcast <2 x i64> %[[VAL_304]] to <16 x i8>
+! LLVMIR:         %[[VAL_310:.*]] = bitcast <2 x i64> %[[VAL_303]] to <16 x i8>
+! LLVMIR:         %[[VAL_311:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_307]], <16 x i8> %[[VAL_308]], <16 x i8> %[[VAL_309]], <16 x i8> %[[VAL_310]])
+! LLVMIR:         store <512 x i1> %[[VAL_311]], ptr %[[VAL_302]], align 64
 
 
       subroutine test_mma_build_acc_r4()
@@ -639,21 +640,21 @@ subroutine test_mma_build_acc_r4()
       end subroutine test_mma_build_acc_r4
 
 !CHECK-LABEL: @test_mma_build_acc_r4
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %5 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %6 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %7 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %8 = load <4 x float>, ptr %4, align 16
-!LLVMIR:  %9 = load <4 x float>, ptr %5, align 16
-!LLVMIR:  %10 = bitcast <4 x float> %9 to <16 x i8>
-!LLVMIR:  %11 = bitcast <4 x float> %8 to <16 x i8>
-!LLVMIR:  %12 = bitcast <4 x float> %7 to <16 x i8>
-!LLVMIR:  %13 = bitcast <4 x float> %6 to <16 x i8>
-!LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-!LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_312:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_313:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_314:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_315:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_316:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_317:.*]] = load <4 x float>, ptr %[[VAL_315]], align 16
+! LLVMIR:         %[[VAL_318:.*]] = load <4 x float>, ptr %[[VAL_314]], align 16
+! LLVMIR:         %[[VAL_319:.*]] = load <4 x float>, ptr %[[VAL_313]], align 16
+! LLVMIR:         %[[VAL_320:.*]] = load <4 x float>, ptr %[[VAL_312]], align 16
+! LLVMIR:         %[[VAL_321:.*]] = bitcast <4 x float> %[[VAL_320]] to <16 x i8>
+! LLVMIR:         %[[VAL_322:.*]] = bitcast <4 x float> %[[VAL_319]] to <16 x i8>
+! LLVMIR:         %[[VAL_323:.*]] = bitcast <4 x float> %[[VAL_318]] to <16 x i8>
+! LLVMIR:         %[[VAL_324:.*]] = bitcast <4 x float> %[[VAL_317]] to <16 x i8>
+! LLVMIR:         %[[VAL_325:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_321]], <16 x i8> %[[VAL_322]], <16 x i8> %[[VAL_323]], <16 x i8> %[[VAL_324]])
+! LLVMIR:         store <512 x i1> %[[VAL_325]], ptr %[[VAL_316]], align 64
 
 
       subroutine test_mma_build_acc_r8()
@@ -665,21 +666,21 @@ subroutine test_mma_build_acc_r8()
       end subroutine test_mma_build_acc_r8
 
 !CHECK-LABEL: @test_mma_build_acc_r8
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %5 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %6 = load <2 x double>, ptr %2, align 16
-!LLVMIR:  %7 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %8 = load <2 x double>, ptr %4, align 16
-!LLVMIR:  %9 = load <2 x double>, ptr %5, align 16
-!LLVMIR:  %10 = bitcast <2 x double> %9 to <16 x i8>
-!LLVMIR:  %11 = bitcast <2 x double> %8 to <16 x i8>
-!LLVMIR:  %12 = bitcast <2 x double> %7 to <16 x i8>
-!LLVMIR:  %13 = bitcast <2 x double> %6 to <16 x i8>
-!LLVMIR:  %14 = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %10, <16 x i8> %11, <16 x i8> %12, <16 x i8> %13)
-!LLVMIR:  store <512 x i1> %14, ptr %1, align 64
+! LLVMIR:         %[[VAL_326:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_327:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_328:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_329:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_330:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_331:.*]] = load <2 x double>, ptr %[[VAL_329]], align 16
+! LLVMIR:         %[[VAL_332:.*]] = load <2 x double>, ptr %[[VAL_328]], align 16
+! LLVMIR:         %[[VAL_333:.*]] = load <2 x double>, ptr %[[VAL_327]], align 16
+! LLVMIR:         %[[VAL_334:.*]] = load <2 x double>, ptr %[[VAL_326]], align 16
+! LLVMIR:         %[[VAL_335:.*]] = bitcast <2 x double> %[[VAL_334]] to <16 x i8>
+! LLVMIR:         %[[VAL_336:.*]] = bitcast <2 x double> %[[VAL_333]] to <16 x i8>
+! LLVMIR:         %[[VAL_337:.*]] = bitcast <2 x double> %[[VAL_332]] to <16 x i8>
+! LLVMIR:         %[[VAL_338:.*]] = bitcast <2 x double> %[[VAL_331]] to <16 x i8>
+! LLVMIR:         %[[VAL_339:.*]] = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %[[VAL_335]], <16 x i8> %[[VAL_336]], <16 x i8> %[[VAL_337]], <16 x i8> %[[VAL_338]])
+! LLVMIR:         store <512 x i1> %[[VAL_339]], ptr %[[VAL_330]], align 64
 
 ! mma_disassemble_acc
 
@@ -692,11 +693,11 @@ subroutine test_disassemble_acc()
       end subroutine
 
 !CHECK-LABEL: @test_disassemble_acc_
-!LLVMIR:  %1 = alloca float, i64 1, align 4
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %3)
-!LLVMIR:  store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %4, ptr %1, align 16
+! LLVMIR:         %[[VAL_340:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_341:.*]] = alloca float, i64 1, align 4
+! LLVMIR:         %[[VAL_342:.*]] = load <512 x i1>, ptr %[[VAL_340]], align 64
+! LLVMIR:         %[[VAL_343:.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %[[VAL_342]])
+! LLVMIR:         store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %[[VAL_343]], ptr %[[VAL_341]], align 16
 
 ! mma_disassemble_pair
 
@@ -709,8 +710,8 @@ subroutine test_disassemble_pair()
       end subroutine
 
 !CHECK-LABEL: @test_disassemble_pair_
-!LLVMIR:  %1 = alloca float, i64 1, align 4
-!LLVMIR:  %2 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %3 = load <256 x i1>, ptr %2, align 32
-!LLVMIR:  %4 = call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %3)
-!LLVMIR:  store { <16 x i8>, <16 x i8> } %4, ptr %1, align 16
+! LLVMIR:         %[[VAL_344:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_345:.*]] = alloca float, i64 1, align 4
+! LLVMIR:         %[[VAL_346:.*]] = load <256 x i1>, ptr %[[VAL_344]], align 32
+! LLVMIR:         %[[VAL_347:.*]] = call { <16 x i8>, <16 x i8> } @llvm.ppc.vsx.disassemble.pair(<256 x i1> %[[VAL_346]])
+! LLVMIR:         store { <16 x i8>, <16 x i8> } %[[VAL_347]], ptr %[[VAL_345]], align 16
diff --git a/flang/test/Lower/PowerPC/ppc-mma-outer-product-1.f90 b/flang/test/Lower/PowerPC/ppc-mma-outer-product-1.f90
index 97bebc7683c02..6ad7958dedb90 100644
--- a/flang/test/Lower/PowerPC/ppc-mma-outer-product-1.f90
+++ b/flang/test/Lower/PowerPC/ppc-mma-outer-product-1.f90
@@ -10,13 +10,13 @@ subroutine test_pmxvbf16ger2_def()
       end subroutine test_pmxvbf16ger2_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_0:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_1:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_2:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_3:.*]] = load <16 x i8>, ptr %[[VAL_1]], align 16
+! LLVMIR:         %[[VAL_4:.*]] = load <16 x i8>, ptr %[[VAL_0]], align 16
+! LLVMIR:         %[[VAL_5:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2(<16 x i8> %[[VAL_3]], <16 x i8> %[[VAL_4]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_5]], ptr %[[VAL_2]], align 64
 
 
       subroutine test_pmxvbf16ger2_non_def()
@@ -28,13 +28,13 @@ subroutine test_pmxvbf16ger2_non_def()
       end subroutine test_pmxvbf16ger2_non_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_6:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_7:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_8:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_9:.*]] = load <16 x i8>, ptr %[[VAL_7]], align 16
+! LLVMIR:         %[[VAL_10:.*]] = load <16 x i8>, ptr %[[VAL_6]], align 16
+! LLVMIR:         %[[VAL_11:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2(<16 x i8> %[[VAL_9]], <16 x i8> %[[VAL_10]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_11]], ptr %[[VAL_8]], align 64
 
 
       subroutine test_pmxvbf16ger2nn_def()
@@ -46,14 +46,14 @@ subroutine test_pmxvbf16ger2nn_def()
       end subroutine test_pmxvbf16ger2nn_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2nn_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2nn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_12:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_13:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_14:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_15:.*]] = load <16 x i8>, ptr %[[VAL_13]], align 16
+! LLVMIR:         %[[VAL_16:.*]] = load <16 x i8>, ptr %[[VAL_12]], align 16
+! LLVMIR:         %[[VAL_17:.*]] = load <512 x i1>, ptr %[[VAL_14]], align 64
+! LLVMIR:         %[[VAL_18:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2nn(<512 x i1> %[[VAL_17]], <16 x i8> %[[VAL_15]], <16 x i8> %[[VAL_16]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_18]], ptr %[[VAL_14]], align 64
 
       subroutine test_pmxvbf16ger2nn_non_def()
       use, intrinsic :: mma
@@ -64,14 +64,14 @@ subroutine test_pmxvbf16ger2nn_non_def()
       end subroutine test_pmxvbf16ger2nn_non_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2nn_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2nn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_19:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_20:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_21:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_22:.*]] = load <16 x i8>, ptr %[[VAL_20]], align 16
+! LLVMIR:         %[[VAL_23:.*]] = load <16 x i8>, ptr %[[VAL_19]], align 16
+! LLVMIR:         %[[VAL_24:.*]] = load <512 x i1>, ptr %[[VAL_21]], align 64
+! LLVMIR:         %[[VAL_25:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2nn(<512 x i1> %[[VAL_24]], <16 x i8> %[[VAL_22]], <16 x i8> %[[VAL_23]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_25]], ptr %[[VAL_21]], align 64
 
       subroutine test_pmxvbf16ger2np_def()
       use, intrinsic :: mma
@@ -82,14 +82,14 @@ subroutine test_pmxvbf16ger2np_def()
       end subroutine test_pmxvbf16ger2np_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2np_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2np(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_26:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_27:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_28:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_29:.*]] = load <16 x i8>, ptr %[[VAL_27]], align 16
+! LLVMIR:         %[[VAL_30:.*]] = load <16 x i8>, ptr %[[VAL_26]], align 16
+! LLVMIR:         %[[VAL_31:.*]] = load <512 x i1>, ptr %[[VAL_28]], align 64
+! LLVMIR:         %[[VAL_32:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2np(<512 x i1> %[[VAL_31]], <16 x i8> %[[VAL_29]], <16 x i8> %[[VAL_30]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_32]], ptr %[[VAL_28]], align 64
 
       subroutine test_pmxvbf16ger2np_non_def()
       use, intrinsic :: mma
@@ -100,14 +100,14 @@ subroutine test_pmxvbf16ger2np_non_def()
       end subroutine test_pmxvbf16ger2np_non_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2np_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2np(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_33:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_34:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_35:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_36:.*]] = load <16 x i8>, ptr %[[VAL_34]], align 16
+! LLVMIR:         %[[VAL_37:.*]] = load <16 x i8>, ptr %[[VAL_33]], align 16
+! LLVMIR:         %[[VAL_38:.*]] = load <512 x i1>, ptr %[[VAL_35]], align 64
+! LLVMIR:         %[[VAL_39:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2np(<512 x i1> %[[VAL_38]], <16 x i8> %[[VAL_36]], <16 x i8> %[[VAL_37]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_39]], ptr %[[VAL_35]], align 64
 
       subroutine test_pmxvbf16ger2pn_def()
       use, intrinsic :: mma
@@ -118,14 +118,14 @@ subroutine test_pmxvbf16ger2pn_def()
       end subroutine test_pmxvbf16ger2pn_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2pn_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_40:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_41:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_42:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_43:.*]] = load <16 x i8>, ptr %[[VAL_41]], align 16
+! LLVMIR:         %[[VAL_44:.*]] = load <16 x i8>, ptr %[[VAL_40]], align 16
+! LLVMIR:         %[[VAL_45:.*]] = load <512 x i1>, ptr %[[VAL_42]], align 64
+! LLVMIR:         %[[VAL_46:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pn(<512 x i1> %[[VAL_45]], <16 x i8> %[[VAL_43]], <16 x i8> %[[VAL_44]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_46]], ptr %[[VAL_42]], align 64
 
       subroutine test_pmxvbf16ger2pn_non_def()
       use, intrinsic :: mma
@@ -136,14 +136,14 @@ subroutine test_pmxvbf16ger2pn_non_def()
       end subroutine test_pmxvbf16ger2pn_non_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2pn_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_47:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_48:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_49:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_50:.*]] = load <16 x i8>, ptr %[[VAL_48]], align 16
+! LLVMIR:         %[[VAL_51:.*]] = load <16 x i8>, ptr %[[VAL_47]], align 16
+! LLVMIR:         %[[VAL_52:.*]] = load <512 x i1>, ptr %[[VAL_49]], align 64
+! LLVMIR:         %[[VAL_53:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pn(<512 x i1> %[[VAL_52]], <16 x i8> %[[VAL_50]], <16 x i8> %[[VAL_51]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_53]], ptr %[[VAL_49]], align 64
 
       subroutine test_pmxvbf16ger2pp_def()
       use, intrinsic :: mma
@@ -154,14 +154,14 @@ subroutine test_pmxvbf16ger2pp_def()
       end subroutine test_pmxvbf16ger2pp_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2pp_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_54:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_55:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_56:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_57:.*]] = load <16 x i8>, ptr %[[VAL_55]], align 16
+! LLVMIR:         %[[VAL_58:.*]] = load <16 x i8>, ptr %[[VAL_54]], align 16
+! LLVMIR:         %[[VAL_59:.*]] = load <512 x i1>, ptr %[[VAL_56]], align 64
+! LLVMIR:         %[[VAL_60:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pp(<512 x i1> %[[VAL_59]], <16 x i8> %[[VAL_57]], <16 x i8> %[[VAL_58]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_60]], ptr %[[VAL_56]], align 64
 
       subroutine test_pmxvbf16ger2pp_non_def()
       use, intrinsic :: mma
@@ -172,14 +172,14 @@ subroutine test_pmxvbf16ger2pp_non_def()
       end subroutine test_pmxvbf16ger2pp_non_def
 
 !CHECK-LABEL: @test_pmxvbf16ger2pp_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_61:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_62:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_63:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_64:.*]] = load <16 x i8>, ptr %[[VAL_62]], align 16
+! LLVMIR:         %[[VAL_65:.*]] = load <16 x i8>, ptr %[[VAL_61]], align 16
+! LLVMIR:         %[[VAL_66:.*]] = load <512 x i1>, ptr %[[VAL_63]], align 64
+! LLVMIR:         %[[VAL_67:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pp(<512 x i1> %[[VAL_66]], <16 x i8> %[[VAL_64]], <16 x i8> %[[VAL_65]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_67]], ptr %[[VAL_63]], align 64
 
       subroutine test_pmxvf16ger2_def()
       use, intrinsic :: mma
@@ -190,13 +190,13 @@ subroutine test_pmxvf16ger2_def()
       end subroutine test_pmxvf16ger2_def
 
 !CHECK-LABEL: @test_pmxvf16ger2_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_68:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_69:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_70:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_71:.*]] = load <16 x i8>, ptr %[[VAL_69]], align 16
+! LLVMIR:         %[[VAL_72:.*]] = load <16 x i8>, ptr %[[VAL_68]], align 16
+! LLVMIR:         %[[VAL_73:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2(<16 x i8> %[[VAL_71]], <16 x i8> %[[VAL_72]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_73]], ptr %[[VAL_70]], align 64
 
       subroutine test_pmxvf16ger2_non_def()
       use, intrinsic :: mma
@@ -207,13 +207,13 @@ subroutine test_pmxvf16ger2_non_def()
       end subroutine test_pmxvf16ger2_non_def
 
 !CHECK-LABEL: @test_pmxvf16ger2_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_74:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_75:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_76:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_77:.*]] = load <16 x i8>, ptr %[[VAL_75]], align 16
+! LLVMIR:         %[[VAL_78:.*]] = load <16 x i8>, ptr %[[VAL_74]], align 16
+! LLVMIR:         %[[VAL_79:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2(<16 x i8> %[[VAL_77]], <16 x i8> %[[VAL_78]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_79]], ptr %[[VAL_76]], align 64
 
       subroutine test_pmxvf16ger2nn_def()
       use, intrinsic :: mma
@@ -224,14 +224,14 @@ subroutine test_pmxvf16ger2nn_def()
       end subroutine test_pmxvf16ger2nn_def
 
 !CHECK-LABEL: @test_pmxvf16ger2nn_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2nn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_80:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_81:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_82:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_83:.*]] = load <16 x i8>, ptr %[[VAL_81]], align 16
+! LLVMIR:         %[[VAL_84:.*]] = load <16 x i8>, ptr %[[VAL_80]], align 16
+! LLVMIR:         %[[VAL_85:.*]] = load <512 x i1>, ptr %[[VAL_82]], align 64
+! LLVMIR:         %[[VAL_86:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2nn(<512 x i1> %[[VAL_85]], <16 x i8> %[[VAL_83]], <16 x i8> %[[VAL_84]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_86]], ptr %[[VAL_82]], align 64
 
       subroutine test_pmxvf16ger2nn_non_def()
       use, intrinsic :: mma
@@ -242,14 +242,14 @@ subroutine test_pmxvf16ger2nn_non_def()
       end subroutine test_pmxvf16ger2nn_non_def
 
 !CHECK-LABEL: @test_pmxvf16ger2nn_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2nn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_87:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_88:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_89:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_90:.*]] = load <16 x i8>, ptr %[[VAL_88]], align 16
+! LLVMIR:         %[[VAL_91:.*]] = load <16 x i8>, ptr %[[VAL_87]], align 16
+! LLVMIR:         %[[VAL_92:.*]] = load <512 x i1>, ptr %[[VAL_89]], align 64
+! LLVMIR:         %[[VAL_93:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2nn(<512 x i1> %[[VAL_92]], <16 x i8> %[[VAL_90]], <16 x i8> %[[VAL_91]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_93]], ptr %[[VAL_89]], align 64
 
       subroutine test_pmxvf16ger2np_def()
       use, intrinsic :: mma
@@ -260,14 +260,14 @@ subroutine test_pmxvf16ger2np_def()
       end subroutine test_pmxvf16ger2np_def
 
 !CHECK-LABEL: @test_pmxvf16ger2np_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2np(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_94:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_95:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_96:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_97:.*]] = load <16 x i8>, ptr %[[VAL_95]], align 16
+! LLVMIR:         %[[VAL_98:.*]] = load <16 x i8>, ptr %[[VAL_94]], align 16
+! LLVMIR:         %[[VAL_99:.*]] = load <512 x i1>, ptr %[[VAL_96]], align 64
+! LLVMIR:         %[[VAL_100:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2np(<512 x i1> %[[VAL_99]], <16 x i8> %[[VAL_97]], <16 x i8> %[[VAL_98]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_100]], ptr %[[VAL_96]], align 64
 
       subroutine test_pmxvf16ger2np_non_def()
       use, intrinsic :: mma
@@ -278,14 +278,14 @@ subroutine test_pmxvf16ger2np_non_def()
       end subroutine test_pmxvf16ger2np_non_def
 
 !CHECK-LABEL: @test_pmxvf16ger2np_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2np(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_101:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_102:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_103:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_104:.*]] = load <16 x i8>, ptr %[[VAL_102]], align 16
+! LLVMIR:         %[[VAL_105:.*]] = load <16 x i8>, ptr %[[VAL_101]], align 16
+! LLVMIR:         %[[VAL_106:.*]] = load <512 x i1>, ptr %[[VAL_103]], align 64
+! LLVMIR:         %[[VAL_107:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2np(<512 x i1> %[[VAL_106]], <16 x i8> %[[VAL_104]], <16 x i8> %[[VAL_105]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_107]], ptr %[[VAL_103]], align 64
 
       subroutine test_pmxvf16ger2pn_def()
       use, intrinsic :: mma
@@ -296,14 +296,14 @@ subroutine test_pmxvf16ger2pn_def()
       end subroutine test_pmxvf16ger2pn_def
 
 !CHECK-LABEL: @test_pmxvf16ger2pn_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_108:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_109:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_110:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_111:.*]] = load <16 x i8>, ptr %[[VAL_109]], align 16
+! LLVMIR:         %[[VAL_112:.*]] = load <16 x i8>, ptr %[[VAL_108]], align 16
+! LLVMIR:         %[[VAL_113:.*]] = load <512 x i1>, ptr %[[VAL_110]], align 64
+! LLVMIR:         %[[VAL_114:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pn(<512 x i1> %[[VAL_113]], <16 x i8> %[[VAL_111]], <16 x i8> %[[VAL_112]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_114]], ptr %[[VAL_110]], align 64
 
       subroutine test_pmxvf16ger2pn_non_def()
       use, intrinsic :: mma
@@ -314,14 +314,14 @@ subroutine test_pmxvf16ger2pn_non_def()
       end subroutine test_pmxvf16ger2pn_non_def
 
 !CHECK-LABEL: @test_pmxvf16ger2pn_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_115:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_116:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_117:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_118:.*]] = load <16 x i8>, ptr %[[VAL_116]], align 16
+! LLVMIR:         %[[VAL_119:.*]] = load <16 x i8>, ptr %[[VAL_115]], align 16
+! LLVMIR:         %[[VAL_120:.*]] = load <512 x i1>, ptr %[[VAL_117]], align 64
+! LLVMIR:         %[[VAL_121:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pn(<512 x i1> %[[VAL_120]], <16 x i8> %[[VAL_118]], <16 x i8> %[[VAL_119]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_121]], ptr %[[VAL_117]], align 64
 
       subroutine test_pmxvf16ger2pp_def()
       use, intrinsic :: mma
@@ -332,14 +332,14 @@ subroutine test_pmxvf16ger2pp_def()
       end subroutine test_pmxvf16ger2pp_def
 
 !CHECK-LABEL: @test_pmxvf16ger2pp_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_122:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_123:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_124:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_125:.*]] = load <16 x i8>, ptr %[[VAL_123]], align 16
+! LLVMIR:         %[[VAL_126:.*]] = load <16 x i8>, ptr %[[VAL_122]], align 16
+! LLVMIR:         %[[VAL_127:.*]] = load <512 x i1>, ptr %[[VAL_124]], align 64
+! LLVMIR:         %[[VAL_128:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pp(<512 x i1> %[[VAL_127]], <16 x i8> %[[VAL_125]], <16 x i8> %[[VAL_126]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_128]], ptr %[[VAL_124]], align 64
 
       subroutine test_pmxvf16ger2pp_non_def()
       use, intrinsic :: mma
@@ -350,14 +350,14 @@ subroutine test_pmxvf16ger2pp_non_def()
       end subroutine test_pmxvf16ger2pp_non_def
 
 !CHECK-LABEL: @test_pmxvf16ger2pp_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_129:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_130:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_131:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_132:.*]] = load <16 x i8>, ptr %[[VAL_130]], align 16
+! LLVMIR:         %[[VAL_133:.*]] = load <16 x i8>, ptr %[[VAL_129]], align 16
+! LLVMIR:         %[[VAL_134:.*]] = load <512 x i1>, ptr %[[VAL_131]], align 64
+! LLVMIR:         %[[VAL_135:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pp(<512 x i1> %[[VAL_134]], <16 x i8> %[[VAL_132]], <16 x i8> %[[VAL_133]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_135]], ptr %[[VAL_131]], align 64
 
       subroutine test_pmxvf32ger_u1_def()
       use, intrinsic :: mma
@@ -368,13 +368,13 @@ subroutine test_pmxvf32ger_u1_def()
       end subroutine test_pmxvf32ger_u1_def
 
 !CHECK-LABEL: @test_pmxvf32ger_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_136:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_137:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_138:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_139:.*]] = load <16 x i8>, ptr %[[VAL_137]], align 16
+! LLVMIR:         %[[VAL_140:.*]] = load <16 x i8>, ptr %[[VAL_136]], align 16
+! LLVMIR:         %[[VAL_141:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8> %[[VAL_139]], <16 x i8> %[[VAL_140]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_141]], ptr %[[VAL_138]], align 64
 
       subroutine test_pmxvf32ger_u1_non_def()
       use, intrinsic :: mma
@@ -385,13 +385,13 @@ subroutine test_pmxvf32ger_u1_non_def()
       end subroutine test_pmxvf32ger_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf32ger_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_142:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_143:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_144:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_145:.*]] = load <16 x i8>, ptr %[[VAL_143]], align 16
+! LLVMIR:         %[[VAL_146:.*]] = load <16 x i8>, ptr %[[VAL_142]], align 16
+! LLVMIR:         %[[VAL_147:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8> %[[VAL_145]], <16 x i8> %[[VAL_146]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_147]], ptr %[[VAL_144]], align 64
 
       subroutine test_pmxvf32ger_r4_def()
       use, intrinsic :: mma
@@ -402,15 +402,15 @@ subroutine test_pmxvf32ger_r4_def()
       end subroutine test_pmxvf32ger_r4_def
 
 !CHECK-LABEL: @test_pmxvf32ger_r4_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8> %6, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %1, align 64
+! LLVMIR:         %[[VAL_148:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_149:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_150:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_151:.*]] = load <4 x float>, ptr %[[VAL_149]], align 16
+! LLVMIR:         %[[VAL_152:.*]] = load <4 x float>, ptr %[[VAL_148]], align 16
+! LLVMIR:         %[[VAL_153:.*]] = bitcast <4 x float> %[[VAL_151]] to <16 x i8>
+! LLVMIR:         %[[VAL_154:.*]] = bitcast <4 x float> %[[VAL_152]] to <16 x i8>
+! LLVMIR:         %[[VAL_155:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8> %[[VAL_153]], <16 x i8> %[[VAL_154]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_155]], ptr %[[VAL_150]], align 64
 
       subroutine test_pmxvf32ger_r4_non_def()
       use, intrinsic :: mma
@@ -421,15 +421,15 @@ subroutine test_pmxvf32ger_r4_non_def()
       end subroutine test_pmxvf32ger_r4_non_def
 
 !CHECK-LABEL: @test_pmxvf32ger_r4_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8> %6, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %1, align 64
+! LLVMIR:         %[[VAL_156:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_157:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_158:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_159:.*]] = load <4 x float>, ptr %[[VAL_157]], align 16
+! LLVMIR:         %[[VAL_160:.*]] = load <4 x float>, ptr %[[VAL_156]], align 16
+! LLVMIR:         %[[VAL_161:.*]] = bitcast <4 x float> %[[VAL_159]] to <16 x i8>
+! LLVMIR:         %[[VAL_162:.*]] = bitcast <4 x float> %[[VAL_160]] to <16 x i8>
+! LLVMIR:         %[[VAL_163:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8> %[[VAL_161]], <16 x i8> %[[VAL_162]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_163]], ptr %[[VAL_158]], align 64
 
       subroutine test_pmxvf32gernn_u1_def()
       use, intrinsic :: mma
@@ -440,14 +440,14 @@ subroutine test_pmxvf32gernn_u1_def()
       end subroutine test_pmxvf32gernn_u1_def
 
 !CHECK-LABEL: @test_pmxvf32gernn_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_164:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_165:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_166:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_167:.*]] = load <16 x i8>, ptr %[[VAL_165]], align 16
+! LLVMIR:         %[[VAL_168:.*]] = load <16 x i8>, ptr %[[VAL_164]], align 16
+! LLVMIR:         %[[VAL_169:.*]] = load <512 x i1>, ptr %[[VAL_166]], align 64
+! LLVMIR:         %[[VAL_170:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %[[VAL_169]], <16 x i8> %[[VAL_167]], <16 x i8> %[[VAL_168]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_170]], ptr %[[VAL_166]], align 64
 
       subroutine test_pmxvf32gernn_u1_non_def()
       use, intrinsic :: mma
@@ -458,14 +458,14 @@ subroutine test_pmxvf32gernn_u1_non_def()
       end subroutine test_pmxvf32gernn_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf32gernn_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_171:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_172:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_173:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_174:.*]] = load <16 x i8>, ptr %[[VAL_172]], align 16
+! LLVMIR:         %[[VAL_175:.*]] = load <16 x i8>, ptr %[[VAL_171]], align 16
+! LLVMIR:         %[[VAL_176:.*]] = load <512 x i1>, ptr %[[VAL_173]], align 64
+! LLVMIR:         %[[VAL_177:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %[[VAL_176]], <16 x i8> %[[VAL_174]], <16 x i8> %[[VAL_175]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_177]], ptr %[[VAL_173]], align 64
 
       subroutine test_pmxvf32gernn_r4_def()
       use, intrinsic :: mma
@@ -476,16 +476,16 @@ subroutine test_pmxvf32gernn_r4_def()
       end subroutine test_pmxvf32gernn_r4_def
 
 !CHECK-LABEL: @test_pmxvf32gernn_r4_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_178:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_179:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_180:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_181:.*]] = load <4 x float>, ptr %[[VAL_179]], align 16
+! LLVMIR:         %[[VAL_182:.*]] = load <4 x float>, ptr %[[VAL_178]], align 16
+! LLVMIR:         %[[VAL_183:.*]] = load <512 x i1>, ptr %[[VAL_180]], align 64
+! LLVMIR:         %[[VAL_184:.*]] = bitcast <4 x float> %[[VAL_181]] to <16 x i8>
+! LLVMIR:         %[[VAL_185:.*]] = bitcast <4 x float> %[[VAL_182]] to <16 x i8>
+! LLVMIR:         %[[VAL_186:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %[[VAL_183]], <16 x i8> %[[VAL_184]], <16 x i8> %[[VAL_185]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_186]], ptr %[[VAL_180]], align 64
 
       subroutine test_pmxvf32gernn_r4_non_def()
       use, intrinsic :: mma
@@ -496,16 +496,16 @@ subroutine test_pmxvf32gernn_r4_non_def()
       end subroutine test_pmxvf32gernn_r4_non_def
 
 !CHECK-LABEL: @test_pmxvf32gernn_r4_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_187:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_188:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_189:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_190:.*]] = load <4 x float>, ptr %[[VAL_188]], align 16
+! LLVMIR:         %[[VAL_191:.*]] = load <4 x float>, ptr %[[VAL_187]], align 16
+! LLVMIR:         %[[VAL_192:.*]] = load <512 x i1>, ptr %[[VAL_189]], align 64
+! LLVMIR:         %[[VAL_193:.*]] = bitcast <4 x float> %[[VAL_190]] to <16 x i8>
+! LLVMIR:         %[[VAL_194:.*]] = bitcast <4 x float> %[[VAL_191]] to <16 x i8>
+! LLVMIR:         %[[VAL_195:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %[[VAL_192]], <16 x i8> %[[VAL_193]], <16 x i8> %[[VAL_194]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_195]], ptr %[[VAL_189]], align 64
 
       subroutine test_pmxvf32gernp_u1_def()
       use, intrinsic :: mma
@@ -516,14 +516,14 @@ subroutine test_pmxvf32gernp_u1_def()
       end subroutine test_pmxvf32gernp_u1_def
 
 !CHECK-LABEL: @test_pmxvf32gernp_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_196:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_197:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_198:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_199:.*]] = load <16 x i8>, ptr %[[VAL_197]], align 16
+! LLVMIR:         %[[VAL_200:.*]] = load <16 x i8>, ptr %[[VAL_196]], align 16
+! LLVMIR:         %[[VAL_201:.*]] = load <512 x i1>, ptr %[[VAL_198]], align 64
+! LLVMIR:         %[[VAL_202:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1> %[[VAL_201]], <16 x i8> %[[VAL_199]], <16 x i8> %[[VAL_200]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_202]], ptr %[[VAL_198]], align 64
 
       subroutine test_pmxvf32gernp_u1_non_def()
       use, intrinsic :: mma
@@ -534,14 +534,14 @@ subroutine test_pmxvf32gernp_u1_non_def()
       end subroutine test_pmxvf32gernp_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf32gernp_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_203:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_204:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_205:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_206:.*]] = load <16 x i8>, ptr %[[VAL_204]], align 16
+! LLVMIR:         %[[VAL_207:.*]] = load <16 x i8>, ptr %[[VAL_203]], align 16
+! LLVMIR:         %[[VAL_208:.*]] = load <512 x i1>, ptr %[[VAL_205]], align 64
+! LLVMIR:         %[[VAL_209:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1> %[[VAL_208]], <16 x i8> %[[VAL_206]], <16 x i8> %[[VAL_207]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_209]], ptr %[[VAL_205]], align 64
 
       subroutine test_pmxvf32gernp_r4_def()
       use, intrinsic :: mma
@@ -552,16 +552,16 @@ subroutine test_pmxvf32gernp_r4_def()
       end subroutine test_pmxvf32gernp_r4_def
 
 !CHECK-LABEL: @test_pmxvf32gernp_r4_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_210:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_211:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_212:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_213:.*]] = load <4 x float>, ptr %[[VAL_211]], align 16
+! LLVMIR:         %[[VAL_214:.*]] = load <4 x float>, ptr %[[VAL_210]], align 16
+! LLVMIR:         %[[VAL_215:.*]] = load <512 x i1>, ptr %[[VAL_212]], align 64
+! LLVMIR:         %[[VAL_216:.*]] = bitcast <4 x float> %[[VAL_213]] to <16 x i8>
+! LLVMIR:         %[[VAL_217:.*]] = bitcast <4 x float> %[[VAL_214]] to <16 x i8>
+! LLVMIR:         %[[VAL_218:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1> %[[VAL_215]], <16 x i8> %[[VAL_216]], <16 x i8> %[[VAL_217]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_218]], ptr %[[VAL_212]], align 64
 
       subroutine test_pmxvf32gernp_r4_non_def()
       use, intrinsic :: mma
@@ -572,16 +572,16 @@ subroutine test_pmxvf32gernp_r4_non_def()
       end subroutine test_pmxvf32gernp_r4_non_def
 
 !CHECK-LABEL: @test_pmxvf32gernp_r4_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_219:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_220:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_221:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_222:.*]] = load <4 x float>, ptr %[[VAL_220]], align 16
+! LLVMIR:         %[[VAL_223:.*]] = load <4 x float>, ptr %[[VAL_219]], align 16
+! LLVMIR:         %[[VAL_224:.*]] = load <512 x i1>, ptr %[[VAL_221]], align 64
+! LLVMIR:         %[[VAL_225:.*]] = bitcast <4 x float> %[[VAL_222]] to <16 x i8>
+! LLVMIR:         %[[VAL_226:.*]] = bitcast <4 x float> %[[VAL_223]] to <16 x i8>
+! LLVMIR:         %[[VAL_227:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1> %[[VAL_224]], <16 x i8> %[[VAL_225]], <16 x i8> %[[VAL_226]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_227]], ptr %[[VAL_221]], align 64
 
       subroutine test_pmxvf32gerpn_u1_def()
       use, intrinsic :: mma
@@ -592,14 +592,14 @@ subroutine test_pmxvf32gerpn_u1_def()
       end subroutine test_pmxvf32gerpn_u1_def
 
 !CHECK-LABEL: @test_pmxvf32gerpn_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_228:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_229:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_230:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_231:.*]] = load <16 x i8>, ptr %[[VAL_229]], align 16
+! LLVMIR:         %[[VAL_232:.*]] = load <16 x i8>, ptr %[[VAL_228]], align 16
+! LLVMIR:         %[[VAL_233:.*]] = load <512 x i1>, ptr %[[VAL_230]], align 64
+! LLVMIR:         %[[VAL_234:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %[[VAL_233]], <16 x i8> %[[VAL_231]], <16 x i8> %[[VAL_232]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_234]], ptr %[[VAL_230]], align 64
 
       subroutine test_pmxvf32gerpn_u1_non_def()
       use, intrinsic :: mma
@@ -610,14 +610,14 @@ subroutine test_pmxvf32gerpn_u1_non_def()
       end subroutine test_pmxvf32gerpn_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf32gerpn_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_235:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_236:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_237:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_238:.*]] = load <16 x i8>, ptr %[[VAL_236]], align 16
+! LLVMIR:         %[[VAL_239:.*]] = load <16 x i8>, ptr %[[VAL_235]], align 16
+! LLVMIR:         %[[VAL_240:.*]] = load <512 x i1>, ptr %[[VAL_237]], align 64
+! LLVMIR:         %[[VAL_241:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %[[VAL_240]], <16 x i8> %[[VAL_238]], <16 x i8> %[[VAL_239]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_241]], ptr %[[VAL_237]], align 64
 
       subroutine test_pmxvf32gerpn_r4_def()
       use, intrinsic :: mma
@@ -628,16 +628,16 @@ subroutine test_pmxvf32gerpn_r4_def()
       end subroutine test_pmxvf32gerpn_r4_def
 
 !CHECK-LABEL: @test_pmxvf32gerpn_r4_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_242:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_243:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_244:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_245:.*]] = load <4 x float>, ptr %[[VAL_243]], align 16
+! LLVMIR:         %[[VAL_246:.*]] = load <4 x float>, ptr %[[VAL_242]], align 16
+! LLVMIR:         %[[VAL_247:.*]] = load <512 x i1>, ptr %[[VAL_244]], align 64
+! LLVMIR:         %[[VAL_248:.*]] = bitcast <4 x float> %[[VAL_245]] to <16 x i8>
+! LLVMIR:         %[[VAL_249:.*]] = bitcast <4 x float> %[[VAL_246]] to <16 x i8>
+! LLVMIR:         %[[VAL_250:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %[[VAL_247]], <16 x i8> %[[VAL_248]], <16 x i8> %[[VAL_249]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_250]], ptr %[[VAL_244]], align 64
 
       subroutine test_pmxvf32gerpn_r4_non_def()
       use, intrinsic :: mma
@@ -648,16 +648,16 @@ subroutine test_pmxvf32gerpn_r4_non_def()
       end subroutine test_pmxvf32gerpn_r4_non_def
 
 !CHECK-LABEL: @test_pmxvf32gerpn_r4_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_251:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_252:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_253:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_254:.*]] = load <4 x float>, ptr %[[VAL_252]], align 16
+! LLVMIR:         %[[VAL_255:.*]] = load <4 x float>, ptr %[[VAL_251]], align 16
+! LLVMIR:         %[[VAL_256:.*]] = load <512 x i1>, ptr %[[VAL_253]], align 64
+! LLVMIR:         %[[VAL_257:.*]] = bitcast <4 x float> %[[VAL_254]] to <16 x i8>
+! LLVMIR:         %[[VAL_258:.*]] = bitcast <4 x float> %[[VAL_255]] to <16 x i8>
+! LLVMIR:         %[[VAL_259:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %[[VAL_256]], <16 x i8> %[[VAL_257]], <16 x i8> %[[VAL_258]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_259]], ptr %[[VAL_253]], align 64
 
       subroutine test_pmxvf32gerpp_u1_def()
       use, intrinsic :: mma
@@ -668,14 +668,14 @@ subroutine test_pmxvf32gerpp_u1_def()
       end subroutine test_pmxvf32gerpp_u1_def
 
 !CHECK-LABEL: @test_pmxvf32gerpp_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_260:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_261:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_262:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_263:.*]] = load <16 x i8>, ptr %[[VAL_261]], align 16
+! LLVMIR:         %[[VAL_264:.*]] = load <16 x i8>, ptr %[[VAL_260]], align 16
+! LLVMIR:         %[[VAL_265:.*]] = load <512 x i1>, ptr %[[VAL_262]], align 64
+! LLVMIR:         %[[VAL_266:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1> %[[VAL_265]], <16 x i8> %[[VAL_263]], <16 x i8> %[[VAL_264]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_266]], ptr %[[VAL_262]], align 64
 
       subroutine test_pmxvf32gerpp_u1_non_def()
       use, intrinsic :: mma
@@ -686,14 +686,14 @@ subroutine test_pmxvf32gerpp_u1_non_def()
       end subroutine test_pmxvf32gerpp_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf32gerpp_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_267:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_268:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_269:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_270:.*]] = load <16 x i8>, ptr %[[VAL_268]], align 16
+! LLVMIR:         %[[VAL_271:.*]] = load <16 x i8>, ptr %[[VAL_267]], align 16
+! LLVMIR:         %[[VAL_272:.*]] = load <512 x i1>, ptr %[[VAL_269]], align 64
+! LLVMIR:         %[[VAL_273:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1> %[[VAL_272]], <16 x i8> %[[VAL_270]], <16 x i8> %[[VAL_271]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_273]], ptr %[[VAL_269]], align 64
 
       subroutine test_pmxvf32gerpp_r4_def()
       use, intrinsic :: mma
@@ -704,16 +704,16 @@ subroutine test_pmxvf32gerpp_r4_def()
       end subroutine test_pmxvf32gerpp_r4_def
 
 !CHECK-LABEL: @test_pmxvf32gerpp_r4_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_274:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_275:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_276:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_277:.*]] = load <4 x float>, ptr %[[VAL_275]], align 16
+! LLVMIR:         %[[VAL_278:.*]] = load <4 x float>, ptr %[[VAL_274]], align 16
+! LLVMIR:         %[[VAL_279:.*]] = load <512 x i1>, ptr %[[VAL_276]], align 64
+! LLVMIR:         %[[VAL_280:.*]] = bitcast <4 x float> %[[VAL_277]] to <16 x i8>
+! LLVMIR:         %[[VAL_281:.*]] = bitcast <4 x float> %[[VAL_278]] to <16 x i8>
+! LLVMIR:         %[[VAL_282:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1> %[[VAL_279]], <16 x i8> %[[VAL_280]], <16 x i8> %[[VAL_281]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_282]], ptr %[[VAL_276]], align 64
 
       subroutine test_pmxvf32gerpp_r4_non_def()
       use, intrinsic :: mma
@@ -724,16 +724,16 @@ subroutine test_pmxvf32gerpp_r4_non_def()
       end subroutine test_pmxvf32gerpp_r4_non_def
 
 !CHECK-LABEL: @test_pmxvf32gerpp_r4_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_283:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_284:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_285:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_286:.*]] = load <4 x float>, ptr %[[VAL_284]], align 16
+! LLVMIR:         %[[VAL_287:.*]] = load <4 x float>, ptr %[[VAL_283]], align 16
+! LLVMIR:         %[[VAL_288:.*]] = load <512 x i1>, ptr %[[VAL_285]], align 64
+! LLVMIR:         %[[VAL_289:.*]] = bitcast <4 x float> %[[VAL_286]] to <16 x i8>
+! LLVMIR:         %[[VAL_290:.*]] = bitcast <4 x float> %[[VAL_287]] to <16 x i8>
+! LLVMIR:         %[[VAL_291:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1> %[[VAL_288]], <16 x i8> %[[VAL_289]], <16 x i8> %[[VAL_290]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_291]], ptr %[[VAL_285]], align 64
 
       subroutine test_pmxvf64ger_u1_def()
       use, intrinsic :: mma
@@ -745,13 +745,13 @@ subroutine test_pmxvf64ger_u1_def()
       end subroutine test_pmxvf64ger_u1_def
 
 !CHECK-LABEL: @test_pmxvf64ger_u1_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %2, align 64
+! LLVMIR:         %[[VAL_292:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_293:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_294:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_295:.*]] = load <256 x i1>, ptr %[[VAL_294]], align 32
+! LLVMIR:         %[[VAL_296:.*]] = load <16 x i8>, ptr %[[VAL_292]], align 16
+! LLVMIR:         %[[VAL_297:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1> %[[VAL_295]], <16 x i8> %[[VAL_296]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_297]], ptr %[[VAL_293]], align 64
 
       subroutine test_pmxvf64ger_u1_non_def()
       use, intrinsic :: mma
@@ -763,13 +763,13 @@ subroutine test_pmxvf64ger_u1_non_def()
       end subroutine test_pmxvf64ger_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf64ger_u1_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %2, align 64
+! LLVMIR:         %[[VAL_298:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_299:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_300:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_301:.*]] = load <256 x i1>, ptr %[[VAL_300]], align 32
+! LLVMIR:         %[[VAL_302:.*]] = load <16 x i8>, ptr %[[VAL_298]], align 16
+! LLVMIR:         %[[VAL_303:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1> %[[VAL_301]], <16 x i8> %[[VAL_302]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_303]], ptr %[[VAL_299]], align 64
 
       subroutine test_pmxvf64ger_r8_def()
       use, intrinsic :: mma
@@ -781,14 +781,14 @@ subroutine test_pmxvf64ger_r8_def()
       end subroutine test_pmxvf64ger_r8_def
 
 !CHECK-LABEL: @test_pmxvf64ger_r8_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1> %4, <16 x i8> %6, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_304:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_305:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_306:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_307:.*]] = load <256 x i1>, ptr %[[VAL_306]], align 32
+! LLVMIR:         %[[VAL_308:.*]] = load <2 x double>, ptr %[[VAL_304]], align 16
+! LLVMIR:         %[[VAL_309:.*]] = bitcast <2 x double> %[[VAL_308]] to <16 x i8>
+! LLVMIR:         %[[VAL_310:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1> %[[VAL_307]], <16 x i8> %[[VAL_309]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_310]], ptr %[[VAL_305]], align 64
 
       subroutine test_pmxvf64ger_r8_non_def()
       use, intrinsic :: mma
@@ -800,14 +800,14 @@ subroutine test_pmxvf64ger_r8_non_def()
       end subroutine test_pmxvf64ger_r8_non_def
 
 !CHECK-LABEL: @test_pmxvf64ger_r8_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1> %4, <16 x i8> %6, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_311:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_312:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_313:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_314:.*]] = load <256 x i1>, ptr %[[VAL_313]], align 32
+! LLVMIR:         %[[VAL_315:.*]] = load <2 x double>, ptr %[[VAL_311]], align 16
+! LLVMIR:         %[[VAL_316:.*]] = bitcast <2 x double> %[[VAL_315]] to <16 x i8>
+! LLVMIR:         %[[VAL_317:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1> %[[VAL_314]], <16 x i8> %[[VAL_316]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_317]], ptr %[[VAL_312]], align 64
 
       subroutine test_pmxvf64gernn_u1_def()
       use, intrinsic :: mma
@@ -819,14 +819,14 @@ subroutine test_pmxvf64gernn_u1_def()
       end subroutine test_pmxvf64gernn_u1_def
 
 !CHECK-LABEL: @test_pmxvf64gernn_u1_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_318:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_319:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_320:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_321:.*]] = load <256 x i1>, ptr %[[VAL_320]], align 32
+! LLVMIR:         %[[VAL_322:.*]] = load <16 x i8>, ptr %[[VAL_318]], align 16
+! LLVMIR:         %[[VAL_323:.*]] = load <512 x i1>, ptr %[[VAL_319]], align 64
+! LLVMIR:         %[[VAL_324:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %[[VAL_323]], <256 x i1> %[[VAL_321]], <16 x i8> %[[VAL_322]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_324]], ptr %[[VAL_319]], align 64
 
       subroutine test_pmxvf64gernn_u1_non_def()
       use, intrinsic :: mma
@@ -838,14 +838,14 @@ subroutine test_pmxvf64gernn_u1_non_def()
       end subroutine test_pmxvf64gernn_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf64gernn_u1_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_325:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_326:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_327:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_328:.*]] = load <256 x i1>, ptr %[[VAL_327]], align 32
+! LLVMIR:         %[[VAL_329:.*]] = load <16 x i8>, ptr %[[VAL_325]], align 16
+! LLVMIR:         %[[VAL_330:.*]] = load <512 x i1>, ptr %[[VAL_326]], align 64
+! LLVMIR:         %[[VAL_331:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %[[VAL_330]], <256 x i1> %[[VAL_328]], <16 x i8> %[[VAL_329]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_331]], ptr %[[VAL_326]], align 64
 
       subroutine test_pmxvf64gernn_r8_def()
       use, intrinsic :: mma
@@ -857,15 +857,15 @@ subroutine test_pmxvf64gernn_r8_def()
       end subroutine test_pmxvf64gernn_r8_def
 
 !CHECK-LABEL: @test_pmxvf64gernn_r8_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_332:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_333:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_334:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_335:.*]] = load <256 x i1>, ptr %[[VAL_334]], align 32
+! LLVMIR:         %[[VAL_336:.*]] = load <2 x double>, ptr %[[VAL_332]], align 16
+! LLVMIR:         %[[VAL_337:.*]] = load <512 x i1>, ptr %[[VAL_333]], align 64
+! LLVMIR:         %[[VAL_338:.*]] = bitcast <2 x double> %[[VAL_336]] to <16 x i8>
+! LLVMIR:         %[[VAL_339:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %[[VAL_337]], <256 x i1> %[[VAL_335]], <16 x i8> %[[VAL_338]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_339]], ptr %[[VAL_333]], align 64
 
       subroutine test_pmxvf64gernn_r8_non_def()
       use, intrinsic :: mma
@@ -877,15 +877,15 @@ subroutine test_pmxvf64gernn_r8_non_def()
       end subroutine test_pmxvf64gernn_r8_non_def
 
 !CHECK-LABEL: @test_pmxvf64gernn_r8_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_340:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_341:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_342:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_343:.*]] = load <256 x i1>, ptr %[[VAL_342]], align 32
+! LLVMIR:         %[[VAL_344:.*]] = load <2 x double>, ptr %[[VAL_340]], align 16
+! LLVMIR:         %[[VAL_345:.*]] = load <512 x i1>, ptr %[[VAL_341]], align 64
+! LLVMIR:         %[[VAL_346:.*]] = bitcast <2 x double> %[[VAL_344]] to <16 x i8>
+! LLVMIR:         %[[VAL_347:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %[[VAL_345]], <256 x i1> %[[VAL_343]], <16 x i8> %[[VAL_346]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_347]], ptr %[[VAL_341]], align 64
 
       subroutine test_pmxvf64gernp_u1_def()
       use, intrinsic :: mma
@@ -897,14 +897,14 @@ subroutine test_pmxvf64gernp_u1_def()
       end subroutine test_pmxvf64gernp_u1_def
 
 !CHECK-LABEL: @test_pmxvf64gernp_u1_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_348:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_349:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_350:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_351:.*]] = load <256 x i1>, ptr %[[VAL_350]], align 32
+! LLVMIR:         %[[VAL_352:.*]] = load <16 x i8>, ptr %[[VAL_348]], align 16
+! LLVMIR:         %[[VAL_353:.*]] = load <512 x i1>, ptr %[[VAL_349]], align 64
+! LLVMIR:         %[[VAL_354:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %[[VAL_353]], <256 x i1> %[[VAL_351]], <16 x i8> %[[VAL_352]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_354]], ptr %[[VAL_349]], align 64
 
       subroutine test_pmxvf64gernp_u1_non_def()
       use, intrinsic :: mma
@@ -916,14 +916,14 @@ subroutine test_pmxvf64gernp_u1_non_def()
       end subroutine test_pmxvf64gernp_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf64gernp_u1_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_355:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_356:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_357:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_358:.*]] = load <256 x i1>, ptr %[[VAL_357]], align 32
+! LLVMIR:         %[[VAL_359:.*]] = load <16 x i8>, ptr %[[VAL_355]], align 16
+! LLVMIR:         %[[VAL_360:.*]] = load <512 x i1>, ptr %[[VAL_356]], align 64
+! LLVMIR:         %[[VAL_361:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %[[VAL_360]], <256 x i1> %[[VAL_358]], <16 x i8> %[[VAL_359]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_361]], ptr %[[VAL_356]], align 64
 
       subroutine test_pmxvf64gernp_r8_def()
       use, intrinsic :: mma
@@ -935,15 +935,15 @@ subroutine test_pmxvf64gernp_r8_def()
       end subroutine test_pmxvf64gernp_r8_def
 
 !CHECK-LABEL: @test_pmxvf64gernp_r8_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_362:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_363:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_364:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_365:.*]] = load <256 x i1>, ptr %[[VAL_364]], align 32
+! LLVMIR:         %[[VAL_366:.*]] = load <2 x double>, ptr %[[VAL_362]], align 16
+! LLVMIR:         %[[VAL_367:.*]] = load <512 x i1>, ptr %[[VAL_363]], align 64
+! LLVMIR:         %[[VAL_368:.*]] = bitcast <2 x double> %[[VAL_366]] to <16 x i8>
+! LLVMIR:         %[[VAL_369:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %[[VAL_367]], <256 x i1> %[[VAL_365]], <16 x i8> %[[VAL_368]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_369]], ptr %[[VAL_363]], align 64
 
       subroutine test_pmxvf64gernp_r8_non_def()
       use, intrinsic :: mma
@@ -955,15 +955,15 @@ subroutine test_pmxvf64gernp_r8_non_def()
       end subroutine test_pmxvf64gernp_r8_non_def
 
 !CHECK-LABEL: @test_pmxvf64gernp_r8_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_370:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_371:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_372:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_373:.*]] = load <256 x i1>, ptr %[[VAL_372]], align 32
+! LLVMIR:         %[[VAL_374:.*]] = load <2 x double>, ptr %[[VAL_370]], align 16
+! LLVMIR:         %[[VAL_375:.*]] = load <512 x i1>, ptr %[[VAL_371]], align 64
+! LLVMIR:         %[[VAL_376:.*]] = bitcast <2 x double> %[[VAL_374]] to <16 x i8>
+! LLVMIR:         %[[VAL_377:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %[[VAL_375]], <256 x i1> %[[VAL_373]], <16 x i8> %[[VAL_376]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_377]], ptr %[[VAL_371]], align 64
 
       subroutine test_pmxvf64gerpn_u1_def()
       use, intrinsic :: mma
@@ -975,14 +975,14 @@ subroutine test_pmxvf64gerpn_u1_def()
       end subroutine test_pmxvf64gerpn_u1_def
 
 !CHECK-LABEL: @test_pmxvf64gerpn_u1_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_378:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_379:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_380:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_381:.*]] = load <256 x i1>, ptr %[[VAL_380]], align 32
+! LLVMIR:         %[[VAL_382:.*]] = load <16 x i8>, ptr %[[VAL_378]], align 16
+! LLVMIR:         %[[VAL_383:.*]] = load <512 x i1>, ptr %[[VAL_379]], align 64
+! LLVMIR:         %[[VAL_384:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1> %[[VAL_383]], <256 x i1> %[[VAL_381]], <16 x i8> %[[VAL_382]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_384]], ptr %[[VAL_379]], align 64
 
       subroutine test_pmxvf64gerpn_u1_non_def()
       use, intrinsic :: mma
@@ -994,14 +994,14 @@ subroutine test_pmxvf64gerpn_u1_non_def()
       end subroutine test_pmxvf64gerpn_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf64gerpn_u1_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_385:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_386:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_387:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_388:.*]] = load <256 x i1>, ptr %[[VAL_387]], align 32
+! LLVMIR:         %[[VAL_389:.*]] = load <16 x i8>, ptr %[[VAL_385]], align 16
+! LLVMIR:         %[[VAL_390:.*]] = load <512 x i1>, ptr %[[VAL_386]], align 64
+! LLVMIR:         %[[VAL_391:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1> %[[VAL_390]], <256 x i1> %[[VAL_388]], <16 x i8> %[[VAL_389]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_391]], ptr %[[VAL_386]], align 64
 
       subroutine test_pmxvf64gerpn_r8_def()
       use, intrinsic :: mma
@@ -1013,15 +1013,15 @@ subroutine test_pmxvf64gerpn_r8_def()
       end subroutine test_pmxvf64gerpn_r8_def
 
 !CHECK-LABEL: @test_pmxvf64gerpn_r8_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_392:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_393:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_394:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_395:.*]] = load <256 x i1>, ptr %[[VAL_394]], align 32
+! LLVMIR:         %[[VAL_396:.*]] = load <2 x double>, ptr %[[VAL_392]], align 16
+! LLVMIR:         %[[VAL_397:.*]] = load <512 x i1>, ptr %[[VAL_393]], align 64
+! LLVMIR:         %[[VAL_398:.*]] = bitcast <2 x double> %[[VAL_396]] to <16 x i8>
+! LLVMIR:         %[[VAL_399:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1> %[[VAL_397]], <256 x i1> %[[VAL_395]], <16 x i8> %[[VAL_398]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_399]], ptr %[[VAL_393]], align 64
 
       subroutine test_pmxvf64gerpn_r8_non_def()
       use, intrinsic :: mma
@@ -1033,15 +1033,15 @@ subroutine test_pmxvf64gerpn_r8_non_def()
       end subroutine test_pmxvf64gerpn_r8_non_def
 
 !CHECK-LABEL: @test_pmxvf64gerpn_r8_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_400:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_401:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_402:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_403:.*]] = load <256 x i1>, ptr %[[VAL_402]], align 32
+! LLVMIR:         %[[VAL_404:.*]] = load <2 x double>, ptr %[[VAL_400]], align 16
+! LLVMIR:         %[[VAL_405:.*]] = load <512 x i1>, ptr %[[VAL_401]], align 64
+! LLVMIR:         %[[VAL_406:.*]] = bitcast <2 x double> %[[VAL_404]] to <16 x i8>
+! LLVMIR:         %[[VAL_407:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1> %[[VAL_405]], <256 x i1> %[[VAL_403]], <16 x i8> %[[VAL_406]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_407]], ptr %[[VAL_401]], align 64
 
       subroutine test_pmxvf64gerpp_u1_def()
       use, intrinsic :: mma
@@ -1053,14 +1053,14 @@ subroutine test_pmxvf64gerpp_u1_def()
       end subroutine test_pmxvf64gerpp_u1_def
 
 !CHECK-LABEL: @test_pmxvf64gerpp_u1_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_408:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_409:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_410:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_411:.*]] = load <256 x i1>, ptr %[[VAL_410]], align 32
+! LLVMIR:         %[[VAL_412:.*]] = load <16 x i8>, ptr %[[VAL_408]], align 16
+! LLVMIR:         %[[VAL_413:.*]] = load <512 x i1>, ptr %[[VAL_409]], align 64
+! LLVMIR:         %[[VAL_414:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1> %[[VAL_413]], <256 x i1> %[[VAL_411]], <16 x i8> %[[VAL_412]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_414]], ptr %[[VAL_409]], align 64
 
       subroutine test_pmxvf64gerpp_u1_non_def()
       use, intrinsic :: mma
@@ -1072,14 +1072,14 @@ subroutine test_pmxvf64gerpp_u1_non_def()
       end subroutine test_pmxvf64gerpp_u1_non_def
 
 !CHECK-LABEL: @test_pmxvf64gerpp_u1_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_415:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_416:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_417:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_418:.*]] = load <256 x i1>, ptr %[[VAL_417]], align 32
+! LLVMIR:         %[[VAL_419:.*]] = load <16 x i8>, ptr %[[VAL_415]], align 16
+! LLVMIR:         %[[VAL_420:.*]] = load <512 x i1>, ptr %[[VAL_416]], align 64
+! LLVMIR:         %[[VAL_421:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1> %[[VAL_420]], <256 x i1> %[[VAL_418]], <16 x i8> %[[VAL_419]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_421]], ptr %[[VAL_416]], align 64
 
       subroutine test_pmxvf64gerpp_r8_def()
       use, intrinsic :: mma
@@ -1091,15 +1091,15 @@ subroutine test_pmxvf64gerpp_r8_def()
       end subroutine test_pmxvf64gerpp_r8_def
 
 !CHECK-LABEL: @test_pmxvf64gerpp_r8_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_422:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_423:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_424:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_425:.*]] = load <256 x i1>, ptr %[[VAL_424]], align 32
+! LLVMIR:         %[[VAL_426:.*]] = load <2 x double>, ptr %[[VAL_422]], align 16
+! LLVMIR:         %[[VAL_427:.*]] = load <512 x i1>, ptr %[[VAL_423]], align 64
+! LLVMIR:         %[[VAL_428:.*]] = bitcast <2 x double> %[[VAL_426]] to <16 x i8>
+! LLVMIR:         %[[VAL_429:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1> %[[VAL_427]], <256 x i1> %[[VAL_425]], <16 x i8> %[[VAL_428]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_429]], ptr %[[VAL_423]], align 64
 
       subroutine test_pmxvf64gerpp_r8_non_def()
       use, intrinsic :: mma
@@ -1111,15 +1111,15 @@ subroutine test_pmxvf64gerpp_r8_non_def()
       end subroutine test_pmxvf64gerpp_r8_non_def
 
 !CHECK-LABEL: @test_pmxvf64gerpp_r8_non_def_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_430:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_431:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_432:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_433:.*]] = load <256 x i1>, ptr %[[VAL_432]], align 32
+! LLVMIR:         %[[VAL_434:.*]] = load <2 x double>, ptr %[[VAL_430]], align 16
+! LLVMIR:         %[[VAL_435:.*]] = load <512 x i1>, ptr %[[VAL_431]], align 64
+! LLVMIR:         %[[VAL_436:.*]] = bitcast <2 x double> %[[VAL_434]] to <16 x i8>
+! LLVMIR:         %[[VAL_437:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1> %[[VAL_435]], <256 x i1> %[[VAL_433]], <16 x i8> %[[VAL_436]], i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_437]], ptr %[[VAL_431]], align 64
 
       subroutine test_pmxvi16ger2_u1_def()
       use, intrinsic :: mma
@@ -1130,13 +1130,13 @@ subroutine test_pmxvi16ger2_u1_def()
       end subroutine test_pmxvi16ger2_u1_def
 
 !CHECK-LABEL: @test_pmxvi16ger2_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_438:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_439:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_440:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_441:.*]] = load <16 x i8>, ptr %[[VAL_439]], align 16
+! LLVMIR:         %[[VAL_442:.*]] = load <16 x i8>, ptr %[[VAL_438]], align 16
+! LLVMIR:         %[[VAL_443:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8> %[[VAL_441]], <16 x i8> %[[VAL_442]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_443]], ptr %[[VAL_440]], align 64
 
       subroutine test_pmxvi16ger2_u1_non_def()
       use, intrinsic :: mma
@@ -1147,13 +1147,13 @@ subroutine test_pmxvi16ger2_u1_non_def()
       end subroutine test_pmxvi16ger2_u1_non_def
 
 !CHECK-LABEL: @test_pmxvi16ger2_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_444:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_445:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_446:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_447:.*]] = load <16 x i8>, ptr %[[VAL_445]], align 16
+! LLVMIR:         %[[VAL_448:.*]] = load <16 x i8>, ptr %[[VAL_444]], align 16
+! LLVMIR:         %[[VAL_449:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8> %[[VAL_447]], <16 x i8> %[[VAL_448]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_449]], ptr %[[VAL_446]], align 64
 
       subroutine test_pmxvi16ger2_i2_def()
       use, intrinsic :: mma
@@ -1164,15 +1164,15 @@ subroutine test_pmxvi16ger2_i2_def()
       end subroutine test_pmxvi16ger2_i2_def
 
 !CHECK-LABEL: @test_pmxvi16ger2_i2_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8> %6, <16 x i8> %7, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %1, align 64
+! LLVMIR:         %[[VAL_450:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_451:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_452:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_453:.*]] = load <8 x i16>, ptr %[[VAL_451]], align 16
+! LLVMIR:         %[[VAL_454:.*]] = load <8 x i16>, ptr %[[VAL_450]], align 16
+! LLVMIR:         %[[VAL_455:.*]] = bitcast <8 x i16> %[[VAL_453]] to <16 x i8>
+! LLVMIR:         %[[VAL_456:.*]] = bitcast <8 x i16> %[[VAL_454]] to <16 x i8>
+! LLVMIR:         %[[VAL_457:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8> %[[VAL_455]], <16 x i8> %[[VAL_456]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_457]], ptr %[[VAL_452]], align 64
 
       subroutine test_pmxvi16ger2_i2_non_def()
       use, intrinsic :: mma
@@ -1183,15 +1183,15 @@ subroutine test_pmxvi16ger2_i2_non_def()
       end subroutine test_pmxvi16ger2_i2_non_def
 
 !CHECK-LABEL: @test_pmxvi16ger2_i2_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8> %6, <16 x i8> %7, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %1, align 64
+! LLVMIR:         %[[VAL_458:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_459:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_460:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_461:.*]] = load <8 x i16>, ptr %[[VAL_459]], align 16
+! LLVMIR:         %[[VAL_462:.*]] = load <8 x i16>, ptr %[[VAL_458]], align 16
+! LLVMIR:         %[[VAL_463:.*]] = bitcast <8 x i16> %[[VAL_461]] to <16 x i8>
+! LLVMIR:         %[[VAL_464:.*]] = bitcast <8 x i16> %[[VAL_462]] to <16 x i8>
+! LLVMIR:         %[[VAL_465:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8> %[[VAL_463]], <16 x i8> %[[VAL_464]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_465]], ptr %[[VAL_460]], align 64
 
       subroutine test_pmxvi16ger2pp_u1_def()
       use, intrinsic :: mma
@@ -1202,14 +1202,14 @@ subroutine test_pmxvi16ger2pp_u1_def()
       end subroutine test_pmxvi16ger2pp_u1_def
 
 !CHECK-LABEL: @test_pmxvi16ger2pp_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_466:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_467:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_468:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_469:.*]] = load <16 x i8>, ptr %[[VAL_467]], align 16
+! LLVMIR:         %[[VAL_470:.*]] = load <16 x i8>, ptr %[[VAL_466]], align 16
+! LLVMIR:         %[[VAL_471:.*]] = load <512 x i1>, ptr %[[VAL_468]], align 64
+! LLVMIR:         %[[VAL_472:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1> %[[VAL_471]], <16 x i8> %[[VAL_469]], <16 x i8> %[[VAL_470]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_472]], ptr %[[VAL_468]], align 64
 
       subroutine test_pmxvi16ger2pp_u1_non_def()
       use, intrinsic :: mma
@@ -1220,14 +1220,14 @@ subroutine test_pmxvi16ger2pp_u1_non_def()
       end subroutine test_pmxvi16ger2pp_u1_non_def
 
 !CHECK-LABEL: @test_pmxvi16ger2pp_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_473:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_474:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_475:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_476:.*]] = load <16 x i8>, ptr %[[VAL_474]], align 16
+! LLVMIR:         %[[VAL_477:.*]] = load <16 x i8>, ptr %[[VAL_473]], align 16
+! LLVMIR:         %[[VAL_478:.*]] = load <512 x i1>, ptr %[[VAL_475]], align 64
+! LLVMIR:         %[[VAL_479:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1> %[[VAL_478]], <16 x i8> %[[VAL_476]], <16 x i8> %[[VAL_477]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_479]], ptr %[[VAL_475]], align 64
 
       subroutine test_pmxvi16ger2pp_i2_def()
       use, intrinsic :: mma
@@ -1238,16 +1238,16 @@ subroutine test_pmxvi16ger2pp_i2_def()
       end subroutine test_pmxvi16ger2pp_i2_def
 
 !CHECK-LABEL: @test_pmxvi16ger2pp_i2_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_480:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_481:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_482:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_483:.*]] = load <8 x i16>, ptr %[[VAL_481]], align 16
+! LLVMIR:         %[[VAL_484:.*]] = load <8 x i16>, ptr %[[VAL_480]], align 16
+! LLVMIR:         %[[VAL_485:.*]] = load <512 x i1>, ptr %[[VAL_482]], align 64
+! LLVMIR:         %[[VAL_486:.*]] = bitcast <8 x i16> %[[VAL_483]] to <16 x i8>
+! LLVMIR:         %[[VAL_487:.*]] = bitcast <8 x i16> %[[VAL_484]] to <16 x i8>
+! LLVMIR:         %[[VAL_488:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1> %[[VAL_485]], <16 x i8> %[[VAL_486]], <16 x i8> %[[VAL_487]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_488]], ptr %[[VAL_482]], align 64
 
       subroutine test_pmxvi16ger2pp_i2_non_def()
       use, intrinsic :: mma
@@ -1258,16 +1258,16 @@ subroutine test_pmxvi16ger2pp_i2_non_def()
       end subroutine test_pmxvi16ger2pp_i2_non_def
 
 !CHECK-LABEL: @test_pmxvi16ger2pp_i2_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_489:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_490:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_491:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_492:.*]] = load <8 x i16>, ptr %[[VAL_490]], align 16
+! LLVMIR:         %[[VAL_493:.*]] = load <8 x i16>, ptr %[[VAL_489]], align 16
+! LLVMIR:         %[[VAL_494:.*]] = load <512 x i1>, ptr %[[VAL_491]], align 64
+! LLVMIR:         %[[VAL_495:.*]] = bitcast <8 x i16> %[[VAL_492]] to <16 x i8>
+! LLVMIR:         %[[VAL_496:.*]] = bitcast <8 x i16> %[[VAL_493]] to <16 x i8>
+! LLVMIR:         %[[VAL_497:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1> %[[VAL_494]], <16 x i8> %[[VAL_495]], <16 x i8> %[[VAL_496]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_497]], ptr %[[VAL_491]], align 64
 
       subroutine test_pmxvi16ger2s_u1_def()
       use, intrinsic :: mma
@@ -1278,13 +1278,13 @@ subroutine test_pmxvi16ger2s_u1_def()
       end subroutine test_pmxvi16ger2s_u1_def
 
 !CHECK-LABEL: @test_pmxvi16ger2s_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_498:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_499:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_500:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_501:.*]] = load <16 x i8>, ptr %[[VAL_499]], align 16
+! LLVMIR:         %[[VAL_502:.*]] = load <16 x i8>, ptr %[[VAL_498]], align 16
+! LLVMIR:         %[[VAL_503:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8> %[[VAL_501]], <16 x i8> %[[VAL_502]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_503]], ptr %[[VAL_500]], align 64
 
       subroutine test_pmxvi16ger2s_u1_non_def()
       use, intrinsic :: mma
@@ -1295,13 +1295,13 @@ subroutine test_pmxvi16ger2s_u1_non_def()
       end subroutine test_pmxvi16ger2s_u1_non_def
 
 !CHECK-LABEL: @test_pmxvi16ger2s_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_504:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_505:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_506:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_507:.*]] = load <16 x i8>, ptr %[[VAL_505]], align 16
+! LLVMIR:         %[[VAL_508:.*]] = load <16 x i8>, ptr %[[VAL_504]], align 16
+! LLVMIR:         %[[VAL_509:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8> %[[VAL_507]], <16 x i8> %[[VAL_508]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_509]], ptr %[[VAL_506]], align 64
 
       subroutine test_pmxvi16ger2s_i2_def()
       use, intrinsic :: mma
@@ -1312,15 +1312,15 @@ subroutine test_pmxvi16ger2s_i2_def()
       end subroutine test_pmxvi16ger2s_i2_def
 
 !CHECK-LABEL: @test_pmxvi16ger2s_i2_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8> %6, <16 x i8> %7, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %1, align 64
+! LLVMIR:         %[[VAL_510:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_511:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_512:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_513:.*]] = load <8 x i16>, ptr %[[VAL_511]], align 16
+! LLVMIR:         %[[VAL_514:.*]] = load <8 x i16>, ptr %[[VAL_510]], align 16
+! LLVMIR:         %[[VAL_515:.*]] = bitcast <8 x i16> %[[VAL_513]] to <16 x i8>
+! LLVMIR:         %[[VAL_516:.*]] = bitcast <8 x i16> %[[VAL_514]] to <16 x i8>
+! LLVMIR:         %[[VAL_517:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8> %[[VAL_515]], <16 x i8> %[[VAL_516]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_517]], ptr %[[VAL_512]], align 64
 
       subroutine test_pmxvi16ger2s_i2_non_def()
       use, intrinsic :: mma
@@ -1331,15 +1331,15 @@ subroutine test_pmxvi16ger2s_i2_non_def()
       end subroutine test_pmxvi16ger2s_i2_non_def
 
 !CHECK-LABEL: @test_pmxvi16ger2s_i2_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8> %6, <16 x i8> %7, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %8, ptr %1, align 64
+! LLVMIR:         %[[VAL_518:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_519:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_520:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_521:.*]] = load <8 x i16>, ptr %[[VAL_519]], align 16
+! LLVMIR:         %[[VAL_522:.*]] = load <8 x i16>, ptr %[[VAL_518]], align 16
+! LLVMIR:         %[[VAL_523:.*]] = bitcast <8 x i16> %[[VAL_521]] to <16 x i8>
+! LLVMIR:         %[[VAL_524:.*]] = bitcast <8 x i16> %[[VAL_522]] to <16 x i8>
+! LLVMIR:         %[[VAL_525:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8> %[[VAL_523]], <16 x i8> %[[VAL_524]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_525]], ptr %[[VAL_520]], align 64
 
       subroutine test_pmxvi16ger2spp_u1_def()
       use, intrinsic :: mma
@@ -1350,14 +1350,14 @@ subroutine test_pmxvi16ger2spp_u1_def()
       end subroutine test_pmxvi16ger2spp_u1_def
 
 !CHECK-LABEL: @test_pmxvi16ger2spp_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_526:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_527:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_528:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_529:.*]] = load <16 x i8>, ptr %[[VAL_527]], align 16
+! LLVMIR:         %[[VAL_530:.*]] = load <16 x i8>, ptr %[[VAL_526]], align 16
+! LLVMIR:         %[[VAL_531:.*]] = load <512 x i1>, ptr %[[VAL_528]], align 64
+! LLVMIR:         %[[VAL_532:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1> %[[VAL_531]], <16 x i8> %[[VAL_529]], <16 x i8> %[[VAL_530]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_532]], ptr %[[VAL_528]], align 64
 
       subroutine test_pmxvi16ger2spp_u1_non_def()
       use, intrinsic :: mma
@@ -1368,14 +1368,14 @@ subroutine test_pmxvi16ger2spp_u1_non_def()
       end subroutine test_pmxvi16ger2spp_u1_non_def
 
 !CHECK-LABEL: @test_pmxvi16ger2spp_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_533:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_534:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_535:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_536:.*]] = load <16 x i8>, ptr %[[VAL_534]], align 16
+! LLVMIR:         %[[VAL_537:.*]] = load <16 x i8>, ptr %[[VAL_533]], align 16
+! LLVMIR:         %[[VAL_538:.*]] = load <512 x i1>, ptr %[[VAL_535]], align 64
+! LLVMIR:         %[[VAL_539:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1> %[[VAL_538]], <16 x i8> %[[VAL_536]], <16 x i8> %[[VAL_537]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_539]], ptr %[[VAL_535]], align 64
 
       subroutine test_pmxvi16ger2spp_i2_def()
       use, intrinsic :: mma
@@ -1386,16 +1386,16 @@ subroutine test_pmxvi16ger2spp_i2_def()
       end subroutine test_pmxvi16ger2spp_i2_def
 
 !CHECK-LABEL: @test_pmxvi16ger2spp_i2_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_540:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_541:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_542:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_543:.*]] = load <8 x i16>, ptr %[[VAL_541]], align 16
+! LLVMIR:         %[[VAL_544:.*]] = load <8 x i16>, ptr %[[VAL_540]], align 16
+! LLVMIR:         %[[VAL_545:.*]] = load <512 x i1>, ptr %[[VAL_542]], align 64
+! LLVMIR:         %[[VAL_546:.*]] = bitcast <8 x i16> %[[VAL_543]] to <16 x i8>
+! LLVMIR:         %[[VAL_547:.*]] = bitcast <8 x i16> %[[VAL_544]] to <16 x i8>
+! LLVMIR:         %[[VAL_548:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1> %[[VAL_545]], <16 x i8> %[[VAL_546]], <16 x i8> %[[VAL_547]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_548]], ptr %[[VAL_542]], align 64
 
       subroutine test_pmxvi16ger2spp_i2_non_def()
       use, intrinsic :: mma
@@ -1406,16 +1406,16 @@ subroutine test_pmxvi16ger2spp_i2_non_def()
       end subroutine test_pmxvi16ger2spp_i2_non_def
 
 !CHECK-LABEL: @test_pmxvi16ger2spp_i2_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_549:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_550:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_551:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_552:.*]] = load <8 x i16>, ptr %[[VAL_550]], align 16
+! LLVMIR:         %[[VAL_553:.*]] = load <8 x i16>, ptr %[[VAL_549]], align 16
+! LLVMIR:         %[[VAL_554:.*]] = load <512 x i1>, ptr %[[VAL_551]], align 64
+! LLVMIR:         %[[VAL_555:.*]] = bitcast <8 x i16> %[[VAL_552]] to <16 x i8>
+! LLVMIR:         %[[VAL_556:.*]] = bitcast <8 x i16> %[[VAL_553]] to <16 x i8>
+! LLVMIR:         %[[VAL_557:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1> %[[VAL_554]], <16 x i8> %[[VAL_555]], <16 x i8> %[[VAL_556]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_557]], ptr %[[VAL_551]], align 64
 
 
       subroutine test_pmxvi4ger8_def()
@@ -1427,13 +1427,13 @@ subroutine test_pmxvi4ger8_def()
       end subroutine test_pmxvi4ger8_def
 
 !CHECK-LABEL: @test_pmxvi4ger8_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi4ger8(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_558:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_559:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_560:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_561:.*]] = load <16 x i8>, ptr %[[VAL_559]], align 16
+! LLVMIR:         %[[VAL_562:.*]] = load <16 x i8>, ptr %[[VAL_558]], align 16
+! LLVMIR:         %[[VAL_563:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi4ger8(<16 x i8> %[[VAL_561]], <16 x i8> %[[VAL_562]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_563]], ptr %[[VAL_560]], align 64
 
       subroutine test_pmxvi4ger8_non_def()
       use, intrinsic :: mma
@@ -1444,13 +1444,13 @@ subroutine test_pmxvi4ger8_non_def()
       end subroutine test_pmxvi4ger8_non_def
 
 !CHECK-LABEL: @test_pmxvi4ger8_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi4ger8(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_564:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_565:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_566:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_567:.*]] = load <16 x i8>, ptr %[[VAL_565]], align 16
+! LLVMIR:         %[[VAL_568:.*]] = load <16 x i8>, ptr %[[VAL_564]], align 16
+! LLVMIR:         %[[VAL_569:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi4ger8(<16 x i8> %[[VAL_567]], <16 x i8> %[[VAL_568]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_569]], ptr %[[VAL_566]], align 64
 
       subroutine test_pmxvi4ger8pp_def()
       use, intrinsic :: mma
@@ -1461,14 +1461,14 @@ subroutine test_pmxvi4ger8pp_def()
       end subroutine test_pmxvi4ger8pp_def
 
 !CHECK-LABEL: @test_pmxvi4ger8pp_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi4ger8pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_570:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_571:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_572:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_573:.*]] = load <16 x i8>, ptr %[[VAL_571]], align 16
+! LLVMIR:         %[[VAL_574:.*]] = load <16 x i8>, ptr %[[VAL_570]], align 16
+! LLVMIR:         %[[VAL_575:.*]] = load <512 x i1>, ptr %[[VAL_572]], align 64
+! LLVMIR:         %[[VAL_576:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi4ger8pp(<512 x i1> %[[VAL_575]], <16 x i8> %[[VAL_573]], <16 x i8> %[[VAL_574]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_576]], ptr %[[VAL_572]], align 64
 
       subroutine test_pmxvi4ger8pp_non_def()
       use, intrinsic :: mma
@@ -1479,14 +1479,14 @@ subroutine test_pmxvi4ger8pp_non_def()
       end subroutine test_pmxvi4ger8pp_non_def
 
 !CHECK-LABEL: @test_pmxvi4ger8pp_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi4ger8pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_577:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_578:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_579:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_580:.*]] = load <16 x i8>, ptr %[[VAL_578]], align 16
+! LLVMIR:         %[[VAL_581:.*]] = load <16 x i8>, ptr %[[VAL_577]], align 16
+! LLVMIR:         %[[VAL_582:.*]] = load <512 x i1>, ptr %[[VAL_579]], align 64
+! LLVMIR:         %[[VAL_583:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi4ger8pp(<512 x i1> %[[VAL_582]], <16 x i8> %[[VAL_580]], <16 x i8> %[[VAL_581]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_583]], ptr %[[VAL_579]], align 64
 
       subroutine test_pmxvi8ger4_u1_def()
       use, intrinsic :: mma
@@ -1497,13 +1497,13 @@ subroutine test_pmxvi8ger4_u1_def()
       end subroutine test_pmxvi8ger4_u1_def
 
 !CHECK-LABEL: @test_pmxvi8ger4_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_584:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_585:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_586:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_587:.*]] = load <16 x i8>, ptr %[[VAL_585]], align 16
+! LLVMIR:         %[[VAL_588:.*]] = load <16 x i8>, ptr %[[VAL_584]], align 16
+! LLVMIR:         %[[VAL_589:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8> %[[VAL_587]], <16 x i8> %[[VAL_588]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_589]], ptr %[[VAL_586]], align 64
 
       subroutine test_pmxvi8ger4_u1_non_def()
       use, intrinsic :: mma
@@ -1514,13 +1514,13 @@ subroutine test_pmxvi8ger4_u1_non_def()
       end subroutine test_pmxvi8ger4_u1_non_def
 
 !CHECK-LABEL: @test_pmxvi8ger4_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_590:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_591:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_592:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_593:.*]] = load <16 x i8>, ptr %[[VAL_591]], align 16
+! LLVMIR:         %[[VAL_594:.*]] = load <16 x i8>, ptr %[[VAL_590]], align 16
+! LLVMIR:         %[[VAL_595:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8> %[[VAL_593]], <16 x i8> %[[VAL_594]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_595]], ptr %[[VAL_592]], align 64
 
       subroutine test_pmxvi8ger4_i1_def()
       use, intrinsic :: mma
@@ -1531,13 +1531,13 @@ subroutine test_pmxvi8ger4_i1_def()
       end subroutine test_pmxvi8ger4_i1_def
 
 !CHECK-LABEL: @test_pmxvi8ger4_i1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_596:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_597:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_598:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_599:.*]] = load <16 x i8>, ptr %[[VAL_597]], align 16
+! LLVMIR:         %[[VAL_600:.*]] = load <16 x i8>, ptr %[[VAL_596]], align 16
+! LLVMIR:         %[[VAL_601:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8> %[[VAL_599]], <16 x i8> %[[VAL_600]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_601]], ptr %[[VAL_598]], align 64
 
       subroutine test_pmxvi8ger4_i1_non_def()
       use, intrinsic :: mma
@@ -1548,13 +1548,13 @@ subroutine test_pmxvi8ger4_i1_non_def()
       end subroutine test_pmxvi8ger4_i1_non_def
 
 !CHECK-LABEL: @test_pmxvi8ger4_i1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_602:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_603:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_604:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_605:.*]] = load <16 x i8>, ptr %[[VAL_603]], align 16
+! LLVMIR:         %[[VAL_606:.*]] = load <16 x i8>, ptr %[[VAL_602]], align 16
+! LLVMIR:         %[[VAL_607:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8> %[[VAL_605]], <16 x i8> %[[VAL_606]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_607]], ptr %[[VAL_604]], align 64
 
       subroutine test_pmxvi8ger4pp_u1_def()
       use, intrinsic :: mma
@@ -1565,14 +1565,14 @@ subroutine test_pmxvi8ger4pp_u1_def()
       end subroutine test_pmxvi8ger4pp_u1_def
 
 !CHECK-LABEL: @test_pmxvi8ger4pp_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_608:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_609:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_610:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_611:.*]] = load <16 x i8>, ptr %[[VAL_609]], align 16
+! LLVMIR:         %[[VAL_612:.*]] = load <16 x i8>, ptr %[[VAL_608]], align 16
+! LLVMIR:         %[[VAL_613:.*]] = load <512 x i1>, ptr %[[VAL_610]], align 64
+! LLVMIR:         %[[VAL_614:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1> %[[VAL_613]], <16 x i8> %[[VAL_611]], <16 x i8> %[[VAL_612]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_614]], ptr %[[VAL_610]], align 64
 
       subroutine test_pmxvi8ger4pp_u1_non_def()
       use, intrinsic :: mma
@@ -1583,14 +1583,14 @@ subroutine test_pmxvi8ger4pp_u1_non_def()
       end subroutine test_pmxvi8ger4pp_u1_non_def
 
 !CHECK-LABEL: @test_pmxvi8ger4pp_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_615:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_616:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_617:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_618:.*]] = load <16 x i8>, ptr %[[VAL_616]], align 16
+! LLVMIR:         %[[VAL_619:.*]] = load <16 x i8>, ptr %[[VAL_615]], align 16
+! LLVMIR:         %[[VAL_620:.*]] = load <512 x i1>, ptr %[[VAL_617]], align 64
+! LLVMIR:         %[[VAL_621:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1> %[[VAL_620]], <16 x i8> %[[VAL_618]], <16 x i8> %[[VAL_619]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_621]], ptr %[[VAL_617]], align 64
 
       subroutine test_pmxvi8ger4pp_i1_def()
       use, intrinsic :: mma
@@ -1601,14 +1601,14 @@ subroutine test_pmxvi8ger4pp_i1_def()
       end subroutine test_pmxvi8ger4pp_i1_def
 
 !CHECK-LABEL: @test_pmxvi8ger4pp_i1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_622:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_623:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_624:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_625:.*]] = load <16 x i8>, ptr %[[VAL_623]], align 16
+! LLVMIR:         %[[VAL_626:.*]] = load <16 x i8>, ptr %[[VAL_622]], align 16
+! LLVMIR:         %[[VAL_627:.*]] = load <512 x i1>, ptr %[[VAL_624]], align 64
+! LLVMIR:         %[[VAL_628:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1> %[[VAL_627]], <16 x i8> %[[VAL_625]], <16 x i8> %[[VAL_626]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_628]], ptr %[[VAL_624]], align 64
 
       subroutine test_pmxvi8ger4pp_i1_non_def()
       use, intrinsic :: mma
@@ -1619,14 +1619,14 @@ subroutine test_pmxvi8ger4pp_i1_non_def()
       end subroutine test_pmxvi8ger4pp_i1_non_def
 
 !CHECK-LABEL: @test_pmxvi8ger4pp_i1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_629:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_630:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_631:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_632:.*]] = load <16 x i8>, ptr %[[VAL_630]], align 16
+! LLVMIR:         %[[VAL_633:.*]] = load <16 x i8>, ptr %[[VAL_629]], align 16
+! LLVMIR:         %[[VAL_634:.*]] = load <512 x i1>, ptr %[[VAL_631]], align 64
+! LLVMIR:         %[[VAL_635:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1> %[[VAL_634]], <16 x i8> %[[VAL_632]], <16 x i8> %[[VAL_633]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_635]], ptr %[[VAL_631]], align 64
 
       subroutine test_pmxvi8ger4spp_u1_def()
       use, intrinsic :: mma
@@ -1637,14 +1637,14 @@ subroutine test_pmxvi8ger4spp_u1_def()
       end subroutine test_pmxvi8ger4spp_u1_def
 
 !CHECK-LABEL: @test_pmxvi8ger4spp_u1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_636:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_637:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_638:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_639:.*]] = load <16 x i8>, ptr %[[VAL_637]], align 16
+! LLVMIR:         %[[VAL_640:.*]] = load <16 x i8>, ptr %[[VAL_636]], align 16
+! LLVMIR:         %[[VAL_641:.*]] = load <512 x i1>, ptr %[[VAL_638]], align 64
+! LLVMIR:         %[[VAL_642:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1> %[[VAL_641]], <16 x i8> %[[VAL_639]], <16 x i8> %[[VAL_640]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_642]], ptr %[[VAL_638]], align 64
 
       subroutine test_pmxvi8ger4spp_u1_non_def()
       use, intrinsic :: mma
@@ -1655,14 +1655,14 @@ subroutine test_pmxvi8ger4spp_u1_non_def()
       end subroutine test_pmxvi8ger4spp_u1_non_def
 
 !CHECK-LABEL: @test_pmxvi8ger4spp_u1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_643:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_644:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_645:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_646:.*]] = load <16 x i8>, ptr %[[VAL_644]], align 16
+! LLVMIR:         %[[VAL_647:.*]] = load <16 x i8>, ptr %[[VAL_643]], align 16
+! LLVMIR:         %[[VAL_648:.*]] = load <512 x i1>, ptr %[[VAL_645]], align 64
+! LLVMIR:         %[[VAL_649:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1> %[[VAL_648]], <16 x i8> %[[VAL_646]], <16 x i8> %[[VAL_647]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_649]], ptr %[[VAL_645]], align 64
 
       subroutine test_pmxvi8ger4spp_i1_def()
       use, intrinsic :: mma
@@ -1673,14 +1673,14 @@ subroutine test_pmxvi8ger4spp_i1_def()
       end subroutine test_pmxvi8ger4spp_i1_def
 
 !CHECK-LABEL: @test_pmxvi8ger4spp_i1_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_650:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_651:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_652:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_653:.*]] = load <16 x i8>, ptr %[[VAL_651]], align 16
+! LLVMIR:         %[[VAL_654:.*]] = load <16 x i8>, ptr %[[VAL_650]], align 16
+! LLVMIR:         %[[VAL_655:.*]] = load <512 x i1>, ptr %[[VAL_652]], align 64
+! LLVMIR:         %[[VAL_656:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1> %[[VAL_655]], <16 x i8> %[[VAL_653]], <16 x i8> %[[VAL_654]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_656]], ptr %[[VAL_652]], align 64
 
       subroutine test_pmxvi8ger4spp_i1_non_def()
       use, intrinsic :: mma
@@ -1691,11 +1691,11 @@ subroutine test_pmxvi8ger4spp_i1_non_def()
       end subroutine test_pmxvi8ger4spp_i1_non_def
 
 !CHECK-LABEL: @test_pmxvi8ger4spp_i1_non_def_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5, i32 7, i32 7, i32 2)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_657:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_658:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_659:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_660:.*]] = load <16 x i8>, ptr %[[VAL_658]], align 16
+! LLVMIR:         %[[VAL_661:.*]] = load <16 x i8>, ptr %[[VAL_657]], align 16
+! LLVMIR:         %[[VAL_662:.*]] = load <512 x i1>, ptr %[[VAL_659]], align 64
+! LLVMIR:         %[[VAL_663:.*]] = call <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1> %[[VAL_662]], <16 x i8> %[[VAL_660]], <16 x i8> %[[VAL_661]], i32 7, i32 7, i32 2)
+! LLVMIR:         store <512 x i1> %[[VAL_663]], ptr %[[VAL_659]], align 64
diff --git a/flang/test/Lower/PowerPC/ppc-mma-outer-product-2.f90 b/flang/test/Lower/PowerPC/ppc-mma-outer-product-2.f90
index 3ef17b2f963fc..b37010bfd0d16 100644
--- a/flang/test/Lower/PowerPC/ppc-mma-outer-product-2.f90
+++ b/flang/test/Lower/PowerPC/ppc-mma-outer-product-2.f90
@@ -10,13 +10,13 @@ subroutine test_xvbf16ger2()
       end subroutine test_xvbf16ger2
 
 !CHECK-LABEL: @test_xvbf16ger2_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:   %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:   %6 = call <512 x i1> @llvm.ppc.mma.xvbf16ger2(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:   store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_0:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_1:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_2:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_3:.*]] = load <16 x i8>, ptr %[[VAL_1]], align 16
+! LLVMIR:         %[[VAL_4:.*]] = load <16 x i8>, ptr %[[VAL_0]], align 16
+! LLVMIR:         %[[VAL_5:.*]] = call <512 x i1> @llvm.ppc.mma.xvbf16ger2(<16 x i8> %[[VAL_3]], <16 x i8> %[[VAL_4]])
+! LLVMIR:         store <512 x i1> %[[VAL_5]], ptr %[[VAL_2]], align 64
 
 
       subroutine test_xvbf16ger2nn()
@@ -28,14 +28,14 @@ subroutine test_xvbf16ger2nn()
       end subroutine test_xvbf16ger2nn
 
 !CHECK-LABEL: @test_xvbf16ger2nn_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvbf16ger2nn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_6:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_7:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_8:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_9:.*]] = load <16 x i8>, ptr %[[VAL_7]], align 16
+! LLVMIR:         %[[VAL_10:.*]] = load <16 x i8>, ptr %[[VAL_6]], align 16
+! LLVMIR:         %[[VAL_11:.*]] = load <512 x i1>, ptr %[[VAL_8]], align 64
+! LLVMIR:         %[[VAL_12:.*]] = call <512 x i1> @llvm.ppc.mma.xvbf16ger2nn(<512 x i1> %[[VAL_11]], <16 x i8> %[[VAL_9]], <16 x i8> %[[VAL_10]])
+! LLVMIR:         store <512 x i1> %[[VAL_12]], ptr %[[VAL_8]], align 64
 
       subroutine test_xvbf16ger2np()
       use, intrinsic :: mma
@@ -46,14 +46,14 @@ subroutine test_xvbf16ger2np()
       end subroutine test_xvbf16ger2np
 
 !CHECK-LABEL: @test_xvbf16ger2np_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvbf16ger2np(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_13:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_14:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_15:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_16:.*]] = load <16 x i8>, ptr %[[VAL_14]], align 16
+! LLVMIR:         %[[VAL_17:.*]] = load <16 x i8>, ptr %[[VAL_13]], align 16
+! LLVMIR:         %[[VAL_18:.*]] = load <512 x i1>, ptr %[[VAL_15]], align 64
+! LLVMIR:         %[[VAL_19:.*]] = call <512 x i1> @llvm.ppc.mma.xvbf16ger2np(<512 x i1> %[[VAL_18]], <16 x i8> %[[VAL_16]], <16 x i8> %[[VAL_17]])
+! LLVMIR:         store <512 x i1> %[[VAL_19]], ptr %[[VAL_15]], align 64
 
       subroutine test_xvbf16ger2pn()
       use, intrinsic :: mma
@@ -64,14 +64,14 @@ subroutine test_xvbf16ger2pn()
       end subroutine test_xvbf16ger2pn
 
 !CHECK-LABEL: @test_xvbf16ger2pn_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvbf16ger2pn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_20:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_21:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_22:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_23:.*]] = load <16 x i8>, ptr %[[VAL_21]], align 16
+! LLVMIR:         %[[VAL_24:.*]] = load <16 x i8>, ptr %[[VAL_20]], align 16
+! LLVMIR:         %[[VAL_25:.*]] = load <512 x i1>, ptr %[[VAL_22]], align 64
+! LLVMIR:         %[[VAL_26:.*]] = call <512 x i1> @llvm.ppc.mma.xvbf16ger2pn(<512 x i1> %[[VAL_25]], <16 x i8> %[[VAL_23]], <16 x i8> %[[VAL_24]])
+! LLVMIR:         store <512 x i1> %[[VAL_26]], ptr %[[VAL_22]], align 64
 
       subroutine test_xvbf16ger2pp()
       use, intrinsic :: mma
@@ -82,14 +82,14 @@ subroutine test_xvbf16ger2pp()
       end subroutine test_xvbf16ger2pp
 
 !CHECK-LABEL: @test_xvbf16ger2pp_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvbf16ger2pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_27:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_28:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_29:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_30:.*]] = load <16 x i8>, ptr %[[VAL_28]], align 16
+! LLVMIR:         %[[VAL_31:.*]] = load <16 x i8>, ptr %[[VAL_27]], align 16
+! LLVMIR:         %[[VAL_32:.*]] = load <512 x i1>, ptr %[[VAL_29]], align 64
+! LLVMIR:         %[[VAL_33:.*]] = call <512 x i1> @llvm.ppc.mma.xvbf16ger2pp(<512 x i1> %[[VAL_32]], <16 x i8> %[[VAL_30]], <16 x i8> %[[VAL_31]])
+! LLVMIR:         store <512 x i1> %[[VAL_33]], ptr %[[VAL_29]], align 64
 
       subroutine test_xvf16ger2()
       use, intrinsic :: mma
@@ -100,13 +100,13 @@ subroutine test_xvf16ger2()
       end subroutine test_xvf16ger2
 
 !CHECK-LABEL: @test_xvf16ger2_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.xvf16ger2(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_34:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_35:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_36:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_37:.*]] = load <16 x i8>, ptr %[[VAL_35]], align 16
+! LLVMIR:         %[[VAL_38:.*]] = load <16 x i8>, ptr %[[VAL_34]], align 16
+! LLVMIR:         %[[VAL_39:.*]] = call <512 x i1> @llvm.ppc.mma.xvf16ger2(<16 x i8> %[[VAL_37]], <16 x i8> %[[VAL_38]])
+! LLVMIR:         store <512 x i1> %[[VAL_39]], ptr %[[VAL_36]], align 64
 
       subroutine test_xvf16ger2nn()
       use, intrinsic :: mma
@@ -117,14 +117,14 @@ subroutine test_xvf16ger2nn()
       end subroutine test_xvf16ger2nn
 
 !CHECK-LABEL: @test_xvf16ger2nn_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf16ger2nn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_40:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_41:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_42:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_43:.*]] = load <16 x i8>, ptr %[[VAL_41]], align 16
+! LLVMIR:         %[[VAL_44:.*]] = load <16 x i8>, ptr %[[VAL_40]], align 16
+! LLVMIR:         %[[VAL_45:.*]] = load <512 x i1>, ptr %[[VAL_42]], align 64
+! LLVMIR:         %[[VAL_46:.*]] = call <512 x i1> @llvm.ppc.mma.xvf16ger2nn(<512 x i1> %[[VAL_45]], <16 x i8> %[[VAL_43]], <16 x i8> %[[VAL_44]])
+! LLVMIR:         store <512 x i1> %[[VAL_46]], ptr %[[VAL_42]], align 64
 
       subroutine test_xvf16ger2np()
       use, intrinsic :: mma
@@ -135,14 +135,14 @@ subroutine test_xvf16ger2np()
       end subroutine test_xvf16ger2np
 
 !CHECK-LABEL: @test_xvf16ger2np_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf16ger2np(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_47:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_48:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_49:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_50:.*]] = load <16 x i8>, ptr %[[VAL_48]], align 16
+! LLVMIR:         %[[VAL_51:.*]] = load <16 x i8>, ptr %[[VAL_47]], align 16
+! LLVMIR:         %[[VAL_52:.*]] = load <512 x i1>, ptr %[[VAL_49]], align 64
+! LLVMIR:         %[[VAL_53:.*]] = call <512 x i1> @llvm.ppc.mma.xvf16ger2np(<512 x i1> %[[VAL_52]], <16 x i8> %[[VAL_50]], <16 x i8> %[[VAL_51]])
+! LLVMIR:         store <512 x i1> %[[VAL_53]], ptr %[[VAL_49]], align 64
 
       subroutine test_xvf16ger2pn()
       use, intrinsic :: mma
@@ -153,14 +153,14 @@ subroutine test_xvf16ger2pn()
       end subroutine test_xvf16ger2pn
 
 !CHECK-LABEL: @test_xvf16ger2pn_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf16ger2pn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_54:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_55:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_56:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_57:.*]] = load <16 x i8>, ptr %[[VAL_55]], align 16
+! LLVMIR:         %[[VAL_58:.*]] = load <16 x i8>, ptr %[[VAL_54]], align 16
+! LLVMIR:         %[[VAL_59:.*]] = load <512 x i1>, ptr %[[VAL_56]], align 64
+! LLVMIR:         %[[VAL_60:.*]] = call <512 x i1> @llvm.ppc.mma.xvf16ger2pn(<512 x i1> %[[VAL_59]], <16 x i8> %[[VAL_57]], <16 x i8> %[[VAL_58]])
+! LLVMIR:         store <512 x i1> %[[VAL_60]], ptr %[[VAL_56]], align 64
 
       subroutine test_xvf16ger2pp()
       use, intrinsic :: mma
@@ -171,14 +171,14 @@ subroutine test_xvf16ger2pp()
       end subroutine test_xvf16ger2pp
 
 !CHECK-LABEL: @test_xvf16ger2pp_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf16ger2pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_61:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_62:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_63:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_64:.*]] = load <16 x i8>, ptr %[[VAL_62]], align 16
+! LLVMIR:         %[[VAL_65:.*]] = load <16 x i8>, ptr %[[VAL_61]], align 16
+! LLVMIR:         %[[VAL_66:.*]] = load <512 x i1>, ptr %[[VAL_63]], align 64
+! LLVMIR:         %[[VAL_67:.*]] = call <512 x i1> @llvm.ppc.mma.xvf16ger2pp(<512 x i1> %[[VAL_66]], <16 x i8> %[[VAL_64]], <16 x i8> %[[VAL_65]])
+! LLVMIR:         store <512 x i1> %[[VAL_67]], ptr %[[VAL_63]], align 64
 
       subroutine test_xvf32ger_u1()
       use, intrinsic :: mma
@@ -189,13 +189,13 @@ subroutine test_xvf32ger_u1()
       end subroutine test_xvf32ger_u1
 
 !CHECK-LABEL: @test_xvf32ger_u1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.xvf32ger(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_68:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_69:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_70:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_71:.*]] = load <16 x i8>, ptr %[[VAL_69]], align 16
+! LLVMIR:         %[[VAL_72:.*]] = load <16 x i8>, ptr %[[VAL_68]], align 16
+! LLVMIR:         %[[VAL_73:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32ger(<16 x i8> %[[VAL_71]], <16 x i8> %[[VAL_72]])
+! LLVMIR:         store <512 x i1> %[[VAL_73]], ptr %[[VAL_70]], align 64
 
 
       subroutine test_xvf32ger_r4()
@@ -207,15 +207,15 @@ subroutine test_xvf32ger_r4()
       end subroutine test_xvf32ger_r4
 
 !CHECK-LABEL: @test_xvf32ger_r4_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.xvf32ger(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <512 x i1> %8, ptr %1, align 64
+! LLVMIR:         %[[VAL_74:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_75:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_76:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_77:.*]] = load <4 x float>, ptr %[[VAL_75]], align 16
+! LLVMIR:         %[[VAL_78:.*]] = load <4 x float>, ptr %[[VAL_74]], align 16
+! LLVMIR:         %[[VAL_79:.*]] = bitcast <4 x float> %[[VAL_77]] to <16 x i8>
+! LLVMIR:         %[[VAL_80:.*]] = bitcast <4 x float> %[[VAL_78]] to <16 x i8>
+! LLVMIR:         %[[VAL_81:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32ger(<16 x i8> %[[VAL_79]], <16 x i8> %[[VAL_80]])
+! LLVMIR:         store <512 x i1> %[[VAL_81]], ptr %[[VAL_76]], align 64
 
       subroutine test_xvf32gernn_u1()
       use, intrinsic :: mma
@@ -226,14 +226,14 @@ subroutine test_xvf32gernn_u1()
       end subroutine test_xvf32gernn_u1
 
 !CHECK-LABEL: @test_xvf32gernn_u1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf32gernn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_82:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_83:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_84:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_85:.*]] = load <16 x i8>, ptr %[[VAL_83]], align 16
+! LLVMIR:         %[[VAL_86:.*]] = load <16 x i8>, ptr %[[VAL_82]], align 16
+! LLVMIR:         %[[VAL_87:.*]] = load <512 x i1>, ptr %[[VAL_84]], align 64
+! LLVMIR:         %[[VAL_88:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32gernn(<512 x i1> %[[VAL_87]], <16 x i8> %[[VAL_85]], <16 x i8> %[[VAL_86]])
+! LLVMIR:         store <512 x i1> %[[VAL_88]], ptr %[[VAL_84]], align 64
 
       subroutine test_xvf32gernn_r4()
       use, intrinsic :: mma
@@ -244,16 +244,16 @@ subroutine test_xvf32gernn_r4()
       end subroutine test_xvf32gernn_r4
 
 !CHECK-LABEL: @test_xvf32gernn_r4_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.xvf32gernn(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_89:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_90:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_91:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_92:.*]] = load <4 x float>, ptr %[[VAL_90]], align 16
+! LLVMIR:         %[[VAL_93:.*]] = load <4 x float>, ptr %[[VAL_89]], align 16
+! LLVMIR:         %[[VAL_94:.*]] = load <512 x i1>, ptr %[[VAL_91]], align 64
+! LLVMIR:         %[[VAL_95:.*]] = bitcast <4 x float> %[[VAL_92]] to <16 x i8>
+! LLVMIR:         %[[VAL_96:.*]] = bitcast <4 x float> %[[VAL_93]] to <16 x i8>
+! LLVMIR:         %[[VAL_97:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32gernn(<512 x i1> %[[VAL_94]], <16 x i8> %[[VAL_95]], <16 x i8> %[[VAL_96]])
+! LLVMIR:         store <512 x i1> %[[VAL_97]], ptr %[[VAL_91]], align 64
 
       subroutine test_xvf32gernp_u1()
       use, intrinsic :: mma
@@ -264,14 +264,14 @@ subroutine test_xvf32gernp_u1()
       end subroutine test_xvf32gernp_u1
 
 !CHECK-LABEL: @test_xvf32gernp_u1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf32gernp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_98:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_99:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_100:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_101:.*]] = load <16 x i8>, ptr %[[VAL_99]], align 16
+! LLVMIR:         %[[VAL_102:.*]] = load <16 x i8>, ptr %[[VAL_98]], align 16
+! LLVMIR:         %[[VAL_103:.*]] = load <512 x i1>, ptr %[[VAL_100]], align 64
+! LLVMIR:         %[[VAL_104:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32gernp(<512 x i1> %[[VAL_103]], <16 x i8> %[[VAL_101]], <16 x i8> %[[VAL_102]])
+! LLVMIR:         store <512 x i1> %[[VAL_104]], ptr %[[VAL_100]], align 64
 
       subroutine test_xvf32gernp_r4()
       use, intrinsic :: mma
@@ -282,16 +282,16 @@ subroutine test_xvf32gernp_r4()
       end subroutine test_xvf32gernp_r4
 
 !CHECK-LABEL: @test_xvf32gernp_r4_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.xvf32gernp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_105:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_106:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_107:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_108:.*]] = load <4 x float>, ptr %[[VAL_106]], align 16
+! LLVMIR:         %[[VAL_109:.*]] = load <4 x float>, ptr %[[VAL_105]], align 16
+! LLVMIR:         %[[VAL_110:.*]] = load <512 x i1>, ptr %[[VAL_107]], align 64
+! LLVMIR:         %[[VAL_111:.*]] = bitcast <4 x float> %[[VAL_108]] to <16 x i8>
+! LLVMIR:         %[[VAL_112:.*]] = bitcast <4 x float> %[[VAL_109]] to <16 x i8>
+! LLVMIR:         %[[VAL_113:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32gernp(<512 x i1> %[[VAL_110]], <16 x i8> %[[VAL_111]], <16 x i8> %[[VAL_112]])
+! LLVMIR:         store <512 x i1> %[[VAL_113]], ptr %[[VAL_107]], align 64
 
       subroutine test_xvf32gerpn_u1()
       use, intrinsic :: mma
@@ -302,14 +302,14 @@ subroutine test_xvf32gerpn_u1()
       end subroutine test_xvf32gerpn_u1
 
 !CHECK-LABEL: @test_xvf32gerpn_u1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf32gerpn(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_114:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_115:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_116:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_117:.*]] = load <16 x i8>, ptr %[[VAL_115]], align 16
+! LLVMIR:         %[[VAL_118:.*]] = load <16 x i8>, ptr %[[VAL_114]], align 16
+! LLVMIR:         %[[VAL_119:.*]] = load <512 x i1>, ptr %[[VAL_116]], align 64
+! LLVMIR:         %[[VAL_120:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32gerpn(<512 x i1> %[[VAL_119]], <16 x i8> %[[VAL_117]], <16 x i8> %[[VAL_118]])
+! LLVMIR:         store <512 x i1> %[[VAL_120]], ptr %[[VAL_116]], align 64
 
       subroutine test_xvf32gerpn_r4()
       use, intrinsic :: mma
@@ -320,16 +320,16 @@ subroutine test_xvf32gerpn_r4()
       end subroutine test_xvf32gerpn_r4
 
 !CHECK-LABEL: @test_xvf32gerpn_r4_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.xvf32gerpn(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_121:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_122:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_123:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_124:.*]] = load <4 x float>, ptr %[[VAL_122]], align 16
+! LLVMIR:         %[[VAL_125:.*]] = load <4 x float>, ptr %[[VAL_121]], align 16
+! LLVMIR:         %[[VAL_126:.*]] = load <512 x i1>, ptr %[[VAL_123]], align 64
+! LLVMIR:         %[[VAL_127:.*]] = bitcast <4 x float> %[[VAL_124]] to <16 x i8>
+! LLVMIR:         %[[VAL_128:.*]] = bitcast <4 x float> %[[VAL_125]] to <16 x i8>
+! LLVMIR:         %[[VAL_129:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32gerpn(<512 x i1> %[[VAL_126]], <16 x i8> %[[VAL_127]], <16 x i8> %[[VAL_128]])
+! LLVMIR:         store <512 x i1> %[[VAL_129]], ptr %[[VAL_123]], align 64
 
       subroutine test_xvf32gerpp_u1()
       use, intrinsic :: mma
@@ -340,14 +340,14 @@ subroutine test_xvf32gerpp_u1()
       end subroutine test_xvf32gerpp_u1
 
 !CHECK-LABEL: @test_xvf32gerpp_u1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_130:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_131:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_132:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_133:.*]] = load <16 x i8>, ptr %[[VAL_131]], align 16
+! LLVMIR:         %[[VAL_134:.*]] = load <16 x i8>, ptr %[[VAL_130]], align 16
+! LLVMIR:         %[[VAL_135:.*]] = load <512 x i1>, ptr %[[VAL_132]], align 64
+! LLVMIR:         %[[VAL_136:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %[[VAL_135]], <16 x i8> %[[VAL_133]], <16 x i8> %[[VAL_134]])
+! LLVMIR:         store <512 x i1> %[[VAL_136]], ptr %[[VAL_132]], align 64
 
 
       subroutine test_xvf32gerpp_r4()
@@ -359,16 +359,16 @@ subroutine test_xvf32gerpp_r4()
       end subroutine test_xvf32gerpp_r4
 
 !CHECK-LABEL: @test_xvf32gerpp_r4_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %3 = alloca <4 x float>, i64 1, align 16
-!LLVMIR:  %4 = load <4 x float>, ptr %2, align 16
-!LLVMIR:  %5 = load <4 x float>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = bitcast <4 x float> %4 to <16 x i8>
-!LLVMIR:  %8 = bitcast <4 x float> %5 to <16 x i8>
-!LLVMIR:  %9 = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8)
-!LLVMIR:  store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_137:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_138:.*]] = alloca <4 x float>, i64 1, align 16
+! LLVMIR:         %[[VAL_139:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_140:.*]] = load <4 x float>, ptr %[[VAL_138]], align 16
+! LLVMIR:         %[[VAL_141:.*]] = load <4 x float>, ptr %[[VAL_137]], align 16
+! LLVMIR:         %[[VAL_142:.*]] = load <512 x i1>, ptr %[[VAL_139]], align 64
+! LLVMIR:         %[[VAL_143:.*]] = bitcast <4 x float> %[[VAL_140]] to <16 x i8>
+! LLVMIR:         %[[VAL_144:.*]] = bitcast <4 x float> %[[VAL_141]] to <16 x i8>
+! LLVMIR:         %[[VAL_145:.*]] = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %[[VAL_142]], <16 x i8> %[[VAL_143]], <16 x i8> %[[VAL_144]])
+! LLVMIR:         store <512 x i1> %[[VAL_145]], ptr %[[VAL_139]], align 64
 
       subroutine test_xvf64ger_u1()
       use, intrinsic :: mma
@@ -380,13 +380,13 @@ subroutine test_xvf64ger_u1()
       end subroutine test_xvf64ger_u1
 
 !CHECK-LABEL: @test_xvf64ger_u1_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %6, ptr %2, align 64
+! LLVMIR:         %[[VAL_146:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_147:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_148:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_149:.*]] = load <256 x i1>, ptr %[[VAL_148]], align 32
+! LLVMIR:         %[[VAL_150:.*]] = load <16 x i8>, ptr %[[VAL_146]], align 16
+! LLVMIR:         %[[VAL_151:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1> %[[VAL_149]], <16 x i8> %[[VAL_150]])
+! LLVMIR:         store <512 x i1> %[[VAL_151]], ptr %[[VAL_147]], align 64
 
       subroutine test_xvf64ger_r8()
       use, intrinsic :: mma
@@ -398,14 +398,14 @@ subroutine test_xvf64ger_r8()
       end subroutine test_xvf64ger_r8
 
 !CHECK-LABEL: @test_xvf64ger_r8_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1> %4, <16 x i8> %6)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_152:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_153:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_154:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_155:.*]] = load <256 x i1>, ptr %[[VAL_154]], align 32
+! LLVMIR:         %[[VAL_156:.*]] = load <2 x double>, ptr %[[VAL_152]], align 16
+! LLVMIR:         %[[VAL_157:.*]] = bitcast <2 x double> %[[VAL_156]] to <16 x i8>
+! LLVMIR:         %[[VAL_158:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1> %[[VAL_155]], <16 x i8> %[[VAL_157]])
+! LLVMIR:         store <512 x i1> %[[VAL_158]], ptr %[[VAL_153]], align 64
 
 
       subroutine test_xvf64gernn_u1()
@@ -418,14 +418,14 @@ subroutine test_xvf64gernn_u1()
       end subroutine test_xvf64gernn_u1
 
 !CHECK-LABEL: @test_xvf64gernn_u1_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf64gernn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_159:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_160:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_161:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_162:.*]] = load <256 x i1>, ptr %[[VAL_161]], align 32
+! LLVMIR:         %[[VAL_163:.*]] = load <16 x i8>, ptr %[[VAL_159]], align 16
+! LLVMIR:         %[[VAL_164:.*]] = load <512 x i1>, ptr %[[VAL_160]], align 64
+! LLVMIR:         %[[VAL_165:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64gernn(<512 x i1> %[[VAL_164]], <256 x i1> %[[VAL_162]], <16 x i8> %[[VAL_163]])
+! LLVMIR:         store <512 x i1> %[[VAL_165]], ptr %[[VAL_160]], align 64
 
 
       subroutine test_xvf64gernn_r8()
@@ -438,15 +438,15 @@ subroutine test_xvf64gernn_r8()
       end subroutine test_xvf64gernn_r8
 
 !CHECK-LABEL: @test_xvf64gernn_r8_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.xvf64gernn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_166:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_167:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_168:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_169:.*]] = load <256 x i1>, ptr %[[VAL_168]], align 32
+! LLVMIR:         %[[VAL_170:.*]] = load <2 x double>, ptr %[[VAL_166]], align 16
+! LLVMIR:         %[[VAL_171:.*]] = load <512 x i1>, ptr %[[VAL_167]], align 64
+! LLVMIR:         %[[VAL_172:.*]] = bitcast <2 x double> %[[VAL_170]] to <16 x i8>
+! LLVMIR:         %[[VAL_173:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64gernn(<512 x i1> %[[VAL_171]], <256 x i1> %[[VAL_169]], <16 x i8> %[[VAL_172]])
+! LLVMIR:         store <512 x i1> %[[VAL_173]], ptr %[[VAL_167]], align 64
 
       subroutine test_xvf64gernp_u1()
       use, intrinsic :: mma
@@ -458,14 +458,14 @@ subroutine test_xvf64gernp_u1()
       end subroutine test_xvf64gernp_u1
 
 !CHECK-LABEL: @test_xvf64gernp_u1_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf64gernp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_174:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_175:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_176:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_177:.*]] = load <256 x i1>, ptr %[[VAL_176]], align 32
+! LLVMIR:         %[[VAL_178:.*]] = load <16 x i8>, ptr %[[VAL_174]], align 16
+! LLVMIR:         %[[VAL_179:.*]] = load <512 x i1>, ptr %[[VAL_175]], align 64
+! LLVMIR:         %[[VAL_180:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64gernp(<512 x i1> %[[VAL_179]], <256 x i1> %[[VAL_177]], <16 x i8> %[[VAL_178]])
+! LLVMIR:         store <512 x i1> %[[VAL_180]], ptr %[[VAL_175]], align 64
 
       subroutine test_xvf64gernp_r8()
       use, intrinsic :: mma
@@ -477,14 +477,14 @@ subroutine test_xvf64gernp_r8()
       end subroutine test_xvf64gernp_r8
 
 !CHECK-LABEL: @test_xvf64gernp_r8_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf64gernp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_181:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_182:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_183:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_184:.*]] = load <256 x i1>, ptr %[[VAL_183]], align 32
+! LLVMIR:         %[[VAL_185:.*]] = load <16 x i8>, ptr %[[VAL_181]], align 16
+! LLVMIR:         %[[VAL_186:.*]] = load <512 x i1>, ptr %[[VAL_182]], align 64
+! LLVMIR:         %[[VAL_187:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64gernp(<512 x i1> %[[VAL_186]], <256 x i1> %[[VAL_184]], <16 x i8> %[[VAL_185]])
+! LLVMIR:         store <512 x i1> %[[VAL_187]], ptr %[[VAL_182]], align 64
 
       subroutine test_xvf64gerpn_u1()
       use, intrinsic :: mma
@@ -496,14 +496,14 @@ subroutine test_xvf64gerpn_u1()
       end subroutine test_xvf64gerpn_u1
 
 !CHECK-LABEL: @test_xvf64gerpn_u1_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf64gerpn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_188:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_189:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_190:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_191:.*]] = load <256 x i1>, ptr %[[VAL_190]], align 32
+! LLVMIR:         %[[VAL_192:.*]] = load <16 x i8>, ptr %[[VAL_188]], align 16
+! LLVMIR:         %[[VAL_193:.*]] = load <512 x i1>, ptr %[[VAL_189]], align 64
+! LLVMIR:         %[[VAL_194:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64gerpn(<512 x i1> %[[VAL_193]], <256 x i1> %[[VAL_191]], <16 x i8> %[[VAL_192]])
+! LLVMIR:         store <512 x i1> %[[VAL_194]], ptr %[[VAL_189]], align 64
 
       subroutine test_xvf64gerpn_r8()
       use, intrinsic :: mma
@@ -515,15 +515,15 @@ subroutine test_xvf64gerpn_r8()
       end subroutine test_xvf64gerpn_r8
 
 !CHECK-LABEL: @test_xvf64gerpn_r8_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.xvf64gerpn(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_195:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_196:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_197:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_198:.*]] = load <256 x i1>, ptr %[[VAL_197]], align 32
+! LLVMIR:         %[[VAL_199:.*]] = load <2 x double>, ptr %[[VAL_195]], align 16
+! LLVMIR:         %[[VAL_200:.*]] = load <512 x i1>, ptr %[[VAL_196]], align 64
+! LLVMIR:         %[[VAL_201:.*]] = bitcast <2 x double> %[[VAL_199]] to <16 x i8>
+! LLVMIR:         %[[VAL_202:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64gerpn(<512 x i1> %[[VAL_200]], <256 x i1> %[[VAL_198]], <16 x i8> %[[VAL_201]])
+! LLVMIR:         store <512 x i1> %[[VAL_202]], ptr %[[VAL_196]], align 64
 
       subroutine test_xvf64gerpp_u1()
       use, intrinsic :: mma
@@ -535,14 +535,14 @@ subroutine test_xvf64gerpp_u1()
       end subroutine test_xvf64gerpp_u1
 
 !CHECK-LABEL: @test_xvf64gerpp_u1_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %2, align 64
+! LLVMIR:         %[[VAL_203:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_204:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_205:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_206:.*]] = load <256 x i1>, ptr %[[VAL_205]], align 32
+! LLVMIR:         %[[VAL_207:.*]] = load <16 x i8>, ptr %[[VAL_203]], align 16
+! LLVMIR:         %[[VAL_208:.*]] = load <512 x i1>, ptr %[[VAL_204]], align 64
+! LLVMIR:         %[[VAL_209:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %[[VAL_208]], <256 x i1> %[[VAL_206]], <16 x i8> %[[VAL_207]])
+! LLVMIR:         store <512 x i1> %[[VAL_209]], ptr %[[VAL_204]], align 64
 
 
       subroutine test_xvf64gerpp_r8()
@@ -555,15 +555,15 @@ subroutine test_xvf64gerpp_r8()
       end subroutine test_xvf64gerpp_r8
 
 !CHECK-LABEL: @test_xvf64gerpp_r8_
-!LLVMIR:  %1 = alloca <256 x i1>, i64 1, align 32
-!LLVMIR:  %2 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %3 = alloca <2 x double>, i64 1, align 16
-!LLVMIR:  %4 = load <256 x i1>, ptr %1, align 32
-!LLVMIR:  %5 = load <2 x double>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %2, align 64
-!LLVMIR:  %7 = bitcast <2 x double> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %6, <256 x i1> %4, <16 x i8> %7)
-!LLVMIR:  store <512 x i1> %8, ptr %2, align 64
+! LLVMIR:         %[[VAL_210:.*]] = alloca <2 x double>, i64 1, align 16
+! LLVMIR:         %[[VAL_211:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_212:.*]] = alloca <256 x i1>, i64 1, align 32
+! LLVMIR:         %[[VAL_213:.*]] = load <256 x i1>, ptr %[[VAL_212]], align 32
+! LLVMIR:         %[[VAL_214:.*]] = load <2 x double>, ptr %[[VAL_210]], align 16
+! LLVMIR:         %[[VAL_215:.*]] = load <512 x i1>, ptr %[[VAL_211]], align 64
+! LLVMIR:         %[[VAL_216:.*]] = bitcast <2 x double> %[[VAL_214]] to <16 x i8>
+! LLVMIR:         %[[VAL_217:.*]] = call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %[[VAL_215]], <256 x i1> %[[VAL_213]], <16 x i8> %[[VAL_216]])
+! LLVMIR:         store <512 x i1> %[[VAL_217]], ptr %[[VAL_211]], align 64
 
       subroutine test_xvi16ger2_u1()
       use, intrinsic :: mma
@@ -574,13 +574,13 @@ subroutine test_xvi16ger2_u1()
       end subroutine test_xvi16ger2_u1
 
 !CHECK-LABEL: @test_xvi16ger2_u1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.xvi16ger2(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_218:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_219:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_220:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_221:.*]] = load <16 x i8>, ptr %[[VAL_219]], align 16
+! LLVMIR:         %[[VAL_222:.*]] = load <16 x i8>, ptr %[[VAL_218]], align 16
+! LLVMIR:         %[[VAL_223:.*]] = call <512 x i1> @llvm.ppc.mma.xvi16ger2(<16 x i8> %[[VAL_221]], <16 x i8> %[[VAL_222]])
+! LLVMIR:         store <512 x i1> %[[VAL_223]], ptr %[[VAL_220]], align 64
 
       subroutine test_xvi16ger2_i2()
       use, intrinsic :: mma
@@ -591,15 +591,15 @@ subroutine test_xvi16ger2_i2()
       end subroutine test_xvi16ger2_i2
 
 !CHECK-LABEL: @test_xvi16ger2_i2_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:  %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:  %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:  %6 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:  %7 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:  %8 = call <512 x i1> @llvm.ppc.mma.xvi16ger2(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:  store <512 x i1> %8, ptr %1, align 64
+! LLVMIR:         %[[VAL_224:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_225:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_226:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_227:.*]] = load <8 x i16>, ptr %[[VAL_225]], align 16
+! LLVMIR:         %[[VAL_228:.*]] = load <8 x i16>, ptr %[[VAL_224]], align 16
+! LLVMIR:         %[[VAL_229:.*]] = bitcast <8 x i16> %[[VAL_227]] to <16 x i8>
+! LLVMIR:         %[[VAL_230:.*]] = bitcast <8 x i16> %[[VAL_228]] to <16 x i8>
+! LLVMIR:         %[[VAL_231:.*]] = call <512 x i1> @llvm.ppc.mma.xvi16ger2(<16 x i8> %[[VAL_229]], <16 x i8> %[[VAL_230]])
+! LLVMIR:         store <512 x i1> %[[VAL_231]], ptr %[[VAL_226]], align 64
 
       subroutine test_xvi16ger2pp_u1()
       use, intrinsic :: mma
@@ -610,14 +610,14 @@ subroutine test_xvi16ger2pp_u1()
       end subroutine test_xvi16ger2pp_u1
 
 !CHECK-LABEL: @test_xvi16ger2pp_u1_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:   %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:   %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:   %7 = call <512 x i1> @llvm.ppc.mma.xvi16ger2pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:   store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_232:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_233:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_234:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_235:.*]] = load <16 x i8>, ptr %[[VAL_233]], align 16
+! LLVMIR:         %[[VAL_236:.*]] = load <16 x i8>, ptr %[[VAL_232]], align 16
+! LLVMIR:         %[[VAL_237:.*]] = load <512 x i1>, ptr %[[VAL_234]], align 64
+! LLVMIR:         %[[VAL_238:.*]] = call <512 x i1> @llvm.ppc.mma.xvi16ger2pp(<512 x i1> %[[VAL_237]], <16 x i8> %[[VAL_235]], <16 x i8> %[[VAL_236]])
+! LLVMIR:         store <512 x i1> %[[VAL_238]], ptr %[[VAL_234]], align 64
 
       subroutine test_xvi16ger2pp_i2()
       use, intrinsic :: mma
@@ -628,16 +628,16 @@ subroutine test_xvi16ger2pp_i2()
       end subroutine test_xvi16ger2pp_i2
 
 !CHECK-LABEL: @test_xvi16ger2pp_i2_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:   %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:   %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:   %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:   %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:   %7 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:   %8 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:   %9 = call <512 x i1> @llvm.ppc.mma.xvi16ger2pp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8)
-!LLVMIR:   store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_239:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_240:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_241:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_242:.*]] = load <8 x i16>, ptr %[[VAL_240]], align 16
+! LLVMIR:         %[[VAL_243:.*]] = load <8 x i16>, ptr %[[VAL_239]], align 16
+! LLVMIR:         %[[VAL_244:.*]] = load <512 x i1>, ptr %[[VAL_241]], align 64
+! LLVMIR:         %[[VAL_245:.*]] = bitcast <8 x i16> %[[VAL_242]] to <16 x i8>
+! LLVMIR:         %[[VAL_246:.*]] = bitcast <8 x i16> %[[VAL_243]] to <16 x i8>
+! LLVMIR:         %[[VAL_247:.*]] = call <512 x i1> @llvm.ppc.mma.xvi16ger2pp(<512 x i1> %[[VAL_244]], <16 x i8> %[[VAL_245]], <16 x i8> %[[VAL_246]])
+! LLVMIR:         store <512 x i1> %[[VAL_247]], ptr %[[VAL_241]], align 64
 
       subroutine test_xvi16ger2s_u1()
       use, intrinsic :: mma
@@ -648,13 +648,13 @@ subroutine test_xvi16ger2s_u1()
       end subroutine test_xvi16ger2s_u1
 
 !CHECK-LABEL:  @test_xvi16ger2s_u1_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:   %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:   %6 = call <512 x i1> @llvm.ppc.mma.xvi16ger2s(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:   store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_248:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_249:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_250:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_251:.*]] = load <16 x i8>, ptr %[[VAL_249]], align 16
+! LLVMIR:         %[[VAL_252:.*]] = load <16 x i8>, ptr %[[VAL_248]], align 16
+! LLVMIR:         %[[VAL_253:.*]] = call <512 x i1> @llvm.ppc.mma.xvi16ger2s(<16 x i8> %[[VAL_251]], <16 x i8> %[[VAL_252]])
+! LLVMIR:         store <512 x i1> %[[VAL_253]], ptr %[[VAL_250]], align 64
 
       subroutine test_xvi16ger2s_i2()
       use, intrinsic :: mma
@@ -665,15 +665,15 @@ subroutine test_xvi16ger2s_i2()
       end subroutine test_xvi16ger2s_i2
 
 !CHECK-LABEL:  @test_xvi16ger2s_i2_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:   %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:   %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:   %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:   %6 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:   %7 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:   %8 = call <512 x i1> @llvm.ppc.mma.xvi16ger2s(<16 x i8> %6, <16 x i8> %7)
-!LLVMIR:   store <512 x i1> %8, ptr %1, align 64
+! LLVMIR:         %[[VAL_254:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_255:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_256:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_257:.*]] = load <8 x i16>, ptr %[[VAL_255]], align 16
+! LLVMIR:         %[[VAL_258:.*]] = load <8 x i16>, ptr %[[VAL_254]], align 16
+! LLVMIR:         %[[VAL_259:.*]] = bitcast <8 x i16> %[[VAL_257]] to <16 x i8>
+! LLVMIR:         %[[VAL_260:.*]] = bitcast <8 x i16> %[[VAL_258]] to <16 x i8>
+! LLVMIR:         %[[VAL_261:.*]] = call <512 x i1> @llvm.ppc.mma.xvi16ger2s(<16 x i8> %[[VAL_259]], <16 x i8> %[[VAL_260]])
+! LLVMIR:         store <512 x i1> %[[VAL_261]], ptr %[[VAL_256]], align 64
 
       subroutine test_xvi16ger2spp_u1()
       use, intrinsic :: mma
@@ -684,14 +684,14 @@ subroutine test_xvi16ger2spp_u1()
       end subroutine test_xvi16ger2spp_u1
 
 !CHECK-LABEL:  @test_xvi16ger2spp_u1_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:   %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:   %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:   %7 = call <512 x i1> @llvm.ppc.mma.xvi16ger2spp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:   store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_262:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_263:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_264:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_265:.*]] = load <16 x i8>, ptr %[[VAL_263]], align 16
+! LLVMIR:         %[[VAL_266:.*]] = load <16 x i8>, ptr %[[VAL_262]], align 16
+! LLVMIR:         %[[VAL_267:.*]] = load <512 x i1>, ptr %[[VAL_264]], align 64
+! LLVMIR:         %[[VAL_268:.*]] = call <512 x i1> @llvm.ppc.mma.xvi16ger2spp(<512 x i1> %[[VAL_267]], <16 x i8> %[[VAL_265]], <16 x i8> %[[VAL_266]])
+! LLVMIR:         store <512 x i1> %[[VAL_268]], ptr %[[VAL_264]], align 64
 
       subroutine test_xvi16ger2spp_i2()
       use, intrinsic :: mma
@@ -702,16 +702,16 @@ subroutine test_xvi16ger2spp_i2()
       end subroutine test_xvi16ger2spp_i2
 
 !CHECK-LABEL:  @test_xvi16ger2spp_i2_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:   %3 = alloca <8 x i16>, i64 1, align 16
-!LLVMIR:   %4 = load <8 x i16>, ptr %2, align 16
-!LLVMIR:   %5 = load <8 x i16>, ptr %3, align 16
-!LLVMIR:   %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:   %7 = bitcast <8 x i16> %4 to <16 x i8>
-!LLVMIR:   %8 = bitcast <8 x i16> %5 to <16 x i8>
-!LLVMIR:   %9 = call <512 x i1> @llvm.ppc.mma.xvi16ger2spp(<512 x i1> %6, <16 x i8> %7, <16 x i8> %8)
-!LLVMIR:   store <512 x i1> %9, ptr %1, align 64
+! LLVMIR:         %[[VAL_269:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_270:.*]] = alloca <8 x i16>, i64 1, align 16
+! LLVMIR:         %[[VAL_271:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_272:.*]] = load <8 x i16>, ptr %[[VAL_270]], align 16
+! LLVMIR:         %[[VAL_273:.*]] = load <8 x i16>, ptr %[[VAL_269]], align 16
+! LLVMIR:         %[[VAL_274:.*]] = load <512 x i1>, ptr %[[VAL_271]], align 64
+! LLVMIR:         %[[VAL_275:.*]] = bitcast <8 x i16> %[[VAL_272]] to <16 x i8>
+! LLVMIR:         %[[VAL_276:.*]] = bitcast <8 x i16> %[[VAL_273]] to <16 x i8>
+! LLVMIR:         %[[VAL_277:.*]] = call <512 x i1> @llvm.ppc.mma.xvi16ger2spp(<512 x i1> %[[VAL_274]], <16 x i8> %[[VAL_275]], <16 x i8> %[[VAL_276]])
+! LLVMIR:         store <512 x i1> %[[VAL_277]], ptr %[[VAL_271]], align 64
 
       subroutine test_xvi4ger8()
       use, intrinsic :: mma
@@ -722,13 +722,13 @@ subroutine test_xvi4ger8()
       end subroutine test_xvi4ger8
 
 !CHECK-LABEL:  @test_xvi4ger8_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:   %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:   %6 = call <512 x i1> @llvm.ppc.mma.xvi4ger8(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:   store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_278:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_279:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_280:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_281:.*]] = load <16 x i8>, ptr %[[VAL_279]], align 16
+! LLVMIR:         %[[VAL_282:.*]] = load <16 x i8>, ptr %[[VAL_278]], align 16
+! LLVMIR:         %[[VAL_283:.*]] = call <512 x i1> @llvm.ppc.mma.xvi4ger8(<16 x i8> %[[VAL_281]], <16 x i8> %[[VAL_282]])
+! LLVMIR:         store <512 x i1> %[[VAL_283]], ptr %[[VAL_280]], align 64
 
       subroutine test_xvi4ger8pp()
       use, intrinsic :: mma
@@ -739,14 +739,14 @@ subroutine test_xvi4ger8pp()
       end subroutine test_xvi4ger8pp
 
 !CHECK-LABEL:  @test_xvi4ger8pp_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:   %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:   %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:   %7 = call <512 x i1> @llvm.ppc.mma.xvi4ger8pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:   store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_284:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_285:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_286:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_287:.*]] = load <16 x i8>, ptr %[[VAL_285]], align 16
+! LLVMIR:         %[[VAL_288:.*]] = load <16 x i8>, ptr %[[VAL_284]], align 16
+! LLVMIR:         %[[VAL_289:.*]] = load <512 x i1>, ptr %[[VAL_286]], align 64
+! LLVMIR:         %[[VAL_290:.*]] = call <512 x i1> @llvm.ppc.mma.xvi4ger8pp(<512 x i1> %[[VAL_289]], <16 x i8> %[[VAL_287]], <16 x i8> %[[VAL_288]])
+! LLVMIR:         store <512 x i1> %[[VAL_290]], ptr %[[VAL_286]], align 64
 
       subroutine test_xvi8ger4_u1()
       use, intrinsic :: mma
@@ -757,13 +757,13 @@ subroutine test_xvi8ger4_u1()
       end subroutine test_xvi8ger4_u1
 
 !CHECK-LABEL: @test_xvi8ger4_u1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.xvi8ger4(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_291:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_292:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_293:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_294:.*]] = load <16 x i8>, ptr %[[VAL_292]], align 16
+! LLVMIR:         %[[VAL_295:.*]] = load <16 x i8>, ptr %[[VAL_291]], align 16
+! LLVMIR:         %[[VAL_296:.*]] = call <512 x i1> @llvm.ppc.mma.xvi8ger4(<16 x i8> %[[VAL_294]], <16 x i8> %[[VAL_295]])
+! LLVMIR:         store <512 x i1> %[[VAL_296]], ptr %[[VAL_293]], align 64
 
 
       subroutine test_xvi8ger4_i1()
@@ -775,13 +775,13 @@ subroutine test_xvi8ger4_i1()
       end subroutine test_xvi8ger4_i1
 
 !CHECK-LABEL: @test_xvi8ger4_i1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = call <512 x i1> @llvm.ppc.mma.xvi8ger4(<16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %6, ptr %1, align 64
+! LLVMIR:         %[[VAL_297:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_298:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_299:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_300:.*]] = load <16 x i8>, ptr %[[VAL_298]], align 16
+! LLVMIR:         %[[VAL_301:.*]] = load <16 x i8>, ptr %[[VAL_297]], align 16
+! LLVMIR:         %[[VAL_302:.*]] = call <512 x i1> @llvm.ppc.mma.xvi8ger4(<16 x i8> %[[VAL_300]], <16 x i8> %[[VAL_301]])
+! LLVMIR:         store <512 x i1> %[[VAL_302]], ptr %[[VAL_299]], align 64
 
       subroutine test_xvi8ger4pp_u1()
       use, intrinsic :: mma
@@ -792,14 +792,14 @@ subroutine test_xvi8ger4pp_u1()
       end subroutine test_xvi8ger4pp_u1
 
 !CHECK-LABEL: @test_xvi8ger4pp_u1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvi8ger4pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_303:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_304:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_305:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_306:.*]] = load <16 x i8>, ptr %[[VAL_304]], align 16
+! LLVMIR:         %[[VAL_307:.*]] = load <16 x i8>, ptr %[[VAL_303]], align 16
+! LLVMIR:         %[[VAL_308:.*]] = load <512 x i1>, ptr %[[VAL_305]], align 64
+! LLVMIR:         %[[VAL_309:.*]] = call <512 x i1> @llvm.ppc.mma.xvi8ger4pp(<512 x i1> %[[VAL_308]], <16 x i8> %[[VAL_306]], <16 x i8> %[[VAL_307]])
+! LLVMIR:         store <512 x i1> %[[VAL_309]], ptr %[[VAL_305]], align 64
 
       subroutine test_xvi8ger4pp_i1()
       use, intrinsic :: mma
@@ -810,14 +810,14 @@ subroutine test_xvi8ger4pp_i1()
       end subroutine test_xvi8ger4pp_i1
 
 !CHECK-LABEL:  @test_xvi8ger4pp_i1_
-!LLVMIR:   %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:   %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:   %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:   %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:   %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:   %7 = call <512 x i1> @llvm.ppc.mma.xvi8ger4pp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:   store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_310:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_311:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_312:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_313:.*]] = load <16 x i8>, ptr %[[VAL_311]], align 16
+! LLVMIR:         %[[VAL_314:.*]] = load <16 x i8>, ptr %[[VAL_310]], align 16
+! LLVMIR:         %[[VAL_315:.*]] = load <512 x i1>, ptr %[[VAL_312]], align 64
+! LLVMIR:         %[[VAL_316:.*]] = call <512 x i1> @llvm.ppc.mma.xvi8ger4pp(<512 x i1> %[[VAL_315]], <16 x i8> %[[VAL_313]], <16 x i8> %[[VAL_314]])
+! LLVMIR:         store <512 x i1> %[[VAL_316]], ptr %[[VAL_312]], align 64
 
       subroutine test_xvi8ger4spp_u1()
       use, intrinsic :: mma
@@ -828,14 +828,14 @@ subroutine test_xvi8ger4spp_u1()
       end subroutine test_xvi8ger4spp_u1
 
 !CHECK-LABEL: @test_xvi8ger4spp_u1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvi8ger4spp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_317:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_318:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_319:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_320:.*]] = load <16 x i8>, ptr %[[VAL_318]], align 16
+! LLVMIR:         %[[VAL_321:.*]] = load <16 x i8>, ptr %[[VAL_317]], align 16
+! LLVMIR:         %[[VAL_322:.*]] = load <512 x i1>, ptr %[[VAL_319]], align 64
+! LLVMIR:         %[[VAL_323:.*]] = call <512 x i1> @llvm.ppc.mma.xvi8ger4spp(<512 x i1> %[[VAL_322]], <16 x i8> %[[VAL_320]], <16 x i8> %[[VAL_321]])
+! LLVMIR:         store <512 x i1> %[[VAL_323]], ptr %[[VAL_319]], align 64
 
       subroutine test_xvi8ger4spp_i1()
       use, intrinsic :: mma
@@ -846,11 +846,11 @@ subroutine test_xvi8ger4spp_i1()
       end subroutine test_xvi8ger4spp_i1
 
 !CHECK-LABEL: @test_xvi8ger4spp_i1_
-!LLVMIR:  %1 = alloca <512 x i1>, i64 1, align 64
-!LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %4 = load <16 x i8>, ptr %2, align 16
-!LLVMIR:  %5 = load <16 x i8>, ptr %3, align 16
-!LLVMIR:  %6 = load <512 x i1>, ptr %1, align 64
-!LLVMIR:  %7 = call <512 x i1> @llvm.ppc.mma.xvi8ger4spp(<512 x i1> %6, <16 x i8> %4, <16 x i8> %5)
-!LLVMIR:  store <512 x i1> %7, ptr %1, align 64
+! LLVMIR:         %[[VAL_324:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_325:.*]] = alloca <16 x i8>, i64 1, align 16
+! LLVMIR:         %[[VAL_326:.*]] = alloca <512 x i1>, i64 1, align 64
+! LLVMIR:         %[[VAL_327:.*]] = load <16 x i8>, ptr %[[VAL_325]], align 16
+! LLVMIR:         %[[VAL_328:.*]] = load <16 x i8>, ptr %[[VAL_324]], align 16
+! LLVMIR:         %[[VAL_329:.*]] = load <512 x i1>, ptr %[[VAL_326]], align 64
+! LLVMIR:         %[[VAL_330:.*]] = call <512 x i1> @llvm.ppc.mma.xvi8ger4spp(<512 x i1> %[[VAL_329]], <16 x i8> %[[VAL_327]], <16 x i8> %[[VAL_328]])
+! LLVMIR:         store <512 x i1> %[[VAL_330]], ptr %[[VAL_326]], align 64
diff --git a/flang/test/Lower/PowerPC/ppc-pwr10-vec-intrinsics.f90 b/flang/test/Lower/PowerPC/ppc-pwr10-vec-intrinsics.f90
index c49f6f06c60ed..1c7c3e4a8b093 100644
--- a/flang/test/Lower/PowerPC/ppc-pwr10-vec-intrinsics.f90
+++ b/flang/test/Lower/PowerPC/ppc-pwr10-vec-intrinsics.f90
@@ -28,9 +28,9 @@ end subroutine test_cvspbf16
 !CHECK-LABEL: @test_cvspbf16_
 !LLVMIR:  %1 = alloca <16 x i8>, i64 1, align 16
 !LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = load <16 x i8>, ptr %2, align 16
+!LLVMIR:  %3 = load <16 x i8>, ptr %1, align 16
 !LLVMIR:  %4 = call <16 x i8> @llvm.ppc.vsx.xvcvspbf16(<16 x i8> %3)
-!LLVMIR:  store <16 x i8> %4, ptr %1, align 16
+!LLVMIR:  store <16 x i8> %4, ptr %2, align 16
 
       subroutine test_cvbf16spn()
       implicit none
@@ -41,9 +41,9 @@ end subroutine test_cvbf16spn
 !CHECK-LABEL: @test_cvbf16spn_
 !LLVMIR:  %1 = alloca <16 x i8>, i64 1, align 16
 !LLVMIR:  %2 = alloca <16 x i8>, i64 1, align 16
-!LLVMIR:  %3 = load <16 x i8>, ptr %2, align 16
+!LLVMIR:  %3 = load <16 x i8>, ptr %1, align 16
 !LLVMIR:  %4 = call <16 x i8> @llvm.ppc.vsx.xvcvbf16spn(<16 x i8> %3)
-!LLVMIR:  store <16 x i8> %4, ptr %1, align 16
+!LLVMIR:  store <16 x i8> %4, ptr %2, align 16
 
 !----------------------
 ! vec_lxvp
diff --git a/flang/test/Lower/PowerPC/ppc-vector-types.f90 b/flang/test/Lower/PowerPC/ppc-vector-types.f90
index 4745e4567b2d1..92c8130222b80 100644
--- a/flang/test/Lower/PowerPC/ppc-vector-types.f90
+++ b/flang/test/Lower/PowerPC/ppc-vector-types.f90
@@ -5,20 +5,20 @@
       program ppc_vec_unit
       implicit none
 
-      ! CHECK-LLVM-DAG: %[[VI1:.*]] = alloca <4 x i32>, i64 1, align 16
       ! CHECK-LLVM-DAG: %[[VI2:.*]] = alloca <4 x i32>, i64 1, align 16
+      ! CHECK-LLVM-DAG: %[[VI1:.*]] = alloca <4 x i32>, i64 1, align 16
       vector(integer(4)) :: vi1, vi2
 
-      ! CHECK-LLVM-DAG: %[[VR1:.*]] = alloca <2 x double>, i64 1, align 16
       ! CHECK-LLVM-DAG: %[[VR2:.*]] = alloca <2 x double>, i64 1, align 16
+      ! CHECK-LLVM-DAG: %[[VR1:.*]] = alloca <2 x double>, i64 1, align 16
       vector(real(8)) :: vr1, vr2
 
-      ! CHECK-LLVM-DAG: %[[VU1:.*]] = alloca <8 x i16>, i64 1, align 16
       ! CHECK-LLVM-DAG: %[[VU2:.*]] = alloca <8 x i16>, i64 1, align 16
+      ! CHECK-LLVM-DAG: %[[VU1:.*]] = alloca <8 x i16>, i64 1, align 16
       vector(unsigned(2)) :: vu1, vu2
 
-      ! CHECK-LLVM-DAG: %[[VP1:.*]] = alloca <256 x i1>, i64 1, align 32
       ! CHECK-LLVM-DAG: %[[VP2:.*]] = alloca <256 x i1>, i64 1, align 32
+      ! CHECK-LLVM-DAG: %[[VP1:.*]] = alloca <256 x i1>, i64 1, align 32
       __vector_pair :: vp1, vp2
 
       __vector_quad :: vq1, vq2

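(An aside for readers unfamiliar with the FileCheck idiom the rewritten tests adopt: rather than matching hard-coded SSA numbers such as %1 and %2, which shift whenever codegen hoists or reorders an alloca, each value is captured once with a named pattern variable and referenced by that name afterwards. The check lines below are an illustrative sketch only; the names VEC and LD are made up and do not appear in the patch:

! Old, fragile style: breaks whenever the SSA numbering changes.
!LLVMIR:  %1 = alloca <16 x i8>, i64 1, align 16
!LLVMIR:  %3 = load <16 x i8>, ptr %1, align 16
! New, robust style: capture whatever name LLVM assigns, then reuse it.
! LLVMIR:  %[[VEC:.*]] = alloca <16 x i8>, i64 1, align 16
! LLVMIR:  %[[LD:.*]] = load <16 x i8>, ptr %[[VEC]], align 16

The variable form pins down only the dataflow between instructions, not their textual numbering, which is why the rewritten tests survive the alloca reordering.)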

