[llvm] 9e5fa4b - ScalarizeMaskedMemIntrin: Convert tests to opaque pointers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 28 06:08:52 PST 2022


Author: Matt Arsenault
Date: 2022-11-28T09:08:31-05:00
New Revision: 9e5fa4b405b60b24a9bd8ab3ef470ce9d86a9453

URL: https://github.com/llvm/llvm-project/commit/9e5fa4b405b60b24a9bd8ab3ef470ce9d86a9453
DIFF: https://github.com/llvm/llvm-project/commit/9e5fa4b405b60b24a9bd8ab3ef470ce9d86a9453.diff

LOG: ScalarizeMaskedMemIntrin: Convert tests to opaque pointers

This was as easy as running the opaquify script and then
update_test_checks. I did notice the pass produces some GEPs with 0
indices, such that running the opaquify script a second time produces
a diff.

Added: 
    

Modified: 
    llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-load.ll
    llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-store.ll
    llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-compressstore.ll
    llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-expandload.ll
    llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll
    llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll
    llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-scatter.ll
    llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-load.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-load.ll
index 129920144037c..fc4f433b10c9f 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-load.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-load.ll
@@ -3,203 +3,195 @@
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=aarch64-linux-gnu -mattr=+sve  | FileCheck -check-prefixes=CHECK,CHECK-LE %s
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=aarch64_be-linux-gnu -data-layout="E-m:o-i64:64-i128:128-n32:64-S128" | FileCheck -check-prefixes=CHECK,CHECK-BE %s
 
-define <2 x i64> @scalarize_v2i64(<2 x i64>* %p, <2 x i1> %mask, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64(ptr %p, <2 x i1> %mask, <2 x i64> %passthru) {
 ; CHECK-LE-LABEL: @scalarize_v2i64(
-; CHECK-LE-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-LE-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-LE-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-LE-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-LE-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
+; CHECK-LE-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-LE-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-LE-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK-LE:       cond.load:
-; CHECK-LE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
-; CHECK-LE-NEXT:    [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
-; CHECK-LE-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP5]], i64 0
+; CHECK-LE-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-LE-NEXT:    [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
+; CHECK-LE-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP4]], i64 0
 ; CHECK-LE-NEXT:    br label [[ELSE]]
 ; CHECK-LE:       else:
-; CHECK-LE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-LE-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-LE-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
-; CHECK-LE-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
+; CHECK-LE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-LE-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-LE-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-LE-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK-LE:       cond.load1:
-; CHECK-LE-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-LE-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP9]], align 8
-; CHECK-LE-NEXT:    [[TMP11:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP10]], i64 1
+; CHECK-LE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
+; CHECK-LE-NEXT:    [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8
+; CHECK-LE-NEXT:    [[TMP10:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP9]], i64 1
 ; CHECK-LE-NEXT:    br label [[ELSE2]]
 ; CHECK-LE:       else2:
-; CHECK-LE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
+; CHECK-LE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP10]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-LE-NEXT:    ret <2 x i64> [[RES_PHI_ELSE3]]
 ;
 ; CHECK-BE-LABEL: @scalarize_v2i64(
-; CHECK-BE-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-BE-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-BE-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-BE-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-BE-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
+; CHECK-BE-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-BE-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-BE-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK-BE:       cond.load:
-; CHECK-BE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
-; CHECK-BE-NEXT:    [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
-; CHECK-BE-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP5]], i64 0
+; CHECK-BE-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-BE-NEXT:    [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
+; CHECK-BE-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP4]], i64 0
 ; CHECK-BE-NEXT:    br label [[ELSE]]
 ; CHECK-BE:       else:
-; CHECK-BE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-BE-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-BE-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
-; CHECK-BE-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
+; CHECK-BE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-BE-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-BE-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-BE-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK-BE:       cond.load1:
-; CHECK-BE-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-BE-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP9]], align 8
-; CHECK-BE-NEXT:    [[TMP11:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP10]], i64 1
+; CHECK-BE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
+; CHECK-BE-NEXT:    [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8
+; CHECK-BE-NEXT:    [[TMP10:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP9]], i64 1
 ; CHECK-BE-NEXT:    br label [[ELSE2]]
 ; CHECK-BE:       else2:
-; CHECK-BE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
+; CHECK-BE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP10]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-BE-NEXT:    ret <2 x i64> [[RES_PHI_ELSE3]]
 ;
-  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 128, <2 x i1> %mask, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr %p, i32 128, <2 x i1> %mask, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_ones_mask(<2 x i64>* %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_ones_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_ones_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[P:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[P:%.*]], align 8
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
-  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr %p, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_zero_mask(<2 x i64>* %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_zero_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_zero_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-NEXT:    ret <2 x i64> [[PASSTHRU:%.*]]
 ;
-  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr %p, i32 8, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_const_mask(<2 x i64>* %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_const_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP3]], i64 1
-; CHECK-NEXT:    ret <2 x i64> [[TMP4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP2]], i64 1
+; CHECK-NEXT:    ret <2 x i64> [[TMP3]]
 ;
-  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
 ; This use a byte sized but non power of 2 element size. This used to crash due to bad alignment calculation.
-define <2 x i24> @scalarize_v2i24(<2 x i24>* %p, <2 x i1> %mask, <2 x i24> %passthru) {
+define <2 x i24> @scalarize_v2i24(ptr %p, <2 x i1> %mask, <2 x i24> %passthru) {
 ; CHECK-LE-LABEL: @scalarize_v2i24(
-; CHECK-LE-NEXT:    [[TMP1:%.*]] = bitcast <2 x i24>* [[P:%.*]] to i24*
 ; CHECK-LE-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-LE-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-LE-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-LE-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
+; CHECK-LE-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-LE-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-LE-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK-LE:       cond.load:
-; CHECK-LE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 0
-; CHECK-LE-NEXT:    [[TMP5:%.*]] = load i24, i24* [[TMP4]], align 1
-; CHECK-LE-NEXT:    [[TMP6:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP5]], i64 0
+; CHECK-LE-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i24, ptr [[P:%.*]], i32 0
+; CHECK-LE-NEXT:    [[TMP4:%.*]] = load i24, ptr [[TMP3]], align 1
+; CHECK-LE-NEXT:    [[TMP5:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP4]], i64 0
 ; CHECK-LE-NEXT:    br label [[ELSE]]
 ; CHECK-LE:       else:
-; CHECK-LE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i24> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-LE-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-LE-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
-; CHECK-LE-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
+; CHECK-LE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i24> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-LE-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-LE-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-LE-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK-LE:       cond.load1:
-; CHECK-LE-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 1
-; CHECK-LE-NEXT:    [[TMP10:%.*]] = load i24, i24* [[TMP9]], align 1
-; CHECK-LE-NEXT:    [[TMP11:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP10]], i64 1
+; CHECK-LE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i24, ptr [[P]], i32 1
+; CHECK-LE-NEXT:    [[TMP9:%.*]] = load i24, ptr [[TMP8]], align 1
+; CHECK-LE-NEXT:    [[TMP10:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP9]], i64 1
 ; CHECK-LE-NEXT:    br label [[ELSE2]]
 ; CHECK-LE:       else2:
-; CHECK-LE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i24> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
+; CHECK-LE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i24> [ [[TMP10]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-LE-NEXT:    ret <2 x i24> [[RES_PHI_ELSE3]]
 ;
 ; CHECK-BE-LABEL: @scalarize_v2i24(
-; CHECK-BE-NEXT:    [[TMP1:%.*]] = bitcast <2 x i24>* [[P:%.*]] to i24*
 ; CHECK-BE-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-BE-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-BE-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-BE-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
+; CHECK-BE-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-BE-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-BE-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK-BE:       cond.load:
-; CHECK-BE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 0
-; CHECK-BE-NEXT:    [[TMP5:%.*]] = load i24, i24* [[TMP4]], align 1
-; CHECK-BE-NEXT:    [[TMP6:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP5]], i64 0
+; CHECK-BE-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i24, ptr [[P:%.*]], i32 0
+; CHECK-BE-NEXT:    [[TMP4:%.*]] = load i24, ptr [[TMP3]], align 1
+; CHECK-BE-NEXT:    [[TMP5:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP4]], i64 0
 ; CHECK-BE-NEXT:    br label [[ELSE]]
 ; CHECK-BE:       else:
-; CHECK-BE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i24> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-BE-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-BE-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
-; CHECK-BE-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
+; CHECK-BE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i24> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-BE-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-BE-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-BE-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK-BE:       cond.load1:
-; CHECK-BE-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 1
-; CHECK-BE-NEXT:    [[TMP10:%.*]] = load i24, i24* [[TMP9]], align 1
-; CHECK-BE-NEXT:    [[TMP11:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP10]], i64 1
+; CHECK-BE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i24, ptr [[P]], i32 1
+; CHECK-BE-NEXT:    [[TMP9:%.*]] = load i24, ptr [[TMP8]], align 1
+; CHECK-BE-NEXT:    [[TMP10:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP9]], i64 1
 ; CHECK-BE-NEXT:    br label [[ELSE2]]
 ; CHECK-BE:       else2:
-; CHECK-BE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i24> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
+; CHECK-BE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i24> [ [[TMP10]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-BE-NEXT:    ret <2 x i24> [[RES_PHI_ELSE3]]
 ;
-  %ret = call <2 x i24> @llvm.masked.load.v2i24.p0v2i24(<2 x i24>* %p, i32 8, <2 x i1> %mask, <2 x i24> %passthru)
+  %ret = call <2 x i24> @llvm.masked.load.v2i24.p0(ptr %p, i32 8, <2 x i1> %mask, <2 x i24> %passthru)
   ret <2 x i24> %ret
 }
 
 ; This use a byte sized but non power of 2 element size. This used to crash due to bad alignment calculation.
-define <2 x i48> @scalarize_v2i48(<2 x i48>* %p, <2 x i1> %mask, <2 x i48> %passthru) {
+define <2 x i48> @scalarize_v2i48(ptr %p, <2 x i1> %mask, <2 x i48> %passthru) {
 ; CHECK-LE-LABEL: @scalarize_v2i48(
-; CHECK-LE-NEXT:    [[TMP1:%.*]] = bitcast <2 x i48>* [[P:%.*]] to i48*
 ; CHECK-LE-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-LE-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-LE-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-LE-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
+; CHECK-LE-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-LE-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-LE-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK-LE:       cond.load:
-; CHECK-LE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 0
-; CHECK-LE-NEXT:    [[TMP5:%.*]] = load i48, i48* [[TMP4]], align 2
-; CHECK-LE-NEXT:    [[TMP6:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP5]], i64 0
+; CHECK-LE-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i48, ptr [[P:%.*]], i32 0
+; CHECK-LE-NEXT:    [[TMP4:%.*]] = load i48, ptr [[TMP3]], align 2
+; CHECK-LE-NEXT:    [[TMP5:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP4]], i64 0
 ; CHECK-LE-NEXT:    br label [[ELSE]]
 ; CHECK-LE:       else:
-; CHECK-LE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i48> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-LE-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-LE-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
-; CHECK-LE-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
+; CHECK-LE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i48> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-LE-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-LE-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-LE-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK-LE:       cond.load1:
-; CHECK-LE-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 1
-; CHECK-LE-NEXT:    [[TMP10:%.*]] = load i48, i48* [[TMP9]], align 2
-; CHECK-LE-NEXT:    [[TMP11:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP10]], i64 1
+; CHECK-LE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i48, ptr [[P]], i32 1
+; CHECK-LE-NEXT:    [[TMP9:%.*]] = load i48, ptr [[TMP8]], align 2
+; CHECK-LE-NEXT:    [[TMP10:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP9]], i64 1
 ; CHECK-LE-NEXT:    br label [[ELSE2]]
 ; CHECK-LE:       else2:
-; CHECK-LE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i48> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
+; CHECK-LE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i48> [ [[TMP10]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-LE-NEXT:    ret <2 x i48> [[RES_PHI_ELSE3]]
 ;
 ; CHECK-BE-LABEL: @scalarize_v2i48(
-; CHECK-BE-NEXT:    [[TMP1:%.*]] = bitcast <2 x i48>* [[P:%.*]] to i48*
 ; CHECK-BE-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-BE-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-BE-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-BE-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
+; CHECK-BE-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-BE-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-BE-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK-BE:       cond.load:
-; CHECK-BE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 0
-; CHECK-BE-NEXT:    [[TMP5:%.*]] = load i48, i48* [[TMP4]], align 2
-; CHECK-BE-NEXT:    [[TMP6:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP5]], i64 0
+; CHECK-BE-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i48, ptr [[P:%.*]], i32 0
+; CHECK-BE-NEXT:    [[TMP4:%.*]] = load i48, ptr [[TMP3]], align 2
+; CHECK-BE-NEXT:    [[TMP5:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP4]], i64 0
 ; CHECK-BE-NEXT:    br label [[ELSE]]
 ; CHECK-BE:       else:
-; CHECK-BE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i48> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-BE-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-BE-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
-; CHECK-BE-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
+; CHECK-BE-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i48> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-BE-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-BE-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-BE-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK-BE:       cond.load1:
-; CHECK-BE-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 1
-; CHECK-BE-NEXT:    [[TMP10:%.*]] = load i48, i48* [[TMP9]], align 2
-; CHECK-BE-NEXT:    [[TMP11:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP10]], i64 1
+; CHECK-BE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i48, ptr [[P]], i32 1
+; CHECK-BE-NEXT:    [[TMP9:%.*]] = load i48, ptr [[TMP8]], align 2
+; CHECK-BE-NEXT:    [[TMP10:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP9]], i64 1
 ; CHECK-BE-NEXT:    br label [[ELSE2]]
 ; CHECK-BE:       else2:
-; CHECK-BE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i48> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
+; CHECK-BE-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i48> [ [[TMP10]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-BE-NEXT:    ret <2 x i48> [[RES_PHI_ELSE3]]
 ;
-  %ret = call <2 x i48> @llvm.masked.load.v2i48.p0v2i48(<2 x i48>* %p, i32 16, <2 x i1> %mask, <2 x i48> %passthru)
+  %ret = call <2 x i48> @llvm.masked.load.v2i48.p0(ptr %p, i32 16, <2 x i1> %mask, <2 x i48> %passthru)
   ret <2 x i48> %ret
 }
 
-declare <2 x i24> @llvm.masked.load.v2i24.p0v2i24(<2 x i24>*, i32, <2 x i1>, <2 x i24>)
-declare <2 x i48> @llvm.masked.load.v2i48.p0v2i48(<2 x i48>*, i32, <2 x i1>, <2 x i48>)
-declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)
+declare <2 x i24> @llvm.masked.load.v2i24.p0(ptr, i32, <2 x i1>, <2 x i24>)
+declare <2 x i48> @llvm.masked.load.v2i48.p0(ptr, i32, <2 x i1>, <2 x i48>)
+declare <2 x i64> @llvm.masked.load.v2i64.p0(ptr, i32, <2 x i1>, <2 x i64>)

diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-store.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-store.ll
index c729a2f577204..500f7d9e19f2d 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-store.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/expand-masked-store.ll
@@ -3,85 +3,81 @@
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=aarch64-linux-gnu -mattr=+sve | FileCheck -check-prefixes=CHECK,CHECK-LE %s
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=aarch64_be-linux-gnu -data-layout="E-m:o-i64:64-i128:128-n32:64-S128" | FileCheck -check-prefixes=CHECK,CHECK-BE %s
 
-define void @scalarize_v2i64(<2 x i64>* %p, <2 x i1> %mask, <2 x i64> %data) {
+define void @scalarize_v2i64(ptr %p, <2 x i1> %mask, <2 x i64> %data) {
 ; CHECK-LE-LABEL: @scalarize_v2i64(
-; CHECK-LE-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-LE-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-LE-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-LE-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-LE-NEXT:    br i1 [[TMP3]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
+; CHECK-LE-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-LE-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-LE-NEXT:    br i1 [[TMP2]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
 ; CHECK-LE:       cond.store:
-; CHECK-LE-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 0
-; CHECK-LE-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
-; CHECK-LE-NEXT:    store i64 [[TMP4]], i64* [[TMP5]], align 8
+; CHECK-LE-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 0
+; CHECK-LE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-LE-NEXT:    store i64 [[TMP3]], ptr [[TMP4]], align 8
 ; CHECK-LE-NEXT:    br label [[ELSE]]
 ; CHECK-LE:       else:
-; CHECK-LE-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-LE-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
-; CHECK-LE-NEXT:    br i1 [[TMP7]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
+; CHECK-LE-NEXT:    [[TMP5:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-LE-NEXT:    [[TMP6:%.*]] = icmp ne i2 [[TMP5]], 0
+; CHECK-LE-NEXT:    br i1 [[TMP6]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
 ; CHECK-LE:       cond.store1:
-; CHECK-LE-NEXT:    [[TMP8:%.*]] = extractelement <2 x i64> [[DATA]], i64 1
-; CHECK-LE-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-LE-NEXT:    store i64 [[TMP8]], i64* [[TMP9]], align 8
+; CHECK-LE-NEXT:    [[TMP7:%.*]] = extractelement <2 x i64> [[DATA]], i64 1
+; CHECK-LE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
+; CHECK-LE-NEXT:    store i64 [[TMP7]], ptr [[TMP8]], align 8
 ; CHECK-LE-NEXT:    br label [[ELSE2]]
 ; CHECK-LE:       else2:
 ; CHECK-LE-NEXT:    ret void
 ;
 ; CHECK-BE-LABEL: @scalarize_v2i64(
-; CHECK-BE-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-BE-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-BE-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-BE-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-BE-NEXT:    br i1 [[TMP3]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
+; CHECK-BE-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-BE-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-BE-NEXT:    br i1 [[TMP2]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
 ; CHECK-BE:       cond.store:
-; CHECK-BE-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 0
-; CHECK-BE-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
-; CHECK-BE-NEXT:    store i64 [[TMP4]], i64* [[TMP5]], align 8
+; CHECK-BE-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 0
+; CHECK-BE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-BE-NEXT:    store i64 [[TMP3]], ptr [[TMP4]], align 8
 ; CHECK-BE-NEXT:    br label [[ELSE]]
 ; CHECK-BE:       else:
-; CHECK-BE-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-BE-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
-; CHECK-BE-NEXT:    br i1 [[TMP7]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
+; CHECK-BE-NEXT:    [[TMP5:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-BE-NEXT:    [[TMP6:%.*]] = icmp ne i2 [[TMP5]], 0
+; CHECK-BE-NEXT:    br i1 [[TMP6]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
 ; CHECK-BE:       cond.store1:
-; CHECK-BE-NEXT:    [[TMP8:%.*]] = extractelement <2 x i64> [[DATA]], i64 1
-; CHECK-BE-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-BE-NEXT:    store i64 [[TMP8]], i64* [[TMP9]], align 8
+; CHECK-BE-NEXT:    [[TMP7:%.*]] = extractelement <2 x i64> [[DATA]], i64 1
+; CHECK-BE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
+; CHECK-BE-NEXT:    store i64 [[TMP7]], ptr [[TMP8]], align 8
 ; CHECK-BE-NEXT:    br label [[ELSE2]]
 ; CHECK-BE:       else2:
 ; CHECK-BE-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %data, <2 x i64>* %p, i32 128, <2 x i1> %mask)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %data, ptr %p, i32 128, <2 x i1> %mask)
   ret void
 }
 
-define void @scalarize_v2i64_ones_mask(<2 x i64>* %p, <2 x i64> %data) {
+define void @scalarize_v2i64_ones_mask(ptr %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_ones_mask(
-; CHECK-NEXT:    store <2 x i64> [[DATA:%.*]], <2 x i64>* [[P:%.*]], align 8
+; CHECK-NEXT:    store <2 x i64> [[DATA:%.*]], ptr [[P:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %data, <2 x i64>* %p, i32 8, <2 x i1> <i1 true, i1 true>)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %data, ptr %p, i32 8, <2 x i1> <i1 true, i1 true>)
   ret void
 }
 
-define void @scalarize_v2i64_zero_mask(<2 x i64>* %p, <2 x i64> %data) {
+define void @scalarize_v2i64_zero_mask(ptr %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_zero_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %data, <2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 false>)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %data, ptr %p, i32 8, <2 x i1> <i1 false, i1 false>)
   ret void
 }
 
-define void @scalarize_v2i64_const_mask(<2 x i64>* %p, <2 x i64> %data) {
+define void @scalarize_v2i64_const_mask(ptr %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 1
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-NEXT:    store i64 [[TMP2]], i64* [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 1
+; CHECK-NEXT:    store i64 [[TMP1]], ptr [[TMP2]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %data, <2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 true>)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %data, ptr %p, i32 8, <2 x i1> <i1 false, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2i64.p0(<2 x i64>, ptr, i32, <2 x i1>)

diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-compressstore.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-compressstore.ll
index 4aa13b2ddf462..15cb332265897 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-compressstore.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-compressstore.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=x86_64-linux-gnu | FileCheck %s
 
-define void @scalarize_v2i64(i64* %p, <2 x i1> %mask, <2 x i64> %data) {
+define void @scalarize_v2i64(ptr %p, <2 x i1> %mask, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64(
 ; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
@@ -9,56 +9,56 @@ define void @scalarize_v2i64(i64* %p, <2 x i1> %mask, <2 x i64> %data) {
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.store:
 ; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 0
-; CHECK-NEXT:    store i64 [[TMP3]], i64* [[P:%.*]], align 1
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, i64* [[P]], i32 1
+; CHECK-NEXT:    store i64 [[TMP3]], ptr [[P:%.*]], align 1
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[PTR_PHI_ELSE:%.*]] = phi i64* [ [[TMP4]], [[COND_STORE]] ], [ [[P]], [[TMP0:%.*]] ]
+; CHECK-NEXT:    [[PTR_PHI_ELSE:%.*]] = phi ptr [ [[TMP4]], [[COND_STORE]] ], [ [[P]], [[TMP0:%.*]] ]
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i2 [[SCALAR_MASK]], -2
 ; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i2 [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[TMP6]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.store1:
 ; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x i64> [[DATA]], i64 1
-; CHECK-NEXT:    store i64 [[TMP7]], i64* [[PTR_PHI_ELSE]], align 1
+; CHECK-NEXT:    store i64 [[TMP7]], ptr [[PTR_PHI_ELSE]], align 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.compressstore.v2i64.p0v2i64(<2 x i64> %data, i64* %p, <2 x i1> %mask)
+  call void @llvm.masked.compressstore.v2i64.p0(<2 x i64> %data, ptr %p, <2 x i1> %mask)
   ret void
 }
 
-define void @scalarize_v2i64_ones_mask(i64* %p, <2 x i64> %data) {
+define void @scalarize_v2i64_ones_mask(ptr %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_ones_mask(
 ; CHECK-NEXT:    [[ELT0:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i32 0
-; CHECK-NEXT:    store i64 [[ELT0]], i64* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-NEXT:    store i64 [[ELT0]], ptr [[TMP1]], align 1
 ; CHECK-NEXT:    [[ELT1:%.*]] = extractelement <2 x i64> [[DATA]], i64 1
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[P]], i32 1
-; CHECK-NEXT:    store i64 [[ELT1]], i64* [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
+; CHECK-NEXT:    store i64 [[ELT1]], ptr [[TMP2]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.compressstore.v2i64.p0v2i64(<2 x i64> %data, i64* %p, <2 x i1> <i1 true, i1 true>)
+  call void @llvm.masked.compressstore.v2i64.p0(<2 x i64> %data, ptr %p, <2 x i1> <i1 true, i1 true>)
   ret void
 }
 
-define void @scalarize_v2i64_zero_mask(i64* %p, <2 x i64> %data) {
+define void @scalarize_v2i64_zero_mask(ptr %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_zero_mask(
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.compressstore.v2i64.p0v2i64(<2 x i64> %data, i64* %p, <2 x i1> <i1 false, i1 false>)
+  call void @llvm.masked.compressstore.v2i64.p0(<2 x i64> %data, ptr %p, <2 x i1> <i1 false, i1 false>)
   ret void
 }
 
-define void @scalarize_v2i64_const_mask(i64* %p, <2 x i64> %data) {
+define void @scalarize_v2i64_const_mask(ptr %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
 ; CHECK-NEXT:    [[ELT1:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 1
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i32 0
-; CHECK-NEXT:    store i64 [[ELT1]], i64* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-NEXT:    store i64 [[ELT1]], ptr [[TMP1]], align 1
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.compressstore.v2i64.p0v2i64(<2 x i64> %data, i64* %p, <2 x i1> <i1 false, i1 true>)
+  call void @llvm.masked.compressstore.v2i64.p0(<2 x i64> %data, ptr %p, <2 x i1> <i1 false, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.compressstore.v2i64.p0v2i64(<2 x i64>, i64*, <2 x i1>)
+declare void @llvm.masked.compressstore.v2i64.p0(<2 x i64>, ptr, <2 x i1>)

diff  --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-expandload.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-expandload.ll
index f2b5ff822848c..d67c121628bf4 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-expandload.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-expandload.ll
@@ -1,69 +1,69 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=x86_64-linux-gnu | FileCheck %s
 
-define <2 x i64> @scalarize_v2i64(i64* %p, <2 x i1> %mask, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64(ptr %p, <2 x i1> %mask, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64(
 ; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.load:
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[P:%.*]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr [[P:%.*]], align 1
 ; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP3]], i64 0
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, i64* [[P]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
 ; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP4]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-NEXT:    [[PTR_PHI_ELSE:%.*]] = phi i64* [ [[TMP5]], [[COND_LOAD]] ], [ [[P]], [[TMP0]] ]
+; CHECK-NEXT:    [[PTR_PHI_ELSE:%.*]] = phi ptr [ [[TMP5]], [[COND_LOAD]] ], [ [[P]], [[TMP0]] ]
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
 ; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
 ; CHECK-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.load1:
-; CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* [[PTR_PHI_ELSE]], align 1
+; CHECK-NEXT:    [[TMP8:%.*]] = load i64, ptr [[PTR_PHI_ELSE]], align 1
 ; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP8]], i64 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
 ; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP9]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-NEXT:    ret <2 x i64> [[RES_PHI_ELSE3]]
 ;
-  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64* %p, <2 x i1> %mask, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0(ptr %p, <2 x i1> %mask, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_ones_mask(i64* %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_ones_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_ones_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i32 0
-; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, i64* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, ptr [[TMP1]], align 1
 ; CHECK-NEXT:    [[RES0:%.*]] = insertelement <2 x i64> poison, i64 [[LOAD0]], i64 0
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[P]], i32 1
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, ptr [[TMP2]], align 1
 ; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[RES0]], i64 [[LOAD1]], i64 1
 ; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x i64> [[RES1]], <2 x i64> [[PASSTHRU:%.*]], <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    ret <2 x i64> [[TMP3]]
 ;
-  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64* %p, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0(ptr %p, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_zero_mask(i64* %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_zero_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_zero_mask(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i64> undef, <2 x i64> [[PASSTHRU:%.*]], <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
-  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64* %p, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0(ptr %p, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_const_mask(i64* %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_const_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i32 0
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, ptr [[TMP1]], align 1
 ; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> <i64 undef, i64 poison>, i64 [[LOAD1]], i64 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i64> [[RES1]], <2 x i64> [[PASSTHRU:%.*]], <2 x i32> <i32 2, i32 1>
 ; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
 ;
-  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64* %p, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.expandload.v2i64.p0(ptr %p, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-declare <2 x i64> @llvm.masked.expandload.v2i64.p0v2i64(i64*,  <2 x i1>, <2 x i64>)
+declare <2 x i64> @llvm.masked.expandload.v2i64.p0(ptr,  <2 x i1>, <2 x i64>)

diff  --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll
index 20791e7ee9176..e62f4370ca29b 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-gather.ll
@@ -1,15 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=x86_64-linux-gnu | FileCheck %s
 
-define <2 x i64> @scalarize_v2i64(<2 x i64*> %p, <2 x i1> %mask, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64(<2 x ptr> %p, <2 x i1> %mask, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64(
 ; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.load:
-; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x i64*> [[P:%.*]], i64 0
-; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, i64* [[PTR0]], align 8
+; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x ptr> [[P:%.*]], i64 0
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, ptr [[PTR0]], align 8
 ; CHECK-NEXT:    [[RES0:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[LOAD0]], i64 0
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
@@ -18,49 +18,49 @@ define <2 x i64> @scalarize_v2i64(<2 x i64*> %p, <2 x i1> %mask, <2 x i64> %pass
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i2 [[TMP3]], 0
 ; CHECK-NEXT:    br i1 [[TMP4]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.load1:
-; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P]], i64 1
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[PTR1]], align 8
+; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x ptr> [[P]], i64 1
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, ptr [[PTR1]], align 8
 ; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[LOAD1]], i64 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
 ; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[RES1]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-NEXT:    ret <2 x i64> [[RES_PHI_ELSE3]]
 ;
-  %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> %p, i32 8, <2 x i1> %mask, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr> %p, i32 8, <2 x i1> %mask, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_ones_mask(<2 x i64*> %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_ones_mask(<2 x ptr> %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_ones_mask(
-; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x i64*> [[P:%.*]], i64 0
-; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, i64* [[PTR0]], align 8
+; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x ptr> [[P:%.*]], i64 0
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i64, ptr [[PTR0]], align 8
 ; CHECK-NEXT:    [[RES0:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[LOAD0]], i64 0
-; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P]], i64 1
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[PTR1]], align 8
+; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x ptr> [[P]], i64 1
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, ptr [[PTR1]], align 8
 ; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[RES0]], i64 [[LOAD1]], i64 1
 ; CHECK-NEXT:    ret <2 x i64> [[RES1]]
 ;
-  %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> %p, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr> %p, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_zero_mask(<2 x i64*> %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_zero_mask(<2 x ptr> %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_zero_mask(
 ; CHECK-NEXT:    ret <2 x i64> [[PASSTHRU:%.*]]
 ;
-  %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> %p, i32 8, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr> %p, i32 8, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_const_mask(<2 x i64*> %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_const_mask(<2 x ptr> %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
-; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P:%.*]], i64 1
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, i64* [[PTR1]], align 8
+; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x ptr> [[P:%.*]], i64 1
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i64, ptr [[PTR1]], align 8
 ; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[LOAD1]], i64 1
 ; CHECK-NEXT:    ret <2 x i64> [[RES1]]
 ;
-  %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr> %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-declare <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*>, i32, <2 x i1>, <2 x i64>)
+declare <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i64>)

diff  --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll
index c3820928af5f9..4a21a42b35ac0 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-load.ll
@@ -1,128 +1,123 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=x86_64-linux-gnu | FileCheck %s
 
-define <2 x i64> @scalarize_v2i64(<2 x i64>* %p, <2 x i1> %mask, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64(ptr %p, <2 x i1> %mask, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.load:
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP5]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP4]], i64 0
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
+; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i64> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.load1:
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP9]], align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP10]], i64 1
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <2 x i64> [[RES_PHI_ELSE]], i64 [[TMP9]], i64 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
-; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
+; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i64> [ [[TMP10]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-NEXT:    ret <2 x i64> [[RES_PHI_ELSE3]]
 ;
-  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 128, <2 x i1> %mask, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr %p, i32 128, <2 x i1> %mask, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_ones_mask(<2 x i64>* %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_ones_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_ones_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[P:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[P:%.*]], align 8
 ; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
 ;
-  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr %p, i32 8, <2 x i1> <i1 true, i1 true>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_zero_mask(<2 x i64>* %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_zero_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_zero_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-NEXT:    ret <2 x i64> [[PASSTHRU:%.*]]
 ;
-  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr %p, i32 8, <2 x i1> <i1 false, i1 false>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
-define <2 x i64> @scalarize_v2i64_const_mask(<2 x i64>* %p, <2 x i64> %passthru) {
+define <2 x i64> @scalarize_v2i64_const_mask(ptr %p, <2 x i64> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP3]], i64 1
-; CHECK-NEXT:    ret <2 x i64> [[TMP4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> [[PASSTHRU:%.*]], i64 [[TMP2]], i64 1
+; CHECK-NEXT:    ret <2 x i64> [[TMP3]]
 ;
-  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
+  %ret = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr %p, i32 8, <2 x i1> <i1 false, i1 true>, <2 x i64> %passthru)
   ret <2 x i64> %ret
 }
 
 ; This use a byte sized but non power of 2 element size. This used to crash due to bad alignment calculation.
-define <2 x i24> @scalarize_v2i24(<2 x i24>* %p, <2 x i1> %mask, <2 x i24> %passthru) {
+define <2 x i24> @scalarize_v2i24(ptr %p, <2 x i1> %mask, <2 x i24> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i24(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i24>* [[P:%.*]] to i24*
 ; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.load:
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = load i24, i24* [[TMP4]], align 1
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP5]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i24, ptr [[P:%.*]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = load i24, ptr [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i24> [[PASSTHRU:%.*]], i24 [[TMP4]], i64 0
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i24> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
+; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i24> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.load1:
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i24, i24* [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP10:%.*]] = load i24, i24* [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP10]], i64 1
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i24, ptr [[P]], i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = load i24, ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <2 x i24> [[RES_PHI_ELSE]], i24 [[TMP9]], i64 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
-; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i24> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
+; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i24> [ [[TMP10]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-NEXT:    ret <2 x i24> [[RES_PHI_ELSE3]]
 ;
-  %ret = call <2 x i24> @llvm.masked.load.v2i24.p0v2i24(<2 x i24>* %p, i32 8, <2 x i1> %mask, <2 x i24> %passthru)
+  %ret = call <2 x i24> @llvm.masked.load.v2i24.p0(ptr %p, i32 8, <2 x i1> %mask, <2 x i24> %passthru)
   ret <2 x i24> %ret
 }
 
 ; This use a byte sized but non power of 2 element size. This used to crash due to bad alignment calculation.
-define <2 x i48> @scalarize_v2i48(<2 x i48>* %p, <2 x i1> %mask, <2 x i48> %passthru) {
+define <2 x i48> @scalarize_v2i48(ptr %p, <2 x i1> %mask, <2 x i48> %passthru) {
 ; CHECK-LABEL: @scalarize_v2i48(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i48>* [[P:%.*]] to i48*
 ; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-NEXT:    br i1 [[TMP3]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.load:
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = load i48, i48* [[TMP4]], align 2
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP5]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i48, ptr [[P:%.*]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = load i48, ptr [[TMP3]], align 2
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i48> [[PASSTHRU:%.*]], i48 [[TMP4]], i64 0
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i48> [ [[TMP6]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
-; CHECK-NEXT:    [[TMP7:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i2 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[TMP8]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
+; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i48> [ [[TMP5]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[TMP7]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.load1:
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i48, i48* [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP10:%.*]] = load i48, i48* [[TMP9]], align 2
-; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP10]], i64 1
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i48, ptr [[P]], i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = load i48, ptr [[TMP8]], align 2
+; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <2 x i48> [[RES_PHI_ELSE]], i48 [[TMP9]], i64 1
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
-; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i48> [ [[TMP11]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
+; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i48> [ [[TMP10]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
 ; CHECK-NEXT:    ret <2 x i48> [[RES_PHI_ELSE3]]
 ;
-  %ret = call <2 x i48> @llvm.masked.load.v2i48.p0v2i48(<2 x i48>* %p, i32 16, <2 x i1> %mask, <2 x i48> %passthru)
+  %ret = call <2 x i48> @llvm.masked.load.v2i48.p0(ptr %p, i32 16, <2 x i1> %mask, <2 x i48> %passthru)
   ret <2 x i48> %ret
 }
 
-declare <2 x i24> @llvm.masked.load.v2i24.p0v2i24(<2 x i24>*, i32, <2 x i1>, <2 x i24>)
-declare <2 x i48> @llvm.masked.load.v2i48.p0v2i48(<2 x i48>*, i32, <2 x i1>, <2 x i48>)
-declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)
+declare <2 x i24> @llvm.masked.load.v2i24.p0(ptr, i32, <2 x i1>, <2 x i24>)
+declare <2 x i48> @llvm.masked.load.v2i48.p0(ptr, i32, <2 x i1>, <2 x i48>)
+declare <2 x i64> @llvm.masked.load.v2i64.p0(ptr, i32, <2 x i1>, <2 x i64>)

diff  --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-scatter.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-scatter.ll
index 8b0dad8cf3605..89f471b9b16d5 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-scatter.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-scatter.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=x86_64-linux-gnu | FileCheck %s
 
-define void @scalarize_v2i64(<2 x i64*> %p, <2 x i1> %mask, <2 x i64> %value) {
+define void @scalarize_v2i64(<2 x ptr> %p, <2 x i1> %mask, <2 x i64> %value) {
 ; CHECK-LABEL: @scalarize_v2i64(
 ; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
@@ -9,8 +9,8 @@ define void @scalarize_v2i64(<2 x i64*> %p, <2 x i1> %mask, <2 x i64> %value) {
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.store:
 ; CHECK-NEXT:    [[ELT0:%.*]] = extractelement <2 x i64> [[VALUE:%.*]], i64 0
-; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x i64*> [[P:%.*]], i64 0
-; CHECK-NEXT:    store i64 [[ELT0]], i64* [[PTR0]], align 8
+; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x ptr> [[P:%.*]], i64 0
+; CHECK-NEXT:    store i64 [[ELT0]], ptr [[PTR0]], align 8
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i2 [[SCALAR_MASK]], -2
@@ -18,47 +18,47 @@ define void @scalarize_v2i64(<2 x i64*> %p, <2 x i1> %mask, <2 x i64> %value) {
 ; CHECK-NEXT:    br i1 [[TMP4]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.store1:
 ; CHECK-NEXT:    [[ELT1:%.*]] = extractelement <2 x i64> [[VALUE]], i64 1
-; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P]], i64 1
-; CHECK-NEXT:    store i64 [[ELT1]], i64* [[PTR1]], align 8
+; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x ptr> [[P]], i64 1
+; CHECK-NEXT:    store i64 [[ELT1]], ptr [[PTR1]], align 8
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64> %value, <2 x i64*> %p, i32 8, <2 x i1> %mask)
+  call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> %value, <2 x ptr> %p, i32 8, <2 x i1> %mask)
   ret void
 }
 
-define void @scalarize_v2i64_ones_mask(<2 x i64*> %p, <2 x i64> %value) {
+define void @scalarize_v2i64_ones_mask(<2 x ptr> %p, <2 x i64> %value) {
 ; CHECK-LABEL: @scalarize_v2i64_ones_mask(
 ; CHECK-NEXT:    [[ELT0:%.*]] = extractelement <2 x i64> [[VALUE:%.*]], i64 0
-; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x i64*> [[P:%.*]], i64 0
-; CHECK-NEXT:    store i64 [[ELT0]], i64* [[PTR0]], align 8
+; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x ptr> [[P:%.*]], i64 0
+; CHECK-NEXT:    store i64 [[ELT0]], ptr [[PTR0]], align 8
 ; CHECK-NEXT:    [[ELT1:%.*]] = extractelement <2 x i64> [[VALUE]], i64 1
-; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P]], i64 1
-; CHECK-NEXT:    store i64 [[ELT1]], i64* [[PTR1]], align 8
+; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x ptr> [[P]], i64 1
+; CHECK-NEXT:    store i64 [[ELT1]], ptr [[PTR1]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64> %value, <2 x i64*> %p, i32 8, <2 x i1> <i1 true, i1 true>)
+  call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> %value, <2 x ptr> %p, i32 8, <2 x i1> <i1 true, i1 true>)
   ret void
 }
 
-define void @scalarize_v2i64_zero_mask(<2 x i64*> %p, <2 x i64> %value) {
+define void @scalarize_v2i64_zero_mask(<2 x ptr> %p, <2 x i64> %value) {
 ; CHECK-LABEL: @scalarize_v2i64_zero_mask(
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64> %value, <2 x i64*> %p, i32 8, <2 x i1> <i1 false, i1 false>)
+  call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> %value, <2 x ptr> %p, i32 8, <2 x i1> <i1 false, i1 false>)
   ret void
 }
 
-define void @scalarize_v2i64_const_mask(<2 x i64*> %p, <2 x i64> %value) {
+define void @scalarize_v2i64_const_mask(<2 x ptr> %p, <2 x i64> %value) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
 ; CHECK-NEXT:    [[ELT1:%.*]] = extractelement <2 x i64> [[VALUE:%.*]], i64 1
-; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x i64*> [[P:%.*]], i64 1
-; CHECK-NEXT:    store i64 [[ELT1]], i64* [[PTR1]], align 8
+; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x ptr> [[P:%.*]], i64 1
+; CHECK-NEXT:    store i64 [[ELT1]], ptr [[PTR1]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64> %value, <2 x i64*> %p, i32 8, <2 x i1> <i1 false, i1 true>)
+  call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> %value, <2 x ptr> %p, i32 8, <2 x i1> <i1 false, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64>, <2 x i64*>, i32, <2 x i1>)
+declare void @llvm.masked.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, i32, <2 x i1>)

diff  --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll
index b9fbeeabfac74..601411174681f 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/X86/expand-masked-store.ll
@@ -1,62 +1,59 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S %s -scalarize-masked-mem-intrin -mtriple=x86_64-linux-gnu | FileCheck %s
 
-define void @scalarize_v2i64(<2 x i64>* %p, <2 x i1> %mask, <2 x i64> %data) {
+define void @scalarize_v2i64(ptr %p, <2 x i1> %mask, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
-; CHECK-NEXT:    [[TMP2:%.*]] = and i2 [[SCALAR_MASK]], 1
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i2 [[TMP2]], 0
-; CHECK-NEXT:    br i1 [[TMP3]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
 ; CHECK:       cond.store:
-; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 0
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 0
-; CHECK-NEXT:    store i64 [[TMP4]], i64* [[TMP5]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 0
+; CHECK-NEXT:    store i64 [[TMP3]], ptr [[TMP4]], align 8
 ; CHECK-NEXT:    br label [[ELSE]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
-; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
-; CHECK-NEXT:    br i1 [[TMP7]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i2 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[TMP6]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
 ; CHECK:       cond.store1:
-; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i64> [[DATA]], i64 1
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-NEXT:    store i64 [[TMP8]], i64* [[TMP9]], align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x i64> [[DATA]], i64 1
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[P]], i32 1
+; CHECK-NEXT:    store i64 [[TMP7]], ptr [[TMP8]], align 8
 ; CHECK-NEXT:    br label [[ELSE2]]
 ; CHECK:       else2:
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %data, <2 x i64>* %p, i32 128, <2 x i1> %mask)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %data, ptr %p, i32 128, <2 x i1> %mask)
   ret void
 }
 
-define void @scalarize_v2i64_ones_mask(<2 x i64>* %p, <2 x i64> %data) {
+define void @scalarize_v2i64_ones_mask(ptr %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_ones_mask(
-; CHECK-NEXT:    store <2 x i64> [[DATA:%.*]], <2 x i64>* [[P:%.*]], align 8
+; CHECK-NEXT:    store <2 x i64> [[DATA:%.*]], ptr [[P:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %data, <2 x i64>* %p, i32 8, <2 x i1> <i1 true, i1 true>)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %data, ptr %p, i32 8, <2 x i1> <i1 true, i1 true>)
   ret void
 }
 
-define void @scalarize_v2i64_zero_mask(<2 x i64>* %p, <2 x i64> %data) {
+define void @scalarize_v2i64_zero_mask(ptr %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_zero_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %data, <2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 false>)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %data, ptr %p, i32 8, <2 x i1> <i1 false, i1 false>)
   ret void
 }
 
-define void @scalarize_v2i64_const_mask(<2 x i64>* %p, <2 x i64> %data) {
+define void @scalarize_v2i64_const_mask(ptr %p, <2 x i64> %data) {
 ; CHECK-LABEL: @scalarize_v2i64_const_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64>* [[P:%.*]] to i64*
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 1
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, i64* [[TMP1]], i32 1
-; CHECK-NEXT:    store i64 [[TMP2]], i64* [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[DATA:%.*]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i32 1
+; CHECK-NEXT:    store i64 [[TMP1]], ptr [[TMP2]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %data, <2 x i64>* %p, i32 8, <2 x i1> <i1 false, i1 true>)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %data, ptr %p, i32 8, <2 x i1> <i1 false, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2i64.p0(<2 x i64>, ptr, i32, <2 x i1>)


        


More information about the llvm-commits mailing list