[flang-commits] [flang] d9c2c6f - [flang] Add PowerPC vec_stxvp and vsx_stxvp intrinsic

Kelvin Li via flang-commits flang-commits at lists.llvm.org
Mon Aug 14 12:38:56 PDT 2023


Author: Kelvin Li
Date: 2023-08-14T15:38:29-04:00
New Revision: d9c2c6fcb10945a77be2a00f15932b10b87886a7

URL: https://github.com/llvm/llvm-project/commit/d9c2c6fcb10945a77be2a00f15932b10b87886a7
DIFF: https://github.com/llvm/llvm-project/commit/d9c2c6fcb10945a77be2a00f15932b10b87886a7.diff

LOG: [flang] Add PowerPC vec_stxvp and vsx_stxvp intrinsic

Differential Revision: https://reviews.llvm.org/D157768

Added: 
    

Modified: 
    flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
    flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
    flang/module/__ppc_intrinsics.f90
    flang/test/Lower/PowerPC/ppc-pwr10-vec-intrinsics.f90
    flang/test/Lower/PowerPC/ppc-vec-store.f90

Removed: 
    


################################################################################
diff --git a/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h b/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
index 31d0400d3108df..964d9726a08e8d 100644
--- a/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
+++ b/flang/include/flang/Optimizer/Builder/PPCIntrinsicCall.h
@@ -48,6 +48,7 @@ enum class VecOp {
   St,
   Ste,
   Stxv,
+  Stxvp,
   Sub,
   Xor,
   Xst,

diff --git a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
index dedaf0b6619b8f..c0ba9809da6e7f 100644
--- a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
@@ -223,6 +223,11 @@ static constexpr IntrinsicHandler ppcHandlers[]{
          &PI::genVecXStore<VecOp::Stxv>),
      {{{"arg1", asValue}, {"arg2", asValue}, {"arg3", asAddr}}},
      /*isElemental=*/false},
+    {"__ppc_vec_stxvp",
+     static_cast<IntrinsicLibrary::SubroutineGenerator>(
+         &PI::genVecStore<VecOp::Stxvp>),
+     {{{"arg1", asValue}, {"arg2", asValue}, {"arg3", asAddr}}},
+     /*isElemental=*/false},
     {"__ppc_vec_sub",
      static_cast<IntrinsicLibrary::ExtendedGenerator>(
          &PI::genVecAddAndMulSubXor<VecOp::Sub>),
@@ -1788,6 +1793,11 @@ void PPCIntrinsicLibrary::genVecStore(llvm::ArrayRef<fir::ExtendedValue> args) {
       assert(false && "unknown type");
     break;
   }
+  case VecOp::Stxvp:
+    // __vector_pair type
+    stTy = mlir::VectorType::get(256, mlir::IntegerType::get(context, 1));
+    fname = "llvm.ppc.vsx.stxvp";
+    break;
   default:
     llvm_unreachable("invalid vector operation for generator");
   }
@@ -1798,11 +1808,18 @@ void PPCIntrinsicLibrary::genVecStore(llvm::ArrayRef<fir::ExtendedValue> args) {
 
   llvm::SmallVector<mlir::Value, 4> biArgs;
 
-  mlir::Value newArg1;
+  if (vop == VecOp::Stxvp) {
+    biArgs.push_back(argBases[0]);
+    biArgs.push_back(addr);
+    builder.create<fir::CallOp>(loc, funcOp, biArgs);
+    return;
+  }
+
   auto vecTyInfo{getVecTypeFromFirType(argBases[0].getType())};
   auto cnv{builder.createConvert(loc, vecTyInfo.toMlirVectorType(context),
                                  argBases[0])};
 
+  mlir::Value newArg1{nullptr};
   if (stTy != arg1TyInfo.toMlirVectorType(context))
     newArg1 = builder.create<mlir::vector::BitCastOp>(loc, stTy, cnv);
   else

diff --git a/flang/module/__ppc_intrinsics.f90 b/flang/module/__ppc_intrinsics.f90
index 936c50f92c83ce..8b2858c3922c0d 100644
--- a/flang/module/__ppc_intrinsics.f90
+++ b/flang/module/__ppc_intrinsics.f90
@@ -384,13 +384,76 @@ pure subroutine sub_vr##VKIND##ir##VKIND(arg1, arg2, arg3); \
     !dir$ ignore_tkr(r) arg3; \
   end subroutine ;
 
+! subroutine(__vector_pair, i, __vector_pair)
+  pure subroutine sub_vpi0vp(arg1, arg2, arg3)
+    __vector_pair, intent(in) :: arg1
+    integer(8), intent(in) :: arg2
+    !dir$ ignore_tkr(k) arg2
+    __vector_pair, intent(out) :: arg3
+    !dir$ ignore_tkr(r) arg3
+  end subroutine
+
+! subroutine(__vector_pair, i, vector(i))
+#define SUB_VPI0VI(VKIND) \
+  pure subroutine sub_vpi0vi##VKIND(arg1, arg2, arg3); \
+    __vector_pair, intent(in) :: arg1; \
+    integer(8), intent(in) :: arg2; \
+    !dir$ ignore_tkr(k) arg2; \
+    vector(integer(VKIND)), intent(out) :: arg3; \
+    !dir$ ignore_tkr(r) arg3; \
+  end subroutine;
+
+! subroutine(__vector_pair, i, vector(u))
+#define SUB_VPI0VU(VKIND) \
+  pure subroutine sub_vpi0vu##VKIND(arg1, arg2, arg3); \
+    __vector_pair, intent(in) :: arg1; \
+    integer(8), intent(in) :: arg2; \
+    !dir$ ignore_tkr(k) arg2; \
+    vector(unsigned(VKIND)), intent(out) :: arg3; \
+    !dir$ ignore_tkr(r) arg3; \
+  end subroutine;
+
+! subroutine(__vector_pair, i, vector(r))
+#define SUB_VPI0VR(VKIND) \
+  pure subroutine sub_vpi0vr##VKIND(arg1, arg2, arg3); \
+    __vector_pair, intent(in) :: arg1; \
+    integer(8), intent(in) :: arg2; \
+    !dir$ ignore_tkr(k) arg2; \
+    vector(real(VKIND)), intent(out) :: arg3; \
+    !dir$ ignore_tkr(r) arg3; \
+  end subroutine;
+
+! subroutine(__vector_pair, i, i)
+  pure subroutine sub_vpi0i0(arg1, arg2, arg3)
+    __vector_pair, intent(in) :: arg1
+    integer(8), intent(in) :: arg2
+    !dir$ ignore_tkr(k) arg2
+    integer(8), intent(out) :: arg3
+    !dir$ ignore_tkr(kr) arg3
+  end subroutine
+
+! subroutine(__vector_pair, i, r)
+  pure subroutine sub_vpi0r0(arg1, arg2, arg3)
+    __vector_pair, intent(in) :: arg1
+    integer(8), intent(in) :: arg2
+    !dir$ ignore_tkr(k) arg2
+    real(8), intent(out) :: arg3
+    !dir$ ignore_tkr(kr) arg3
+  end subroutine
+
   SUB_VIIVI(1) SUB_VIIVI(2) SUB_VIIVI(4) SUB_VIIVI(8)
   SUB_VUIVU(1) SUB_VUIVU(2) SUB_VUIVU(4) SUB_VUIVU(8)
   SUB_VRIVR(4) SUB_VRIVR(8)
   SUB_VIII(1) SUB_VIII(2) SUB_VIII(4) SUB_VIII(8)
   SUB_VUII(1) SUB_VUII(2) SUB_VUII(4) SUB_VUII(8)
   SUB_VRIR(4) SUB_VRIR(8)
+  SUB_VPI0VI(1) SUB_VPI0VI(2) SUB_VPI0VI(4) SUB_VPI0VI(8)
+  SUB_VPI0VU(1) SUB_VPI0VU(2) SUB_VPI0VU(4) SUB_VPI0VU(8)
+  SUB_VPI0VR(4) SUB_VPI0VR(8)
 
+#undef SUB_VPI0VR
+#undef SUB_VPI0VU
+#undef SUB_VPI0VI
 #undef SUB_VRIR
 #undef SUB_VUII
 #undef SUB_VIII
@@ -1328,4 +1391,53 @@ end function func_r8r8i
 #undef SUB_VU_I_I
 #undef SUB_VR_Ik_R
 
+!-----------------------------------------------------------------------
+! subroutine(__vector_pair, integer, __vector_pair/vector/integer/real)
+!-----------------------------------------------------------------------
+#define VP_I0_VI(NAME, VKIND) __ppc_##NAME##_vpi0vi##VKIND
+#define VP_I0_VU(NAME, VKIND) __ppc_##NAME##_vpi0vu##VKIND
+#define VP_I0_VR(NAME, VKIND) __ppc_##NAME##_vpi0vr##VKIND
+
+#define VEC_VP_I0_VI(NAME, VKIND) \
+  procedure(sub_vpi0vi##VKIND) :: VP_I0_VI(NAME, VKIND);
+#define VEC_VP_I0_VU(NAME, VKIND) \
+  procedure(sub_vpi0vu##VKIND) :: VP_I0_VU(NAME, VKIND);
+#define VEC_VP_I0_VR(NAME, VKIND) \
+  procedure(sub_vpi0vr##VKIND) :: VP_I0_VR(NAME, VKIND);
+
+! vec_stxvp
+  procedure(sub_vpi0vp) :: __ppc_vec_stxvp_vpi0vp0
+  procedure(sub_vpi0i0) :: __ppc_vec_stxvp_vpi0i0
+  procedure(sub_vpi0r0) :: __ppc_vec_stxvp_vpi0r0
+  VEC_VP_I0_VI(vec_stxvp, 1) VEC_VP_I0_VI(vec_stxvp, 2) VEC_VP_I0_VI(vec_stxvp, 4) VEC_VP_I0_VI(vec_stxvp, 8)
+  VEC_VP_I0_VU(vec_stxvp, 1) VEC_VP_I0_VU(vec_stxvp, 2) VEC_VP_I0_VU(vec_stxvp, 4) VEC_VP_I0_VU(vec_stxvp, 8)
+  VEC_VP_I0_VR(vec_stxvp, 4) VEC_VP_I0_VR(vec_stxvp, 8)
+  interface vec_stxvp
+     procedure :: __ppc_vec_stxvp_vpi0vp0
+     procedure :: __ppc_vec_stxvp_vpi0i0
+     procedure :: __ppc_vec_stxvp_vpi0r0
+     procedure :: VP_I0_VI(vec_stxvp, 1), VP_I0_VI(vec_stxvp, 2), VP_I0_VI(vec_stxvp, 4), VP_I0_VI(vec_stxvp, 8)
+     procedure :: VP_I0_VU(vec_stxvp, 1), VP_I0_VU(vec_stxvp, 2), VP_I0_VU(vec_stxvp, 4), VP_I0_VU(vec_stxvp, 8)
+     procedure :: VP_I0_VR(vec_stxvp, 4), VP_I0_VR(vec_stxvp, 8)
+  end interface vec_stxvp
+  public :: vec_stxvp
+
+! vsx_stxvp (alias to vec_stxvp)
+  interface vsx_stxvp
+     procedure :: __ppc_vec_stxvp_vpi0vp0
+     procedure :: __ppc_vec_stxvp_vpi0i0
+     procedure :: __ppc_vec_stxvp_vpi0r0
+     procedure :: VP_I0_VI(vec_stxvp, 1), VP_I0_VI(vec_stxvp, 2), VP_I0_VI(vec_stxvp, 4), VP_I0_VI(vec_stxvp, 8)
+     procedure :: VP_I0_VU(vec_stxvp, 1), VP_I0_VU(vec_stxvp, 2), VP_I0_VU(vec_stxvp, 4), VP_I0_VU(vec_stxvp, 8)
+     procedure :: VP_I0_VR(vec_stxvp, 4), VP_I0_VR(vec_stxvp, 8)
+  end interface vsx_stxvp
+  public :: vsx_stxvp
+
+#undef VEC_VP_I0_VR
+#undef VEC_VP_I0_VU
+#undef VEC_VP_I0_VI
+#undef VP_I0_VR
+#undef VP_I0_VU
+#undef VP_I0_VI
+
 end module __ppc_intrinsics

diff --git a/flang/test/Lower/PowerPC/ppc-pwr10-vec-intrinsics.f90 b/flang/test/Lower/PowerPC/ppc-pwr10-vec-intrinsics.f90
index 5ff0cd6e677412..ec16181f9d6cd6 100644
--- a/flang/test/Lower/PowerPC/ppc-pwr10-vec-intrinsics.f90
+++ b/flang/test/Lower/PowerPC/ppc-pwr10-vec-intrinsics.f90
@@ -25,3 +25,271 @@ end subroutine test_cvbf16spn
 !CHECK:  %3 = load <16 x i8>, ptr %2, align 16
 !CHECK:  %4 = call <16 x i8> @llvm.ppc.vsx.xvcvbf16spn(<16 x i8> %3)
 !CHECK:  store <16 x i8> %4, ptr %1, align 16
+
+!----------------------
+! vec_stxvp
+!----------------------
+
+      subroutine test_vec_stxvp_i1(vp, offset, v1)
+      integer(1) :: offset
+      vector(integer(2)) :: v1
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_i1
+
+!CHECK-LABEL: @test_vec_stxvp_i1_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i8, ptr %1, align 1
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i8 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_i8(vp, offset, v1)
+      integer(8) :: offset
+      vector(integer(8)) :: v1
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_i8
+
+!CHECK-LABEL: @test_vec_stxvp_i8_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i64, ptr %1, align 8
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i64 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vi2(vp, offset, v1)
+      integer(2) :: offset
+      vector(integer(2)) :: v1
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vi2
+
+!CHECK-LABEL: @test_vec_stxvp_vi2_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i16, ptr %1, align 2
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vi4(vp, offset, v1)
+      integer(2) :: offset
+      vector(integer(4)) :: v1
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vi4
+
+!CHECK-LABEL: @test_vec_stxvp_vi4_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i16, ptr %1, align 2
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vu2(vp, offset, v1)
+      integer(2) :: offset
+      vector(unsigned(2)) :: v1
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vu2
+
+!CHECK-LABEL: @test_vec_stxvp_vu2_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i16, ptr %1, align 2
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vu4(vp, offset, v1)
+      integer(2) :: offset
+      vector(unsigned(4)) :: v1
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vu4
+
+!CHECK-LABEL: @test_vec_stxvp_vu4_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i16, ptr %1, align 2
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vr4(vp, offset, v1)
+      integer(2) :: offset
+      vector(real(4)) :: v1
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vr4
+
+!CHECK-LABEL: @test_vec_stxvp_vr4_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i16, ptr %1, align 2
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vr8(vp, offset, v1)
+      integer(2) :: offset
+      vector(real(8)) :: v1
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vr8
+
+!CHECK-LABEL: @test_vec_stxvp_vr8_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i16, ptr %1, align 2
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vvp(vp, offset, v1)
+      integer(2) :: offset
+      __vector_pair :: v1
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vvp
+
+!CHECK-LABEL: @test_vec_stxvp_vvp_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i16, ptr %1, align 2
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vi2_arr(vp, offset, v1)
+      integer :: offset
+      vector(integer(2)) :: v1(10)
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vi2_arr
+
+!CHECK-LABEL: @test_vec_stxvp_vi2_arr_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i32, ptr %1, align 4
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vi4_arr(vp, offset, v1)
+      integer :: offset
+      vector(integer(4)) :: v1(10)
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vi4_arr
+
+!CHECK-LABEL: @test_vec_stxvp_vi4_arr_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i32, ptr %1, align 4
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vu2_arr(vp, offset, v1)
+      integer :: offset
+      vector(unsigned(2)) :: v1(11)
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vu2_arr
+
+!CHECK-LABEL: @test_vec_stxvp_vu2_arr_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i32, ptr %1, align 4
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vu4_arr(vp, offset, v1)
+      integer(8) :: offset
+      vector(unsigned(4)) :: v1(11,3)
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vu4_arr
+
+!CHECK-LABEL: @test_vec_stxvp_vu4_arr_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i64, ptr %1, align 8
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i64 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vr4_arr(vp, offset, v1)
+      integer :: offset
+      vector(real(4)) :: v1(10)
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vr4_arr
+
+!CHECK-LABEL: @test_vec_stxvp_vr4_arr_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i32, ptr %1, align 4
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vr8_arr(vp, offset, v1)
+      integer :: offset
+      vector(real(8)) :: v1(10)
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vr8_arr
+
+!CHECK-LABEL: @test_vec_stxvp_vr8_arr_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i32, ptr %1, align 4
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vec_stxvp_vp_arr(vp, offset, v1)
+      integer :: offset
+      __vector_pair :: v1(10)
+      __vector_pair :: vp
+      call vec_stxvp(vp, offset, v1)
+      end subroutine test_vec_stxvp_vp_arr
+
+!CHECK-LABEL: @test_vec_stxvp_vp_arr_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i32, ptr %1, align 4
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+!----------------------
+! vsx_stxvp
+!----------------------
+
+      subroutine test_vsx_stxvp_i1(vp, offset, v1)
+      integer(1) :: offset
+      vector(integer(2)) :: v1
+      __vector_pair :: vp
+      call vsx_stxvp(vp, offset, v1)
+      end subroutine test_vsx_stxvp_i1
+
+!CHECK-LABEL: @test_vsx_stxvp_i1_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i8, ptr %1, align 1
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i8 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vsx_stxvp_vi2(vp, offset, v1)
+      integer(2) :: offset
+      vector(integer(2)) :: v1
+      __vector_pair :: vp
+      call vsx_stxvp(vp, offset, v1)
+      end subroutine test_vsx_stxvp_vi2
+
+!CHECK-LABEL: @test_vsx_stxvp_vi2_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i16, ptr %1, align 2
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vsx_stxvp_vr8_arr(vp, offset, v1)
+      integer :: offset
+      vector(real(8)) :: v1(10)
+      __vector_pair :: vp
+      call vsx_stxvp(vp, offset, v1)
+      end subroutine test_vsx_stxvp_vr8_arr
+
+!CHECK-LABEL: @test_vsx_stxvp_vr8_arr_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i32, ptr %1, align 4
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])
+
+      subroutine test_vsx_stxvp_vp_arr(vp, offset, v1)
+      integer :: offset
+      __vector_pair :: v1(10)
+      __vector_pair :: vp
+      call vsx_stxvp(vp, offset, v1)
+      end subroutine test_vsx_stxvp_vp_arr
+
+!CHECK-LABEL: @test_vsx_stxvp_vp_arr_
+!CHECK:  %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
+!CHECK:  %[[offset:.*]] = load i32, ptr %1, align 4
+!CHECK:  %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
+!CHECK:  call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

diff --git a/flang/test/Lower/PowerPC/ppc-vec-store.f90 b/flang/test/Lower/PowerPC/ppc-vec-store.f90
index ddbb7fb2a879b7..8aa452d4110525 100644
--- a/flang/test/Lower/PowerPC/ppc-vec-store.f90
+++ b/flang/test/Lower/PowerPC/ppc-vec-store.f90
@@ -1204,4 +1204,3 @@ subroutine vec_xstw4_test_vi4i4vai4(arg1, arg2, arg3, i)
 ! CHECK: %[[trg:.*]] = getelementptr i8, ptr %[[elem]], i32 %[[arg2]]
 ! CHECK: store <4 x i32> %[[arg1]], ptr %[[trg]], align 1
 end subroutine vec_xstw4_test_vi4i4vai4
-


        


More information about the flang-commits mailing list