[clang] [Clang][OpenMP] Bug fix: Default clause variable category (PR #165276)
via cfe-commits
cfe-commits at lists.llvm.org
Mon Oct 27 10:07:22 PDT 2025
https://github.com/SunilKuravinakop updated https://github.com/llvm/llvm-project/pull/165276
From 822dd156ad30bc719fd454213c1a82b044e78c73 Mon Sep 17 00:00:00 2001
From: Sunil Kuravinakop <kuravina at pe31.hpc.amslabs.hpecorp.net>
Date: Mon, 27 Oct 2025 11:20:55 -0500
Subject: [PATCH] 1) In the default clause, changing the check for the
 allocatable type in the variable category. 2) Adding a new test case,
 clang/test/OpenMP/parallel_default_variableCategory_codegen.cpp. 3) Addressing
 new review comments in https://github.com/llvm/llvm-project/pull/157063
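For context (not part of the commit), a minimal sketch of the OpenMP 6.0
default(data-sharing-attribute : variable-category) form this change affects,
mirroring the constructs used in the new test. The function name "sketch" is
illustrative, and it assumes a clang carrying this patch invoked with
-fopenmp -fopenmp-version=60:

  // Minimal sketch, not part of the patch: the "default" clause restricted to
  // the "allocatable" variable category.
  #include <vector>

  void sketch(int n) {                     // hypothetical helper
    int x = 0;                             // scalar: outside the allocatable category
    std::vector<int *> arr(n, nullptr);    // recognized as allocatable by this patch

    #pragma omp parallel masked num_threads(2)
    {
      // Only allocatable variables (pointers and the recognized container /
      // smart-pointer specializations such as std::vector) pick up "shared"
      // from the default clause; x is named explicitly in shared(x), as in
      // the new test.
      #pragma omp task default(shared:allocatable) shared(x)
      for (int i = 0; i < n; i++)
        arr[i] = &x;
      #pragma omp taskwait
    }
  }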
---
clang/lib/Sema/SemaOpenMP.cpp | 25 +++-
...allel_default_variableCategory_codegen.cpp | 117 ++++++++++++++++++
2 files changed, 137 insertions(+), 5 deletions(-)
create mode 100644 clang/test/OpenMP/parallel_default_variableCategory_codegen.cpp
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 5b5b1b685e153..f677be7f02583 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -1314,6 +1314,22 @@ static std::string getOpenMPClauseNameForDiag(OpenMPClauseKind C) {
return getOpenMPClauseName(C).str();
}
+static bool isAllocatableType(QualType QT) {
+ if (QT->isPointerType())
+ return true;
+ QT = QT.getCanonicalType().getUnqualifiedType();
+ if (const CXXRecordDecl *RD = QT->getAsCXXRecordDecl()) {
+ if (isa<ClassTemplateSpecializationDecl>(RD)) {
+ std::string QName = RD->getQualifiedNameAsString();
+ return (QName == "std::vector" || QName == "vector" ||
+ QName == "std::unique_ptr" || QName == "unique_ptr" ||
+ QName == "std::shared_ptr" || QName == "shared_ptr" ||
+ QName == "llvm::SmallVector" || QName == "SmallVector");
+ }
+ }
+ return false;
+}
+
DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
ValueDecl *D) const {
D = getCanonicalDecl(D);
@@ -1370,20 +1386,19 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
DefaultDataSharingAttributes IterDA = Iter->DefaultAttr;
switch (Iter->DefaultVCAttr) {
case DSA_VC_aggregate:
- if (!VD->getType()->isAggregateType())
+ if (!D->getType()->isAggregateType())
IterDA = DSA_none;
break;
case DSA_VC_allocatable:
- if (!(VD->getType()->isPointerType() ||
- VD->getType()->isVariableArrayType()))
+ if (!isAllocatableType(D->getType()))
IterDA = DSA_none;
break;
case DSA_VC_pointer:
- if (!VD->getType()->isPointerType())
+ if (!D->getType()->isPointerType())
IterDA = DSA_none;
break;
case DSA_VC_scalar:
- if (!VD->getType()->isScalarType())
+ if (!D->getType()->isScalarType())
IterDA = DSA_none;
break;
case DSA_VC_all:
diff --git a/clang/test/OpenMP/parallel_default_variableCategory_codegen.cpp b/clang/test/OpenMP/parallel_default_variableCategory_codegen.cpp
new file mode 100644
index 0000000000000..b0674158f57e5
--- /dev/null
+++ b/clang/test/OpenMP/parallel_default_variableCategory_codegen.cpp
@@ -0,0 +1,117 @@
+// RUN: %clangxx -DOMP60 -Xclang -verify -Wno-vla -fopenmp -fopenmp-version=60 -x c++ -S -emit-llvm %s -o - | FileCheck --check-prefixes=OMP60 %s
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+#include <vector>
+
+int global;
+#define VECTOR_SIZE 4
+int main (int argc, char **argv) {
+ int i,n;
+ int x;
+
+ n = VECTOR_SIZE;
+
+ #pragma omp parallel masked firstprivate(x) num_threads(2)
+ {
+ int *xPtr = nullptr;
+ // scalar
+ #pragma omp task default(shared:scalar)
+ {
+ xPtr = &x;
+ }
+ #pragma omp taskwait
+
+ // pointer
+ #pragma omp task default(shared:pointer) shared(x)
+ {
+ xPtr = &x;
+ }
+ #pragma omp taskwait
+ }
+
+ int *aggregate[VECTOR_SIZE] = {0,0,0,0};
+ std::vector<int *> arr(VECTOR_SIZE,0);
+
+ #pragma omp parallel masked num_threads(2)
+ {
+ // aggregate
+ #pragma omp task default(shared:aggregate)
+ for(i=0;i<n;i++) {
+ aggregate[i] = &x;
+ }
+ #pragma omp taskwait
+
+ #pragma omp task default(shared:aggregate) shared(x)
+ for(i=0;i<n;i++) {
+ aggregate[i] = &x;
+ }
+ #pragma omp taskwait
+
+ // allocatable
+ #pragma omp task default(shared:allocatable)
+ for(i=0;i<n;i++) {
+ arr[i] = &x;
+ }
+ #pragma omp taskwait
+
+ #pragma omp task default(shared:allocatable) shared(x)
+ for(i=0;i<n;i++) {
+ arr[i] = &x;
+ }
+ #pragma omp taskwait
+
+ // all
+ #pragma omp task default(shared:all)
+ for(i=0;i<n;i++) {
+ aggregate[i] = &x;
+ }
+ #pragma omp taskwait
+ }
+}
+
+#endif
+
+// OMP60-LABEL: define {{.*}}main.omp_outlined{{.*}}
+// OMP60-NEXT: entry:
+// OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60: [[X_ADDR:%.*]] = alloca{{.*}}
+// OMP60: [[xPTR:%.*]] = alloca{{.*}}
+// OMP60: store ptr null, ptr [[xPTR]]{{.*}}
+// OMP60: store ptr [[xPTR]]{{.*}}
+// OMP60: store ptr [[X_ADDR]]{{.*}}
+// OMP60-NEXT: {{.*}}call{{.*}}__kmpc_omp_task_alloc{{.*}}
+// OMP60: ret void
+//
+// OMP60: define {{.*}}main.omp_outlined{{.*}}
+// OMP60-NEXT: entry:
+// OMP60-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// OMP60: [[I_ADDR:%.*]] = alloca{{.*}}
+// OMP60-NEXT: [[N_ADDR:%.*]] = alloca{{.*}}
+// OMP60-NEXT: [[AGGREGATE_ADDR:%.*]] = alloca{{.*}}
+// OMP60-NEXT: [[X_ADDR:%.*]] = alloca{{.*}}
+// OMP60-NEXT: [[ARR_ADDR:%.*]] = alloca{{.*}}
+// OMP60: [[TMP0:%.*]] = load{{.*}}[[I_ADDR]]
+// OMP60-NEXT: [[TMP1:%.*]] = load{{.*}}[[N_ADDR]]
+// OMP60-NEXT: [[TMP2:%.*]] = load{{.*}}[[AGGREGATE_ADDR]]
+// OMP60-NEXT: [[TMP3:%.*]] = load{{.*}}[[X_ADDR]]
+// OMP60-NEXT: [[TMP4:%.*]] = load{{.*}}[[ARR_ADDR]]
+// OMP60: store ptr [[TMP2]]{{.*}}
+// OMP60-NEXT: {{.*}}call{{.*}}__kmpc_omp_task_alloc{{.*}}
+// OMP60: store ptr [[TMP2]]{{.*}}
+// OMP60: store ptr [[TMP3]]{{.*}}
+// OMP60-NEXT: {{.*}}call{{.*}}__kmpc_omp_task_alloc{{.*}}
+// OMP60: store ptr [[TMP4]]{{.*}}
+// OMP60-NEXT: {{.*}}call{{.*}}__kmpc_omp_task_alloc{{.*}}
+// OMP60: store ptr [[TMP4]]{{.*}}
+// OMP60: store ptr [[TMP3]]{{.*}}
+// OMP60-NEXT: {{.*}}call{{.*}}__kmpc_omp_task_alloc{{.*}}
+// OMP60: store ptr [[TMP0]]{{.*}}
+// OMP60: store ptr [[TMP1]]{{.*}}
+// OMP60: store ptr [[TMP2]]{{.*}}
+// OMP60: store ptr [[TMP3]]{{.*}}
+// OMP60-NEXT: {{.*}}call{{.*}}__kmpc_omp_task_alloc{{.*}}
+// OMP60: ret void