[llvm] [TTI] Return scalable size for scalable register kinds in getRegisterBitWidth (PR #186171)
Ramkumar Ramachandra via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 13 09:47:14 PDT 2026
https://github.com/artagnon updated https://github.com/llvm/llvm-project/pull/186171
>From 3306711fb6f398510323f149b1ae89f0902d41b7 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <artagnon at tenstorrent.com>
Date: Thu, 12 Mar 2026 16:15:06 +0000
Subject: [PATCH] [TTI] Return sane scalable default in getRegisterBitWidth
The base implementation previously returned a fixed 32-bit TypeSize
regardless of the requested register kind. Returning a scalable TypeSize
for RGK_ScalableVector allows us to write target-independent scalable-VF
tests for the VPlan routine narrowInterleaveGroups.
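
As a minimal sketch (not part of the patch) of what the one-line change
returns, assuming the TypeSize API from llvm/Support/TypeSize.h:

  #include "llvm/Support/TypeSize.h"
  #include <cassert>
  using namespace llvm;

  // TypeSize::get(32, /*Scalable=*/false) is equivalent to
  // TypeSize::getFixed(32): a plain 32-bit width, as before, for the
  // fixed register kinds. TypeSize::get(32, /*Scalable=*/true) is
  // equivalent to TypeSize::getScalable(32): vscale x 32 bits, where
  // only the minimum size is known at compile time.
  TypeSize demo(bool IsScalableKind) {
    TypeSize TS = TypeSize::get(32, IsScalableKind);
    assert(TS.getKnownMinValue() == 32);
    assert(TS.isScalable() == IsScalableKind);
    return TS;
  }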
---
.../llvm/Analysis/TargetTransformInfoImpl.h | 2 +-
.../narrow-interleave-groups-scalable-vf.ll | 105 ++++++++++++++++++
2 files changed, 106 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/Transforms/LoopVectorize/narrow-interleave-groups-scalable-vf.ll
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index cd1d9248936c3..cb7ca21f503d1 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -634,7 +634,7 @@ class TargetTransformInfoImplBase {
virtual TypeSize
getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
- return TypeSize::getFixed(32);
+ return TypeSize::get(32, K == TargetTransformInfo::RGK_ScalableVector);
}
virtual unsigned getMinVectorRegisterBitWidth() const { return 128; }
diff --git a/llvm/test/Transforms/LoopVectorize/narrow-interleave-groups-scalable-vf.ll b/llvm/test/Transforms/LoopVectorize/narrow-interleave-groups-scalable-vf.ll
new file mode 100644
index 0000000000000..a7dd9c267a4be
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/narrow-interleave-groups-scalable-vf.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 6
+; RUN: opt %s -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -enable-interleaved-mem-accesses -S | FileCheck %s
+; RUN: opt %s -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -enable-interleaved-mem-accesses -force-target-supports-scalable-vectors=true -scalable-vectorization=preferred -S | FileCheck %s --check-prefix=SCALABLE
+
+define void @scalablevf(ptr %dst.start, i8 %a, i8 %b) {
+; CHECK-LABEL: define void @scalablevf(
+; CHECK-SAME: ptr [[DST_START:%.*]], i8 [[A:%.*]], i8 [[B:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i8> poison, i8 [[B]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT]], <4 x i8> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i8> poison, i8 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT1]], <4 x i8> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST_START]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[NEXT_GEP]], align 1
+; CHECK-NEXT: [[TMP0:%.*]] = mul nuw <4 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP1:%.*]] = udiv <4 x i8> [[TMP0]], splat (i8 -1)
+; CHECK-NEXT: [[TMP2:%.*]] = add <4 x i8> [[BROADCAST_SPLAT2]], [[TMP1]]
+; CHECK-NEXT: store <4 x i8> [[TMP2]], ptr [[NEXT_GEP]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
+; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SCALABLE-LABEL: define void @scalablevf(
+; SCALABLE-SAME: ptr [[DST_START:%.*]], i8 [[A:%.*]], i8 [[B:%.*]]) {
+; SCALABLE-NEXT: [[ENTRY:.*:]]
+; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; SCALABLE-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
+; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 64, [[TMP1]]
+; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE: [[VECTOR_PH]]:
+; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 64, [[TMP2]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 64, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP4:%.*]] = mul i64 [[N_VEC]], 4
+; SCALABLE-NEXT: [[TMP5:%.*]] = mul i64 [[N_VEC]], 4
+; SCALABLE-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[DST_START]], i64 [[TMP5]]
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i8> poison, i8 [[B]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i8> poison, i8 [[A]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i8> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
+; SCALABLE: [[VECTOR_BODY]]:
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
+; SCALABLE-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST_START]], i64 [[OFFSET_IDX]]
+; SCALABLE-NEXT: [[TMP10:%.*]] = load <vscale x 4 x i8>, ptr [[NEXT_GEP]], align 1
+; SCALABLE-NEXT: [[TMP20:%.*]] = mul nuw <vscale x 4 x i8> [[TMP10]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP21:%.*]] = udiv <vscale x 4 x i8> [[TMP20]], splat (i8 -1)
+; SCALABLE-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i8> [[BROADCAST_SPLAT2]], [[TMP21]]
+; SCALABLE-NEXT: store <vscale x 4 x i8> [[TMP22]], ptr [[NEXT_GEP]], align 1
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; SCALABLE-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; SCALABLE-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; SCALABLE: [[MIDDLE_BLOCK]]:
+; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 64, [[N_VEC]]
+; SCALABLE-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; SCALABLE: [[SCALAR_PH]]:
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %dst = phi ptr [ %dst.start, %entry ], [ %dst.next, %loop ]
+ %dst.next = getelementptr inbounds nuw i8, ptr %dst, i64 4
+ %load.dst = load i8, ptr %dst, align 1
+ %mul.dst.0 = mul nuw i8 %load.dst, %b
+ %udiv.0 = udiv i8 %mul.dst.0, 255
+ %val.0 = add i8 %a, %udiv.0
+ store i8 %val.0, ptr %dst, align 1
+ %gep.dst.1 = getelementptr inbounds nuw i8, ptr %dst, i64 1
+ %load.dst.1 = load i8, ptr %gep.dst.1, align 1
+ %mul.dst.1 = mul nuw i8 %load.dst.1, %b
+ %udiv.1 = udiv i8 %mul.dst.1, 255
+ %val.1 = add i8 %a, %udiv.1
+ store i8 %val.1, ptr %gep.dst.1, align 1
+ %gep.dst.2 = getelementptr inbounds nuw i8, ptr %dst, i64 2
+ %load.dst.2 = load i8, ptr %gep.dst.2, align 1
+ %mul.dst.2 = mul nuw i8 %load.dst.2, %b
+ %udiv.2 = udiv i8 %mul.dst.2, 255
+ %val.2 = add i8 %a, %udiv.2
+ store i8 %val.2, ptr %gep.dst.2, align 1
+ %gep.dst.3 = getelementptr inbounds nuw i8, ptr %dst, i64 3
+ %load.dst.3 = load i8, ptr %gep.dst.3, align 1
+ %mul.dst.3 = mul nuw i8 %load.dst.3, %b
+ %udiv.3 = udiv i8 %mul.dst.3, 255
+ %val.3 = add i8 %a, %udiv.3
+ store i8 %val.3, ptr %gep.dst.3, align 1
+ %iv.next = add i64 %iv, 4
+ %exit.cond = icmp eq i64 %iv.next, 256
+ br i1 %exit.cond, label %exit, label %loop
+
+exit:
+ ret void
+}
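For reference, a typical way to run the new test locally, assuming an
LLVM build tree at build/ with llvm-lit available:

  build/bin/llvm-lit -v \
    llvm/test/Transforms/LoopVectorize/narrow-interleave-groups-scalable-vf.ll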