[llvm] e59f022 - [GlobalISel] Translate <1 x N> getelementptrs to scalar G_PTR_ADDs

Jessica Paquette via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 1 16:39:47 PDT 2021


Author: Jessica Paquette
Date: 2021-07-01T16:38:47-07:00
New Revision: e59f02216f1c6972925c5ef0f1df6d434c652c69

URL: https://github.com/llvm/llvm-project/commit/e59f02216f1c6972925c5ef0f1df6d434c652c69
DIFF: https://github.com/llvm/llvm-project/commit/e59f02216f1c6972925c5ef0f1df6d434c652c69.diff

LOG: [GlobalISel] Translate <1 x N> getelementptrs to scalar G_PTR_ADDs

In `IRTranslator::translateGetElementPtr`, when we run into a vector gep with
some scalar operands, we try to normalize those operands using
`buildSplatVector`.
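
For example, in a gep like the following (an illustrative, hypothetical function,
not part of this patch), the base is a `<2 x i8*>` vector and the index is a
scalar; the translator splats the scalar index so both operands of the resulting
vector G_PTR_ADD have the same `<2 x ...>` width:

```
; Hypothetical example: the scalar i64 index is normalized with
; buildSplatVector so it matches the <2 x i8*> pointer operand.
define <2 x i8*> @vector_gep_scalar_idx(<2 x i8*> %ptrs) {
  %gep = getelementptr i8, <2 x i8*> %ptrs, i64 1
  ret <2 x i8*> %gep
}
```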

This is fine except when the getelementptr has a <1 x N> type, which is treated
as a scalar. If we run into one of these, then every call to

```
// With VectorWidth = 1
LLT::fixed_vector(VectorWidth, PtrTy)
```

will assert.

Here's an example (equivalent to the added testcase):
https://godbolt.org/z/hGsTnMYdW
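
Inline, the reproducer is essentially the first added test (the function name
here is just illustrative):

```
; A <1 x i8*> gep with a <1 x i32> index. Before this patch, translating it
; reached LLT::fixed_vector(1, PtrTy) and hit the assertion described above.
define <1 x i8*> @one_by_n_gep(<1 x i8*> %vec) {
  %gep = getelementptr i8, <1 x i8*> %vec, <1 x i32> <i32 1>
  ret <1 x i8*> %gep
}
```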

To get around this, this patch adds a variable, `WantSplatVector`, which is
only true when the vector type should actually be represented as a vector,
i.e. when `VectorWidth > 1`. When it is false, we translate the getelementptr
as a scalar.

This fixes this bug:
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=35496

Differential Revision: https://reviews.llvm.org/D105316

Added: 
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-one-by-n-vector-ptr-add.ll

Modified: 
    llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index e791232e74f3d..73b763710fdff 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1472,12 +1472,19 @@ bool IRTranslator::translateGetElementPtr(const User &U,
   // Normalize Vector GEP - all scalar operands should be converted to the
   // splat vector.
   unsigned VectorWidth = 0;
-  if (auto *VT = dyn_cast<VectorType>(U.getType()))
+
+  // True if we should use a splat vector; using VectorWidth alone is not
+  // sufficient.
+  bool WantSplatVector = false;
+  if (auto *VT = dyn_cast<VectorType>(U.getType())) {
     VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
+    // We don't produce 1 x N vectors; those are treated as scalars.
+    WantSplatVector = VectorWidth > 1;
+  }
 
   // We might need to splat the base pointer into a vector if the offsets
   // are vectors.
-  if (VectorWidth && !PtrTy.isVector()) {
+  if (WantSplatVector && !PtrTy.isVector()) {
     BaseReg =
         MIRBuilder
             .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
@@ -1516,7 +1523,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
       Register IdxReg = getOrCreateVReg(*Idx);
       LLT IdxTy = MRI->getType(IdxReg);
       if (IdxTy != OffsetTy) {
-        if (!IdxTy.isVector() && VectorWidth) {
+        if (!IdxTy.isVector() && WantSplatVector) {
           IdxReg = MIRBuilder.buildSplatVector(
             OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
         }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-one-by-n-vector-ptr-add.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-one-by-n-vector-ptr-add.ll
new file mode 100644
index 0000000000000..849fb01637855
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-one-by-n-vector-ptr-add.ll
@@ -0,0 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -O0 -global-isel -mtriple aarch64 -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s
+
+; Make sure we treat <1 x N> getelementptrs like scalar getelementptrs.
+
+; We should not create a splat vector for the non-vector index on this
+; getelementptr. The entire getelementptr should be translated to a scalar
+; G_PTR_ADD.
+define <1 x i8*> @one_elt_vector_ptr_add_non_vector_idx(<1 x i8*> %vec) {
+  ; CHECK-LABEL: name: one_elt_vector_ptr_add_non_vector_idx
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $d0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $d0
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+  ; CHECK:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[SEXT]](s64)
+  ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
+  ; CHECK:   $d0 = COPY [[COPY2]](p0)
+  ; CHECK:   RET_ReallyLR implicit $d0
+  %ptr_add = getelementptr i8, <1 x i8*> %vec, <1 x i32> <i32 1>
+  ret <1 x i8*> %ptr_add
+}
+
+; We should not create a splat vector for the non-vector pointer on this
+; getelementptr. The entire getelementptr should be translated to a scalar
+; G_PTR_ADD.
+define <1 x i8*> @one_elt_vector_ptr_add_non_vector_ptr(i8* %vec) {
+  ; CHECK-LABEL: name: one_elt_vector_ptr_add_non_vector_ptr
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $x0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+  ; CHECK:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[SEXT]](s64)
+  ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
+  ; CHECK:   $d0 = COPY [[COPY2]](p0)
+  ; CHECK:   RET_ReallyLR implicit $d0
+  %ptr_add = getelementptr i8, i8* %vec, <1 x i32> <i32 1>
+  ret <1 x i8*> %ptr_add
+}


        

