[llvm] [IR] Initial introduction of memset_pattern (PR #97583)

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Sat Nov 9 07:31:16 PST 2024


https://github.com/asb updated https://github.com/llvm/llvm-project/pull/97583

>From 627f1ef07008107924c3e5031776d312d497becf Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 10 Jul 2024 10:33:45 +0100
Subject: [PATCH 01/24] [IR] Initial introduction of memset_pattern

---
 llvm/docs/LangRef.rst                         |  56 ++
 llvm/include/llvm/IR/InstVisitor.h            |   3 +
 llvm/include/llvm/IR/IntrinsicInst.h          |  22 +-
 llvm/include/llvm/IR/Intrinsics.td            |   8 +
 llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp |   8 +
 llvm/lib/IR/Verifier.cpp                      |   3 +-
 .../Transforms/Utils/LowerMemIntrinsics.cpp   | 113 ++++
 llvm/test/CodeGen/RISCV/memset-pattern.ll     | 591 ++++++++++++++++++
 .../PowerPC/lit.local.cfg                     |   2 +
 .../PowerPC/memset-pattern.ll                 |   8 +
 .../RISCV/lit.local.cfg                       |   2 +
 ...memset-pattern-non-power-of-two-pattern.ll |   8 +
 .../RISCV/memset-pattern.ll                   | 234 +++++++
 llvm/test/Verifier/intrinsic-immarg.ll        |  10 +
 llvm/test/Verifier/memset-pattern-inline.ll   |   9 +
 15 files changed, 1075 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/memset-pattern.ll
 create mode 100644 llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/lit.local.cfg
 create mode 100644 llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
 create mode 100644 llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/lit.local.cfg
 create mode 100644 llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll
 create mode 100644 llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
 create mode 100644 llvm/test/Verifier/memset-pattern-inline.ll

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index ae39217dc8ff8e..44f1d6d6d19bdf 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15230,6 +15230,62 @@ The behavior of '``llvm.memset.inline.*``' is equivalent to the behavior of
 '``llvm.memset.*``', but the generated code is guaranteed not to call any
 external functions.
 
+.. _int_memset_pattern:
+
+'``llvm.memset_pattern``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+This is an overloaded intrinsic. You can use ``llvm.memset_pattern`` on
+any integer bit width and for different address spaces. Not all targets
+support all bit widths, however.
+
+::
+
+      declare void @llvm.memset_pattern.p0.i64.i128(ptr <dest>, i128 <val>,
+                                                    i64 <len>, i1 <isvolatile>)
+
+Overview:
+"""""""""
+
+The '``llvm.memset_pattern.*``' intrinsics fill a block of memory with a
+repeated copy of the given pattern value. This may be expanded to an inline
+loop, a sequence of stores, or a libcall depending on what is available for
+the target and the expected performance and code size impact.
+
+Arguments:
+""""""""""
+
+The first argument is a pointer to the destination to fill, the second
+is the value with which to fill it, the third argument is an integer
+argument specifying the number of bytes to fill, and the fourth is a boolean
+indicating a volatile access.
+
+The :ref:`align <attr_align>` parameter attribute can be provided
+for the first argument.
+
+If the ``isvolatile`` parameter is ``true``, the
+``llvm.memset_pattern`` call is a :ref:`volatile operation <volatile>`. The
+detailed access behavior is not very cleanly specified and it is unwise to
+depend on it.
+
+Semantics:
+""""""""""
+
+The '``llvm.memset_pattern.*``' intrinsics fill "len" bytes of memory
+starting at the destination location. If the argument is known to be aligned
+to some boundary, this can be specified as an attribute on the argument.
+
+If ``<len>`` is not an integer multiple of the pattern width in bytes, then
+the trailing remainder bytes are filled with the leading bytes of ``<val>``
+as laid out in memory. If ``<len>`` is 0, it is a no-op modulo the behavior
+of attributes attached to the arguments.
+If ``<len>`` is not a well-defined value, the behavior is undefined.
+If ``<len>`` is not zero, ``<dest>`` should be well-defined; otherwise the
+behavior is undefined.
+
 .. _int_sqrt:
 
 '``llvm.sqrt.*``' Intrinsic
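
As an illustrative example of the syntax above (constructed for this
description, not taken from the patch): the call below fills ten bytes from a
four-byte pattern, so the pattern is stored twice in full and the final two
bytes take the leading bytes of the pattern.

  declare void @llvm.memset_pattern.p0.i64.i32(ptr, i32, i64, i1)

  define void @fill(ptr %buf) {
    call void @llvm.memset_pattern.p0.i64.i32(ptr align 4 %buf,
                                              i32 3735928559, i64 10,
                                              i1 false)
    ret void
  }
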
diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index 311e0ac47ddfad..aa4f0f36e4ed73 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -208,6 +208,7 @@ class InstVisitor {
   RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
   RetTy visitMemSetInst(MemSetInst &I)            { DELEGATE(MemIntrinsic); }
   RetTy visitMemSetInlineInst(MemSetInlineInst &I){ DELEGATE(MemSetInst); }
+  RetTy visitMemSetPatternInst(MemSetPatternInst &I) { DELEGATE(MemSetInst); }
   RetTy visitMemCpyInst(MemCpyInst &I)            { DELEGATE(MemTransferInst); }
   RetTy visitMemCpyInlineInst(MemCpyInlineInst &I){ DELEGATE(MemCpyInst); }
   RetTy visitMemMoveInst(MemMoveInst &I)          { DELEGATE(MemTransferInst); }
@@ -295,6 +296,8 @@ class InstVisitor {
       case Intrinsic::memset:      DELEGATE(MemSetInst);
       case Intrinsic::memset_inline:
         DELEGATE(MemSetInlineInst);
+      case Intrinsic::memset_pattern:
+        DELEGATE(MemSetPatternInst);
       case Intrinsic::vastart:     DELEGATE(VAStartInst);
       case Intrinsic::vaend:       DELEGATE(VAEndInst);
       case Intrinsic::vacopy:      DELEGATE(VACopyInst);
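
Because the default visitMemSetPatternInst delegates to MemSetInst, existing
visitors that only handle MemSetInst transparently see the new intrinsic as
well. A minimal sketch of the dispatch (illustrative only, not code from the
patch):

  #include "llvm/IR/InstVisitor.h"
  using namespace llvm;

  struct MemSetCounter : InstVisitor<MemSetCounter> {
    unsigned Plain = 0, Pattern = 0;
    // Reached by llvm.memset and llvm.memset.inline (the latter via the
    // default visitMemSetInlineInst delegation).
    void visitMemSetInst(MemSetInst &I) { ++Plain; }
    // Reached only by llvm.memset_pattern; without this override, such
    // calls would fall through to visitMemSetInst above.
    void visitMemSetPatternInst(MemSetPatternInst &I) { ++Pattern; }
  };
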
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index a2ecf625ff61aa..af8789d4958d89 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1208,6 +1208,7 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
     case Intrinsic::memmove:
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
+    case Intrinsic::memset_pattern:
     case Intrinsic::memcpy_inline:
       return true;
     default:
@@ -1219,7 +1220,8 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
   }
 };
 
-/// This class wraps the llvm.memset and llvm.memset.inline intrinsics.
+/// This class wraps the llvm.memset, llvm.memset.inline, and
+/// llvm.memset_pattern intrinsics.
 class MemSetInst : public MemSetBase<MemIntrinsic> {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -1227,6 +1229,7 @@ class MemSetInst : public MemSetBase<MemIntrinsic> {
     switch (I->getIntrinsicID()) {
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
+    case Intrinsic::memset_pattern:
       return true;
     default:
       return false;
@@ -1249,6 +1252,21 @@ class MemSetInlineInst : public MemSetInst {
   }
 };
 
+/// This class wraps the llvm.memset_pattern intrinsic.
+class MemSetPatternInst : public MemSetInst {
+public:
+  ConstantInt *getLength() const {
+    return cast<ConstantInt>(MemSetInst::getLength());
+  }
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const IntrinsicInst *I) {
+    return I->getIntrinsicID() == Intrinsic::memset_pattern;
+  }
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
 /// This class wraps the llvm.memcpy/memmove intrinsics.
 class MemTransferInst : public MemTransferBase<MemIntrinsic> {
 public:
@@ -1328,6 +1346,7 @@ class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
     case Intrinsic::memmove:
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
+    case Intrinsic::memset_pattern:
     case Intrinsic::memcpy_element_unordered_atomic:
     case Intrinsic::memmove_element_unordered_atomic:
     case Intrinsic::memset_element_unordered_atomic:
@@ -1350,6 +1369,7 @@ class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> {
     switch (I->getIntrinsicID()) {
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
+    case Intrinsic::memset_pattern:
     case Intrinsic::memset_element_unordered_atomic:
       return true;
     default:
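
Since MemSetPatternInst derives from MemSetInst, a memset_pattern call also
satisfies isa<MemSetInst>, and the inherited MemSetBase accessors apply
unchanged. A short consumer-side sketch (illustrative only; the function name
is hypothetical):

  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  void inspect(Instruction *I) {
    // Test the more derived type first when the two must be distinguished.
    if (auto *MSP = dyn_cast<MemSetPatternInst>(I)) {
      Value *Dst = MSP->getRawDest();
      // For this intrinsic, getValue() is the (possibly multi-byte)
      // pattern rather than a single splat byte.
      Value *Pattern = MSP->getValue();
      Value *Len = MSP->getLength();
      (void)Dst; (void)Pattern; (void)Len;
    } else if (isa<MemSetInst>(I)) {
      // Plain llvm.memset or llvm.memset.inline.
    }
  }
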
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 65a9b68b5229df..f79a19e0a8d298 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1003,6 +1003,14 @@ def int_memset_inline
        NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
        ImmArg<ArgIndex<3>>]>;
 
+// Memset variant that writes a given pattern.
+def int_memset_pattern
+    : Intrinsic<[],
+      [llvm_anyptr_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_i1_ty],
+      [IntrWriteMem, IntrArgMemOnly, IntrWillReturn, IntrNoFree, IntrNoCallback,
+       NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
+       ImmArg<ArgIndex<3>>], "llvm.memset_pattern">;
+
 // FIXME: Add version of these floating point intrinsics which allow non-default
 // rounding modes and FP exception handling.
 
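
On my reading of the property list above (this is an interpretation, not
output generated from the patch), it should yield a declaration roughly
equivalent to

  ; memory(argmem: write) from IntrWriteMem + IntrArgMemOnly, plus
  ; nocallback, nofree and willreturn:
  declare void @llvm.memset_pattern.p0.i64.i128(
      ptr nocapture writeonly, i128, i64, i1 immarg)

i.e. the intrinsic only writes through its destination argument, and the
volatile flag must be an immediate.
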
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 8572cdc1604562..97267afeacef36 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -276,6 +276,13 @@ bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
       Memset->eraseFromParent();
       break;
     }
+    case Intrinsic::memset_pattern: {
+      auto *Memset = cast<MemSetPatternInst>(Inst);
+      expandMemSetAsLoop(Memset);
+      Changed = true;
+      Memset->eraseFromParent();
+      break;
+    }
     default:
       llvm_unreachable("unhandled intrinsic");
     }
@@ -294,6 +301,7 @@ bool PreISelIntrinsicLowering::lowerIntrinsics(Module &M) const {
     case Intrinsic::memmove:
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
+    case Intrinsic::memset_pattern:
       Changed |= expandMemIntrinsicUses(F);
       break;
     case Intrinsic::load_relative:
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index d156eaec4c172c..75f4bf2973919a 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5435,7 +5435,8 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
   case Intrinsic::memcpy_inline:
   case Intrinsic::memmove:
   case Intrinsic::memset:
-  case Intrinsic::memset_inline: {
+  case Intrinsic::memset_inline:
+  case Intrinsic::memset_pattern: {
     break;
   }
   case Intrinsic::memcpy_element_unordered_atomic:
diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index d2814f07530d8e..8d7c234d16878f 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -456,6 +456,109 @@ static void createMemMoveLoop(Instruction *InsertBefore, Value *SrcAddr,
   ElseTerm->eraseFromParent();
 }
 
+static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
+                                    Value *CopyLen, Value *SetValue,
+                                    Align DstAlign, bool IsVolatile) {
+  BasicBlock *OrigBB = InsertBefore->getParent();
+  Function *F = OrigBB->getParent();
+  const DataLayout &DL = F->getDataLayout();
+
+  if (DL.isBigEndian())
+    report_fatal_error("memset_pattern.inline expansion not currently "
+                       "implemented for big-endian targets",
+                       false);
+
+  // The expansion below assumes a power-of-two pattern width; bail out if not.
+  if (!isPowerOf2_32(SetValue->getType()->getScalarSizeInBits()))
+    report_fatal_error("Pattern width for memset_pattern must be a power of 2",
+                       false);
+  unsigned PatternSize = SetValue->getType()->getScalarSizeInBits() / 8;
+
+  Type *TypeOfCopyLen = CopyLen->getType();
+
+  BasicBlock *NewBB = OrigBB->splitBasicBlock(InsertBefore, "split");
+  BasicBlock *LoopBB =
+      BasicBlock::Create(F->getContext(), "storeloop", F, NewBB);
+  BasicBlock *RemCheckBB =
+      BasicBlock::Create(F->getContext(), "remcheck", F, NewBB);
+  BasicBlock *RemainderLoopBB =
+      BasicBlock::Create(F->getContext(), "remainderloop", F, NewBB);
+  IRBuilder<> Builder(OrigBB->getTerminator());
+
+  ConstantInt *CILoopOpSize =
+      ConstantInt::get(cast<IntegerType>(TypeOfCopyLen), PatternSize);
+  Value *RuntimeLoopCount =
+      getRuntimeLoopCount(DL, Builder, CopyLen, CILoopOpSize, PatternSize);
+  Value *RuntimeRemainder =
+      getRuntimeLoopRemainder(DL, Builder, CopyLen, CILoopOpSize, PatternSize);
+
+  Builder.CreateCondBr(Builder.CreateICmpEQ(ConstantInt::get(TypeOfCopyLen, 0),
+                                            RuntimeLoopCount),
+                       RemCheckBB, LoopBB);
+  OrigBB->getTerminator()->eraseFromParent();
+
+  IRBuilder<> LoopBuilder(LoopBB);
+  PHINode *CurrentDst = LoopBuilder.CreatePHI(DstAddr->getType(), 0);
+  CurrentDst->addIncoming(DstAddr, OrigBB);
+  PHINode *LoopCount = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
+  LoopCount->addIncoming(RuntimeLoopCount, OrigBB);
+
+  // Create the store instruction for the pattern
+  LoopBuilder.CreateAlignedStore(SetValue, CurrentDst, DstAlign, IsVolatile);
+
+  Value *NextDst = LoopBuilder.CreateInBoundsGEP(
+      SetValue->getType(), CurrentDst,
+      ConstantInt::get(TypeOfCopyLen, 1));
+  CurrentDst->addIncoming(NextDst, LoopBB);
+
+  Value *NewLoopCount =
+      LoopBuilder.CreateSub(LoopCount, ConstantInt::get(TypeOfCopyLen, 1));
+  LoopCount->addIncoming(NewLoopCount, LoopBB);
+
+  LoopBuilder.CreateCondBr(
+      LoopBuilder.CreateICmpNE(NewLoopCount,
+                               ConstantInt::get(TypeOfCopyLen, 0)),
+      LoopBB, RemCheckBB);
+
+  IRBuilder<> RemCheckBuilder(RemCheckBB, RemCheckBB->begin());
+  // Branch to the end if there are no remainder bytes.
+  PHINode *RemainderDstPHI = RemCheckBuilder.CreatePHI(NextDst->getType(), 0);
+  RemainderDstPHI->addIncoming(DstAddr, OrigBB);
+  RemainderDstPHI->addIncoming(NextDst, LoopBB);
+  RemCheckBuilder.CreateCondBr(
+      RemCheckBuilder.CreateICmpEQ(RuntimeRemainder,
+                                   ConstantInt::get(TypeOfCopyLen, 0)),
+      NewBB, RemainderLoopBB);
+
+  // Remainder loop
+  IRBuilder<> RemainderLoopBuilder(RemainderLoopBB);
+  PHINode *ByteIndex = RemainderLoopBuilder.CreatePHI(TypeOfCopyLen, 0);
+  ByteIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), RemCheckBB);
+  Type *TypeOfSetValue = SetValue->getType();
+  PHINode *ShiftedValue = RemainderLoopBuilder.CreatePHI(TypeOfSetValue, 0);
+  ShiftedValue->addIncoming(SetValue, RemCheckBB);
+
+  Value *ByteToStore = RemainderLoopBuilder.CreateTrunc(
+      ShiftedValue, RemainderLoopBuilder.getInt8Ty());
+
+  RemainderLoopBuilder.CreateStore(
+      ByteToStore,
+      RemainderLoopBuilder.CreateInBoundsGEP(RemainderLoopBuilder.getInt8Ty(),
+                                             RemainderDstPHI, ByteIndex),
+      IsVolatile);
+
+  Value *NewByteIndex = RemainderLoopBuilder.CreateAdd(
+      ByteIndex, ConstantInt::get(TypeOfCopyLen, 1));
+  ByteIndex->addIncoming(NewByteIndex, RemainderLoopBB);
+  Value *NewShiftedValue = RemainderLoopBuilder.CreateLShr(
+      ShiftedValue, ConstantInt::get(TypeOfSetValue, 8));
+  ShiftedValue->addIncoming(NewShiftedValue, RemainderLoopBB);
+
+  RemainderLoopBuilder.CreateCondBr(
+      RemainderLoopBuilder.CreateICmpULT(NewByteIndex, RuntimeRemainder),
+      RemainderLoopBB, NewBB);
+}
+
 static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr,
                              Value *CopyLen, Value *SetValue, Align DstAlign,
                              bool IsVolatile) {
@@ -591,6 +694,16 @@ bool llvm::expandMemMoveAsLoop(MemMoveInst *Memmove,
 }
 
 void llvm::expandMemSetAsLoop(MemSetInst *Memset) {
+  if (isa<MemSetPatternInst>(Memset)) {
+    return createMemSetPatternLoop(
+        /* InsertBefore */ Memset,
+        /* DstAddr */ Memset->getRawDest(),
+        /* CopyLen */ Memset->getLength(),
+        /* SetValue */ Memset->getValue(),
+        /* Alignment */ Memset->getDestAlign().valueOrOne(),
+        Memset->isVolatile());
+  }
+
   createMemSetLoop(/* InsertBefore */ Memset,
                    /* DstAddr */ Memset->getRawDest(),
                    /* CopyLen */ Memset->getLength(),
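
The control flow built above may be easier to follow as a C-style model. The
sketch below is illustrative only (storePattern is a hypothetical stand-in
for the single wide aligned store, and unsigned __int128 is the Clang/GCC
extension), specialised to a 16-byte pattern:

  #include <stdint.h>
  // Hypothetical helper: performs the single wide (16-byte) pattern store.
  void storePattern(unsigned char *Dst, unsigned __int128 Val);

  // "storeloop" writes whole copies of the pattern; "remainderloop" then
  // writes the leftover bytes, least-significant byte first.
  void memsetPatternModel(unsigned char *Dst, unsigned __int128 Val,
                          uint64_t Len) {
    const uint64_t PatternSize = 16;    // bytes per pattern element
    uint64_t Count = Len / PatternSize; // getRuntimeLoopCount
    uint64_t Rem = Len % PatternSize;   // getRuntimeLoopRemainder
    for (uint64_t I = 0; I != Count; ++I) { // the "storeloop" block
      storePattern(Dst, Val);               // one full 16-byte store
      Dst += PatternSize;
    }
    for (uint64_t I = 0; I != Rem; ++I) {   // the "remainderloop" block
      Dst[I] = (unsigned char)Val;          // trunc i128 -> i8
      Val >>= 8;                            // lshr by 8; this byte order is
                                            // why big-endian is rejected
    }
  }
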
diff --git a/llvm/test/CodeGen/RISCV/memset-pattern.ll b/llvm/test/CodeGen/RISCV/memset-pattern.ll
new file mode 100644
index 00000000000000..ea50ae0b56e401
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/memset-pattern.ll
@@ -0,0 +1,591 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m \
+; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
+; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
+
+define void @memset_1(ptr %a, i128 %value) nounwind {
+; RV32-BOTH-LABEL: memset_1:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    sb a1, 0(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: memset_1:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    sb a1, 0(a0)
+; RV64-BOTH-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
+  ret void
+}
+
+define void @memset_2(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_2:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sh a1, 0(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_2:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sh a1, 0(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 2, i1 0)
+  ret void
+}
+
+define void @memset_3(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_3:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a2, a1, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    srli a1, a1, 16
+; RV32-NEXT:    sb a1, 2(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_3:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    srli a1, a1, 16
+; RV64-NEXT:    sb a1, 2(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_3:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sh a1, 0(a0)
+; RV32-FAST-NEXT:    srli a1, a1, 16
+; RV32-FAST-NEXT:    sb a1, 2(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_3:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sh a1, 0(a0)
+; RV64-FAST-NEXT:    srli a1, a1, 16
+; RV64-FAST-NEXT:    sb a1, 2(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 3, i1 0)
+  ret void
+}
+
+define void @memset_4(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_4:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_4:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sw a1, 0(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 4, i1 0)
+  ret void
+}
+
+define void @memset_5(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_5:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a2, 0(a1)
+; RV32-NEXT:    lw a1, 4(a1)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    srli a1, a2, 24
+; RV32-NEXT:    sb a1, 3(a0)
+; RV32-NEXT:    srli a1, a2, 16
+; RV32-NEXT:    sb a1, 2(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_5:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a2, a1, 8
+; RV64-NEXT:    sb a2, 1(a0)
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    sb a1, 4(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_5:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a2, 4(a1)
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sb a2, 4(a0)
+; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_5:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sw a1, 0(a0)
+; RV64-FAST-NEXT:    srli a1, a1, 32
+; RV64-FAST-NEXT:    sb a1, 4(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 5, i1 0)
+  ret void
+}
+
+define void @memset_6(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_6:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a2, 4(a1)
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_6:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_6:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a2, 4(a1)
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sh a2, 4(a0)
+; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_6:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sw a1, 0(a0)
+; RV64-FAST-NEXT:    srli a1, a1, 32
+; RV64-FAST-NEXT:    sh a1, 4(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 6, i1 0)
+  ret void
+}
+
+define void @memset_7(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_7:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a2, 4(a1)
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a3, a2, 8
+; RV32-NEXT:    sb a3, 5(a0)
+; RV32-NEXT:    srli a2, a2, 16
+; RV32-NEXT:    sb a2, 6(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_7:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_7:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a2, 4(a1)
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sh a2, 4(a0)
+; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    srli a2, a2, 16
+; RV32-FAST-NEXT:    sb a2, 6(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_7:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sw a1, 0(a0)
+; RV64-FAST-NEXT:    srli a2, a1, 48
+; RV64-FAST-NEXT:    sb a2, 6(a0)
+; RV64-FAST-NEXT:    srli a1, a1, 32
+; RV64-FAST-NEXT:    sh a1, 4(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 7, i1 0)
+  ret void
+}
+
+define void @memset_8(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a2, 4(a1)
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a3, a2, 24
+; RV32-NEXT:    sb a3, 7(a0)
+; RV32-NEXT:    srli a3, a2, 16
+; RV32-NEXT:    sb a3, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_8:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a2, 4(a1)
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sw a2, 4(a0)
+; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_8:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sd a1, 0(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 8, i1 0)
+  ret void
+}
+
+define void @memset_9(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_9:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a2, 4(a1)
+; RV32-NEXT:    lw a3, 0(a1)
+; RV32-NEXT:    lw a1, 8(a1)
+; RV32-NEXT:    sb a2, 4(a0)
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a1, 8(a0)
+; RV32-NEXT:    srli a1, a2, 24
+; RV32-NEXT:    sb a1, 7(a0)
+; RV32-NEXT:    srli a1, a2, 16
+; RV32-NEXT:    sb a1, 6(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 5(a0)
+; RV32-NEXT:    srli a1, a3, 24
+; RV32-NEXT:    sb a1, 3(a0)
+; RV32-NEXT:    srli a1, a3, 16
+; RV32-NEXT:    sb a1, 2(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_9:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    sb a2, 8(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_9:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a2, 4(a1)
+; RV32-FAST-NEXT:    lw a3, 0(a1)
+; RV32-FAST-NEXT:    lw a1, 8(a1)
+; RV32-FAST-NEXT:    sw a2, 4(a0)
+; RV32-FAST-NEXT:    sw a3, 0(a0)
+; RV32-FAST-NEXT:    sb a1, 8(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_9:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sb a2, 8(a0)
+; RV64-FAST-NEXT:    sd a1, 0(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 9, i1 0)
+  ret void
+}
+
+define void @memset_16(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a2, 12(a1)
+; RV32-NEXT:    lw a3, 8(a1)
+; RV32-NEXT:    lw a4, 4(a1)
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    sb a2, 12(a0)
+; RV32-NEXT:    sb a3, 8(a0)
+; RV32-NEXT:    sb a4, 4(a0)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    srli a5, a2, 24
+; RV32-NEXT:    sb a5, 15(a0)
+; RV32-NEXT:    srli a5, a2, 16
+; RV32-NEXT:    sb a5, 14(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 13(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 11(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 10(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 9(a0)
+; RV32-NEXT:    srli a2, a4, 24
+; RV32-NEXT:    sb a2, 7(a0)
+; RV32-NEXT:    srli a2, a4, 16
+; RV32-NEXT:    sb a2, 6(a0)
+; RV32-NEXT:    srli a4, a4, 8
+; RV32-NEXT:    sb a4, 5(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a2, 8(a0)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 15(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 14(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 13(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 12(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 11(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 10(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 9(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_16:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a2, 12(a1)
+; RV32-FAST-NEXT:    lw a3, 8(a1)
+; RV32-FAST-NEXT:    lw a4, 4(a1)
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sw a2, 12(a0)
+; RV32-FAST-NEXT:    sw a3, 8(a0)
+; RV32-FAST-NEXT:    sw a4, 4(a0)
+; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_16:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sd a2, 8(a0)
+; RV64-FAST-NEXT:    sd a1, 0(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 16, i1 0)
+  ret void
+}
+
+define void @memset_17(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_17:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw a2, 12(a1)
+; RV32-NEXT:    lw a3, 8(a1)
+; RV32-NEXT:    lw a4, 4(a1)
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    sb a2, 12(a0)
+; RV32-NEXT:    sb a3, 8(a0)
+; RV32-NEXT:    sb a4, 4(a0)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    sb a1, 16(a0)
+; RV32-NEXT:    srli a5, a2, 24
+; RV32-NEXT:    sb a5, 15(a0)
+; RV32-NEXT:    srli a5, a2, 16
+; RV32-NEXT:    sb a5, 14(a0)
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    sb a2, 13(a0)
+; RV32-NEXT:    srli a2, a3, 24
+; RV32-NEXT:    sb a2, 11(a0)
+; RV32-NEXT:    srli a2, a3, 16
+; RV32-NEXT:    sb a2, 10(a0)
+; RV32-NEXT:    srli a3, a3, 8
+; RV32-NEXT:    sb a3, 9(a0)
+; RV32-NEXT:    srli a2, a4, 24
+; RV32-NEXT:    sb a2, 7(a0)
+; RV32-NEXT:    srli a2, a4, 16
+; RV32-NEXT:    sb a2, 6(a0)
+; RV32-NEXT:    srli a4, a4, 8
+; RV32-NEXT:    sb a4, 5(a0)
+; RV32-NEXT:    srli a2, a1, 24
+; RV32-NEXT:    sb a2, 3(a0)
+; RV32-NEXT:    srli a2, a1, 16
+; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_17:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sb a2, 8(a0)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    sb a1, 16(a0)
+; RV64-NEXT:    srli a3, a2, 56
+; RV64-NEXT:    sb a3, 15(a0)
+; RV64-NEXT:    srli a3, a2, 48
+; RV64-NEXT:    sb a3, 14(a0)
+; RV64-NEXT:    srli a3, a2, 40
+; RV64-NEXT:    sb a3, 13(a0)
+; RV64-NEXT:    srli a3, a2, 32
+; RV64-NEXT:    sb a3, 12(a0)
+; RV64-NEXT:    srli a3, a2, 24
+; RV64-NEXT:    sb a3, 11(a0)
+; RV64-NEXT:    srli a3, a2, 16
+; RV64-NEXT:    sb a3, 10(a0)
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    sb a2, 9(a0)
+; RV64-NEXT:    srli a2, a1, 56
+; RV64-NEXT:    sb a2, 7(a0)
+; RV64-NEXT:    srli a2, a1, 48
+; RV64-NEXT:    sb a2, 6(a0)
+; RV64-NEXT:    srli a2, a1, 40
+; RV64-NEXT:    sb a2, 5(a0)
+; RV64-NEXT:    srli a2, a1, 32
+; RV64-NEXT:    sb a2, 4(a0)
+; RV64-NEXT:    srli a2, a1, 24
+; RV64-NEXT:    sb a2, 3(a0)
+; RV64-NEXT:    srli a2, a1, 16
+; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_17:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a2, 12(a1)
+; RV32-FAST-NEXT:    lw a3, 8(a1)
+; RV32-FAST-NEXT:    lw a4, 4(a1)
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    sw a2, 12(a0)
+; RV32-FAST-NEXT:    sw a3, 8(a0)
+; RV32-FAST-NEXT:    sw a4, 4(a0)
+; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    sb a1, 16(a0)
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_17:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    sd a2, 8(a0)
+; RV64-FAST-NEXT:    sd a1, 0(a0)
+; RV64-FAST-NEXT:    sb a1, 16(a0)
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 17, i1 0)
+  ret void
+}
+
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/lit.local.cfg b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/lit.local.cfg
new file mode 100644
index 00000000000000..bb982488eb15ee
--- /dev/null
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "PowerPC" in config.root.targets:
+    config.unsupported = True
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
new file mode 100644
index 00000000000000..8434ca1c9016be
--- /dev/null
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
@@ -0,0 +1,8 @@
+; RUN: not opt -mtriple=powerpc64 -passes=pre-isel-intrinsic-lowering -S -o - %s 2>&1 | FileCheck %s
+
+; CHECK: LLVM ERROR: memset_pattern expansion not currently implemented for big-endian targets
+
+define void @memset_pattern_x(ptr %a, i128 %value, i64 %x) nounwind {
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
+  ret void
+}
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/lit.local.cfg b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/lit.local.cfg
new file mode 100644
index 00000000000000..17351748513d98
--- /dev/null
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "RISCV" in config.root.targets:
+    config.unsupported = True
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll
new file mode 100644
index 00000000000000..ce4ae0cf14c9e8
--- /dev/null
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll
@@ -0,0 +1,8 @@
+; RUN: not opt -mtriple=riscv64 -passes=pre-isel-intrinsic-lowering -S -o - %s 2>&1 | FileCheck %s
+
+; CHECK: LLVM ERROR: Pattern width for memset_pattern must be a power of 2
+
+define void @memset_pattern_i127_x(ptr %a, i127 %value, i64 %x) nounwind {
+  tail call void @llvm.memset_pattern.p0.i64.i127(ptr %a, i127 %value, i64 %x, i1 0)
+  ret void
+}
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
new file mode 100644
index 00000000000000..74ca4c4422a755
--- /dev/null
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
@@ -0,0 +1,234 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=riscv64 -passes=pre-isel-intrinsic-lowering -S -o - %s | FileCheck %s
+
+define void @memset_pattern_i128_1(ptr %a, i128 %value) nounwind {
+; CHECK-LABEL: define void @memset_pattern_i128_1(
+; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    br i1 true, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 1
+; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
+; CHECK:       [[REMCHECK]]:
+; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
+; CHECK:       [[REMAINDERLOOP]]:
+; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
+; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
+; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 1
+; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    ret void
+;
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
+  ret void
+}
+
+define void @memset_pattern_i128_3(ptr %a, i128 %value) nounwind {
+; CHECK-LABEL: define void @memset_pattern_i128_3(
+; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    br i1 true, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 1
+; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
+; CHECK:       [[REMCHECK]]:
+; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
+; CHECK:       [[REMAINDERLOOP]]:
+; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
+; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
+; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 3
+; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    ret void
+;
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 3, i1 0)
+  ret void
+}
+
+define void @memset_pattern_i128_14(ptr %a, i128 %value) nounwind {
+; CHECK-LABEL: define void @memset_pattern_i128_14(
+; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    br i1 true, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 1
+; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
+; CHECK:       [[REMCHECK]]:
+; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
+; CHECK:       [[REMAINDERLOOP]]:
+; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
+; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
+; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 14
+; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    ret void
+;
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 14, i1 0)
+  ret void
+}
+
+define void @memset_pattern_i128_16(ptr %a, i128 %value) nounwind {
+; CHECK-LABEL: define void @memset_pattern_i128_16(
+; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    br i1 false, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 1, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 1
+; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
+; CHECK:       [[REMCHECK]]:
+; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 true, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
+; CHECK:       [[REMAINDERLOOP]]:
+; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
+; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
+; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 0
+; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    ret void
+;
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 16, i1 0)
+  ret void
+}
+
+define void @memset_pattern_i128_38(ptr %a, i128 %value) nounwind {
+; CHECK-LABEL: define void @memset_pattern_i128_38(
+; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    br i1 false, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 2, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 1
+; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
+; CHECK:       [[REMCHECK]]:
+; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
+; CHECK:       [[REMAINDERLOOP]]:
+; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
+; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
+; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 6
+; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    ret void
+;
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 38, i1 0)
+  ret void
+}
+
+define void @memset_pattern_i128_x(ptr %a, i128 %value, i64 %x) nounwind {
+; CHECK-LABEL: define void @memset_pattern_i128_x(
+; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[X]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[X]], 15
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 0, [[TMP1]]
+; CHECK-NEXT:    br i1 [[TMP3]], label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP4:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = phi i64 [ [[TMP1]], [[TMP0]] ], [ [[TMP7:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP4]], align 1
+; CHECK-NEXT:    [[TMP6]] = getelementptr inbounds i128, ptr [[TMP4]], i64 1
+; CHECK-NEXT:    [[TMP7]] = sub i64 [[TMP5]], 1
+; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[TMP8]], label %[[STORELOOP]], label %[[REMCHECK]]
+; CHECK:       [[REMCHECK]]:
+; CHECK-NEXT:    [[TMP9:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP6]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT:    br i1 [[TMP10]], label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
+; CHECK:       [[REMAINDERLOOP]]:
+; CHECK-NEXT:    [[TMP11:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP15:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP12:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP16:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP13:%.*]] = trunc i128 [[TMP12]] to i8
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[TMP11]]
+; CHECK-NEXT:    store i8 [[TMP13]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP15]] = add i64 [[TMP11]], 1
+; CHECK-NEXT:    [[TMP16]] = lshr i128 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ult i64 [[TMP15]], [[TMP2]]
+; CHECK-NEXT:    br i1 [[TMP17]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    ret void
+;
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
+  ret void
+}
+
+define void @memset_pattern_i256_x(ptr %a, i256 %value, i64 %x) nounwind {
+; CHECK-LABEL: define void @memset_pattern_i256_x(
+; CHECK-SAME: ptr [[A:%.*]], i256 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[X]], 5
+; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[X]], 31
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 0, [[TMP1]]
+; CHECK-NEXT:    br i1 [[TMP3]], label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP4:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = phi i64 [ [[TMP1]], [[TMP0]] ], [ [[TMP7:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i256 [[VALUE]], ptr [[TMP4]], align 1
+; CHECK-NEXT:    [[TMP6]] = getelementptr inbounds i256, ptr [[TMP4]], i64 1
+; CHECK-NEXT:    [[TMP7]] = sub i64 [[TMP5]], 1
+; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[TMP8]], label %[[STORELOOP]], label %[[REMCHECK]]
+; CHECK:       [[REMCHECK]]:
+; CHECK-NEXT:    [[TMP9:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP6]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT:    br i1 [[TMP10]], label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
+; CHECK:       [[REMAINDERLOOP]]:
+; CHECK-NEXT:    [[TMP11:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP15:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP12:%.*]] = phi i256 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP16:%.*]], %[[REMAINDERLOOP]] ]
+; CHECK-NEXT:    [[TMP13:%.*]] = trunc i256 [[TMP12]] to i8
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[TMP11]]
+; CHECK-NEXT:    store i8 [[TMP13]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP15]] = add i64 [[TMP11]], 1
+; CHECK-NEXT:    [[TMP16]] = lshr i256 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ult i64 [[TMP15]], [[TMP2]]
+; CHECK-NEXT:    br i1 [[TMP17]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    ret void
+;
+  tail call void @llvm.memset_pattern.p0.i64.i256(ptr %a, i256 %value, i64 %x, i1 0)
+  ret void
+}
diff --git a/llvm/test/Verifier/intrinsic-immarg.ll b/llvm/test/Verifier/intrinsic-immarg.ll
index b1b9f7ee4be112..37745c6a9acee7 100644
--- a/llvm/test/Verifier/intrinsic-immarg.ll
+++ b/llvm/test/Verifier/intrinsic-immarg.ll
@@ -72,6 +72,16 @@ define void @memset_inline_is_volatile(ptr %dest, i8 %value, i1 %is.volatile) {
 }
 
 
+declare void @llvm.memset_pattern.p0.i32.i32(ptr nocapture, i32, i32, i1)
+define void @memset_pattern_is_volatile(ptr %dest, i32 %value, i1 %is.volatile) {
+  ; CHECK: immarg operand has non-immediate parameter
+  ; CHECK-NEXT: i1 %is.volatile
+  ; CHECK-NEXT: call void @llvm.memset_pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
+  call void @llvm.memset_pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
+  ret void
+}
+
+
 declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
 define void @objectsize(ptr %ptr, i1 %a, i1 %b, i1 %c) {
   ; CHECK: immarg operand has non-immediate parameter
diff --git a/llvm/test/Verifier/memset-pattern-inline.ll b/llvm/test/Verifier/memset-pattern-inline.ll
new file mode 100644
index 00000000000000..7f2e01ef99ea51
--- /dev/null
+++ b/llvm/test/Verifier/memset-pattern-inline.ll
@@ -0,0 +1,9 @@
+; RUN: not opt -passes=verify < %s 2>&1 | FileCheck %s
+
+; CHECK: alignment is not a power of two
+
+define void @foo(ptr %P, i32 %value) {
+  call void @llvm.memset_pattern.p0.i32.i32(ptr align 3 %P, i32 %value, i32 4, i1 false)
+  ret void
+}
+declare void @llvm.memset_pattern.p0.i32.i32(ptr nocapture, i32, i32, i1) nounwind

>From 3a0d10a53d3731d1abe1e6c49f44badb6821e927 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 24 Jul 2024 13:21:27 +0100
Subject: [PATCH 02/24] Tweak wording in LangRef description based on feedback

---
 llvm/docs/LangRef.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 44f1d6d6d19bdf..cf8744a22dca1c 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15239,8 +15239,8 @@ Syntax:
 """""""
 
 This is an overloaded intrinsic. You can use ``llvm.memset_pattern`` on
-any integer bit width and for different address spaces. Not all targets
-support all bit widths, however.
+any integer bit width that is an integral number of bytes and for different
+address spaces. Not all targets support all bit widths, however.
 
 ::
 

>From d710a1f2b58b0b84df4feb4e41350f59fe5b4feb Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 24 Jul 2024 13:29:27 +0100
Subject: [PATCH 03/24] Removing ConstantInt getLength (holdover from when this
 was a restriction)

---
 llvm/include/llvm/IR/IntrinsicInst.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index af8789d4958d89..b549852c3fc228 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1255,9 +1255,6 @@ class MemSetInlineInst : public MemSetInst {
 /// This class wraps the llvm.memset_pattern intrinsic.
 class MemSetPatternInst : public MemSetInst {
 public:
-  ConstantInt *getLength() const {
-    return cast<ConstantInt>(MemSetInst::getLength());
-  }
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const IntrinsicInst *I) {
     return I->getIntrinsicID() == Intrinsic::memset_pattern;

>From 4bcc00e06bbcad395175ef4401129147c71986a2 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 24 Jul 2024 13:46:24 +0100
Subject: [PATCH 04/24] Properly update memset-pattern.ll test cases

---
 llvm/test/CodeGen/RISCV/memset-pattern.ll | 1405 ++++++++++++++-------
 1 file changed, 938 insertions(+), 467 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/memset-pattern.ll b/llvm/test/CodeGen/RISCV/memset-pattern.ll
index ea50ae0b56e401..85f66c8d3d230e 100644
--- a/llvm/test/CodeGen/RISCV/memset-pattern.ll
+++ b/llvm/test/CodeGen/RISCV/memset-pattern.ll
@@ -8,486 +8,610 @@
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+unaligned-scalar-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 
+; TODO: Due to the initial naive lowering implementation of memset_pattern in
+; PreISelIntrinsicLowering, the generated code is not good.
+
 define void @memset_1(ptr %a, i128 %value) nounwind {
 ; RV32-BOTH-LABEL: memset_1:
 ; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
 ; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    sb a1, 0(a0)
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB0_1: # %remainderloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    add a7, a0, a5
+; RV32-BOTH-NEXT:    sb a1, 0(a7)
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz a7, a5
+; RV32-BOTH-NEXT:    add a6, a6, a7
+; RV32-BOTH-NEXT:    or a7, a5, a6
+; RV32-BOTH-NEXT:    srli a1, a1, 8
+; RV32-BOTH-NEXT:    slli t0, a4, 24
+; RV32-BOTH-NEXT:    or a1, a1, t0
+; RV32-BOTH-NEXT:    srli a4, a4, 8
+; RV32-BOTH-NEXT:    slli t0, a3, 24
+; RV32-BOTH-NEXT:    or a4, a4, t0
+; RV32-BOTH-NEXT:    srli a3, a3, 8
+; RV32-BOTH-NEXT:    slli t0, a2, 24
+; RV32-BOTH-NEXT:    or a3, a3, t0
+; RV32-BOTH-NEXT:    srli a2, a2, 8
+; RV32-BOTH-NEXT:    beqz a7, .LBB0_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
 ; RV32-BOTH-NEXT:    ret
 ;
 ; RV64-BOTH-LABEL: memset_1:
 ; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    sb a1, 0(a0)
+; RV64-BOTH-NEXT:    li a3, 0
+; RV64-BOTH-NEXT:  .LBB0_1: # %remainderloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    add a4, a0, a3
+; RV64-BOTH-NEXT:    sb a1, 0(a4)
+; RV64-BOTH-NEXT:    addi a3, a3, 1
+; RV64-BOTH-NEXT:    srli a1, a1, 8
+; RV64-BOTH-NEXT:    slli a4, a2, 56
+; RV64-BOTH-NEXT:    or a1, a1, a4
+; RV64-BOTH-NEXT:    srli a2, a2, 8
+; RV64-BOTH-NEXT:    beqz a3, .LBB0_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
 ; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
 define void @memset_2(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_2:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lw a1, 0(a1)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_2:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_2:
-; RV32-FAST:       # %bb.0:
-; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    sh a1, 0(a0)
-; RV32-FAST-NEXT:    ret
+; RV32-BOTH-LABEL: memset_2:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB1_1: # %remainderloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    add a7, a0, a5
+; RV32-BOTH-NEXT:    sb a1, 0(a7)
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz a7, a5
+; RV32-BOTH-NEXT:    add a6, a6, a7
+; RV32-BOTH-NEXT:    srli a1, a1, 8
+; RV32-BOTH-NEXT:    slli a7, a4, 24
+; RV32-BOTH-NEXT:    or a1, a1, a7
+; RV32-BOTH-NEXT:    srli a4, a4, 8
+; RV32-BOTH-NEXT:    slli a7, a3, 24
+; RV32-BOTH-NEXT:    or a4, a4, a7
+; RV32-BOTH-NEXT:    srli a3, a3, 8
+; RV32-BOTH-NEXT:    slli a7, a2, 24
+; RV32-BOTH-NEXT:    or a3, a3, a7
+; RV32-BOTH-NEXT:    seqz a7, a6
+; RV32-BOTH-NEXT:    sltiu t0, a5, 2
+; RV32-BOTH-NEXT:    and a7, a7, t0
+; RV32-BOTH-NEXT:    srli a2, a2, 8
+; RV32-BOTH-NEXT:    bnez a7, .LBB1_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_2:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sh a1, 0(a0)
-; RV64-FAST-NEXT:    ret
+; RV64-BOTH-LABEL: memset_2:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    li a3, 0
+; RV64-BOTH-NEXT:    li a4, 2
+; RV64-BOTH-NEXT:  .LBB1_1: # %remainderloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    add a5, a0, a3
+; RV64-BOTH-NEXT:    sb a1, 0(a5)
+; RV64-BOTH-NEXT:    addi a3, a3, 1
+; RV64-BOTH-NEXT:    srli a1, a1, 8
+; RV64-BOTH-NEXT:    slli a5, a2, 56
+; RV64-BOTH-NEXT:    or a1, a1, a5
+; RV64-BOTH-NEXT:    srli a2, a2, 8
+; RV64-BOTH-NEXT:    bltu a3, a4, .LBB1_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
+; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 2, i1 0)
   ret void
 }
 
 define void @memset_3(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_3:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lw a1, 0(a1)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a2, a1, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:    srli a1, a1, 16
-; RV32-NEXT:    sb a1, 2(a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_3:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 8
-; RV64-NEXT:    sb a2, 1(a0)
-; RV64-NEXT:    srli a1, a1, 16
-; RV64-NEXT:    sb a1, 2(a0)
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_3:
-; RV32-FAST:       # %bb.0:
-; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    sh a1, 0(a0)
-; RV32-FAST-NEXT:    srli a1, a1, 16
-; RV32-FAST-NEXT:    sb a1, 2(a0)
-; RV32-FAST-NEXT:    ret
+; RV32-BOTH-LABEL: memset_3:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB2_1: # %remainderloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    add a7, a0, a5
+; RV32-BOTH-NEXT:    sb a1, 0(a7)
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz a7, a5
+; RV32-BOTH-NEXT:    add a6, a6, a7
+; RV32-BOTH-NEXT:    srli a1, a1, 8
+; RV32-BOTH-NEXT:    slli a7, a4, 24
+; RV32-BOTH-NEXT:    or a1, a1, a7
+; RV32-BOTH-NEXT:    srli a4, a4, 8
+; RV32-BOTH-NEXT:    slli a7, a3, 24
+; RV32-BOTH-NEXT:    or a4, a4, a7
+; RV32-BOTH-NEXT:    srli a3, a3, 8
+; RV32-BOTH-NEXT:    slli a7, a2, 24
+; RV32-BOTH-NEXT:    or a3, a3, a7
+; RV32-BOTH-NEXT:    seqz a7, a6
+; RV32-BOTH-NEXT:    sltiu t0, a5, 3
+; RV32-BOTH-NEXT:    and a7, a7, t0
+; RV32-BOTH-NEXT:    srli a2, a2, 8
+; RV32-BOTH-NEXT:    bnez a7, .LBB2_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_3:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sh a1, 0(a0)
-; RV64-FAST-NEXT:    srli a1, a1, 16
-; RV64-FAST-NEXT:    sb a1, 2(a0)
-; RV64-FAST-NEXT:    ret
+; RV64-BOTH-LABEL: memset_3:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    li a3, 0
+; RV64-BOTH-NEXT:    li a4, 3
+; RV64-BOTH-NEXT:  .LBB2_1: # %remainderloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    add a5, a0, a3
+; RV64-BOTH-NEXT:    sb a1, 0(a5)
+; RV64-BOTH-NEXT:    addi a3, a3, 1
+; RV64-BOTH-NEXT:    srli a1, a1, 8
+; RV64-BOTH-NEXT:    slli a5, a2, 56
+; RV64-BOTH-NEXT:    or a1, a1, a5
+; RV64-BOTH-NEXT:    srli a2, a2, 8
+; RV64-BOTH-NEXT:    bltu a3, a4, .LBB2_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
+; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 3, i1 0)
   ret void
 }
 
 define void @memset_4(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_4:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lw a1, 0(a1)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_4:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_4:
-; RV32-FAST:       # %bb.0:
-; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    sw a1, 0(a0)
-; RV32-FAST-NEXT:    ret
+; RV32-BOTH-LABEL: memset_4:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB3_1: # %remainderloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    add a7, a0, a5
+; RV32-BOTH-NEXT:    sb a1, 0(a7)
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz a7, a5
+; RV32-BOTH-NEXT:    add a6, a6, a7
+; RV32-BOTH-NEXT:    srli a1, a1, 8
+; RV32-BOTH-NEXT:    slli a7, a4, 24
+; RV32-BOTH-NEXT:    or a1, a1, a7
+; RV32-BOTH-NEXT:    srli a4, a4, 8
+; RV32-BOTH-NEXT:    slli a7, a3, 24
+; RV32-BOTH-NEXT:    or a4, a4, a7
+; RV32-BOTH-NEXT:    srli a3, a3, 8
+; RV32-BOTH-NEXT:    slli a7, a2, 24
+; RV32-BOTH-NEXT:    or a3, a3, a7
+; RV32-BOTH-NEXT:    seqz a7, a6
+; RV32-BOTH-NEXT:    sltiu t0, a5, 4
+; RV32-BOTH-NEXT:    and a7, a7, t0
+; RV32-BOTH-NEXT:    srli a2, a2, 8
+; RV32-BOTH-NEXT:    bnez a7, .LBB3_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_4:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sw a1, 0(a0)
-; RV64-FAST-NEXT:    ret
+; RV64-BOTH-LABEL: memset_4:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    li a3, 0
+; RV64-BOTH-NEXT:    li a4, 4
+; RV64-BOTH-NEXT:  .LBB3_1: # %remainderloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    add a5, a0, a3
+; RV64-BOTH-NEXT:    sb a1, 0(a5)
+; RV64-BOTH-NEXT:    addi a3, a3, 1
+; RV64-BOTH-NEXT:    srli a1, a1, 8
+; RV64-BOTH-NEXT:    slli a5, a2, 56
+; RV64-BOTH-NEXT:    or a1, a1, a5
+; RV64-BOTH-NEXT:    srli a2, a2, 8
+; RV64-BOTH-NEXT:    bltu a3, a4, .LBB3_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
+; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 4, i1 0)
   ret void
 }
 
 define void @memset_5(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_5:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lw a2, 0(a1)
-; RV32-NEXT:    lw a1, 4(a1)
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    srli a1, a2, 24
-; RV32-NEXT:    sb a1, 3(a0)
-; RV32-NEXT:    srli a1, a2, 16
-; RV32-NEXT:    sb a1, 2(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 1(a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_5:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a2, a1, 8
-; RV64-NEXT:    sb a2, 1(a0)
-; RV64-NEXT:    srli a1, a1, 32
-; RV64-NEXT:    sb a1, 4(a0)
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_5:
-; RV32-FAST:       # %bb.0:
-; RV32-FAST-NEXT:    lw a2, 4(a1)
-; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    sb a2, 4(a0)
-; RV32-FAST-NEXT:    sw a1, 0(a0)
-; RV32-FAST-NEXT:    ret
+; RV32-BOTH-LABEL: memset_5:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB4_1: # %remainderloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    add a7, a0, a5
+; RV32-BOTH-NEXT:    sb a1, 0(a7)
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz a7, a5
+; RV32-BOTH-NEXT:    add a6, a6, a7
+; RV32-BOTH-NEXT:    srli a1, a1, 8
+; RV32-BOTH-NEXT:    slli a7, a4, 24
+; RV32-BOTH-NEXT:    or a1, a1, a7
+; RV32-BOTH-NEXT:    srli a4, a4, 8
+; RV32-BOTH-NEXT:    slli a7, a3, 24
+; RV32-BOTH-NEXT:    or a4, a4, a7
+; RV32-BOTH-NEXT:    srli a3, a3, 8
+; RV32-BOTH-NEXT:    slli a7, a2, 24
+; RV32-BOTH-NEXT:    or a3, a3, a7
+; RV32-BOTH-NEXT:    seqz a7, a6
+; RV32-BOTH-NEXT:    sltiu t0, a5, 5
+; RV32-BOTH-NEXT:    and a7, a7, t0
+; RV32-BOTH-NEXT:    srli a2, a2, 8
+; RV32-BOTH-NEXT:    bnez a7, .LBB4_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_5:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sw a1, 0(a0)
-; RV64-FAST-NEXT:    srli a1, a1, 32
-; RV64-FAST-NEXT:    sb a1, 4(a0)
-; RV64-FAST-NEXT:    ret
+; RV64-BOTH-LABEL: memset_5:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    li a3, 0
+; RV64-BOTH-NEXT:    li a4, 5
+; RV64-BOTH-NEXT:  .LBB4_1: # %remainderloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    add a5, a0, a3
+; RV64-BOTH-NEXT:    sb a1, 0(a5)
+; RV64-BOTH-NEXT:    addi a3, a3, 1
+; RV64-BOTH-NEXT:    srli a1, a1, 8
+; RV64-BOTH-NEXT:    slli a5, a2, 56
+; RV64-BOTH-NEXT:    or a1, a1, a5
+; RV64-BOTH-NEXT:    srli a2, a2, 8
+; RV64-BOTH-NEXT:    bltu a3, a4, .LBB4_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
+; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 5, i1 0)
   ret void
 }
 
 define void @memset_6(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_6:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lw a2, 4(a1)
-; RV32-NEXT:    lw a1, 0(a1)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_6:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_6:
-; RV32-FAST:       # %bb.0:
-; RV32-FAST-NEXT:    lw a2, 4(a1)
-; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    sh a2, 4(a0)
-; RV32-FAST-NEXT:    sw a1, 0(a0)
-; RV32-FAST-NEXT:    ret
+; RV32-BOTH-LABEL: memset_6:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB5_1: # %remainderloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    add a7, a0, a5
+; RV32-BOTH-NEXT:    sb a1, 0(a7)
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz a7, a5
+; RV32-BOTH-NEXT:    add a6, a6, a7
+; RV32-BOTH-NEXT:    srli a1, a1, 8
+; RV32-BOTH-NEXT:    slli a7, a4, 24
+; RV32-BOTH-NEXT:    or a1, a1, a7
+; RV32-BOTH-NEXT:    srli a4, a4, 8
+; RV32-BOTH-NEXT:    slli a7, a3, 24
+; RV32-BOTH-NEXT:    or a4, a4, a7
+; RV32-BOTH-NEXT:    srli a3, a3, 8
+; RV32-BOTH-NEXT:    slli a7, a2, 24
+; RV32-BOTH-NEXT:    or a3, a3, a7
+; RV32-BOTH-NEXT:    seqz a7, a6
+; RV32-BOTH-NEXT:    sltiu t0, a5, 6
+; RV32-BOTH-NEXT:    and a7, a7, t0
+; RV32-BOTH-NEXT:    srli a2, a2, 8
+; RV32-BOTH-NEXT:    bnez a7, .LBB5_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_6:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sw a1, 0(a0)
-; RV64-FAST-NEXT:    srli a1, a1, 32
-; RV64-FAST-NEXT:    sh a1, 4(a0)
-; RV64-FAST-NEXT:    ret
+; RV64-BOTH-LABEL: memset_6:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    li a3, 0
+; RV64-BOTH-NEXT:    li a4, 6
+; RV64-BOTH-NEXT:  .LBB5_1: # %remainderloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    add a5, a0, a3
+; RV64-BOTH-NEXT:    sb a1, 0(a5)
+; RV64-BOTH-NEXT:    addi a3, a3, 1
+; RV64-BOTH-NEXT:    srli a1, a1, 8
+; RV64-BOTH-NEXT:    slli a5, a2, 56
+; RV64-BOTH-NEXT:    or a1, a1, a5
+; RV64-BOTH-NEXT:    srli a2, a2, 8
+; RV64-BOTH-NEXT:    bltu a3, a4, .LBB5_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
+; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 6, i1 0)
   ret void
 }
 
 define void @memset_7(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_7:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lw a2, 4(a1)
-; RV32-NEXT:    lw a1, 0(a1)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a3, a2, 8
-; RV32-NEXT:    sb a3, 5(a0)
-; RV32-NEXT:    srli a2, a2, 16
-; RV32-NEXT:    sb a2, 6(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_7:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_7:
-; RV32-FAST:       # %bb.0:
-; RV32-FAST-NEXT:    lw a2, 4(a1)
-; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    sh a2, 4(a0)
-; RV32-FAST-NEXT:    sw a1, 0(a0)
-; RV32-FAST-NEXT:    srli a2, a2, 16
-; RV32-FAST-NEXT:    sb a2, 6(a0)
-; RV32-FAST-NEXT:    ret
+; RV32-BOTH-LABEL: memset_7:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB6_1: # %remainderloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    add a7, a0, a5
+; RV32-BOTH-NEXT:    sb a1, 0(a7)
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz a7, a5
+; RV32-BOTH-NEXT:    add a6, a6, a7
+; RV32-BOTH-NEXT:    srli a1, a1, 8
+; RV32-BOTH-NEXT:    slli a7, a4, 24
+; RV32-BOTH-NEXT:    or a1, a1, a7
+; RV32-BOTH-NEXT:    srli a4, a4, 8
+; RV32-BOTH-NEXT:    slli a7, a3, 24
+; RV32-BOTH-NEXT:    or a4, a4, a7
+; RV32-BOTH-NEXT:    srli a3, a3, 8
+; RV32-BOTH-NEXT:    slli a7, a2, 24
+; RV32-BOTH-NEXT:    or a3, a3, a7
+; RV32-BOTH-NEXT:    seqz a7, a6
+; RV32-BOTH-NEXT:    sltiu t0, a5, 7
+; RV32-BOTH-NEXT:    and a7, a7, t0
+; RV32-BOTH-NEXT:    srli a2, a2, 8
+; RV32-BOTH-NEXT:    bnez a7, .LBB6_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_7:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sw a1, 0(a0)
-; RV64-FAST-NEXT:    srli a2, a1, 48
-; RV64-FAST-NEXT:    sb a2, 6(a0)
-; RV64-FAST-NEXT:    srli a1, a1, 32
-; RV64-FAST-NEXT:    sh a1, 4(a0)
-; RV64-FAST-NEXT:    ret
+; RV64-BOTH-LABEL: memset_7:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    li a3, 0
+; RV64-BOTH-NEXT:    li a4, 7
+; RV64-BOTH-NEXT:  .LBB6_1: # %remainderloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    add a5, a0, a3
+; RV64-BOTH-NEXT:    sb a1, 0(a5)
+; RV64-BOTH-NEXT:    addi a3, a3, 1
+; RV64-BOTH-NEXT:    srli a1, a1, 8
+; RV64-BOTH-NEXT:    slli a5, a2, 56
+; RV64-BOTH-NEXT:    or a1, a1, a5
+; RV64-BOTH-NEXT:    srli a2, a2, 8
+; RV64-BOTH-NEXT:    bltu a3, a4, .LBB6_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
+; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 7, i1 0)
   ret void
 }
 
 define void @memset_8(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lw a2, 4(a1)
-; RV32-NEXT:    lw a1, 0(a1)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a3, a2, 24
-; RV32-NEXT:    sb a3, 7(a0)
-; RV32-NEXT:    srli a3, a2, 16
-; RV32-NEXT:    sb a3, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_8:
-; RV32-FAST:       # %bb.0:
-; RV32-FAST-NEXT:    lw a2, 4(a1)
-; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    sw a2, 4(a0)
-; RV32-FAST-NEXT:    sw a1, 0(a0)
-; RV32-FAST-NEXT:    ret
+; RV32-BOTH-LABEL: memset_8:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB7_1: # %remainderloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    add a7, a0, a5
+; RV32-BOTH-NEXT:    sb a1, 0(a7)
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz a7, a5
+; RV32-BOTH-NEXT:    add a6, a6, a7
+; RV32-BOTH-NEXT:    srli a1, a1, 8
+; RV32-BOTH-NEXT:    slli a7, a4, 24
+; RV32-BOTH-NEXT:    or a1, a1, a7
+; RV32-BOTH-NEXT:    srli a4, a4, 8
+; RV32-BOTH-NEXT:    slli a7, a3, 24
+; RV32-BOTH-NEXT:    or a4, a4, a7
+; RV32-BOTH-NEXT:    srli a3, a3, 8
+; RV32-BOTH-NEXT:    slli a7, a2, 24
+; RV32-BOTH-NEXT:    or a3, a3, a7
+; RV32-BOTH-NEXT:    seqz a7, a6
+; RV32-BOTH-NEXT:    sltiu t0, a5, 8
+; RV32-BOTH-NEXT:    and a7, a7, t0
+; RV32-BOTH-NEXT:    srli a2, a2, 8
+; RV32-BOTH-NEXT:    bnez a7, .LBB7_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_8:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sd a1, 0(a0)
-; RV64-FAST-NEXT:    ret
+; RV64-BOTH-LABEL: memset_8:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    li a3, 0
+; RV64-BOTH-NEXT:    li a4, 8
+; RV64-BOTH-NEXT:  .LBB7_1: # %remainderloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    add a5, a0, a3
+; RV64-BOTH-NEXT:    sb a1, 0(a5)
+; RV64-BOTH-NEXT:    addi a3, a3, 1
+; RV64-BOTH-NEXT:    srli a1, a1, 8
+; RV64-BOTH-NEXT:    slli a5, a2, 56
+; RV64-BOTH-NEXT:    or a1, a1, a5
+; RV64-BOTH-NEXT:    srli a2, a2, 8
+; RV64-BOTH-NEXT:    bltu a3, a4, .LBB7_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
+; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 8, i1 0)
   ret void
 }
 
 define void @memset_9(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_9:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lw a2, 4(a1)
-; RV32-NEXT:    lw a3, 0(a1)
-; RV32-NEXT:    lw a1, 8(a1)
-; RV32-NEXT:    sb a2, 4(a0)
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a1, 8(a0)
-; RV32-NEXT:    srli a1, a2, 24
-; RV32-NEXT:    sb a1, 7(a0)
-; RV32-NEXT:    srli a1, a2, 16
-; RV32-NEXT:    sb a1, 6(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 5(a0)
-; RV32-NEXT:    srli a1, a3, 24
-; RV32-NEXT:    sb a1, 3(a0)
-; RV32-NEXT:    srli a1, a3, 16
-; RV32-NEXT:    sb a1, 2(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 1(a0)
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_9:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    sb a2, 8(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_9:
-; RV32-FAST:       # %bb.0:
-; RV32-FAST-NEXT:    lw a2, 4(a1)
-; RV32-FAST-NEXT:    lw a3, 0(a1)
-; RV32-FAST-NEXT:    lw a1, 8(a1)
-; RV32-FAST-NEXT:    sw a2, 4(a0)
-; RV32-FAST-NEXT:    sw a3, 0(a0)
-; RV32-FAST-NEXT:    sb a1, 8(a0)
-; RV32-FAST-NEXT:    ret
+; RV32-BOTH-LABEL: memset_9:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB8_1: # %remainderloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    add a7, a0, a5
+; RV32-BOTH-NEXT:    sb a1, 0(a7)
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz a7, a5
+; RV32-BOTH-NEXT:    add a6, a6, a7
+; RV32-BOTH-NEXT:    srli a1, a1, 8
+; RV32-BOTH-NEXT:    slli a7, a4, 24
+; RV32-BOTH-NEXT:    or a1, a1, a7
+; RV32-BOTH-NEXT:    srli a4, a4, 8
+; RV32-BOTH-NEXT:    slli a7, a3, 24
+; RV32-BOTH-NEXT:    or a4, a4, a7
+; RV32-BOTH-NEXT:    srli a3, a3, 8
+; RV32-BOTH-NEXT:    slli a7, a2, 24
+; RV32-BOTH-NEXT:    or a3, a3, a7
+; RV32-BOTH-NEXT:    seqz a7, a6
+; RV32-BOTH-NEXT:    sltiu t0, a5, 9
+; RV32-BOTH-NEXT:    and a7, a7, t0
+; RV32-BOTH-NEXT:    srli a2, a2, 8
+; RV32-BOTH-NEXT:    bnez a7, .LBB8_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_9:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sb a2, 8(a0)
-; RV64-FAST-NEXT:    sd a1, 0(a0)
-; RV64-FAST-NEXT:    ret
+; RV64-BOTH-LABEL: memset_9:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    li a3, 0
+; RV64-BOTH-NEXT:    li a4, 9
+; RV64-BOTH-NEXT:  .LBB8_1: # %remainderloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    add a5, a0, a3
+; RV64-BOTH-NEXT:    sb a1, 0(a5)
+; RV64-BOTH-NEXT:    addi a3, a3, 1
+; RV64-BOTH-NEXT:    srli a1, a1, 8
+; RV64-BOTH-NEXT:    slli a5, a2, 56
+; RV64-BOTH-NEXT:    or a1, a1, a5
+; RV64-BOTH-NEXT:    srli a2, a2, 8
+; RV64-BOTH-NEXT:    bltu a3, a4, .LBB8_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
+; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 9, i1 0)
   ret void
 }
 
 define void @memset_16(ptr %a, i128 %value) nounwind {
 ; RV32-LABEL: memset_16:
-; RV32:       # %bb.0:
+; RV32:       # %bb.0: # %storeloop.preheader
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 4(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    lw a2, 12(a1)
-; RV32-NEXT:    lw a3, 8(a1)
-; RV32-NEXT:    lw a4, 4(a1)
-; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    lw a3, 0(a1)
+; RV32-NEXT:    lw a4, 8(a1)
+; RV32-NEXT:    lw a1, 4(a1)
+; RV32-NEXT:    addi a5, a0, 256
+; RV32-NEXT:    srli a6, a3, 24
+; RV32-NEXT:    srli a7, a3, 16
+; RV32-NEXT:    srli t0, a3, 8
+; RV32-NEXT:    srli t1, a1, 24
+; RV32-NEXT:    srli t2, a1, 16
+; RV32-NEXT:    srli t3, a1, 8
+; RV32-NEXT:    srli t4, a4, 24
+; RV32-NEXT:    srli t5, a4, 16
+; RV32-NEXT:    srli t6, a4, 8
+; RV32-NEXT:    srli s0, a2, 24
+; RV32-NEXT:    srli s1, a2, 16
+; RV32-NEXT:    srli s2, a2, 8
+; RV32-NEXT:  .LBB9_1: # %storeloop
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    sb a6, 3(a0)
+; RV32-NEXT:    sb a7, 2(a0)
+; RV32-NEXT:    sb t0, 1(a0)
+; RV32-NEXT:    sb a4, 8(a0)
+; RV32-NEXT:    sb t1, 7(a0)
+; RV32-NEXT:    sb t2, 6(a0)
+; RV32-NEXT:    sb t3, 5(a0)
 ; RV32-NEXT:    sb a2, 12(a0)
-; RV32-NEXT:    sb a3, 8(a0)
-; RV32-NEXT:    sb a4, 4(a0)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    srli a5, a2, 24
-; RV32-NEXT:    sb a5, 15(a0)
-; RV32-NEXT:    srli a5, a2, 16
-; RV32-NEXT:    sb a5, 14(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 13(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 11(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 10(a0)
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 9(a0)
-; RV32-NEXT:    srli a2, a4, 24
-; RV32-NEXT:    sb a2, 7(a0)
-; RV32-NEXT:    srli a2, a4, 16
-; RV32-NEXT:    sb a2, 6(a0)
-; RV32-NEXT:    srli a4, a4, 8
-; RV32-NEXT:    sb a4, 5(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    sb t4, 11(a0)
+; RV32-NEXT:    sb t5, 10(a0)
+; RV32-NEXT:    sb t6, 9(a0)
+; RV32-NEXT:    sb s0, 15(a0)
+; RV32-NEXT:    sb s1, 14(a0)
+; RV32-NEXT:    sb s2, 13(a0)
+; RV32-NEXT:    addi a0, a0, 256
+; RV32-NEXT:    bne a0, a5, .LBB9_1
+; RV32-NEXT:  # %bb.2: # %split
+; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: memset_16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a2, 8(a0)
+; RV64:       # %bb.0: # %storeloop.preheader
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi a3, a0, 256
+; RV64-NEXT:    srli a4, a1, 56
+; RV64-NEXT:    srli a5, a1, 48
+; RV64-NEXT:    srli a6, a1, 40
+; RV64-NEXT:    srli a7, a1, 32
+; RV64-NEXT:    srli t0, a1, 24
+; RV64-NEXT:    srli t1, a1, 16
+; RV64-NEXT:    srli t2, a1, 8
+; RV64-NEXT:    srli t3, a2, 56
+; RV64-NEXT:    srli t4, a2, 48
+; RV64-NEXT:    srli t5, a2, 40
+; RV64-NEXT:    srli t6, a2, 32
+; RV64-NEXT:    srli s0, a2, 24
+; RV64-NEXT:    srli s1, a2, 16
+; RV64-NEXT:    srli s2, a2, 8
+; RV64-NEXT:  .LBB9_1: # %storeloop
+; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 15(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 14(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 13(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 12(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 11(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 10(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 9(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    sb a2, 8(a0)
+; RV64-NEXT:    sb a4, 7(a0)
+; RV64-NEXT:    sb a5, 6(a0)
+; RV64-NEXT:    sb a6, 5(a0)
+; RV64-NEXT:    sb a7, 4(a0)
+; RV64-NEXT:    sb t0, 3(a0)
+; RV64-NEXT:    sb t1, 2(a0)
+; RV64-NEXT:    sb t2, 1(a0)
+; RV64-NEXT:    sb t3, 15(a0)
+; RV64-NEXT:    sb t4, 14(a0)
+; RV64-NEXT:    sb t5, 13(a0)
+; RV64-NEXT:    sb t6, 12(a0)
+; RV64-NEXT:    sb s0, 11(a0)
+; RV64-NEXT:    sb s1, 10(a0)
+; RV64-NEXT:    sb s2, 9(a0)
+; RV64-NEXT:    addi a0, a0, 256
+; RV64-NEXT:    bne a0, a3, .LBB9_1
+; RV64-NEXT:  # %bb.2: # %split
+; RV64-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
 ;
 ; RV32-FAST-LABEL: memset_16:
-; RV32-FAST:       # %bb.0:
+; RV32-FAST:       # %bb.0: # %storeloop.preheader
 ; RV32-FAST-NEXT:    lw a2, 12(a1)
 ; RV32-FAST-NEXT:    lw a3, 8(a1)
 ; RV32-FAST-NEXT:    lw a4, 4(a1)
 ; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    sw a2, 12(a0)
-; RV32-FAST-NEXT:    sw a3, 8(a0)
-; RV32-FAST-NEXT:    sw a4, 4(a0)
+; RV32-FAST-NEXT:    addi a5, a0, 256
+; RV32-FAST-NEXT:  .LBB9_1: # %storeloop
+; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    sw a4, 4(a0)
+; RV32-FAST-NEXT:    sw a3, 8(a0)
+; RV32-FAST-NEXT:    sw a2, 12(a0)
+; RV32-FAST-NEXT:    addi a0, a0, 256
+; RV32-FAST-NEXT:    bne a0, a5, .LBB9_1
+; RV32-FAST-NEXT:  # %bb.2: # %split
 ; RV32-FAST-NEXT:    ret
 ;
 ; RV64-FAST-LABEL: memset_16:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sd a2, 8(a0)
+; RV64-FAST:       # %bb.0: # %storeloop.preheader
+; RV64-FAST-NEXT:    addi a3, a0, 256
+; RV64-FAST-NEXT:  .LBB9_1: # %storeloop
+; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-FAST-NEXT:    sd a1, 0(a0)
+; RV64-FAST-NEXT:    sd a2, 8(a0)
+; RV64-FAST-NEXT:    addi a0, a0, 256
+; RV64-FAST-NEXT:    bne a0, a3, .LBB9_1
+; RV64-FAST-NEXT:  # %bb.2: # %split
 ; RV64-FAST-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 16, i1 0)
   ret void
@@ -495,97 +619,444 @@ define void @memset_16(ptr %a, i128 %value) nounwind {
 
 define void @memset_17(ptr %a, i128 %value) nounwind {
 ; RV32-LABEL: memset_17:
-; RV32:       # %bb.0:
+; RV32:       # %bb.0: # %storeloop.preheader
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 4(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    lw a2, 12(a1)
-; RV32-NEXT:    lw a3, 8(a1)
-; RV32-NEXT:    lw a4, 4(a1)
-; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    lw a3, 0(a1)
+; RV32-NEXT:    lw a4, 8(a1)
+; RV32-NEXT:    lw a1, 4(a1)
+; RV32-NEXT:    addi a5, a0, 256
+; RV32-NEXT:    srli a6, a3, 24
+; RV32-NEXT:    srli a7, a3, 16
+; RV32-NEXT:    srli t0, a3, 8
+; RV32-NEXT:    srli t1, a1, 24
+; RV32-NEXT:    srli t2, a1, 16
+; RV32-NEXT:    srli t3, a1, 8
+; RV32-NEXT:    srli t4, a4, 24
+; RV32-NEXT:    srli t5, a4, 16
+; RV32-NEXT:    srli t6, a4, 8
+; RV32-NEXT:    srli s0, a2, 24
+; RV32-NEXT:    srli s1, a2, 16
+; RV32-NEXT:    srli s2, a2, 8
+; RV32-NEXT:  .LBB10_1: # %storeloop
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    sb a1, 4(a0)
+; RV32-NEXT:    sb a6, 3(a0)
+; RV32-NEXT:    sb a7, 2(a0)
+; RV32-NEXT:    sb t0, 1(a0)
+; RV32-NEXT:    sb a4, 8(a0)
+; RV32-NEXT:    sb t1, 7(a0)
+; RV32-NEXT:    sb t2, 6(a0)
+; RV32-NEXT:    sb t3, 5(a0)
 ; RV32-NEXT:    sb a2, 12(a0)
-; RV32-NEXT:    sb a3, 8(a0)
-; RV32-NEXT:    sb a4, 4(a0)
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    sb a1, 16(a0)
-; RV32-NEXT:    srli a5, a2, 24
-; RV32-NEXT:    sb a5, 15(a0)
-; RV32-NEXT:    srli a5, a2, 16
-; RV32-NEXT:    sb a5, 14(a0)
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    sb a2, 13(a0)
-; RV32-NEXT:    srli a2, a3, 24
-; RV32-NEXT:    sb a2, 11(a0)
-; RV32-NEXT:    srli a2, a3, 16
-; RV32-NEXT:    sb a2, 10(a0)
+; RV32-NEXT:    sb t4, 11(a0)
+; RV32-NEXT:    sb t5, 10(a0)
+; RV32-NEXT:    sb t6, 9(a0)
+; RV32-NEXT:    sb s0, 15(a0)
+; RV32-NEXT:    sb s1, 14(a0)
+; RV32-NEXT:    sb s2, 13(a0)
+; RV32-NEXT:    addi a0, a0, 256
+; RV32-NEXT:    bne a0, a5, .LBB10_1
+; RV32-NEXT:  # %bb.2: # %remcheck
+; RV32-NEXT:    li a5, 0
+; RV32-NEXT:    li a6, 0
+; RV32-NEXT:  .LBB10_3: # %remainderloop
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    add a7, a0, a5
+; RV32-NEXT:    sb a3, 0(a7)
+; RV32-NEXT:    addi a5, a5, 1
+; RV32-NEXT:    seqz a7, a5
+; RV32-NEXT:    add a6, a6, a7
+; RV32-NEXT:    or a7, a5, a6
 ; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    sb a3, 9(a0)
-; RV32-NEXT:    srli a2, a4, 24
-; RV32-NEXT:    sb a2, 7(a0)
-; RV32-NEXT:    srli a2, a4, 16
-; RV32-NEXT:    sb a2, 6(a0)
-; RV32-NEXT:    srli a4, a4, 8
-; RV32-NEXT:    sb a4, 5(a0)
-; RV32-NEXT:    srli a2, a1, 24
-; RV32-NEXT:    sb a2, 3(a0)
-; RV32-NEXT:    srli a2, a1, 16
-; RV32-NEXT:    sb a2, 2(a0)
+; RV32-NEXT:    slli t0, a1, 24
+; RV32-NEXT:    or a3, a3, t0
 ; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    sb a1, 1(a0)
+; RV32-NEXT:    slli t0, a4, 24
+; RV32-NEXT:    or a1, a1, t0
+; RV32-NEXT:    srli a4, a4, 8
+; RV32-NEXT:    slli t0, a2, 24
+; RV32-NEXT:    or a4, a4, t0
+; RV32-NEXT:    srli a2, a2, 8
+; RV32-NEXT:    beqz a7, .LBB10_3
+; RV32-NEXT:  # %bb.4: # %split
+; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: memset_17:
-; RV64:       # %bb.0:
-; RV64-NEXT:    sb a2, 8(a0)
+; RV64:       # %bb.0: # %storeloop.preheader
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi a3, a0, 256
+; RV64-NEXT:    srli a4, a1, 56
+; RV64-NEXT:    srli a5, a1, 48
+; RV64-NEXT:    srli a6, a1, 40
+; RV64-NEXT:    srli a7, a1, 32
+; RV64-NEXT:    srli t0, a1, 24
+; RV64-NEXT:    srli t1, a1, 16
+; RV64-NEXT:    srli t2, a1, 8
+; RV64-NEXT:    srli t3, a2, 56
+; RV64-NEXT:    srli t4, a2, 48
+; RV64-NEXT:    srli t5, a2, 40
+; RV64-NEXT:    srli t6, a2, 32
+; RV64-NEXT:    srli s0, a2, 24
+; RV64-NEXT:    srli s1, a2, 16
+; RV64-NEXT:    srli s2, a2, 8
+; RV64-NEXT:  .LBB10_1: # %storeloop
+; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    sb a1, 16(a0)
-; RV64-NEXT:    srli a3, a2, 56
-; RV64-NEXT:    sb a3, 15(a0)
-; RV64-NEXT:    srli a3, a2, 48
-; RV64-NEXT:    sb a3, 14(a0)
-; RV64-NEXT:    srli a3, a2, 40
-; RV64-NEXT:    sb a3, 13(a0)
-; RV64-NEXT:    srli a3, a2, 32
-; RV64-NEXT:    sb a3, 12(a0)
-; RV64-NEXT:    srli a3, a2, 24
-; RV64-NEXT:    sb a3, 11(a0)
-; RV64-NEXT:    srli a3, a2, 16
-; RV64-NEXT:    sb a3, 10(a0)
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    sb a2, 9(a0)
-; RV64-NEXT:    srli a2, a1, 56
-; RV64-NEXT:    sb a2, 7(a0)
-; RV64-NEXT:    srli a2, a1, 48
-; RV64-NEXT:    sb a2, 6(a0)
-; RV64-NEXT:    srli a2, a1, 40
-; RV64-NEXT:    sb a2, 5(a0)
-; RV64-NEXT:    srli a2, a1, 32
-; RV64-NEXT:    sb a2, 4(a0)
-; RV64-NEXT:    srli a2, a1, 24
-; RV64-NEXT:    sb a2, 3(a0)
-; RV64-NEXT:    srli a2, a1, 16
-; RV64-NEXT:    sb a2, 2(a0)
+; RV64-NEXT:    sb a2, 8(a0)
+; RV64-NEXT:    sb a4, 7(a0)
+; RV64-NEXT:    sb a5, 6(a0)
+; RV64-NEXT:    sb a6, 5(a0)
+; RV64-NEXT:    sb a7, 4(a0)
+; RV64-NEXT:    sb t0, 3(a0)
+; RV64-NEXT:    sb t1, 2(a0)
+; RV64-NEXT:    sb t2, 1(a0)
+; RV64-NEXT:    sb t3, 15(a0)
+; RV64-NEXT:    sb t4, 14(a0)
+; RV64-NEXT:    sb t5, 13(a0)
+; RV64-NEXT:    sb t6, 12(a0)
+; RV64-NEXT:    sb s0, 11(a0)
+; RV64-NEXT:    sb s1, 10(a0)
+; RV64-NEXT:    sb s2, 9(a0)
+; RV64-NEXT:    addi a0, a0, 256
+; RV64-NEXT:    bne a0, a3, .LBB10_1
+; RV64-NEXT:  # %bb.2: # %remcheck
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:  .LBB10_3: # %remainderloop
+; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-NEXT:    add a4, a0, a3
+; RV64-NEXT:    sb a1, 0(a4)
+; RV64-NEXT:    addi a3, a3, 1
 ; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    sb a1, 1(a0)
+; RV64-NEXT:    slli a4, a2, 56
+; RV64-NEXT:    or a1, a1, a4
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    beqz a3, .LBB10_3
+; RV64-NEXT:  # %bb.4: # %split
+; RV64-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
 ;
 ; RV32-FAST-LABEL: memset_17:
-; RV32-FAST:       # %bb.0:
+; RV32-FAST:       # %bb.0: # %storeloop.preheader
 ; RV32-FAST-NEXT:    lw a2, 12(a1)
 ; RV32-FAST-NEXT:    lw a3, 8(a1)
 ; RV32-FAST-NEXT:    lw a4, 4(a1)
 ; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    sw a2, 12(a0)
-; RV32-FAST-NEXT:    sw a3, 8(a0)
-; RV32-FAST-NEXT:    sw a4, 4(a0)
+; RV32-FAST-NEXT:    addi a5, a0, 256
+; RV32-FAST-NEXT:  .LBB10_1: # %storeloop
+; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-FAST-NEXT:    sw a1, 0(a0)
-; RV32-FAST-NEXT:    sb a1, 16(a0)
+; RV32-FAST-NEXT:    sw a4, 4(a0)
+; RV32-FAST-NEXT:    sw a3, 8(a0)
+; RV32-FAST-NEXT:    sw a2, 12(a0)
+; RV32-FAST-NEXT:    addi a0, a0, 256
+; RV32-FAST-NEXT:    bne a0, a5, .LBB10_1
+; RV32-FAST-NEXT:  # %bb.2: # %remcheck
+; RV32-FAST-NEXT:    li a5, 0
+; RV32-FAST-NEXT:    li a6, 0
+; RV32-FAST-NEXT:  .LBB10_3: # %remainderloop
+; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-FAST-NEXT:    add a7, a0, a5
+; RV32-FAST-NEXT:    sb a1, 0(a7)
+; RV32-FAST-NEXT:    addi a5, a5, 1
+; RV32-FAST-NEXT:    seqz a7, a5
+; RV32-FAST-NEXT:    add a6, a6, a7
+; RV32-FAST-NEXT:    or a7, a5, a6
+; RV32-FAST-NEXT:    srli a1, a1, 8
+; RV32-FAST-NEXT:    slli t0, a4, 24
+; RV32-FAST-NEXT:    or a1, a1, t0
+; RV32-FAST-NEXT:    srli a4, a4, 8
+; RV32-FAST-NEXT:    slli t0, a3, 24
+; RV32-FAST-NEXT:    or a4, a4, t0
+; RV32-FAST-NEXT:    srli a3, a3, 8
+; RV32-FAST-NEXT:    slli t0, a2, 24
+; RV32-FAST-NEXT:    or a3, a3, t0
+; RV32-FAST-NEXT:    srli a2, a2, 8
+; RV32-FAST-NEXT:    beqz a7, .LBB10_3
+; RV32-FAST-NEXT:  # %bb.4: # %split
 ; RV32-FAST-NEXT:    ret
 ;
 ; RV64-FAST-LABEL: memset_17:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    sd a2, 8(a0)
+; RV64-FAST:       # %bb.0: # %storeloop.preheader
+; RV64-FAST-NEXT:    addi a3, a0, 256
+; RV64-FAST-NEXT:  .LBB10_1: # %storeloop
+; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-FAST-NEXT:    sd a1, 0(a0)
-; RV64-FAST-NEXT:    sb a1, 16(a0)
+; RV64-FAST-NEXT:    sd a2, 8(a0)
+; RV64-FAST-NEXT:    addi a0, a0, 256
+; RV64-FAST-NEXT:    bne a0, a3, .LBB10_1
+; RV64-FAST-NEXT:  # %bb.2: # %remcheck
+; RV64-FAST-NEXT:    li a3, 0
+; RV64-FAST-NEXT:  .LBB10_3: # %remainderloop
+; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-FAST-NEXT:    add a4, a0, a3
+; RV64-FAST-NEXT:    sb a1, 0(a4)
+; RV64-FAST-NEXT:    addi a3, a3, 1
+; RV64-FAST-NEXT:    srli a1, a1, 8
+; RV64-FAST-NEXT:    slli a4, a2, 56
+; RV64-FAST-NEXT:    or a1, a1, a4
+; RV64-FAST-NEXT:    srli a2, a2, 8
+; RV64-FAST-NEXT:    beqz a3, .LBB10_3
+; RV64-FAST-NEXT:  # %bb.4: # %split
 ; RV64-FAST-NEXT:    ret
   tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 17, i1 0)
   ret void
 }
 
+define void @memset_x(ptr %a, i128 %value, i64 %x) nounwind {
+; RV32-LABEL: memset_x:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s3, 0(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lw a4, 12(a1)
+; RV32-NEXT:    lw a5, 8(a1)
+; RV32-NEXT:    lw a6, 4(a1)
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    slli a7, a3, 28
+; RV32-NEXT:    srli t0, a2, 4
+; RV32-NEXT:    or a7, t0, a7
+; RV32-NEXT:    srli a3, a3, 4
+; RV32-NEXT:    or a3, a7, a3
+; RV32-NEXT:    andi a2, a2, 15
+; RV32-NEXT:    beqz a3, .LBB11_3
+; RV32-NEXT:  # %bb.1: # %storeloop.preheader
+; RV32-NEXT:    slli a3, a7, 8
+; RV32-NEXT:    add a3, a0, a3
+; RV32-NEXT:    srli a7, a1, 24
+; RV32-NEXT:    srli t0, a1, 16
+; RV32-NEXT:    srli t1, a1, 8
+; RV32-NEXT:    srli t2, a6, 24
+; RV32-NEXT:    srli t3, a6, 16
+; RV32-NEXT:    srli t4, a6, 8
+; RV32-NEXT:    srli t5, a5, 24
+; RV32-NEXT:    srli t6, a5, 16
+; RV32-NEXT:    srli s0, a5, 8
+; RV32-NEXT:    srli s1, a4, 24
+; RV32-NEXT:    srli s2, a4, 16
+; RV32-NEXT:    srli s3, a4, 8
+; RV32-NEXT:  .LBB11_2: # %storeloop
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    sb a6, 4(a0)
+; RV32-NEXT:    sb a7, 3(a0)
+; RV32-NEXT:    sb t0, 2(a0)
+; RV32-NEXT:    sb t1, 1(a0)
+; RV32-NEXT:    sb a5, 8(a0)
+; RV32-NEXT:    sb t2, 7(a0)
+; RV32-NEXT:    sb t3, 6(a0)
+; RV32-NEXT:    sb t4, 5(a0)
+; RV32-NEXT:    sb a4, 12(a0)
+; RV32-NEXT:    sb t5, 11(a0)
+; RV32-NEXT:    sb t6, 10(a0)
+; RV32-NEXT:    sb s0, 9(a0)
+; RV32-NEXT:    sb s1, 15(a0)
+; RV32-NEXT:    sb s2, 14(a0)
+; RV32-NEXT:    sb s3, 13(a0)
+; RV32-NEXT:    addi a0, a0, 256
+; RV32-NEXT:    bne a0, a3, .LBB11_2
+; RV32-NEXT:  .LBB11_3: # %remcheck
+; RV32-NEXT:    beqz a2, .LBB11_6
+; RV32-NEXT:  # %bb.4: # %remainderloop.preheader
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    li a7, 0
+; RV32-NEXT:  .LBB11_5: # %remainderloop
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    add t0, a0, a3
+; RV32-NEXT:    sb a1, 0(t0)
+; RV32-NEXT:    addi a3, a3, 1
+; RV32-NEXT:    seqz t0, a3
+; RV32-NEXT:    add a7, a7, t0
+; RV32-NEXT:    srli a1, a1, 8
+; RV32-NEXT:    slli t0, a6, 24
+; RV32-NEXT:    or a1, a1, t0
+; RV32-NEXT:    srli a6, a6, 8
+; RV32-NEXT:    slli t0, a5, 24
+; RV32-NEXT:    or a6, a6, t0
+; RV32-NEXT:    srli a5, a5, 8
+; RV32-NEXT:    slli t0, a4, 24
+; RV32-NEXT:    or a5, a5, t0
+; RV32-NEXT:    sltu t0, a3, a2
+; RV32-NEXT:    seqz t1, a7
+; RV32-NEXT:    and t0, t1, t0
+; RV32-NEXT:    srli a4, a4, 8
+; RV32-NEXT:    bnez t0, .LBB11_5
+; RV32-NEXT:  .LBB11_6: # %split
+; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s3, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: memset_x:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s3, 0(sp) # 8-byte Folded Spill
+; RV64-NEXT:    srli a4, a3, 4
+; RV64-NEXT:    andi a3, a3, 15
+; RV64-NEXT:    beqz a4, .LBB11_3
+; RV64-NEXT:  # %bb.1: # %storeloop.preheader
+; RV64-NEXT:    slli a4, a4, 8
+; RV64-NEXT:    add a4, a0, a4
+; RV64-NEXT:    srli a5, a1, 56
+; RV64-NEXT:    srli a6, a1, 48
+; RV64-NEXT:    srli a7, a1, 40
+; RV64-NEXT:    srli t0, a1, 32
+; RV64-NEXT:    srli t1, a1, 24
+; RV64-NEXT:    srli t2, a1, 16
+; RV64-NEXT:    srli t3, a1, 8
+; RV64-NEXT:    srli t4, a2, 56
+; RV64-NEXT:    srli t5, a2, 48
+; RV64-NEXT:    srli t6, a2, 40
+; RV64-NEXT:    srli s0, a2, 32
+; RV64-NEXT:    srli s1, a2, 24
+; RV64-NEXT:    srli s2, a2, 16
+; RV64-NEXT:    srli s3, a2, 8
+; RV64-NEXT:  .LBB11_2: # %storeloop
+; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    sb a2, 8(a0)
+; RV64-NEXT:    sb a5, 7(a0)
+; RV64-NEXT:    sb a6, 6(a0)
+; RV64-NEXT:    sb a7, 5(a0)
+; RV64-NEXT:    sb t0, 4(a0)
+; RV64-NEXT:    sb t1, 3(a0)
+; RV64-NEXT:    sb t2, 2(a0)
+; RV64-NEXT:    sb t3, 1(a0)
+; RV64-NEXT:    sb t4, 15(a0)
+; RV64-NEXT:    sb t5, 14(a0)
+; RV64-NEXT:    sb t6, 13(a0)
+; RV64-NEXT:    sb s0, 12(a0)
+; RV64-NEXT:    sb s1, 11(a0)
+; RV64-NEXT:    sb s2, 10(a0)
+; RV64-NEXT:    sb s3, 9(a0)
+; RV64-NEXT:    addi a0, a0, 256
+; RV64-NEXT:    bne a0, a4, .LBB11_2
+; RV64-NEXT:  .LBB11_3: # %remcheck
+; RV64-NEXT:    beqz a3, .LBB11_6
+; RV64-NEXT:  # %bb.4: # %remainderloop.preheader
+; RV64-NEXT:    li a4, 0
+; RV64-NEXT:  .LBB11_5: # %remainderloop
+; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-NEXT:    add a5, a0, a4
+; RV64-NEXT:    sb a1, 0(a5)
+; RV64-NEXT:    addi a4, a4, 1
+; RV64-NEXT:    srli a1, a1, 8
+; RV64-NEXT:    slli a5, a2, 56
+; RV64-NEXT:    or a1, a1, a5
+; RV64-NEXT:    srli a2, a2, 8
+; RV64-NEXT:    bltu a4, a3, .LBB11_5
+; RV64-NEXT:  .LBB11_6: # %split
+; RV64-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s3, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+;
+; RV32-FAST-LABEL: memset_x:
+; RV32-FAST:       # %bb.0:
+; RV32-FAST-NEXT:    lw a4, 12(a1)
+; RV32-FAST-NEXT:    lw a5, 8(a1)
+; RV32-FAST-NEXT:    lw a6, 4(a1)
+; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    slli a7, a3, 28
+; RV32-FAST-NEXT:    srli t0, a2, 4
+; RV32-FAST-NEXT:    or a7, t0, a7
+; RV32-FAST-NEXT:    srli a3, a3, 4
+; RV32-FAST-NEXT:    or a3, a7, a3
+; RV32-FAST-NEXT:    andi a2, a2, 15
+; RV32-FAST-NEXT:    beqz a3, .LBB11_3
+; RV32-FAST-NEXT:  # %bb.1: # %storeloop.preheader
+; RV32-FAST-NEXT:    slli a3, a7, 8
+; RV32-FAST-NEXT:    add a3, a0, a3
+; RV32-FAST-NEXT:  .LBB11_2: # %storeloop
+; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-FAST-NEXT:    sw a1, 0(a0)
+; RV32-FAST-NEXT:    sw a6, 4(a0)
+; RV32-FAST-NEXT:    sw a5, 8(a0)
+; RV32-FAST-NEXT:    sw a4, 12(a0)
+; RV32-FAST-NEXT:    addi a0, a0, 256
+; RV32-FAST-NEXT:    bne a0, a3, .LBB11_2
+; RV32-FAST-NEXT:  .LBB11_3: # %remcheck
+; RV32-FAST-NEXT:    beqz a2, .LBB11_6
+; RV32-FAST-NEXT:  # %bb.4: # %remainderloop.preheader
+; RV32-FAST-NEXT:    li a3, 0
+; RV32-FAST-NEXT:    li a7, 0
+; RV32-FAST-NEXT:  .LBB11_5: # %remainderloop
+; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-FAST-NEXT:    add t0, a0, a3
+; RV32-FAST-NEXT:    sb a1, 0(t0)
+; RV32-FAST-NEXT:    addi a3, a3, 1
+; RV32-FAST-NEXT:    seqz t0, a3
+; RV32-FAST-NEXT:    add a7, a7, t0
+; RV32-FAST-NEXT:    srli a1, a1, 8
+; RV32-FAST-NEXT:    slli t0, a6, 24
+; RV32-FAST-NEXT:    or a1, a1, t0
+; RV32-FAST-NEXT:    srli a6, a6, 8
+; RV32-FAST-NEXT:    slli t0, a5, 24
+; RV32-FAST-NEXT:    or a6, a6, t0
+; RV32-FAST-NEXT:    srli a5, a5, 8
+; RV32-FAST-NEXT:    slli t0, a4, 24
+; RV32-FAST-NEXT:    or a5, a5, t0
+; RV32-FAST-NEXT:    sltu t0, a3, a2
+; RV32-FAST-NEXT:    seqz t1, a7
+; RV32-FAST-NEXT:    and t0, t1, t0
+; RV32-FAST-NEXT:    srli a4, a4, 8
+; RV32-FAST-NEXT:    bnez t0, .LBB11_5
+; RV32-FAST-NEXT:  .LBB11_6: # %split
+; RV32-FAST-NEXT:    ret
+;
+; RV64-FAST-LABEL: memset_x:
+; RV64-FAST:       # %bb.0:
+; RV64-FAST-NEXT:    srli a4, a3, 4
+; RV64-FAST-NEXT:    andi a3, a3, 15
+; RV64-FAST-NEXT:    beqz a4, .LBB11_3
+; RV64-FAST-NEXT:  # %bb.1: # %storeloop.preheader
+; RV64-FAST-NEXT:    slli a4, a4, 8
+; RV64-FAST-NEXT:    add a4, a0, a4
+; RV64-FAST-NEXT:  .LBB11_2: # %storeloop
+; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-FAST-NEXT:    sd a1, 0(a0)
+; RV64-FAST-NEXT:    sd a2, 8(a0)
+; RV64-FAST-NEXT:    addi a0, a0, 256
+; RV64-FAST-NEXT:    bne a0, a4, .LBB11_2
+; RV64-FAST-NEXT:  .LBB11_3: # %remcheck
+; RV64-FAST-NEXT:    beqz a3, .LBB11_6
+; RV64-FAST-NEXT:  # %bb.4: # %remainderloop.preheader
+; RV64-FAST-NEXT:    li a4, 0
+; RV64-FAST-NEXT:  .LBB11_5: # %remainderloop
+; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-FAST-NEXT:    add a5, a0, a4
+; RV64-FAST-NEXT:    sb a1, 0(a5)
+; RV64-FAST-NEXT:    addi a4, a4, 1
+; RV64-FAST-NEXT:    srli a1, a1, 8
+; RV64-FAST-NEXT:    slli a5, a2, 56
+; RV64-FAST-NEXT:    or a1, a1, a5
+; RV64-FAST-NEXT:    srli a2, a2, 8
+; RV64-FAST-NEXT:    bltu a4, a3, .LBB11_5
+; RV64-FAST-NEXT:  .LBB11_6: # %split
+; RV64-FAST-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
+  ret void
+}

>From 7d1347c70884a80d115bf46937032cd302eb6b99 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 31 Jul 2024 10:30:16 +0100
Subject: [PATCH 05/24] Removed outdated comment

---
 llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index 8d7c234d16878f..b3c3b149bcd24a 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -468,7 +468,6 @@ static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
                        "implemented for big-endian targets",
                        false);
 
-  // To start with, let's assume SetValue is an i128 and bail out if it's not.
   if (!isPowerOf2_32(SetValue->getType()->getScalarSizeInBits()))
     report_fatal_error("Pattern width for memset_pattern must be a power of 2",
                        false);

>From 60ba68bef863c0bd93290d2b9ac6fdfd25cc9f5a Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 31 Jul 2024 10:38:45 +0100
Subject: [PATCH 06/24] Change memset_pattern to take a count rather than a
 number of bytes

Also simplify or remove tests that are no longer relevant, given that writes
of a partial pattern no longer need to be handled.
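
To illustrate the new semantics, a call such as the following (a sketch only;
%dst and %pattern are placeholder names) now performs eight stores of the
whole i128 pattern, i.e. writes 128 bytes, where the third argument was
previously interpreted as a byte count:

  ; The third argument is now a store count, not a byte count.
  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %dst, i128 %pattern,
                                                  i64 8, i1 0)

As a consequence, a partial pattern can no longer be requested, which is why
the remainder loop in the lowering is dropped below.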
---
 llvm/docs/LangRef.rst                         |    4 +-
 .../Transforms/Utils/LowerMemIntrinsics.cpp   |   76 +-
 llvm/test/CodeGen/RISCV/memset-pattern.ll     | 1019 ++---------------
 .../RISCV/memset-pattern.ll                   |  190 +--
 4 files changed, 137 insertions(+), 1152 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index cf8744a22dca1c..cfa42797012bc0 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15260,8 +15260,8 @@ Arguments:
 
 The first argument is a pointer to the destination to fill, the second
 is the value with which to fill it, the third argument is an integer
-argument specifying the number of bytes to fill, and the fourth is a boolean
-indicating a volatile access.
+argument specifying the number of times to store the value, and the fourth is a
+boolean indicating a volatile access.
 
 The :ref:`align <attr_align>` parameter attribute can be provided
 for the first argument.
diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index b3c3b149bcd24a..29d3ff3bed244b 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -457,7 +457,7 @@ static void createMemMoveLoop(Instruction *InsertBefore, Value *SrcAddr,
 }
 
 static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
-                                    Value *CopyLen, Value *SetValue,
+                                    Value *Count, Value *SetValue,
                                     Align DstAlign, bool IsVolatile) {
   BasicBlock *OrigBB = InsertBefore->getParent();
   Function *F = OrigBB->getParent();
@@ -471,91 +471,39 @@ static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
   if (!isPowerOf2_32(SetValue->getType()->getScalarSizeInBits()))
     report_fatal_error("Pattern width for memset_pattern must be a power of 2",
                        false);
-  unsigned PatternSize = SetValue->getType()->getScalarSizeInBits() / 8;
 
-  Type *TypeOfCopyLen = CopyLen->getType();
+  Type *TypeOfCount = Count->getType();
 
   BasicBlock *NewBB = OrigBB->splitBasicBlock(InsertBefore, "split");
   BasicBlock *LoopBB =
       BasicBlock::Create(F->getContext(), "storeloop", F, NewBB);
-  BasicBlock *RemCheckBB =
-      BasicBlock::Create(F->getContext(), "remcheck", F, NewBB);
-  BasicBlock *RemainderLoopBB =
-      BasicBlock::Create(F->getContext(), "remainderloop", F, NewBB);
   IRBuilder<> Builder(OrigBB->getTerminator());
 
-  ConstantInt *CILoopOpSize =
-      ConstantInt::get(dyn_cast<IntegerType>(TypeOfCopyLen), PatternSize);
-  Value *RuntimeLoopCount =
-      getRuntimeLoopCount(DL, Builder, CopyLen, CILoopOpSize, PatternSize);
-  Value *RuntimeRemainder =
-      getRuntimeLoopRemainder(DL, Builder, CopyLen, CILoopOpSize, PatternSize);
-
-  Builder.CreateCondBr(Builder.CreateICmpEQ(ConstantInt::get(TypeOfCopyLen, 0),
-                                            RuntimeLoopCount),
-                       RemCheckBB, LoopBB);
+  Builder.CreateCondBr(
+      Builder.CreateICmpEQ(ConstantInt::get(TypeOfCount, 0), Count), NewBB,
+      LoopBB);
   OrigBB->getTerminator()->eraseFromParent();
 
   IRBuilder<> LoopBuilder(LoopBB);
   PHINode *CurrentDst = LoopBuilder.CreatePHI(DstAddr->getType(), 0);
   CurrentDst->addIncoming(DstAddr, OrigBB);
-  PHINode *LoopCount = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
-  LoopCount->addIncoming(RuntimeLoopCount, OrigBB);
+  PHINode *LoopCount = LoopBuilder.CreatePHI(TypeOfCount, 0);
+  LoopCount->addIncoming(Count, OrigBB);
 
   // Create the store instruction for the pattern
   LoopBuilder.CreateAlignedStore(SetValue, CurrentDst, DstAlign, IsVolatile);
 
   Value *NextDst = LoopBuilder.CreateInBoundsGEP(
-      SetValue->getType(), CurrentDst,
-      ConstantInt::get(TypeOfCopyLen, PatternSize));
+      SetValue->getType(), CurrentDst, ConstantInt::get(TypeOfCount, 1));
   CurrentDst->addIncoming(NextDst, LoopBB);
 
   Value *NewLoopCount =
-      LoopBuilder.CreateSub(LoopCount, ConstantInt::get(TypeOfCopyLen, 1));
+      LoopBuilder.CreateSub(LoopCount, ConstantInt::get(TypeOfCount, 1));
   LoopCount->addIncoming(NewLoopCount, LoopBB);
 
   LoopBuilder.CreateCondBr(
-      LoopBuilder.CreateICmpNE(NewLoopCount,
-                               ConstantInt::get(TypeOfCopyLen, 0)),
-      LoopBB, RemCheckBB);
-
-  IRBuilder<> RemCheckBuilder(RemCheckBB, RemCheckBB->begin());
-  // Branch to the end if there are no remainder bytes.
-  PHINode *RemainderDstPHI = RemCheckBuilder.CreatePHI(NextDst->getType(), 0);
-  RemainderDstPHI->addIncoming(DstAddr, OrigBB);
-  RemainderDstPHI->addIncoming(NextDst, LoopBB);
-  RemCheckBuilder.CreateCondBr(
-      RemCheckBuilder.CreateICmpEQ(RuntimeRemainder,
-                                   ConstantInt::get(TypeOfCopyLen, 0)),
-      NewBB, RemainderLoopBB);
-
-  // Remainder loop
-  IRBuilder<> RemainderLoopBuilder(RemainderLoopBB);
-  PHINode *ByteIndex = RemainderLoopBuilder.CreatePHI(TypeOfCopyLen, 0);
-  ByteIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), RemCheckBB);
-  Type *TypeOfSetValue = SetValue->getType();
-  PHINode *ShiftedValue = RemainderLoopBuilder.CreatePHI(TypeOfSetValue, 0);
-  ShiftedValue->addIncoming(SetValue, RemCheckBB);
-
-  Value *ByteToStore = RemainderLoopBuilder.CreateTrunc(
-      ShiftedValue, RemainderLoopBuilder.getInt8Ty());
-
-  RemainderLoopBuilder.CreateStore(
-      ByteToStore,
-      RemainderLoopBuilder.CreateInBoundsGEP(RemainderLoopBuilder.getInt8Ty(),
-                                             RemainderDstPHI, ByteIndex),
-      IsVolatile);
-
-  Value *NewByteIndex = RemainderLoopBuilder.CreateAdd(
-      ByteIndex, ConstantInt::get(TypeOfCopyLen, 1));
-  ByteIndex->addIncoming(NewByteIndex, RemainderLoopBB);
-  Value *NewShiftedValue = RemainderLoopBuilder.CreateLShr(
-      ShiftedValue, ConstantInt::get(TypeOfSetValue, 8));
-  ShiftedValue->addIncoming(NewShiftedValue, RemainderLoopBB);
-
-  RemainderLoopBuilder.CreateCondBr(
-      RemainderLoopBuilder.CreateICmpULT(NewByteIndex, RuntimeRemainder),
-      RemainderLoopBB, NewBB);
+      LoopBuilder.CreateICmpNE(NewLoopCount, ConstantInt::get(TypeOfCount, 0)),
+      LoopBB, NewBB);
 }
 
 static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr,
@@ -697,7 +645,7 @@ void llvm::expandMemSetAsLoop(MemSetInst *Memset) {
     return createMemSetPatternLoop(
         /* InsertBefore */ Memset,
         /* DstAddr */ Memset->getRawDest(),
-        /* CopyLen */ Memset->getLength(),
+        /* Count */ Memset->getLength(),
         /* SetValue */ Memset->getValue(),
         /* Alignment */ Memset->getDestAlign().valueOrOne(),
         Memset->isVolatile());
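
With the remainder loop removed, createMemSetPatternLoop now emits a single
count-based store loop guarded by a zero-count check (which constant-folds to
"br i1 false" when the count is a known non-zero constant). As a minimal
sketch of the IR produced for a constant count of 16, using illustrative
value names in place of the pass's numbered temporaries:

      br i1 false, label %split, label %storeloop
    storeloop:
      %dst = phi ptr [ %a, %entry ], [ %dst.next, %storeloop ]
      %count = phi i64 [ 16, %entry ], [ %count.next, %storeloop ]
      store i128 %value, ptr %dst, align 1
      ; step one pattern element, i.e. 16 bytes for an i128 pattern
      %dst.next = getelementptr inbounds i128, ptr %dst, i64 1
      %count.next = sub i64 %count, 1
      %more = icmp ne i64 %count.next, 0
      br i1 %more, label %storeloop, label %split
    split:
      ret void
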
diff --git a/llvm/test/CodeGen/RISCV/memset-pattern.ll b/llvm/test/CodeGen/RISCV/memset-pattern.ll
index 85f66c8d3d230e..417121e6a27429 100644
--- a/llvm/test/CodeGen/RISCV/memset-pattern.ll
+++ b/llvm/test/CodeGen/RISCV/memset-pattern.ll
@@ -13,480 +13,40 @@
 
 define void @memset_1(ptr %a, i128 %value) nounwind {
 ; RV32-BOTH-LABEL: memset_1:
-; RV32-BOTH:       # %bb.0:
+; RV32-BOTH:       # %bb.0: # %storeloop.preheader
 ; RV32-BOTH-NEXT:    lw a2, 12(a1)
 ; RV32-BOTH-NEXT:    lw a3, 8(a1)
 ; RV32-BOTH-NEXT:    lw a4, 4(a1)
 ; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    li a5, 0
-; RV32-BOTH-NEXT:    li a6, 0
-; RV32-BOTH-NEXT:  .LBB0_1: # %remainderloop
+; RV32-BOTH-NEXT:    addi a5, a0, 16
+; RV32-BOTH-NEXT:  .LBB0_1: # %storeloop
 ; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    add a7, a0, a5
-; RV32-BOTH-NEXT:    sb a1, 0(a7)
-; RV32-BOTH-NEXT:    addi a5, a5, 1
-; RV32-BOTH-NEXT:    seqz a7, a5
-; RV32-BOTH-NEXT:    add a6, a6, a7
-; RV32-BOTH-NEXT:    or a7, a5, a6
-; RV32-BOTH-NEXT:    srli a1, a1, 8
-; RV32-BOTH-NEXT:    slli t0, a4, 24
-; RV32-BOTH-NEXT:    or a1, a1, t0
-; RV32-BOTH-NEXT:    srli a4, a4, 8
-; RV32-BOTH-NEXT:    slli t0, a3, 24
-; RV32-BOTH-NEXT:    or a4, a4, t0
-; RV32-BOTH-NEXT:    srli a3, a3, 8
-; RV32-BOTH-NEXT:    slli t0, a2, 24
-; RV32-BOTH-NEXT:    or a3, a3, t0
-; RV32-BOTH-NEXT:    srli a2, a2, 8
-; RV32-BOTH-NEXT:    beqz a7, .LBB0_1
+; RV32-BOTH-NEXT:    sw a1, 0(a0)
+; RV32-BOTH-NEXT:    sw a4, 4(a0)
+; RV32-BOTH-NEXT:    sw a3, 8(a0)
+; RV32-BOTH-NEXT:    sw a2, 12(a0)
+; RV32-BOTH-NEXT:    addi a0, a0, 16
+; RV32-BOTH-NEXT:    bne a0, a5, .LBB0_1
 ; RV32-BOTH-NEXT:  # %bb.2: # %split
 ; RV32-BOTH-NEXT:    ret
 ;
 ; RV64-BOTH-LABEL: memset_1:
-; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    li a3, 0
-; RV64-BOTH-NEXT:  .LBB0_1: # %remainderloop
-; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-BOTH-NEXT:    add a4, a0, a3
-; RV64-BOTH-NEXT:    sb a1, 0(a4)
-; RV64-BOTH-NEXT:    addi a3, a3, 1
-; RV64-BOTH-NEXT:    srli a1, a1, 8
-; RV64-BOTH-NEXT:    slli a4, a2, 56
-; RV64-BOTH-NEXT:    or a1, a1, a4
-; RV64-BOTH-NEXT:    srli a2, a2, 8
-; RV64-BOTH-NEXT:    beqz a3, .LBB0_1
-; RV64-BOTH-NEXT:  # %bb.2: # %split
-; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
-  ret void
-}
-
-define void @memset_2(ptr %a, i128 %value) nounwind {
-; RV32-BOTH-LABEL: memset_2:
-; RV32-BOTH:       # %bb.0:
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    li a5, 0
-; RV32-BOTH-NEXT:    li a6, 0
-; RV32-BOTH-NEXT:  .LBB1_1: # %remainderloop
-; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    add a7, a0, a5
-; RV32-BOTH-NEXT:    sb a1, 0(a7)
-; RV32-BOTH-NEXT:    addi a5, a5, 1
-; RV32-BOTH-NEXT:    seqz a7, a5
-; RV32-BOTH-NEXT:    add a6, a6, a7
-; RV32-BOTH-NEXT:    srli a1, a1, 8
-; RV32-BOTH-NEXT:    slli a7, a4, 24
-; RV32-BOTH-NEXT:    or a1, a1, a7
-; RV32-BOTH-NEXT:    srli a4, a4, 8
-; RV32-BOTH-NEXT:    slli a7, a3, 24
-; RV32-BOTH-NEXT:    or a4, a4, a7
-; RV32-BOTH-NEXT:    srli a3, a3, 8
-; RV32-BOTH-NEXT:    slli a7, a2, 24
-; RV32-BOTH-NEXT:    or a3, a3, a7
-; RV32-BOTH-NEXT:    seqz a7, a6
-; RV32-BOTH-NEXT:    sltiu t0, a5, 2
-; RV32-BOTH-NEXT:    and a7, a7, t0
-; RV32-BOTH-NEXT:    srli a2, a2, 8
-; RV32-BOTH-NEXT:    bnez a7, .LBB1_1
-; RV32-BOTH-NEXT:  # %bb.2: # %split
-; RV32-BOTH-NEXT:    ret
-;
-; RV64-BOTH-LABEL: memset_2:
-; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    li a3, 0
-; RV64-BOTH-NEXT:    li a4, 2
-; RV64-BOTH-NEXT:  .LBB1_1: # %remainderloop
+; RV64-BOTH:       # %bb.0: # %storeloop.preheader
+; RV64-BOTH-NEXT:    addi a3, a0, 16
+; RV64-BOTH-NEXT:  .LBB0_1: # %storeloop
 ; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-BOTH-NEXT:    add a5, a0, a3
-; RV64-BOTH-NEXT:    sb a1, 0(a5)
-; RV64-BOTH-NEXT:    addi a3, a3, 1
-; RV64-BOTH-NEXT:    srli a1, a1, 8
-; RV64-BOTH-NEXT:    slli a5, a2, 56
-; RV64-BOTH-NEXT:    or a1, a1, a5
-; RV64-BOTH-NEXT:    srli a2, a2, 8
-; RV64-BOTH-NEXT:    bltu a3, a4, .LBB1_1
+; RV64-BOTH-NEXT:    sd a1, 0(a0)
+; RV64-BOTH-NEXT:    sd a2, 8(a0)
+; RV64-BOTH-NEXT:    addi a0, a0, 16
+; RV64-BOTH-NEXT:    bne a0, a3, .LBB0_1
 ; RV64-BOTH-NEXT:  # %bb.2: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 2, i1 0)
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
-define void @memset_3(ptr %a, i128 %value) nounwind {
-; RV32-BOTH-LABEL: memset_3:
-; RV32-BOTH:       # %bb.0:
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    li a5, 0
-; RV32-BOTH-NEXT:    li a6, 0
-; RV32-BOTH-NEXT:  .LBB2_1: # %remainderloop
-; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    add a7, a0, a5
-; RV32-BOTH-NEXT:    sb a1, 0(a7)
-; RV32-BOTH-NEXT:    addi a5, a5, 1
-; RV32-BOTH-NEXT:    seqz a7, a5
-; RV32-BOTH-NEXT:    add a6, a6, a7
-; RV32-BOTH-NEXT:    srli a1, a1, 8
-; RV32-BOTH-NEXT:    slli a7, a4, 24
-; RV32-BOTH-NEXT:    or a1, a1, a7
-; RV32-BOTH-NEXT:    srli a4, a4, 8
-; RV32-BOTH-NEXT:    slli a7, a3, 24
-; RV32-BOTH-NEXT:    or a4, a4, a7
-; RV32-BOTH-NEXT:    srli a3, a3, 8
-; RV32-BOTH-NEXT:    slli a7, a2, 24
-; RV32-BOTH-NEXT:    or a3, a3, a7
-; RV32-BOTH-NEXT:    seqz a7, a6
-; RV32-BOTH-NEXT:    sltiu t0, a5, 3
-; RV32-BOTH-NEXT:    and a7, a7, t0
-; RV32-BOTH-NEXT:    srli a2, a2, 8
-; RV32-BOTH-NEXT:    bnez a7, .LBB2_1
-; RV32-BOTH-NEXT:  # %bb.2: # %split
-; RV32-BOTH-NEXT:    ret
-;
-; RV64-BOTH-LABEL: memset_3:
-; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    li a3, 0
-; RV64-BOTH-NEXT:    li a4, 3
-; RV64-BOTH-NEXT:  .LBB2_1: # %remainderloop
-; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-BOTH-NEXT:    add a5, a0, a3
-; RV64-BOTH-NEXT:    sb a1, 0(a5)
-; RV64-BOTH-NEXT:    addi a3, a3, 1
-; RV64-BOTH-NEXT:    srli a1, a1, 8
-; RV64-BOTH-NEXT:    slli a5, a2, 56
-; RV64-BOTH-NEXT:    or a1, a1, a5
-; RV64-BOTH-NEXT:    srli a2, a2, 8
-; RV64-BOTH-NEXT:    bltu a3, a4, .LBB2_1
-; RV64-BOTH-NEXT:  # %bb.2: # %split
-; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 3, i1 0)
-  ret void
-}
-
-define void @memset_4(ptr %a, i128 %value) nounwind {
-; RV32-BOTH-LABEL: memset_4:
-; RV32-BOTH:       # %bb.0:
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    li a5, 0
-; RV32-BOTH-NEXT:    li a6, 0
-; RV32-BOTH-NEXT:  .LBB3_1: # %remainderloop
-; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    add a7, a0, a5
-; RV32-BOTH-NEXT:    sb a1, 0(a7)
-; RV32-BOTH-NEXT:    addi a5, a5, 1
-; RV32-BOTH-NEXT:    seqz a7, a5
-; RV32-BOTH-NEXT:    add a6, a6, a7
-; RV32-BOTH-NEXT:    srli a1, a1, 8
-; RV32-BOTH-NEXT:    slli a7, a4, 24
-; RV32-BOTH-NEXT:    or a1, a1, a7
-; RV32-BOTH-NEXT:    srli a4, a4, 8
-; RV32-BOTH-NEXT:    slli a7, a3, 24
-; RV32-BOTH-NEXT:    or a4, a4, a7
-; RV32-BOTH-NEXT:    srli a3, a3, 8
-; RV32-BOTH-NEXT:    slli a7, a2, 24
-; RV32-BOTH-NEXT:    or a3, a3, a7
-; RV32-BOTH-NEXT:    seqz a7, a6
-; RV32-BOTH-NEXT:    sltiu t0, a5, 4
-; RV32-BOTH-NEXT:    and a7, a7, t0
-; RV32-BOTH-NEXT:    srli a2, a2, 8
-; RV32-BOTH-NEXT:    bnez a7, .LBB3_1
-; RV32-BOTH-NEXT:  # %bb.2: # %split
-; RV32-BOTH-NEXT:    ret
-;
-; RV64-BOTH-LABEL: memset_4:
-; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    li a3, 0
-; RV64-BOTH-NEXT:    li a4, 4
-; RV64-BOTH-NEXT:  .LBB3_1: # %remainderloop
-; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-BOTH-NEXT:    add a5, a0, a3
-; RV64-BOTH-NEXT:    sb a1, 0(a5)
-; RV64-BOTH-NEXT:    addi a3, a3, 1
-; RV64-BOTH-NEXT:    srli a1, a1, 8
-; RV64-BOTH-NEXT:    slli a5, a2, 56
-; RV64-BOTH-NEXT:    or a1, a1, a5
-; RV64-BOTH-NEXT:    srli a2, a2, 8
-; RV64-BOTH-NEXT:    bltu a3, a4, .LBB3_1
-; RV64-BOTH-NEXT:  # %bb.2: # %split
-; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 4, i1 0)
-  ret void
-}
-
-define void @memset_5(ptr %a, i128 %value) nounwind {
-; RV32-BOTH-LABEL: memset_5:
-; RV32-BOTH:       # %bb.0:
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    li a5, 0
-; RV32-BOTH-NEXT:    li a6, 0
-; RV32-BOTH-NEXT:  .LBB4_1: # %remainderloop
-; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    add a7, a0, a5
-; RV32-BOTH-NEXT:    sb a1, 0(a7)
-; RV32-BOTH-NEXT:    addi a5, a5, 1
-; RV32-BOTH-NEXT:    seqz a7, a5
-; RV32-BOTH-NEXT:    add a6, a6, a7
-; RV32-BOTH-NEXT:    srli a1, a1, 8
-; RV32-BOTH-NEXT:    slli a7, a4, 24
-; RV32-BOTH-NEXT:    or a1, a1, a7
-; RV32-BOTH-NEXT:    srli a4, a4, 8
-; RV32-BOTH-NEXT:    slli a7, a3, 24
-; RV32-BOTH-NEXT:    or a4, a4, a7
-; RV32-BOTH-NEXT:    srli a3, a3, 8
-; RV32-BOTH-NEXT:    slli a7, a2, 24
-; RV32-BOTH-NEXT:    or a3, a3, a7
-; RV32-BOTH-NEXT:    seqz a7, a6
-; RV32-BOTH-NEXT:    sltiu t0, a5, 5
-; RV32-BOTH-NEXT:    and a7, a7, t0
-; RV32-BOTH-NEXT:    srli a2, a2, 8
-; RV32-BOTH-NEXT:    bnez a7, .LBB4_1
-; RV32-BOTH-NEXT:  # %bb.2: # %split
-; RV32-BOTH-NEXT:    ret
-;
-; RV64-BOTH-LABEL: memset_5:
-; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    li a3, 0
-; RV64-BOTH-NEXT:    li a4, 5
-; RV64-BOTH-NEXT:  .LBB4_1: # %remainderloop
-; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-BOTH-NEXT:    add a5, a0, a3
-; RV64-BOTH-NEXT:    sb a1, 0(a5)
-; RV64-BOTH-NEXT:    addi a3, a3, 1
-; RV64-BOTH-NEXT:    srli a1, a1, 8
-; RV64-BOTH-NEXT:    slli a5, a2, 56
-; RV64-BOTH-NEXT:    or a1, a1, a5
-; RV64-BOTH-NEXT:    srli a2, a2, 8
-; RV64-BOTH-NEXT:    bltu a3, a4, .LBB4_1
-; RV64-BOTH-NEXT:  # %bb.2: # %split
-; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 5, i1 0)
-  ret void
-}
-
-define void @memset_6(ptr %a, i128 %value) nounwind {
-; RV32-BOTH-LABEL: memset_6:
-; RV32-BOTH:       # %bb.0:
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    li a5, 0
-; RV32-BOTH-NEXT:    li a6, 0
-; RV32-BOTH-NEXT:  .LBB5_1: # %remainderloop
-; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    add a7, a0, a5
-; RV32-BOTH-NEXT:    sb a1, 0(a7)
-; RV32-BOTH-NEXT:    addi a5, a5, 1
-; RV32-BOTH-NEXT:    seqz a7, a5
-; RV32-BOTH-NEXT:    add a6, a6, a7
-; RV32-BOTH-NEXT:    srli a1, a1, 8
-; RV32-BOTH-NEXT:    slli a7, a4, 24
-; RV32-BOTH-NEXT:    or a1, a1, a7
-; RV32-BOTH-NEXT:    srli a4, a4, 8
-; RV32-BOTH-NEXT:    slli a7, a3, 24
-; RV32-BOTH-NEXT:    or a4, a4, a7
-; RV32-BOTH-NEXT:    srli a3, a3, 8
-; RV32-BOTH-NEXT:    slli a7, a2, 24
-; RV32-BOTH-NEXT:    or a3, a3, a7
-; RV32-BOTH-NEXT:    seqz a7, a6
-; RV32-BOTH-NEXT:    sltiu t0, a5, 6
-; RV32-BOTH-NEXT:    and a7, a7, t0
-; RV32-BOTH-NEXT:    srli a2, a2, 8
-; RV32-BOTH-NEXT:    bnez a7, .LBB5_1
-; RV32-BOTH-NEXT:  # %bb.2: # %split
-; RV32-BOTH-NEXT:    ret
-;
-; RV64-BOTH-LABEL: memset_6:
-; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    li a3, 0
-; RV64-BOTH-NEXT:    li a4, 6
-; RV64-BOTH-NEXT:  .LBB5_1: # %remainderloop
-; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-BOTH-NEXT:    add a5, a0, a3
-; RV64-BOTH-NEXT:    sb a1, 0(a5)
-; RV64-BOTH-NEXT:    addi a3, a3, 1
-; RV64-BOTH-NEXT:    srli a1, a1, 8
-; RV64-BOTH-NEXT:    slli a5, a2, 56
-; RV64-BOTH-NEXT:    or a1, a1, a5
-; RV64-BOTH-NEXT:    srli a2, a2, 8
-; RV64-BOTH-NEXT:    bltu a3, a4, .LBB5_1
-; RV64-BOTH-NEXT:  # %bb.2: # %split
-; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 6, i1 0)
-  ret void
-}
-
-define void @memset_7(ptr %a, i128 %value) nounwind {
-; RV32-BOTH-LABEL: memset_7:
-; RV32-BOTH:       # %bb.0:
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    li a5, 0
-; RV32-BOTH-NEXT:    li a6, 0
-; RV32-BOTH-NEXT:  .LBB6_1: # %remainderloop
-; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    add a7, a0, a5
-; RV32-BOTH-NEXT:    sb a1, 0(a7)
-; RV32-BOTH-NEXT:    addi a5, a5, 1
-; RV32-BOTH-NEXT:    seqz a7, a5
-; RV32-BOTH-NEXT:    add a6, a6, a7
-; RV32-BOTH-NEXT:    srli a1, a1, 8
-; RV32-BOTH-NEXT:    slli a7, a4, 24
-; RV32-BOTH-NEXT:    or a1, a1, a7
-; RV32-BOTH-NEXT:    srli a4, a4, 8
-; RV32-BOTH-NEXT:    slli a7, a3, 24
-; RV32-BOTH-NEXT:    or a4, a4, a7
-; RV32-BOTH-NEXT:    srli a3, a3, 8
-; RV32-BOTH-NEXT:    slli a7, a2, 24
-; RV32-BOTH-NEXT:    or a3, a3, a7
-; RV32-BOTH-NEXT:    seqz a7, a6
-; RV32-BOTH-NEXT:    sltiu t0, a5, 7
-; RV32-BOTH-NEXT:    and a7, a7, t0
-; RV32-BOTH-NEXT:    srli a2, a2, 8
-; RV32-BOTH-NEXT:    bnez a7, .LBB6_1
-; RV32-BOTH-NEXT:  # %bb.2: # %split
-; RV32-BOTH-NEXT:    ret
-;
-; RV64-BOTH-LABEL: memset_7:
-; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    li a3, 0
-; RV64-BOTH-NEXT:    li a4, 7
-; RV64-BOTH-NEXT:  .LBB6_1: # %remainderloop
-; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-BOTH-NEXT:    add a5, a0, a3
-; RV64-BOTH-NEXT:    sb a1, 0(a5)
-; RV64-BOTH-NEXT:    addi a3, a3, 1
-; RV64-BOTH-NEXT:    srli a1, a1, 8
-; RV64-BOTH-NEXT:    slli a5, a2, 56
-; RV64-BOTH-NEXT:    or a1, a1, a5
-; RV64-BOTH-NEXT:    srli a2, a2, 8
-; RV64-BOTH-NEXT:    bltu a3, a4, .LBB6_1
-; RV64-BOTH-NEXT:  # %bb.2: # %split
-; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 7, i1 0)
-  ret void
-}
-
-define void @memset_8(ptr %a, i128 %value) nounwind {
-; RV32-BOTH-LABEL: memset_8:
-; RV32-BOTH:       # %bb.0:
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    li a5, 0
-; RV32-BOTH-NEXT:    li a6, 0
-; RV32-BOTH-NEXT:  .LBB7_1: # %remainderloop
-; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    add a7, a0, a5
-; RV32-BOTH-NEXT:    sb a1, 0(a7)
-; RV32-BOTH-NEXT:    addi a5, a5, 1
-; RV32-BOTH-NEXT:    seqz a7, a5
-; RV32-BOTH-NEXT:    add a6, a6, a7
-; RV32-BOTH-NEXT:    srli a1, a1, 8
-; RV32-BOTH-NEXT:    slli a7, a4, 24
-; RV32-BOTH-NEXT:    or a1, a1, a7
-; RV32-BOTH-NEXT:    srli a4, a4, 8
-; RV32-BOTH-NEXT:    slli a7, a3, 24
-; RV32-BOTH-NEXT:    or a4, a4, a7
-; RV32-BOTH-NEXT:    srli a3, a3, 8
-; RV32-BOTH-NEXT:    slli a7, a2, 24
-; RV32-BOTH-NEXT:    or a3, a3, a7
-; RV32-BOTH-NEXT:    seqz a7, a6
-; RV32-BOTH-NEXT:    sltiu t0, a5, 8
-; RV32-BOTH-NEXT:    and a7, a7, t0
-; RV32-BOTH-NEXT:    srli a2, a2, 8
-; RV32-BOTH-NEXT:    bnez a7, .LBB7_1
-; RV32-BOTH-NEXT:  # %bb.2: # %split
-; RV32-BOTH-NEXT:    ret
-;
-; RV64-BOTH-LABEL: memset_8:
-; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    li a3, 0
-; RV64-BOTH-NEXT:    li a4, 8
-; RV64-BOTH-NEXT:  .LBB7_1: # %remainderloop
-; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-BOTH-NEXT:    add a5, a0, a3
-; RV64-BOTH-NEXT:    sb a1, 0(a5)
-; RV64-BOTH-NEXT:    addi a3, a3, 1
-; RV64-BOTH-NEXT:    srli a1, a1, 8
-; RV64-BOTH-NEXT:    slli a5, a2, 56
-; RV64-BOTH-NEXT:    or a1, a1, a5
-; RV64-BOTH-NEXT:    srli a2, a2, 8
-; RV64-BOTH-NEXT:    bltu a3, a4, .LBB7_1
-; RV64-BOTH-NEXT:  # %bb.2: # %split
-; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 8, i1 0)
-  ret void
-}
-
-define void @memset_9(ptr %a, i128 %value) nounwind {
-; RV32-BOTH-LABEL: memset_9:
-; RV32-BOTH:       # %bb.0:
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
-; RV32-BOTH-NEXT:    li a5, 0
-; RV32-BOTH-NEXT:    li a6, 0
-; RV32-BOTH-NEXT:  .LBB8_1: # %remainderloop
-; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    add a7, a0, a5
-; RV32-BOTH-NEXT:    sb a1, 0(a7)
-; RV32-BOTH-NEXT:    addi a5, a5, 1
-; RV32-BOTH-NEXT:    seqz a7, a5
-; RV32-BOTH-NEXT:    add a6, a6, a7
-; RV32-BOTH-NEXT:    srli a1, a1, 8
-; RV32-BOTH-NEXT:    slli a7, a4, 24
-; RV32-BOTH-NEXT:    or a1, a1, a7
-; RV32-BOTH-NEXT:    srli a4, a4, 8
-; RV32-BOTH-NEXT:    slli a7, a3, 24
-; RV32-BOTH-NEXT:    or a4, a4, a7
-; RV32-BOTH-NEXT:    srli a3, a3, 8
-; RV32-BOTH-NEXT:    slli a7, a2, 24
-; RV32-BOTH-NEXT:    or a3, a3, a7
-; RV32-BOTH-NEXT:    seqz a7, a6
-; RV32-BOTH-NEXT:    sltiu t0, a5, 9
-; RV32-BOTH-NEXT:    and a7, a7, t0
-; RV32-BOTH-NEXT:    srli a2, a2, 8
-; RV32-BOTH-NEXT:    bnez a7, .LBB8_1
-; RV32-BOTH-NEXT:  # %bb.2: # %split
-; RV32-BOTH-NEXT:    ret
-;
-; RV64-BOTH-LABEL: memset_9:
-; RV64-BOTH:       # %bb.0:
-; RV64-BOTH-NEXT:    li a3, 0
-; RV64-BOTH-NEXT:    li a4, 9
-; RV64-BOTH-NEXT:  .LBB8_1: # %remainderloop
-; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-BOTH-NEXT:    add a5, a0, a3
-; RV64-BOTH-NEXT:    sb a1, 0(a5)
-; RV64-BOTH-NEXT:    addi a3, a3, 1
-; RV64-BOTH-NEXT:    srli a1, a1, 8
-; RV64-BOTH-NEXT:    slli a5, a2, 56
-; RV64-BOTH-NEXT:    or a1, a1, a5
-; RV64-BOTH-NEXT:    srli a2, a2, 8
-; RV64-BOTH-NEXT:    bltu a3, a4, .LBB8_1
-; RV64-BOTH-NEXT:  # %bb.2: # %split
-; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 9, i1 0)
-  ret void
-}
-
-define void @memset_16(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_16:
+define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
+; RV32-LABEL: memset_1_noalign:
 ; RV32:       # %bb.0: # %storeloop.preheader
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
@@ -496,7 +56,7 @@ define void @memset_16(ptr %a, i128 %value) nounwind {
 ; RV32-NEXT:    lw a3, 0(a1)
 ; RV32-NEXT:    lw a4, 8(a1)
 ; RV32-NEXT:    lw a1, 4(a1)
-; RV32-NEXT:    addi a5, a0, 256
+; RV32-NEXT:    addi a5, a0, 16
 ; RV32-NEXT:    srli a6, a3, 24
 ; RV32-NEXT:    srli a7, a3, 16
 ; RV32-NEXT:    srli t0, a3, 8
@@ -509,7 +69,7 @@ define void @memset_16(ptr %a, i128 %value) nounwind {
 ; RV32-NEXT:    srli s0, a2, 24
 ; RV32-NEXT:    srli s1, a2, 16
 ; RV32-NEXT:    srli s2, a2, 8
-; RV32-NEXT:  .LBB9_1: # %storeloop
+; RV32-NEXT:  .LBB1_1: # %storeloop
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-NEXT:    sb a3, 0(a0)
 ; RV32-NEXT:    sb a1, 4(a0)
@@ -527,8 +87,8 @@ define void @memset_16(ptr %a, i128 %value) nounwind {
 ; RV32-NEXT:    sb s0, 15(a0)
 ; RV32-NEXT:    sb s1, 14(a0)
 ; RV32-NEXT:    sb s2, 13(a0)
-; RV32-NEXT:    addi a0, a0, 256
-; RV32-NEXT:    bne a0, a5, .LBB9_1
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    bne a0, a5, .LBB1_1
 ; RV32-NEXT:  # %bb.2: # %split
 ; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
@@ -536,13 +96,13 @@ define void @memset_16(ptr %a, i128 %value) nounwind {
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: memset_16:
+; RV64-LABEL: memset_1_noalign:
 ; RV64:       # %bb.0: # %storeloop.preheader
 ; RV64-NEXT:    addi sp, sp, -32
 ; RV64-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    addi a3, a0, 256
+; RV64-NEXT:    addi a3, a0, 16
 ; RV64-NEXT:    srli a4, a1, 56
 ; RV64-NEXT:    srli a5, a1, 48
 ; RV64-NEXT:    srli a6, a1, 40
@@ -557,7 +117,7 @@ define void @memset_16(ptr %a, i128 %value) nounwind {
 ; RV64-NEXT:    srli s0, a2, 24
 ; RV64-NEXT:    srli s1, a2, 16
 ; RV64-NEXT:    srli s2, a2, 8
-; RV64-NEXT:  .LBB9_1: # %storeloop
+; RV64-NEXT:  .LBB1_1: # %storeloop
 ; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NEXT:    sb a1, 0(a0)
 ; RV64-NEXT:    sb a2, 8(a0)
@@ -575,8 +135,8 @@ define void @memset_16(ptr %a, i128 %value) nounwind {
 ; RV64-NEXT:    sb s0, 11(a0)
 ; RV64-NEXT:    sb s1, 10(a0)
 ; RV64-NEXT:    sb s2, 9(a0)
-; RV64-NEXT:    addi a0, a0, 256
-; RV64-NEXT:    bne a0, a3, .LBB9_1
+; RV64-NEXT:    addi a0, a0, 16
+; RV64-NEXT:    bne a0, a3, .LBB1_1
 ; RV64-NEXT:  # %bb.2: # %split
 ; RV64-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
@@ -584,479 +144,110 @@ define void @memset_16(ptr %a, i128 %value) nounwind {
 ; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
 ;
-; RV32-FAST-LABEL: memset_16:
+; RV32-FAST-LABEL: memset_1_noalign:
 ; RV32-FAST:       # %bb.0: # %storeloop.preheader
 ; RV32-FAST-NEXT:    lw a2, 12(a1)
 ; RV32-FAST-NEXT:    lw a3, 8(a1)
 ; RV32-FAST-NEXT:    lw a4, 4(a1)
 ; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    addi a5, a0, 256
-; RV32-FAST-NEXT:  .LBB9_1: # %storeloop
+; RV32-FAST-NEXT:    addi a5, a0, 16
+; RV32-FAST-NEXT:  .LBB1_1: # %storeloop
 ; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-FAST-NEXT:    sw a1, 0(a0)
 ; RV32-FAST-NEXT:    sw a4, 4(a0)
 ; RV32-FAST-NEXT:    sw a3, 8(a0)
 ; RV32-FAST-NEXT:    sw a2, 12(a0)
-; RV32-FAST-NEXT:    addi a0, a0, 256
-; RV32-FAST-NEXT:    bne a0, a5, .LBB9_1
+; RV32-FAST-NEXT:    addi a0, a0, 16
+; RV32-FAST-NEXT:    bne a0, a5, .LBB1_1
 ; RV32-FAST-NEXT:  # %bb.2: # %split
 ; RV32-FAST-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_16:
+; RV64-FAST-LABEL: memset_1_noalign:
 ; RV64-FAST:       # %bb.0: # %storeloop.preheader
-; RV64-FAST-NEXT:    addi a3, a0, 256
-; RV64-FAST-NEXT:  .LBB9_1: # %storeloop
+; RV64-FAST-NEXT:    addi a3, a0, 16
+; RV64-FAST-NEXT:  .LBB1_1: # %storeloop
 ; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-FAST-NEXT:    sd a1, 0(a0)
 ; RV64-FAST-NEXT:    sd a2, 8(a0)
-; RV64-FAST-NEXT:    addi a0, a0, 256
-; RV64-FAST-NEXT:    bne a0, a3, .LBB9_1
+; RV64-FAST-NEXT:    addi a0, a0, 16
+; RV64-FAST-NEXT:    bne a0, a3, .LBB1_1
 ; RV64-FAST-NEXT:  # %bb.2: # %split
 ; RV64-FAST-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 16, i1 0)
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
-define void @memset_17(ptr %a, i128 %value) nounwind {
-; RV32-LABEL: memset_17:
-; RV32:       # %bb.0: # %storeloop.preheader
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s2, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT:    lw a2, 12(a1)
-; RV32-NEXT:    lw a3, 0(a1)
-; RV32-NEXT:    lw a4, 8(a1)
-; RV32-NEXT:    lw a1, 4(a1)
-; RV32-NEXT:    addi a5, a0, 256
-; RV32-NEXT:    srli a6, a3, 24
-; RV32-NEXT:    srli a7, a3, 16
-; RV32-NEXT:    srli t0, a3, 8
-; RV32-NEXT:    srli t1, a1, 24
-; RV32-NEXT:    srli t2, a1, 16
-; RV32-NEXT:    srli t3, a1, 8
-; RV32-NEXT:    srli t4, a4, 24
-; RV32-NEXT:    srli t5, a4, 16
-; RV32-NEXT:    srli t6, a4, 8
-; RV32-NEXT:    srli s0, a2, 24
-; RV32-NEXT:    srli s1, a2, 16
-; RV32-NEXT:    srli s2, a2, 8
-; RV32-NEXT:  .LBB10_1: # %storeloop
-; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    sb a6, 3(a0)
-; RV32-NEXT:    sb a7, 2(a0)
-; RV32-NEXT:    sb t0, 1(a0)
-; RV32-NEXT:    sb a4, 8(a0)
-; RV32-NEXT:    sb t1, 7(a0)
-; RV32-NEXT:    sb t2, 6(a0)
-; RV32-NEXT:    sb t3, 5(a0)
-; RV32-NEXT:    sb a2, 12(a0)
-; RV32-NEXT:    sb t4, 11(a0)
-; RV32-NEXT:    sb t5, 10(a0)
-; RV32-NEXT:    sb t6, 9(a0)
-; RV32-NEXT:    sb s0, 15(a0)
-; RV32-NEXT:    sb s1, 14(a0)
-; RV32-NEXT:    sb s2, 13(a0)
-; RV32-NEXT:    addi a0, a0, 256
-; RV32-NEXT:    bne a0, a5, .LBB10_1
-; RV32-NEXT:  # %bb.2: # %remcheck
-; RV32-NEXT:    li a5, 0
-; RV32-NEXT:    li a6, 0
-; RV32-NEXT:  .LBB10_3: # %remainderloop
-; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    add a7, a0, a5
-; RV32-NEXT:    sb a3, 0(a7)
-; RV32-NEXT:    addi a5, a5, 1
-; RV32-NEXT:    seqz a7, a5
-; RV32-NEXT:    add a6, a6, a7
-; RV32-NEXT:    or a7, a5, a6
-; RV32-NEXT:    srli a3, a3, 8
-; RV32-NEXT:    slli t0, a1, 24
-; RV32-NEXT:    or a3, a3, t0
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    slli t0, a4, 24
-; RV32-NEXT:    or a1, a1, t0
-; RV32-NEXT:    srli a4, a4, 8
-; RV32-NEXT:    slli t0, a2, 24
-; RV32-NEXT:    or a4, a4, t0
-; RV32-NEXT:    srli a2, a2, 8
-; RV32-NEXT:    beqz a7, .LBB10_3
-; RV32-NEXT:  # %bb.4: # %split
-; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_17:
-; RV64:       # %bb.0: # %storeloop.preheader
-; RV64-NEXT:    addi sp, sp, -32
-; RV64-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    addi a3, a0, 256
-; RV64-NEXT:    srli a4, a1, 56
-; RV64-NEXT:    srli a5, a1, 48
-; RV64-NEXT:    srli a6, a1, 40
-; RV64-NEXT:    srli a7, a1, 32
-; RV64-NEXT:    srli t0, a1, 24
-; RV64-NEXT:    srli t1, a1, 16
-; RV64-NEXT:    srli t2, a1, 8
-; RV64-NEXT:    srli t3, a2, 56
-; RV64-NEXT:    srli t4, a2, 48
-; RV64-NEXT:    srli t5, a2, 40
-; RV64-NEXT:    srli t6, a2, 32
-; RV64-NEXT:    srli s0, a2, 24
-; RV64-NEXT:    srli s1, a2, 16
-; RV64-NEXT:    srli s2, a2, 8
-; RV64-NEXT:  .LBB10_1: # %storeloop
-; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    sb a2, 8(a0)
-; RV64-NEXT:    sb a4, 7(a0)
-; RV64-NEXT:    sb a5, 6(a0)
-; RV64-NEXT:    sb a6, 5(a0)
-; RV64-NEXT:    sb a7, 4(a0)
-; RV64-NEXT:    sb t0, 3(a0)
-; RV64-NEXT:    sb t1, 2(a0)
-; RV64-NEXT:    sb t2, 1(a0)
-; RV64-NEXT:    sb t3, 15(a0)
-; RV64-NEXT:    sb t4, 14(a0)
-; RV64-NEXT:    sb t5, 13(a0)
-; RV64-NEXT:    sb t6, 12(a0)
-; RV64-NEXT:    sb s0, 11(a0)
-; RV64-NEXT:    sb s1, 10(a0)
-; RV64-NEXT:    sb s2, 9(a0)
-; RV64-NEXT:    addi a0, a0, 256
-; RV64-NEXT:    bne a0, a3, .LBB10_1
-; RV64-NEXT:  # %bb.2: # %remcheck
-; RV64-NEXT:    li a3, 0
-; RV64-NEXT:  .LBB10_3: # %remainderloop
-; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-NEXT:    add a4, a0, a3
-; RV64-NEXT:    sb a1, 0(a4)
-; RV64-NEXT:    addi a3, a3, 1
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    slli a4, a2, 56
-; RV64-NEXT:    or a1, a1, a4
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    beqz a3, .LBB10_3
-; RV64-NEXT:  # %bb.4: # %split
-; RV64-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 32
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_17:
-; RV32-FAST:       # %bb.0: # %storeloop.preheader
-; RV32-FAST-NEXT:    lw a2, 12(a1)
-; RV32-FAST-NEXT:    lw a3, 8(a1)
-; RV32-FAST-NEXT:    lw a4, 4(a1)
-; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    addi a5, a0, 256
-; RV32-FAST-NEXT:  .LBB10_1: # %storeloop
-; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-FAST-NEXT:    sw a1, 0(a0)
-; RV32-FAST-NEXT:    sw a4, 4(a0)
-; RV32-FAST-NEXT:    sw a3, 8(a0)
-; RV32-FAST-NEXT:    sw a2, 12(a0)
-; RV32-FAST-NEXT:    addi a0, a0, 256
-; RV32-FAST-NEXT:    bne a0, a5, .LBB10_1
-; RV32-FAST-NEXT:  # %bb.2: # %remcheck
-; RV32-FAST-NEXT:    li a5, 0
-; RV32-FAST-NEXT:    li a6, 0
-; RV32-FAST-NEXT:  .LBB10_3: # %remainderloop
-; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-FAST-NEXT:    add a7, a0, a5
-; RV32-FAST-NEXT:    sb a1, 0(a7)
-; RV32-FAST-NEXT:    addi a5, a5, 1
-; RV32-FAST-NEXT:    seqz a7, a5
-; RV32-FAST-NEXT:    add a6, a6, a7
-; RV32-FAST-NEXT:    or a7, a5, a6
-; RV32-FAST-NEXT:    srli a1, a1, 8
-; RV32-FAST-NEXT:    slli t0, a4, 24
-; RV32-FAST-NEXT:    or a1, a1, t0
-; RV32-FAST-NEXT:    srli a4, a4, 8
-; RV32-FAST-NEXT:    slli t0, a3, 24
-; RV32-FAST-NEXT:    or a4, a4, t0
-; RV32-FAST-NEXT:    srli a3, a3, 8
-; RV32-FAST-NEXT:    slli t0, a2, 24
-; RV32-FAST-NEXT:    or a3, a3, t0
-; RV32-FAST-NEXT:    srli a2, a2, 8
-; RV32-FAST-NEXT:    beqz a7, .LBB10_3
-; RV32-FAST-NEXT:  # %bb.4: # %split
-; RV32-FAST-NEXT:    ret
+define void @memset_4(ptr %a, i128 %value) nounwind {
+; RV32-BOTH-LABEL: memset_4:
+; RV32-BOTH:       # %bb.0: # %storeloop.preheader
+; RV32-BOTH-NEXT:    lw a2, 12(a1)
+; RV32-BOTH-NEXT:    lw a3, 8(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    addi a5, a0, 64
+; RV32-BOTH-NEXT:  .LBB2_1: # %storeloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    sw a1, 0(a0)
+; RV32-BOTH-NEXT:    sw a4, 4(a0)
+; RV32-BOTH-NEXT:    sw a3, 8(a0)
+; RV32-BOTH-NEXT:    sw a2, 12(a0)
+; RV32-BOTH-NEXT:    addi a0, a0, 16
+; RV32-BOTH-NEXT:    bne a0, a5, .LBB2_1
+; RV32-BOTH-NEXT:  # %bb.2: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_17:
-; RV64-FAST:       # %bb.0: # %storeloop.preheader
-; RV64-FAST-NEXT:    addi a3, a0, 256
-; RV64-FAST-NEXT:  .LBB10_1: # %storeloop
-; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-FAST-NEXT:    sd a1, 0(a0)
-; RV64-FAST-NEXT:    sd a2, 8(a0)
-; RV64-FAST-NEXT:    addi a0, a0, 256
-; RV64-FAST-NEXT:    bne a0, a3, .LBB10_1
-; RV64-FAST-NEXT:  # %bb.2: # %remcheck
-; RV64-FAST-NEXT:    li a3, 0
-; RV64-FAST-NEXT:  .LBB10_3: # %remainderloop
-; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-FAST-NEXT:    add a4, a0, a3
-; RV64-FAST-NEXT:    sb a1, 0(a4)
-; RV64-FAST-NEXT:    addi a3, a3, 1
-; RV64-FAST-NEXT:    srli a1, a1, 8
-; RV64-FAST-NEXT:    slli a4, a2, 56
-; RV64-FAST-NEXT:    or a1, a1, a4
-; RV64-FAST-NEXT:    srli a2, a2, 8
-; RV64-FAST-NEXT:    beqz a3, .LBB10_3
-; RV64-FAST-NEXT:  # %bb.4: # %split
-; RV64-FAST-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 17, i1 0)
+; RV64-BOTH-LABEL: memset_4:
+; RV64-BOTH:       # %bb.0: # %storeloop.preheader
+; RV64-BOTH-NEXT:    addi a3, a0, 64
+; RV64-BOTH-NEXT:  .LBB2_1: # %storeloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    sd a1, 0(a0)
+; RV64-BOTH-NEXT:    sd a2, 8(a0)
+; RV64-BOTH-NEXT:    addi a0, a0, 16
+; RV64-BOTH-NEXT:    bne a0, a3, .LBB2_1
+; RV64-BOTH-NEXT:  # %bb.2: # %split
+; RV64-BOTH-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 4, i1 0)
   ret void
 }
 
 define void @memset_x(ptr %a, i128 %value, i64 %x) nounwind {
-; RV32-LABEL: memset_x:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s2, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s3, 0(sp) # 4-byte Folded Spill
-; RV32-NEXT:    lw a4, 12(a1)
-; RV32-NEXT:    lw a5, 8(a1)
-; RV32-NEXT:    lw a6, 4(a1)
-; RV32-NEXT:    lw a1, 0(a1)
-; RV32-NEXT:    slli a7, a3, 28
-; RV32-NEXT:    srli t0, a2, 4
-; RV32-NEXT:    or a7, t0, a7
-; RV32-NEXT:    srli a3, a3, 4
-; RV32-NEXT:    or a3, a7, a3
-; RV32-NEXT:    andi a2, a2, 15
-; RV32-NEXT:    beqz a3, .LBB11_3
-; RV32-NEXT:  # %bb.1: # %storeloop.preheader
-; RV32-NEXT:    slli a3, a7, 8
-; RV32-NEXT:    add a3, a0, a3
-; RV32-NEXT:    srli a7, a1, 24
-; RV32-NEXT:    srli t0, a1, 16
-; RV32-NEXT:    srli t1, a1, 8
-; RV32-NEXT:    srli t2, a6, 24
-; RV32-NEXT:    srli t3, a6, 16
-; RV32-NEXT:    srli t4, a6, 8
-; RV32-NEXT:    srli t5, a5, 24
-; RV32-NEXT:    srli t6, a5, 16
-; RV32-NEXT:    srli s0, a5, 8
-; RV32-NEXT:    srli s1, a4, 24
-; RV32-NEXT:    srli s2, a4, 16
-; RV32-NEXT:    srli s3, a4, 8
-; RV32-NEXT:  .LBB11_2: # %storeloop
-; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    sb a1, 0(a0)
-; RV32-NEXT:    sb a6, 4(a0)
-; RV32-NEXT:    sb a7, 3(a0)
-; RV32-NEXT:    sb t0, 2(a0)
-; RV32-NEXT:    sb t1, 1(a0)
-; RV32-NEXT:    sb a5, 8(a0)
-; RV32-NEXT:    sb t2, 7(a0)
-; RV32-NEXT:    sb t3, 6(a0)
-; RV32-NEXT:    sb t4, 5(a0)
-; RV32-NEXT:    sb a4, 12(a0)
-; RV32-NEXT:    sb t5, 11(a0)
-; RV32-NEXT:    sb t6, 10(a0)
-; RV32-NEXT:    sb s0, 9(a0)
-; RV32-NEXT:    sb s1, 15(a0)
-; RV32-NEXT:    sb s2, 14(a0)
-; RV32-NEXT:    sb s3, 13(a0)
-; RV32-NEXT:    addi a0, a0, 256
-; RV32-NEXT:    bne a0, a3, .LBB11_2
-; RV32-NEXT:  .LBB11_3: # %remcheck
-; RV32-NEXT:    beqz a2, .LBB11_6
-; RV32-NEXT:  # %bb.4: # %remainderloop.preheader
-; RV32-NEXT:    li a3, 0
-; RV32-NEXT:    li a7, 0
-; RV32-NEXT:  .LBB11_5: # %remainderloop
-; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    add t0, a0, a3
-; RV32-NEXT:    sb a1, 0(t0)
-; RV32-NEXT:    addi a3, a3, 1
-; RV32-NEXT:    seqz t0, a3
-; RV32-NEXT:    add a7, a7, t0
-; RV32-NEXT:    srli a1, a1, 8
-; RV32-NEXT:    slli t0, a6, 24
-; RV32-NEXT:    or a1, a1, t0
-; RV32-NEXT:    srli a6, a6, 8
-; RV32-NEXT:    slli t0, a5, 24
-; RV32-NEXT:    or a6, a6, t0
-; RV32-NEXT:    srli a5, a5, 8
-; RV32-NEXT:    slli t0, a4, 24
-; RV32-NEXT:    or a5, a5, t0
-; RV32-NEXT:    sltu t0, a3, a2
-; RV32-NEXT:    seqz t1, a7
-; RV32-NEXT:    and t0, t1, t0
-; RV32-NEXT:    srli a4, a4, 8
-; RV32-NEXT:    bnez t0, .LBB11_5
-; RV32-NEXT:  .LBB11_6: # %split
-; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s3, 0(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: memset_x:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -32
-; RV64-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s3, 0(sp) # 8-byte Folded Spill
-; RV64-NEXT:    srli a4, a3, 4
-; RV64-NEXT:    andi a3, a3, 15
-; RV64-NEXT:    beqz a4, .LBB11_3
-; RV64-NEXT:  # %bb.1: # %storeloop.preheader
-; RV64-NEXT:    slli a4, a4, 8
-; RV64-NEXT:    add a4, a0, a4
-; RV64-NEXT:    srli a5, a1, 56
-; RV64-NEXT:    srli a6, a1, 48
-; RV64-NEXT:    srli a7, a1, 40
-; RV64-NEXT:    srli t0, a1, 32
-; RV64-NEXT:    srli t1, a1, 24
-; RV64-NEXT:    srli t2, a1, 16
-; RV64-NEXT:    srli t3, a1, 8
-; RV64-NEXT:    srli t4, a2, 56
-; RV64-NEXT:    srli t5, a2, 48
-; RV64-NEXT:    srli t6, a2, 40
-; RV64-NEXT:    srli s0, a2, 32
-; RV64-NEXT:    srli s1, a2, 24
-; RV64-NEXT:    srli s2, a2, 16
-; RV64-NEXT:    srli s3, a2, 8
-; RV64-NEXT:  .LBB11_2: # %storeloop
-; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    sb a2, 8(a0)
-; RV64-NEXT:    sb a5, 7(a0)
-; RV64-NEXT:    sb a6, 6(a0)
-; RV64-NEXT:    sb a7, 5(a0)
-; RV64-NEXT:    sb t0, 4(a0)
-; RV64-NEXT:    sb t1, 3(a0)
-; RV64-NEXT:    sb t2, 2(a0)
-; RV64-NEXT:    sb t3, 1(a0)
-; RV64-NEXT:    sb t4, 15(a0)
-; RV64-NEXT:    sb t5, 14(a0)
-; RV64-NEXT:    sb t6, 13(a0)
-; RV64-NEXT:    sb s0, 12(a0)
-; RV64-NEXT:    sb s1, 11(a0)
-; RV64-NEXT:    sb s2, 10(a0)
-; RV64-NEXT:    sb s3, 9(a0)
-; RV64-NEXT:    addi a0, a0, 256
-; RV64-NEXT:    bne a0, a4, .LBB11_2
-; RV64-NEXT:  .LBB11_3: # %remcheck
-; RV64-NEXT:    beqz a3, .LBB11_6
-; RV64-NEXT:  # %bb.4: # %remainderloop.preheader
-; RV64-NEXT:    li a4, 0
-; RV64-NEXT:  .LBB11_5: # %remainderloop
-; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-NEXT:    add a5, a0, a4
-; RV64-NEXT:    sb a1, 0(a5)
-; RV64-NEXT:    addi a4, a4, 1
-; RV64-NEXT:    srli a1, a1, 8
-; RV64-NEXT:    slli a5, a2, 56
-; RV64-NEXT:    or a1, a1, a5
-; RV64-NEXT:    srli a2, a2, 8
-; RV64-NEXT:    bltu a4, a3, .LBB11_5
-; RV64-NEXT:  .LBB11_6: # %split
-; RV64-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s3, 0(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 32
-; RV64-NEXT:    ret
-;
-; RV32-FAST-LABEL: memset_x:
-; RV32-FAST:       # %bb.0:
-; RV32-FAST-NEXT:    lw a4, 12(a1)
-; RV32-FAST-NEXT:    lw a5, 8(a1)
-; RV32-FAST-NEXT:    lw a6, 4(a1)
-; RV32-FAST-NEXT:    lw a1, 0(a1)
-; RV32-FAST-NEXT:    slli a7, a3, 28
-; RV32-FAST-NEXT:    srli t0, a2, 4
-; RV32-FAST-NEXT:    or a7, t0, a7
-; RV32-FAST-NEXT:    srli a3, a3, 4
-; RV32-FAST-NEXT:    or a3, a7, a3
-; RV32-FAST-NEXT:    andi a2, a2, 15
-; RV32-FAST-NEXT:    beqz a3, .LBB11_3
-; RV32-FAST-NEXT:  # %bb.1: # %storeloop.preheader
-; RV32-FAST-NEXT:    slli a3, a7, 8
-; RV32-FAST-NEXT:    add a3, a0, a3
-; RV32-FAST-NEXT:  .LBB11_2: # %storeloop
-; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-FAST-NEXT:    sw a1, 0(a0)
-; RV32-FAST-NEXT:    sw a6, 4(a0)
-; RV32-FAST-NEXT:    sw a5, 8(a0)
-; RV32-FAST-NEXT:    sw a4, 12(a0)
-; RV32-FAST-NEXT:    addi a0, a0, 256
-; RV32-FAST-NEXT:    bne a0, a3, .LBB11_2
-; RV32-FAST-NEXT:  .LBB11_3: # %remcheck
-; RV32-FAST-NEXT:    beqz a2, .LBB11_6
-; RV32-FAST-NEXT:  # %bb.4: # %remainderloop.preheader
-; RV32-FAST-NEXT:    li a3, 0
-; RV32-FAST-NEXT:    li a7, 0
-; RV32-FAST-NEXT:  .LBB11_5: # %remainderloop
-; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-FAST-NEXT:    add t0, a0, a3
-; RV32-FAST-NEXT:    sb a1, 0(t0)
-; RV32-FAST-NEXT:    addi a3, a3, 1
-; RV32-FAST-NEXT:    seqz t0, a3
-; RV32-FAST-NEXT:    add a7, a7, t0
-; RV32-FAST-NEXT:    srli a1, a1, 8
-; RV32-FAST-NEXT:    slli t0, a6, 24
-; RV32-FAST-NEXT:    or a1, a1, t0
-; RV32-FAST-NEXT:    srli a6, a6, 8
-; RV32-FAST-NEXT:    slli t0, a5, 24
-; RV32-FAST-NEXT:    or a6, a6, t0
-; RV32-FAST-NEXT:    srli a5, a5, 8
-; RV32-FAST-NEXT:    slli t0, a4, 24
-; RV32-FAST-NEXT:    or a5, a5, t0
-; RV32-FAST-NEXT:    sltu t0, a3, a2
-; RV32-FAST-NEXT:    seqz t1, a7
-; RV32-FAST-NEXT:    and t0, t1, t0
-; RV32-FAST-NEXT:    srli a4, a4, 8
-; RV32-FAST-NEXT:    bnez t0, .LBB11_5
-; RV32-FAST-NEXT:  .LBB11_6: # %split
-; RV32-FAST-NEXT:    ret
+; RV32-BOTH-LABEL: memset_x:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    or a3, a2, a3
+; RV32-BOTH-NEXT:    beqz a3, .LBB3_3
+; RV32-BOTH-NEXT:  # %bb.1: # %storeloop.preheader
+; RV32-BOTH-NEXT:    lw a3, 12(a1)
+; RV32-BOTH-NEXT:    lw a4, 8(a1)
+; RV32-BOTH-NEXT:    lw a5, 4(a1)
+; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    slli a2, a2, 4
+; RV32-BOTH-NEXT:    add a2, a0, a2
+; RV32-BOTH-NEXT:  .LBB3_2: # %storeloop
+; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-BOTH-NEXT:    sw a1, 0(a0)
+; RV32-BOTH-NEXT:    sw a5, 4(a0)
+; RV32-BOTH-NEXT:    sw a4, 8(a0)
+; RV32-BOTH-NEXT:    sw a3, 12(a0)
+; RV32-BOTH-NEXT:    addi a0, a0, 16
+; RV32-BOTH-NEXT:    bne a0, a2, .LBB3_2
+; RV32-BOTH-NEXT:  .LBB3_3: # %split
+; RV32-BOTH-NEXT:    ret
 ;
-; RV64-FAST-LABEL: memset_x:
-; RV64-FAST:       # %bb.0:
-; RV64-FAST-NEXT:    srli a4, a3, 4
-; RV64-FAST-NEXT:    andi a3, a3, 15
-; RV64-FAST-NEXT:    beqz a4, .LBB11_3
-; RV64-FAST-NEXT:  # %bb.1: # %storeloop.preheader
-; RV64-FAST-NEXT:    slli a4, a4, 8
-; RV64-FAST-NEXT:    add a4, a0, a4
-; RV64-FAST-NEXT:  .LBB11_2: # %storeloop
-; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-FAST-NEXT:    sd a1, 0(a0)
-; RV64-FAST-NEXT:    sd a2, 8(a0)
-; RV64-FAST-NEXT:    addi a0, a0, 256
-; RV64-FAST-NEXT:    bne a0, a4, .LBB11_2
-; RV64-FAST-NEXT:  .LBB11_3: # %remcheck
-; RV64-FAST-NEXT:    beqz a3, .LBB11_6
-; RV64-FAST-NEXT:  # %bb.4: # %remainderloop.preheader
-; RV64-FAST-NEXT:    li a4, 0
-; RV64-FAST-NEXT:  .LBB11_5: # %remainderloop
-; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-FAST-NEXT:    add a5, a0, a4
-; RV64-FAST-NEXT:    sb a1, 0(a5)
-; RV64-FAST-NEXT:    addi a4, a4, 1
-; RV64-FAST-NEXT:    srli a1, a1, 8
-; RV64-FAST-NEXT:    slli a5, a2, 56
-; RV64-FAST-NEXT:    or a1, a1, a5
-; RV64-FAST-NEXT:    srli a2, a2, 8
-; RV64-FAST-NEXT:    bltu a4, a3, .LBB11_5
-; RV64-FAST-NEXT:  .LBB11_6: # %split
-; RV64-FAST-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
+; RV64-BOTH-LABEL: memset_x:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    beqz a3, .LBB3_3
+; RV64-BOTH-NEXT:  # %bb.1: # %storeloop.preheader
+; RV64-BOTH-NEXT:    slli a3, a3, 4
+; RV64-BOTH-NEXT:    add a3, a0, a3
+; RV64-BOTH-NEXT:  .LBB3_2: # %storeloop
+; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-BOTH-NEXT:    sd a1, 0(a0)
+; RV64-BOTH-NEXT:    sd a2, 8(a0)
+; RV64-BOTH-NEXT:    addi a0, a0, 16
+; RV64-BOTH-NEXT:    bne a0, a3, .LBB3_2
+; RV64-BOTH-NEXT:  .LBB3_3: # %split
+; RV64-BOTH-NEXT:    ret
+  tail call void @llvm.memset_pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 %x, i1 0)
   ret void
 }
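
The stride fix visible throughout this test (addi a0, a0, 256 becoming
addi a0, a0, 16) follows from the GEP change in createMemSetPatternLoop: a
getelementptr over the pattern type is scaled by the element size, so
indexing by PatternSize advanced PatternSize elements rather than
PatternSize bytes. An illustrative before/after, with %p standing in for the
loop's pointer PHI:

    %next.old = getelementptr inbounds i128, ptr %p, i64 16 ; 16 x i128 = 256 bytes
    %next.new = getelementptr inbounds i128, ptr %p, i64 1  ; one i128 = 16 bytes
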
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
index 74ca4c4422a755..4c79f7d9f36c60 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
@@ -4,28 +4,15 @@
 define void @memset_pattern_i128_1(ptr %a, i128 %value) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i128_1(
 ; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:    br i1 true, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
 ; CHECK:       [[STORELOOP]]:
 ; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 1, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
 ; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
-; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 16
+; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 1
 ; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
-; CHECK:       [[REMCHECK]]:
-; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
-; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
-; CHECK:       [[REMAINDERLOOP]]:
-; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
-; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
-; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
-; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 1
-; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -33,95 +20,18 @@ define void @memset_pattern_i128_1(ptr %a, i128 %value) nounwind {
   ret void
 }
 
-define void @memset_pattern_i128_3(ptr %a, i128 %value) nounwind {
-; CHECK-LABEL: define void @memset_pattern_i128_3(
-; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    br i1 true, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
-; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 16
-; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
-; CHECK:       [[REMCHECK]]:
-; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
-; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
-; CHECK:       [[REMAINDERLOOP]]:
-; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
-; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
-; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
-; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 3
-; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
-; CHECK:       [[SPLIT]]:
-; CHECK-NEXT:    ret void
-;
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 3, i1 0)
-  ret void
-}
-
-define void @memset_pattern_i128_14(ptr %a, i128 %value) nounwind {
-; CHECK-LABEL: define void @memset_pattern_i128_14(
-; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    br i1 true, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
-; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 16
-; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
-; CHECK:       [[REMCHECK]]:
-; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
-; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
-; CHECK:       [[REMAINDERLOOP]]:
-; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
-; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
-; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
-; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 14
-; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
-; CHECK:       [[SPLIT]]:
-; CHECK-NEXT:    ret void
-;
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 14, i1 0)
-  ret void
-}
-
 define void @memset_pattern_i128_16(ptr %a, i128 %value) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i128_16(
 ; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    br i1 false, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
 ; CHECK:       [[STORELOOP]]:
 ; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 1, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 16, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
 ; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
-; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 16
+; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 1
 ; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
-; CHECK:       [[REMCHECK]]:
-; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
-; CHECK-NEXT:    br i1 true, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
-; CHECK:       [[REMAINDERLOOP]]:
-; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
-; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
-; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
-; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 0
-; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -129,67 +39,19 @@ define void @memset_pattern_i128_16(ptr %a, i128 %value) nounwind {
   ret void
 }
 
-define void @memset_pattern_i128_38(ptr %a, i128 %value) nounwind {
-; CHECK-LABEL: define void @memset_pattern_i128_38(
-; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    br i1 false, label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 2, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
-; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 16
-; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[REMCHECK]]
-; CHECK:       [[REMCHECK]]:
-; CHECK-NEXT:    [[TMP6:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP3]], %[[STORELOOP]] ]
-; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
-; CHECK:       [[REMAINDERLOOP]]:
-; CHECK-NEXT:    [[TMP7:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP11:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP8:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP12:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i8
-; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[TMP7]]
-; CHECK-NEXT:    store i8 [[TMP9]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP11]] = add i64 [[TMP7]], 1
-; CHECK-NEXT:    [[TMP12]] = lshr i128 [[TMP8]], 8
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], 6
-; CHECK-NEXT:    br i1 [[TMP13]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
-; CHECK:       [[SPLIT]]:
-; CHECK-NEXT:    ret void
-;
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 38, i1 0)
-  ret void
-}
-
 define void @memset_pattern_i128_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i128_x(
 ; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[X]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[X]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 0, [[TMP1]]
-; CHECK-NEXT:    br i1 [[TMP3]], label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
 ; CHECK:       [[STORELOOP]]:
 ; CHECK-NEXT:    [[TMP4:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP5:%.*]] = phi i64 [ [[TMP1]], [[TMP0]] ], [ [[TMP7:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP7:%.*]], %[[STORELOOP]] ]
 ; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP4]], align 1
-; CHECK-NEXT:    [[TMP6]] = getelementptr inbounds i128, ptr [[TMP4]], i64 16
+; CHECK-NEXT:    [[TMP6]] = getelementptr inbounds i128, ptr [[TMP4]], i64 1
 ; CHECK-NEXT:    [[TMP7]] = sub i64 [[TMP5]], 1
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[TMP8]], label %[[STORELOOP]], label %[[REMCHECK]]
-; CHECK:       [[REMCHECK]]:
-; CHECK-NEXT:    [[TMP9:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP6]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[TMP2]], 0
-; CHECK-NEXT:    br i1 [[TMP10]], label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
-; CHECK:       [[REMAINDERLOOP]]:
-; CHECK-NEXT:    [[TMP11:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP15:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP12:%.*]] = phi i128 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP16:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP13:%.*]] = trunc i128 [[TMP12]] to i8
-; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[TMP11]]
-; CHECK-NEXT:    store i8 [[TMP13]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP15]] = add i64 [[TMP11]], 1
-; CHECK-NEXT:    [[TMP16]] = lshr i128 [[TMP12]], 8
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ult i64 [[TMP15]], [[TMP2]]
-; CHECK-NEXT:    br i1 [[TMP17]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    br i1 [[TMP8]], label %[[STORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -200,32 +62,16 @@ define void @memset_pattern_i128_x(ptr %a, i128 %value, i64 %x) nounwind {
 define void @memset_pattern_i256_x(ptr %a, i256 %value, i64 %x) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i256_x(
 ; CHECK-SAME: ptr [[A:%.*]], i256 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[X]], 5
-; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[X]], 31
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 0, [[TMP1]]
-; CHECK-NEXT:    br i1 [[TMP3]], label %[[REMCHECK:.*]], label %[[STORELOOP:.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
 ; CHECK:       [[STORELOOP]]:
 ; CHECK-NEXT:    [[TMP4:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP5:%.*]] = phi i64 [ [[TMP1]], [[TMP0]] ], [ [[TMP7:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP7:%.*]], %[[STORELOOP]] ]
 ; CHECK-NEXT:    store i256 [[VALUE]], ptr [[TMP4]], align 1
-; CHECK-NEXT:    [[TMP6]] = getelementptr inbounds i256, ptr [[TMP4]], i64 32
+; CHECK-NEXT:    [[TMP6]] = getelementptr inbounds i256, ptr [[TMP4]], i64 1
 ; CHECK-NEXT:    [[TMP7]] = sub i64 [[TMP5]], 1
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[TMP8]], label %[[STORELOOP]], label %[[REMCHECK]]
-; CHECK:       [[REMCHECK]]:
-; CHECK-NEXT:    [[TMP9:%.*]] = phi ptr [ [[A]], [[TMP0]] ], [ [[TMP6]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[TMP2]], 0
-; CHECK-NEXT:    br i1 [[TMP10]], label %[[SPLIT:.*]], label %[[REMAINDERLOOP:.*]]
-; CHECK:       [[REMAINDERLOOP]]:
-; CHECK-NEXT:    [[TMP11:%.*]] = phi i64 [ 0, %[[REMCHECK]] ], [ [[TMP15:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP12:%.*]] = phi i256 [ [[VALUE]], %[[REMCHECK]] ], [ [[TMP16:%.*]], %[[REMAINDERLOOP]] ]
-; CHECK-NEXT:    [[TMP13:%.*]] = trunc i256 [[TMP12]] to i8
-; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[TMP11]]
-; CHECK-NEXT:    store i8 [[TMP13]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP15]] = add i64 [[TMP11]], 1
-; CHECK-NEXT:    [[TMP16]] = lshr i256 [[TMP12]], 8
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ult i64 [[TMP15]], [[TMP2]]
-; CHECK-NEXT:    br i1 [[TMP17]], label %[[REMAINDERLOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    br i1 [[TMP8]], label %[[STORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;

>From be558f9d52e70e1ce9528ab4998cc52c76ad3cbf Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 31 Jul 2024 14:36:45 +0100
Subject: [PATCH 07/24] Rename to llvm.memset.pattern as requested in review

---
 llvm/docs/LangRef.rst                                  | 10 +++++-----
 llvm/include/llvm/IR/IntrinsicInst.h                   |  2 +-
 llvm/include/llvm/IR/Intrinsics.td                     |  2 +-
 llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp       |  2 +-
 llvm/test/CodeGen/RISCV/memset-pattern.ll              | 10 +++++-----
 .../PreISelIntrinsicLowering/PowerPC/memset-pattern.ll |  6 +++---
 .../RISCV/memset-pattern-non-power-of-two-pattern.ll   |  2 +-
 .../PreISelIntrinsicLowering/RISCV/memset-pattern.ll   |  8 ++++----
 llvm/test/Verifier/intrinsic-immarg.ll                 |  6 +++---
 llvm/test/Verifier/memset-pattern-inline.ll            |  4 ++--
 10 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index cfa42797012bc0..18002261a1ad64 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15232,7 +15232,7 @@ external functions.
 
 .. _int_memset_pattern:
 
-'``llvm.memset_pattern``' Intrinsic
+'``llvm.memset.pattern``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Syntax:
@@ -15244,13 +15244,13 @@ address spaces. Not all targets support all bit widths however.
 
 ::
 
-      declare void @llvm.memset_pattern.p0.i64.i128(ptr <dest>, i128 <val>,
+      declare void @llvm.memset.pattern.p0.i64.i128(ptr <dest>, i128 <val>,
                                                     i64 <len>, i1 <isvolatile>)
 
 Overview:
 """""""""
 
-The '``llvm.memset_pattern.*``' intrinsics fill a block of memory with
+The '``llvm.memset.pattern.*``' intrinsics fill a block of memory with
 a particular value. This may be expanded to an inline loop, a sequence of
 stores, or a libcall depending on what is available for the target and the
 expected performance and code size impact.
@@ -15267,14 +15267,14 @@ The :ref:`align <attr_align>` parameter attribute can be provided
 for the first argument.
 
 If the ``isvolatile`` parameter is ``true``, the
-``llvm.memset_pattern`` call is a :ref:`volatile operation <volatile>`. The
+``llvm.memset.pattern`` call is a :ref:`volatile operation <volatile>`. The
 detailed access behavior is not very cleanly specified and it is unwise to
 depend on it.
 
 Semantics:
 """"""""""
 
-The '``llvm.memset_pattern.*``' intrinsics fill "len" bytes of memory
+The '``llvm.memset.pattern.*``' intrinsics fill "len" bytes of memory
 starting at the destination location. If the argument is known to be aligned
 to some boundary, this can be specified as an attribute on the argument.
 
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index b549852c3fc228..ae69cf93f7d510 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1221,7 +1221,7 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
 };
 
 /// This class wraps the llvm.memset, llvm.memset.inline, and
-/// llvm.memset_pattern intrinsics.
+/// llvm.memset.pattern intrinsics.
 class MemSetInst : public MemSetBase<MemIntrinsic> {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index f79a19e0a8d298..548590c6c17934 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1009,7 +1009,7 @@ def int_memset_pattern
       [llvm_anyptr_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_i1_ty],
       [IntrWriteMem, IntrArgMemOnly, IntrWillReturn, IntrNoFree, IntrNoCallback,
        NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
-       ImmArg<ArgIndex<3>>], "llvm.memset_pattern">;
+       ImmArg<ArgIndex<3>>]>;
 
 // FIXME: Add version of these floating point intrinsics which allow non-default
 // rounding modes and FP exception handling.
diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index 29d3ff3bed244b..370b552eceb7ba 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -464,7 +464,7 @@ static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
   const DataLayout &DL = F->getDataLayout();
 
   if (DL.isBigEndian())
-    report_fatal_error("memset_pattern.inline expansion not currently "
+    report_fatal_error("memset.pattern expansion not currently "
                        "implemented for big-endian targets",
                        false);
 
diff --git a/llvm/test/CodeGen/RISCV/memset-pattern.ll b/llvm/test/CodeGen/RISCV/memset-pattern.ll
index 417121e6a27429..76c172152a0d3b 100644
--- a/llvm/test/CodeGen/RISCV/memset-pattern.ll
+++ b/llvm/test/CodeGen/RISCV/memset-pattern.ll
@@ -8,7 +8,7 @@
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+unaligned-scalar-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 
-; TODO: Due to the initial naive lowering implementation of memset_pattern in
+; TODO: Due to the initial naive lowering implementation of memset.pattern in
 ; PreISelIntrinsicLowering, the generated code is not good.
 
 define void @memset_1(ptr %a, i128 %value) nounwind {
@@ -41,7 +41,7 @@ define void @memset_1(ptr %a, i128 %value) nounwind {
 ; RV64-BOTH-NEXT:    bne a0, a3, .LBB0_1
 ; RV64-BOTH-NEXT:  # %bb.2: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 1, i1 0)
+  tail call void @llvm.memset.pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
@@ -173,7 +173,7 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 ; RV64-FAST-NEXT:    bne a0, a3, .LBB1_1
 ; RV64-FAST-NEXT:  # %bb.2: # %split
 ; RV64-FAST-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
+  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
@@ -207,7 +207,7 @@ define void @memset_4(ptr %a, i128 %value) nounwind {
 ; RV64-BOTH-NEXT:    bne a0, a3, .LBB2_1
 ; RV64-BOTH-NEXT:  # %bb.2: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 4, i1 0)
+  tail call void @llvm.memset.pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 4, i1 0)
   ret void
 }
 
@@ -248,6 +248,6 @@ define void @memset_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; RV64-BOTH-NEXT:    bne a0, a3, .LBB3_2
 ; RV64-BOTH-NEXT:  .LBB3_3: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 %x, i1 0)
+  tail call void @llvm.memset.pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 %x, i1 0)
   ret void
 }
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
index 8434ca1c9016be..03180d566a462c 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
@@ -1,8 +1,8 @@
 ; RUN: not opt -mtriple=powerpc64 -passes=pre-isel-intrinsic-lowering -S -o - %s 2>&1 | FileCheck %s
 
-; CHECK: LLVM ERROR: memset_pattern.inline expansion not currently implemented for big-endian targets 
+; CHECK: LLVM ERROR: memset.pattern expansion not currently implemented for big-endian targets 
 
-define void @memset_pattern_x(ptr %a, i128 %value, i64 %x) nounwind {
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
+define void @memset.pattern(ptr %a, i128 %value, i64 %x) nounwind {
+  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
   ret void
 }
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll
index ce4ae0cf14c9e8..fa1b42e082eccd 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll
@@ -3,6 +3,6 @@
 ; CHECK: LLVM ERROR: Pattern width for memset_pattern must be a power of 2
 
 define void @memset_pattern_i127_x(ptr %a, i127 %value, i64 %x) nounwind {
-  tail call void @llvm.memset_pattern.p0.i64.i127(ptr %a, i127 %value, i64 %x, i1 0)
+  tail call void @llvm.memset.pattern.p0.i64.i127(ptr %a, i127 %value, i64 %x, i1 0)
   ret void
 }
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
index 4c79f7d9f36c60..16e7464eba9e75 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
@@ -16,7 +16,7 @@ define void @memset_pattern_i128_1(ptr %a, i128 %value) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
+  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
@@ -35,7 +35,7 @@ define void @memset_pattern_i128_16(ptr %a, i128 %value) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 16, i1 0)
+  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 16, i1 0)
   ret void
 }
 
@@ -55,7 +55,7 @@ define void @memset_pattern_i128_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset_pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
+  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
   ret void
 }
 
@@ -75,6 +75,6 @@ define void @memset_pattern_i256_x(ptr %a, i256 %value, i64 %x) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset_pattern.p0.i64.i256(ptr %a, i256 %value, i64 %x, i1 0)
+  tail call void @llvm.memset.pattern.p0.i64.i256(ptr %a, i256 %value, i64 %x, i1 0)
   ret void
 }
diff --git a/llvm/test/Verifier/intrinsic-immarg.ll b/llvm/test/Verifier/intrinsic-immarg.ll
index 37745c6a9acee7..f860c575a157b4 100644
--- a/llvm/test/Verifier/intrinsic-immarg.ll
+++ b/llvm/test/Verifier/intrinsic-immarg.ll
@@ -72,12 +72,12 @@ define void @memset_inline_is_volatile(ptr %dest, i8 %value, i1 %is.volatile) {
 }
 
 
-declare void @llvm.memset_pattern.p0.i32.i32(ptr nocapture, i32, i32, i1)
+declare void @llvm.memset.pattern.p0.i32.i32(ptr nocapture, i32, i32, i1)
 define void @memset_pattern_is_volatile(ptr %dest, i32 %value, i1 %is.volatile) {
   ; CHECK: immarg operand has non-immediate parameter
   ; CHECK-NEXT: i1 %is.volatile
-  ; CHECK-NEXT: call void @llvm.memset_pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
-  call void @llvm.memset_pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
+  ; CHECK-NEXT: call void @llvm.memset.pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
+  call void @llvm.memset.pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
   ret void
 }
 
diff --git a/llvm/test/Verifier/memset-pattern-inline.ll b/llvm/test/Verifier/memset-pattern-inline.ll
index 7f2e01ef99ea51..93d9ee1cfba109 100644
--- a/llvm/test/Verifier/memset-pattern-inline.ll
+++ b/llvm/test/Verifier/memset-pattern-inline.ll
@@ -3,7 +3,7 @@
 ; CHECK: alignment is not a power of two
 
 define void @foo(ptr %P, i32 %value) {
-  call void @llvm.memset_pattern.p0.i32.i32(ptr align 3 %P, i32 %value, i32 4, i1 false)
+  call void @llvm.memset.pattern.p0.i32.i32(ptr align 3 %P, i32 %value, i32 4, i1 false)
   ret void
 }
-declare void @llvm.memset_pattern.p0.i32.i32(ptr nocapture, i32, i32, i1) nounwind
+declare void @llvm.memset.pattern.p0.i32.i32(ptr nocapture, i32, i32, i1) nounwind

>From dfc0564152554d74652bb6502b4e9a83b1be7c30 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 14 Aug 2024 10:07:46 +0100
Subject: [PATCH 08/24] Add comments to memset_pattern intrinsic to describe
 args

---
 llvm/include/llvm/IR/Intrinsics.td | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 548590c6c17934..87f5d370d02336 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1006,7 +1006,10 @@ def int_memset_inline
 // Memset variant that writes a given pattern.
 def int_memset_pattern
     : Intrinsic<[],
-      [llvm_anyptr_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_i1_ty],
+      [llvm_anyptr_ty, // Destination.
+       llvm_anyint_ty, // Pattern value.
+       llvm_anyint_ty, // Count (number of times to store the pattern value).
+       llvm_i1_ty],    // IsVolatile.
       [IntrWriteMem, IntrArgMemOnly, IntrWillReturn, IntrNoFree, IntrNoCallback,
        NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
        ImmArg<ArgIndex<3>>]>;
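
As a sketch of how a call's operands line up with these comments (the
%dst and %pat names and the count of 8 are invented for illustration,
and the unmangled intrinsic name matches the form the in-tree tests
adopt later in this series):

  ; dest = %dst, pattern = %pat (16 bytes), count = 8 (the pattern is
  ; stored 8 times, writing 128 bytes), isvolatile = false (immarg).
  tail call void @llvm.memset.pattern(ptr align 8 %dst, i128 %pat, i64 8, i1 false)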

>From 8ac8b69cdf94e281e373b691dfbffa3fac8669b3 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 14 Aug 2024 10:27:05 +0100
Subject: [PATCH 09/24] Improve memset.pattern LangRef: fix outdated refs to
 bytes and mention endianness

---
 llvm/docs/LangRef.rst | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 18002261a1ad64..9e646e9eb04ec1 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15245,7 +15245,7 @@ address spaces. Not all targets support all bit widths however.
 ::
 
       declare void @llvm.memset.pattern.p0.i64.i128(ptr <dest>, i128 <val>,
-                                                    i64 <len>, i1 <isvolatile>)
+                                                    i64 <count>, i1 <isvolatile>)
 
 Overview:
 """""""""
@@ -15274,16 +15274,18 @@ depend on it.
 Semantics:
 """"""""""
 
-The '``llvm.memset.pattern.*``' intrinsics fill "len" bytes of memory
-starting at the destination location. If the argument is known to be aligned
-to some boundary, this can be specified as an attribute on the argument.
+The '``llvm.memset.pattern.*``' intrinsics fill memory starting at the
+destination location with the given pattern ``<count>`` times. If the argument
+is known to be aligned to some boundary, this can be specified as an attribute
+on the argument. The pattern fills will respect the endianness of the target:
+i.e. on little endian targets, the least significant byte of the pattern is
+first in memory, while the most significant byte is first in memory for big
+endian targets.
 
-If ``<len>`` is not an integer multiple of the pattern width in bytes, then any
-remainder bytes will be copied from ``<val>``.
-If ``<len>`` is 0, it is no-op modulo the behavior of attributes attached to
+If ``<count>`` is 0, it is a no-op modulo the behavior of attributes attached to
 the arguments.
-If ``<len>`` is not a well-defined value, the behavior is undefined.
-If ``<len>`` is not zero, ``<dest>`` should be well-defined, otherwise the
+If ``<count>`` is not a well-defined value, the behavior is undefined.
+If ``<count>`` is not zero, ``<dest>`` should be well-defined, otherwise the
 behavior is undefined.
 
 .. _int_sqrt:
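
To make the endianness wording concrete, a hedged example with an
invented 4-byte pattern (%dst is a placeholder):

  ; On a little-endian target, the least significant byte of the
  ; pattern is stored first, so two iterations leave the bytes
  ; 01 02 03 04 01 02 03 04 at %dst.
  call void @llvm.memset.pattern(ptr %dst, i32 u0x04030201, i64 2, i1 false)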

>From 6d16c82ab5b1b1cbe1978f6e4c199700e2760b6a Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 11 Sep 2024 07:24:54 +0100
Subject: [PATCH 10/24] Excise errant memset_pattern mention

---
 llvm/docs/LangRef.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 9e646e9eb04ec1..51b7104fc9e7de 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15238,7 +15238,7 @@ external functions.
 Syntax:
 """""""
 
-This is an overloaded intrinsic. You can use ``llvm.memset_pattern`` on
+This is an overloaded intrinsic. You can use ``llvm.memset.pattern`` on
 any integer bit width that is an integral number of bytes and for different
 address spaces. Not all targets support all bit widths however.
 

>From 55ee84abc3315b3e30c922c2f3750bd69cfb1d5d Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 11 Sep 2024 07:24:54 +0100
Subject: [PATCH 11/24] Fix incorrect mangling in LangRef and explain that the
 memory address is incremented by the allocation size

---
 llvm/docs/LangRef.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 51b7104fc9e7de..17f4473aade1d7 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15244,7 +15244,7 @@ address spaces. Not all targets support all bit widths however.
 
 ::
 
-      declare void @llvm.memset.pattern.p0.i64.i128(ptr <dest>, i128 <val>,
+      declare void @llvm.memset.pattern.p0.i128.i64(ptr <dest>, i128 <val>,
                                                     i64 <count>, i1 <isvolatile>)
 
 Overview:
@@ -15280,7 +15280,8 @@ is known to be aligned to some boundary, this can be specified as an attribute
 on the argument. The pattern fills will respect the endianness of the target:
 i.e. on little endian targets, the least significant byte of the pattern is
 first in memory, while the most significant byte is first in memory for big
-endian targets.
+endian targets. The memory address is incremented by the allocation size of
+the type.
 
 If ``<count>`` is 0, it is a no-op modulo the behavior of attributes attached to
 the arguments.
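
A sketch of what the allocation-size rule means in practice, assuming
a typical data layout where i15 is allocated 2 bytes (%dst and %pat
are placeholders):

  ; Each iteration advances the destination by the 2-byte allocation
  ; size of i15, so a count of 4 writes 8 bytes in total.
  call void @llvm.memset.pattern(ptr %dst, i15 %pat, i64 4, i1 false)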

>From 1e60edd21936fb04f50ffdb7383e070f733d97d7 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 11 Sep 2024 07:24:55 +0100
Subject: [PATCH 12/24] Allow memset.pattern expansion for big endian targets

The refusal to do so is a holdover from an older expansion strategy and
semantics.
---
 .../Transforms/Utils/LowerMemIntrinsics.cpp   |  6 -----
 .../PowerPC/memset-pattern.ll                 | 23 ++++++++++++++++---
 2 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index 370b552eceb7ba..a9739144ac0774 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -461,12 +461,6 @@ static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
                                     Align DstAlign, bool IsVolatile) {
   BasicBlock *OrigBB = InsertBefore->getParent();
   Function *F = OrigBB->getParent();
-  const DataLayout &DL = F->getDataLayout();
-
-  if (DL.isBigEndian())
-    report_fatal_error("memset.pattern expansion not currently "
-                       "implemented for big-endian targets",
-                       false);
 
   if (!isPowerOf2_32(SetValue->getType()->getScalarSizeInBits()))
     report_fatal_error("Pattern width for memset_pattern must be a power of 2",
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
index 03180d566a462c..aa2dbb5d520725 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
@@ -1,8 +1,25 @@
-; RUN: not opt -mtriple=powerpc64 -passes=pre-isel-intrinsic-lowering -S -o - %s 2>&1 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=powerpc64 -passes=pre-isel-intrinsic-lowering -S -o - %s 2>&1 | FileCheck %s
 
-; CHECK: LLVM ERROR: memset.pattern expansion not currently implemented for big-endian targets 
+; Simple smoke test that memset.pattern is still expanded on big endian
+; targets.
 
 define void @memset.pattern(ptr %a, i128 %value, i64 %x) nounwind {
-  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
+; CHECK-LABEL: define void @memset.pattern(
+; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP5:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP4]] = getelementptr inbounds i128, ptr [[TMP2]], i64 1
+; CHECK-NEXT:    [[TMP5]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[TMP6]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    ret void
+;
+  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 %x, i1 0)
   ret void
 }
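
Mirroring the little-endian example in the LangRef text, a sketch of
the byte layout a big-endian expansion now produces (values invented
for illustration):

  ; On a big-endian target, the most significant byte of the pattern
  ; is stored first, so two iterations leave the bytes
  ; 04 03 02 01 04 03 02 01 at %dst.
  call void @llvm.memset.pattern(ptr %dst, i32 u0x04030201, i64 2, i1 false)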

>From 88b5af3f6e6a8b3809d539bf25d59750c1e5d953 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 11 Sep 2024 07:24:55 +0100
Subject: [PATCH 13/24] Allow non-power-of-two pattern widths

---
 .../Transforms/Utils/LowerMemIntrinsics.cpp   |  4 ----
 ...memset-pattern-non-power-of-two-pattern.ll |  8 --------
 .../RISCV/memset-pattern.ll                   | 20 +++++++++++++++++++
 3 files changed, 20 insertions(+), 12 deletions(-)
 delete mode 100644 llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll

diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index a9739144ac0774..1977faa0c92dd6 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -462,10 +462,6 @@ static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
   BasicBlock *OrigBB = InsertBefore->getParent();
   Function *F = OrigBB->getParent();
 
-  if (!isPowerOf2_32(SetValue->getType()->getScalarSizeInBits()))
-    report_fatal_error("Pattern width for memset_pattern must be a power of 2",
-                       false);
-
   Type *TypeOfCount = Count->getType();
 
   BasicBlock *NewBB = OrigBB->splitBasicBlock(InsertBefore, "split");
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll
deleted file mode 100644
index fa1b42e082eccd..00000000000000
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern-non-power-of-two-pattern.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: not opt -mtriple=riscv64 -passes=pre-isel-intrinsic-lowering -S -o - %s 2>&1 | FileCheck %s
-
-; CHECK: LLVM ERROR: Pattern width for memset_pattern must be a power of 2
-
-define void @memset_pattern_i127_x(ptr %a, i127 %value, i64 %x) nounwind {
-  tail call void @llvm.memset.pattern.p0.i64.i127(ptr %a, i127 %value, i64 %x, i1 0)
-  ret void
-}
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
index 16e7464eba9e75..7a382400554f1d 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
@@ -39,6 +39,26 @@ define void @memset_pattern_i128_16(ptr %a, i128 %value) nounwind {
   ret void
 }
 
+define void @memset_pattern_i127_x(ptr %a, i127 %value, i64 %x) nounwind {
+; CHECK-LABEL: define void @memset_pattern_i127_x(
+; CHECK-SAME: ptr [[A:%.*]], i127 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP5:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i127 [[VALUE]], ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP4]] = getelementptr inbounds i127, ptr [[TMP2]], i64 1
+; CHECK-NEXT:    [[TMP5]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[TMP6]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    ret void
+;
+  tail call void @llvm.memset.pattern(ptr %a, i127 %value, i64 %x, i1 0)
+  ret void
+}
+
 define void @memset_pattern_i128_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i128_x(
 ; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
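
For instance, an i127 pattern is now expanded rather than rejected. A
sketch with placeholder names, assuming a typical data layout where
i127 has a 16-byte allocation size:

  ; Expands to a loop of 'store i127 %pat' with a 16-byte stride.
  call void @llvm.memset.pattern(ptr %dst, i127 %pat, i64 %n, i1 false)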

>From ea429b4ddf973b4045b96e9c0c93a57997bfe2b1 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 11 Sep 2024 07:24:55 +0100
Subject: [PATCH 14/24] Remove unnecessary and incorrect mangling from
 llvm.memset.pattern uses

---
 llvm/test/CodeGen/RISCV/memset-pattern.ll                 | 8 ++++----
 .../PreISelIntrinsicLowering/RISCV/memset-pattern.ll      | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/memset-pattern.ll b/llvm/test/CodeGen/RISCV/memset-pattern.ll
index 76c172152a0d3b..04bc01ab875595 100644
--- a/llvm/test/CodeGen/RISCV/memset-pattern.ll
+++ b/llvm/test/CodeGen/RISCV/memset-pattern.ll
@@ -41,7 +41,7 @@ define void @memset_1(ptr %a, i128 %value) nounwind {
 ; RV64-BOTH-NEXT:    bne a0, a3, .LBB0_1
 ; RV64-BOTH-NEXT:  # %bb.2: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset.pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 1, i1 0)
+  tail call void @llvm.memset.pattern(ptr align 8 %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
@@ -173,7 +173,7 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 ; RV64-FAST-NEXT:    bne a0, a3, .LBB1_1
 ; RV64-FAST-NEXT:  # %bb.2: # %split
 ; RV64-FAST-NEXT:    ret
-  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
+  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
@@ -207,7 +207,7 @@ define void @memset_4(ptr %a, i128 %value) nounwind {
 ; RV64-BOTH-NEXT:    bne a0, a3, .LBB2_1
 ; RV64-BOTH-NEXT:  # %bb.2: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset.pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 4, i1 0)
+  tail call void @llvm.memset.pattern(ptr align 8 %a, i128 %value, i64 4, i1 0)
   ret void
 }
 
@@ -248,6 +248,6 @@ define void @memset_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; RV64-BOTH-NEXT:    bne a0, a3, .LBB3_2
 ; RV64-BOTH-NEXT:  .LBB3_3: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset.pattern.p0.i64.i128(ptr align 8 %a, i128 %value, i64 %x, i1 0)
+  tail call void @llvm.memset.pattern(ptr align 8 %a, i128 %value, i64 %x, i1 0)
   ret void
 }
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
index 7a382400554f1d..ac6376fe48b67c 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
@@ -16,7 +16,7 @@ define void @memset_pattern_i128_1(ptr %a, i128 %value) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 1, i1 0)
+  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
@@ -35,7 +35,7 @@ define void @memset_pattern_i128_16(ptr %a, i128 %value) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 16, i1 0)
+  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 16, i1 0)
   ret void
 }
 
@@ -75,7 +75,7 @@ define void @memset_pattern_i128_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern.p0.i64.i128(ptr %a, i128 %value, i64 %x, i1 0)
+  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 %x, i1 0)
   ret void
 }
 
@@ -95,6 +95,6 @@ define void @memset_pattern_i256_x(ptr %a, i256 %value, i64 %x) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern.p0.i64.i256(ptr %a, i256 %value, i64 %x, i1 0)
+  tail call void @llvm.memset.pattern(ptr %a, i256 %value, i64 %x, i1 0)
   ret void
 }

>From e9c98c8f917356e9e12f83df79cbcf2442723149 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 11 Sep 2024 07:24:55 +0100
Subject: [PATCH 15/24] Rename memset-pattern-inline.ll test to
 memset-pattern.ll to reflect current naming

---
 .../test/Verifier/{memset-pattern-inline.ll => memset-pattern.ll} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename llvm/test/Verifier/{memset-pattern-inline.ll => memset-pattern.ll} (100%)

diff --git a/llvm/test/Verifier/memset-pattern-inline.ll b/llvm/test/Verifier/memset-pattern.ll
similarity index 100%
rename from llvm/test/Verifier/memset-pattern-inline.ll
rename to llvm/test/Verifier/memset-pattern.ll

>From 30d59b99c0be4b05993b374a7a1c5cf74ca1a6c6 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 11 Sep 2024 07:24:55 +0100
Subject: [PATCH 16/24] Remove unnecessary comment

---
 llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index 1977faa0c92dd6..1757b370d5257f 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -480,7 +480,6 @@ static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
   PHINode *LoopCount = LoopBuilder.CreatePHI(TypeOfCount, 0);
   LoopCount->addIncoming(Count, OrigBB);
 
-  // Create the store instruction for the pattern
   LoopBuilder.CreateAlignedStore(SetValue, CurrentDst, DstAlign, IsVolatile);
 
   Value *NextDst = LoopBuilder.CreateInBoundsGEP(

>From d83fdfb2439a1ab1d7191ef49acef1fc48ef217e Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 11 Sep 2024 07:24:56 +0100
Subject: [PATCH 17/24] Fix logic for alignment of stores in memset.pattern
 expansion

---
 .../Transforms/Utils/LowerMemIntrinsics.cpp   |  5 ++-
 .../RISCV/memset-pattern.ll                   | 34 +++++++++++++++++++
 2 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index 1757b370d5257f..3c5e9aa4ab0020 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -461,6 +461,7 @@ static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
                                     Align DstAlign, bool IsVolatile) {
   BasicBlock *OrigBB = InsertBefore->getParent();
   Function *F = OrigBB->getParent();
+  const DataLayout &DL = F->getDataLayout();
 
   Type *TypeOfCount = Count->getType();
 
@@ -480,7 +481,9 @@ static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
   PHINode *LoopCount = LoopBuilder.CreatePHI(TypeOfCount, 0);
   LoopCount->addIncoming(Count, OrigBB);
 
-  LoopBuilder.CreateAlignedStore(SetValue, CurrentDst, DstAlign, IsVolatile);
+  unsigned PatSize = DL.getTypeStoreSize(SetValue->getType());
+  Align PatAlign(commonAlignment(DstAlign, PatSize));
+  LoopBuilder.CreateAlignedStore(SetValue, CurrentDst, PatAlign, IsVolatile);
 
   Value *NextDst = LoopBuilder.CreateInBoundsGEP(
       SetValue->getType(), CurrentDst, ConstantInt::get(TypeOfCount, 1));
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
index ac6376fe48b67c..1ead4ab570ad86 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
@@ -98,3 +98,37 @@ define void @memset_pattern_i256_x(ptr %a, i256 %value, i64 %x) nounwind {
   tail call void @llvm.memset.pattern(ptr %a, i256 %value, i64 %x, i1 0)
   ret void
 }
+
+; The store alignment should be the common alignment of the destination
+; pointer and the pattern stride (the pattern's allocation size).
+define void @memset_pattern_i15_x_alignment(ptr %a, i15 %value, i64 %x) nounwind {
+; CHECK-LABEL: define void @memset_pattern_i15_x_alignment(
+; CHECK-SAME: ptr [[A:%.*]], i15 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
+; CHECK:       [[STORELOOP]]:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP5:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    store i15 [[VALUE]], ptr [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP4]] = getelementptr inbounds i15, ptr [[TMP2]], i64 1
+; CHECK-NEXT:    [[TMP5]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[TMP6]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK:       [[SPLIT]]:
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 0, [[X]]
+; CHECK-NEXT:    br i1 [[TMP7]], label %[[SPLIT1:.*]], label %[[STORELOOP2:.*]]
+; CHECK:       [[STORELOOP2]]:
+; CHECK-NEXT:    [[TMP8:%.*]] = phi ptr [ [[A]], %[[SPLIT]] ], [ [[TMP10:%.*]], %[[STORELOOP2]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = phi i64 [ [[X]], %[[SPLIT]] ], [ [[TMP11:%.*]], %[[STORELOOP2]] ]
+; CHECK-NEXT:    store i15 [[VALUE]], ptr [[TMP8]], align 2
+; CHECK-NEXT:    [[TMP10]] = getelementptr inbounds i15, ptr [[TMP8]], i64 1
+; CHECK-NEXT:    [[TMP11]] = sub i64 [[TMP9]], 1
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
+; CHECK-NEXT:    br i1 [[TMP12]], label %[[STORELOOP2]], label %[[SPLIT1]]
+; CHECK:       [[SPLIT1]]:
+; CHECK-NEXT:    ret void
+;
+  call void @llvm.memset.pattern(ptr align 1 %a, i15 %value, i64 %x, i1 0)
+  call void @llvm.memset.pattern(ptr align 2 %a, i15 %value, i64 %x, i1 0)
+  ret void
+}
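
A sketch of the alignment rule (placeholder names): because later
iterations land at %dst plus multiples of the 2-byte i15 stride, even
a more strongly aligned destination only guarantees the common
alignment of the destination alignment and the stride:

  ; commonAlignment(8, 2) == 2, so the loop's store is emitted as
  ; 'store i15 %pat, ptr %cur, align 2' despite the align 8 destination.
  call void @llvm.memset.pattern(ptr align 8 %dst, i15 %pat, i64 %n, i1 false)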

>From c19adc1489556ea8195595ed8e5685eb63c417cf Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Fri, 8 Nov 2024 15:24:21 +0000
Subject: [PATCH 18/24] Regenerate memset-pattern.ll after merge

---
 llvm/test/CodeGen/RISCV/memset-pattern.ll | 136 +++++++++++-----------
 1 file changed, 68 insertions(+), 68 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/memset-pattern.ll b/llvm/test/CodeGen/RISCV/memset-pattern.ll
index 04bc01ab875595..a112396abb74f7 100644
--- a/llvm/test/CodeGen/RISCV/memset-pattern.ll
+++ b/llvm/test/CodeGen/RISCV/memset-pattern.ll
@@ -14,17 +14,17 @@
 define void @memset_1(ptr %a, i128 %value) nounwind {
 ; RV32-BOTH-LABEL: memset_1:
 ; RV32-BOTH:       # %bb.0: # %storeloop.preheader
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    lw a2, 0(a1)
+; RV32-BOTH-NEXT:    lw a3, 4(a1)
+; RV32-BOTH-NEXT:    lw a4, 8(a1)
+; RV32-BOTH-NEXT:    lw a1, 12(a1)
 ; RV32-BOTH-NEXT:    addi a5, a0, 16
 ; RV32-BOTH-NEXT:  .LBB0_1: # %storeloop
 ; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    sw a1, 0(a0)
-; RV32-BOTH-NEXT:    sw a4, 4(a0)
-; RV32-BOTH-NEXT:    sw a3, 8(a0)
-; RV32-BOTH-NEXT:    sw a2, 12(a0)
+; RV32-BOTH-NEXT:    sw a2, 0(a0)
+; RV32-BOTH-NEXT:    sw a3, 4(a0)
+; RV32-BOTH-NEXT:    sw a4, 8(a0)
+; RV32-BOTH-NEXT:    sw a1, 12(a0)
 ; RV32-BOTH-NEXT:    addi a0, a0, 16
 ; RV32-BOTH-NEXT:    bne a0, a5, .LBB0_1
 ; RV32-BOTH-NEXT:  # %bb.2: # %split
@@ -52,41 +52,41 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 ; RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s2, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT:    lw a2, 12(a1)
-; RV32-NEXT:    lw a3, 0(a1)
+; RV32-NEXT:    lw a2, 0(a1)
+; RV32-NEXT:    lw a3, 4(a1)
 ; RV32-NEXT:    lw a4, 8(a1)
-; RV32-NEXT:    lw a1, 4(a1)
+; RV32-NEXT:    lw a1, 12(a1)
 ; RV32-NEXT:    addi a5, a0, 16
-; RV32-NEXT:    srli a6, a3, 24
-; RV32-NEXT:    srli a7, a3, 16
-; RV32-NEXT:    srli t0, a3, 8
-; RV32-NEXT:    srli t1, a1, 24
-; RV32-NEXT:    srli t2, a1, 16
-; RV32-NEXT:    srli t3, a1, 8
+; RV32-NEXT:    srli a6, a2, 24
+; RV32-NEXT:    srli a7, a2, 16
+; RV32-NEXT:    srli t0, a2, 8
+; RV32-NEXT:    srli t1, a3, 24
+; RV32-NEXT:    srli t2, a3, 16
+; RV32-NEXT:    srli t3, a3, 8
 ; RV32-NEXT:    srli t4, a4, 24
 ; RV32-NEXT:    srli t5, a4, 16
 ; RV32-NEXT:    srli t6, a4, 8
-; RV32-NEXT:    srli s0, a2, 24
-; RV32-NEXT:    srli s1, a2, 16
-; RV32-NEXT:    srli s2, a2, 8
+; RV32-NEXT:    srli s0, a1, 24
+; RV32-NEXT:    srli s1, a1, 16
+; RV32-NEXT:    srli s2, a1, 8
 ; RV32-NEXT:  .LBB1_1: # %storeloop
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    sb a3, 0(a0)
-; RV32-NEXT:    sb a1, 4(a0)
-; RV32-NEXT:    sb a6, 3(a0)
-; RV32-NEXT:    sb a7, 2(a0)
+; RV32-NEXT:    sb a2, 0(a0)
 ; RV32-NEXT:    sb t0, 1(a0)
-; RV32-NEXT:    sb a4, 8(a0)
-; RV32-NEXT:    sb t1, 7(a0)
-; RV32-NEXT:    sb t2, 6(a0)
+; RV32-NEXT:    sb a7, 2(a0)
+; RV32-NEXT:    sb a6, 3(a0)
+; RV32-NEXT:    sb a3, 4(a0)
 ; RV32-NEXT:    sb t3, 5(a0)
-; RV32-NEXT:    sb a2, 12(a0)
-; RV32-NEXT:    sb t4, 11(a0)
-; RV32-NEXT:    sb t5, 10(a0)
+; RV32-NEXT:    sb t2, 6(a0)
+; RV32-NEXT:    sb t1, 7(a0)
+; RV32-NEXT:    sb a4, 8(a0)
 ; RV32-NEXT:    sb t6, 9(a0)
-; RV32-NEXT:    sb s0, 15(a0)
-; RV32-NEXT:    sb s1, 14(a0)
+; RV32-NEXT:    sb t5, 10(a0)
+; RV32-NEXT:    sb t4, 11(a0)
+; RV32-NEXT:    sb a1, 12(a0)
 ; RV32-NEXT:    sb s2, 13(a0)
+; RV32-NEXT:    sb s1, 14(a0)
+; RV32-NEXT:    sb s0, 15(a0)
 ; RV32-NEXT:    addi a0, a0, 16
 ; RV32-NEXT:    bne a0, a5, .LBB1_1
 ; RV32-NEXT:  # %bb.2: # %split
@@ -119,22 +119,22 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 ; RV64-NEXT:    srli s2, a2, 8
 ; RV64-NEXT:  .LBB1_1: # %storeloop
 ; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-NEXT:    sb a1, 0(a0)
-; RV64-NEXT:    sb a2, 8(a0)
-; RV64-NEXT:    sb a4, 7(a0)
-; RV64-NEXT:    sb a5, 6(a0)
-; RV64-NEXT:    sb a6, 5(a0)
 ; RV64-NEXT:    sb a7, 4(a0)
-; RV64-NEXT:    sb t0, 3(a0)
-; RV64-NEXT:    sb t1, 2(a0)
+; RV64-NEXT:    sb a6, 5(a0)
+; RV64-NEXT:    sb a5, 6(a0)
+; RV64-NEXT:    sb a4, 7(a0)
+; RV64-NEXT:    sb a1, 0(a0)
 ; RV64-NEXT:    sb t2, 1(a0)
-; RV64-NEXT:    sb t3, 15(a0)
-; RV64-NEXT:    sb t4, 14(a0)
-; RV64-NEXT:    sb t5, 13(a0)
+; RV64-NEXT:    sb t1, 2(a0)
+; RV64-NEXT:    sb t0, 3(a0)
 ; RV64-NEXT:    sb t6, 12(a0)
-; RV64-NEXT:    sb s0, 11(a0)
-; RV64-NEXT:    sb s1, 10(a0)
+; RV64-NEXT:    sb t5, 13(a0)
+; RV64-NEXT:    sb t4, 14(a0)
+; RV64-NEXT:    sb t3, 15(a0)
+; RV64-NEXT:    sb a2, 8(a0)
 ; RV64-NEXT:    sb s2, 9(a0)
+; RV64-NEXT:    sb s1, 10(a0)
+; RV64-NEXT:    sb s0, 11(a0)
 ; RV64-NEXT:    addi a0, a0, 16
 ; RV64-NEXT:    bne a0, a3, .LBB1_1
 ; RV64-NEXT:  # %bb.2: # %split
@@ -146,17 +146,17 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 ;
 ; RV32-FAST-LABEL: memset_1_noalign:
 ; RV32-FAST:       # %bb.0: # %storeloop.preheader
-; RV32-FAST-NEXT:    lw a2, 12(a1)
-; RV32-FAST-NEXT:    lw a3, 8(a1)
-; RV32-FAST-NEXT:    lw a4, 4(a1)
-; RV32-FAST-NEXT:    lw a1, 0(a1)
+; RV32-FAST-NEXT:    lw a2, 0(a1)
+; RV32-FAST-NEXT:    lw a3, 4(a1)
+; RV32-FAST-NEXT:    lw a4, 8(a1)
+; RV32-FAST-NEXT:    lw a1, 12(a1)
 ; RV32-FAST-NEXT:    addi a5, a0, 16
 ; RV32-FAST-NEXT:  .LBB1_1: # %storeloop
 ; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-FAST-NEXT:    sw a1, 0(a0)
-; RV32-FAST-NEXT:    sw a4, 4(a0)
-; RV32-FAST-NEXT:    sw a3, 8(a0)
-; RV32-FAST-NEXT:    sw a2, 12(a0)
+; RV32-FAST-NEXT:    sw a2, 0(a0)
+; RV32-FAST-NEXT:    sw a3, 4(a0)
+; RV32-FAST-NEXT:    sw a4, 8(a0)
+; RV32-FAST-NEXT:    sw a1, 12(a0)
 ; RV32-FAST-NEXT:    addi a0, a0, 16
 ; RV32-FAST-NEXT:    bne a0, a5, .LBB1_1
 ; RV32-FAST-NEXT:  # %bb.2: # %split
@@ -180,17 +180,17 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 define void @memset_4(ptr %a, i128 %value) nounwind {
 ; RV32-BOTH-LABEL: memset_4:
 ; RV32-BOTH:       # %bb.0: # %storeloop.preheader
-; RV32-BOTH-NEXT:    lw a2, 12(a1)
-; RV32-BOTH-NEXT:    lw a3, 8(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    lw a2, 0(a1)
+; RV32-BOTH-NEXT:    lw a3, 4(a1)
+; RV32-BOTH-NEXT:    lw a4, 8(a1)
+; RV32-BOTH-NEXT:    lw a1, 12(a1)
 ; RV32-BOTH-NEXT:    addi a5, a0, 64
 ; RV32-BOTH-NEXT:  .LBB2_1: # %storeloop
 ; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    sw a1, 0(a0)
-; RV32-BOTH-NEXT:    sw a4, 4(a0)
-; RV32-BOTH-NEXT:    sw a3, 8(a0)
-; RV32-BOTH-NEXT:    sw a2, 12(a0)
+; RV32-BOTH-NEXT:    sw a2, 0(a0)
+; RV32-BOTH-NEXT:    sw a3, 4(a0)
+; RV32-BOTH-NEXT:    sw a4, 8(a0)
+; RV32-BOTH-NEXT:    sw a1, 12(a0)
 ; RV32-BOTH-NEXT:    addi a0, a0, 16
 ; RV32-BOTH-NEXT:    bne a0, a5, .LBB2_1
 ; RV32-BOTH-NEXT:  # %bb.2: # %split
@@ -217,18 +217,18 @@ define void @memset_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; RV32-BOTH-NEXT:    or a3, a2, a3
 ; RV32-BOTH-NEXT:    beqz a3, .LBB3_3
 ; RV32-BOTH-NEXT:  # %bb.1: # %storeloop.preheader
-; RV32-BOTH-NEXT:    lw a3, 12(a1)
-; RV32-BOTH-NEXT:    lw a4, 8(a1)
-; RV32-BOTH-NEXT:    lw a5, 4(a1)
-; RV32-BOTH-NEXT:    lw a1, 0(a1)
+; RV32-BOTH-NEXT:    lw a3, 0(a1)
+; RV32-BOTH-NEXT:    lw a4, 4(a1)
+; RV32-BOTH-NEXT:    lw a5, 8(a1)
+; RV32-BOTH-NEXT:    lw a1, 12(a1)
 ; RV32-BOTH-NEXT:    slli a2, a2, 4
 ; RV32-BOTH-NEXT:    add a2, a0, a2
 ; RV32-BOTH-NEXT:  .LBB3_2: # %storeloop
 ; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    sw a1, 0(a0)
-; RV32-BOTH-NEXT:    sw a5, 4(a0)
-; RV32-BOTH-NEXT:    sw a4, 8(a0)
-; RV32-BOTH-NEXT:    sw a3, 12(a0)
+; RV32-BOTH-NEXT:    sw a3, 0(a0)
+; RV32-BOTH-NEXT:    sw a4, 4(a0)
+; RV32-BOTH-NEXT:    sw a5, 8(a0)
+; RV32-BOTH-NEXT:    sw a1, 12(a0)
 ; RV32-BOTH-NEXT:    addi a0, a0, 16
 ; RV32-BOTH-NEXT:    bne a0, a2, .LBB3_2
 ; RV32-BOTH-NEXT:  .LBB3_3: # %split

>From 03e07d55eb8b730a49f135521d7740f8db08fa17 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Fri, 8 Nov 2024 15:24:22 +0000
Subject: [PATCH 19/24] Use the normal createMemSetLoop helper for memset.pattern

As pointed out in review, after the various changes to memset.pattern
semantics since the first version, the two expansions are now effectively
identical.
---
 .../Transforms/Utils/LowerMemIntrinsics.cpp   |  52 ----
 llvm/test/CodeGen/RISCV/memset-pattern.ll     | 244 +++++++++++-------
 .../PowerPC/memset-pattern.ll                 |  15 +-
 .../RISCV/memset-pattern.ll                   | 105 ++++----
 4 files changed, 200 insertions(+), 216 deletions(-)

diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index 7aa815db61c27f..546217093550a2 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -824,48 +824,6 @@ static void createMemMoveLoopKnownSize(Instruction *InsertBefore,
   }
 }
 
-static void createMemSetPatternLoop(Instruction *InsertBefore, Value *DstAddr,
-                                    Value *Count, Value *SetValue,
-                                    Align DstAlign, bool IsVolatile) {
-  BasicBlock *OrigBB = InsertBefore->getParent();
-  Function *F = OrigBB->getParent();
-  const DataLayout &DL = F->getDataLayout();
-
-  Type *TypeOfCount = Count->getType();
-
-  BasicBlock *NewBB = OrigBB->splitBasicBlock(InsertBefore, "split");
-  BasicBlock *LoopBB =
-      BasicBlock::Create(F->getContext(), "storeloop", F, NewBB);
-  IRBuilder<> Builder(OrigBB->getTerminator());
-
-  Builder.CreateCondBr(
-      Builder.CreateICmpEQ(ConstantInt::get(TypeOfCount, 0), Count), NewBB,
-      LoopBB);
-  OrigBB->getTerminator()->eraseFromParent();
-
-  IRBuilder<> LoopBuilder(LoopBB);
-  PHINode *CurrentDst = LoopBuilder.CreatePHI(DstAddr->getType(), 0);
-  CurrentDst->addIncoming(DstAddr, OrigBB);
-  PHINode *LoopCount = LoopBuilder.CreatePHI(TypeOfCount, 0);
-  LoopCount->addIncoming(Count, OrigBB);
-
-  unsigned PatSize = DL.getTypeStoreSize(SetValue->getType());
-  Align PatAlign(commonAlignment(DstAlign, PatSize));
-  LoopBuilder.CreateAlignedStore(SetValue, CurrentDst, PatAlign, IsVolatile);
-
-  Value *NextDst = LoopBuilder.CreateInBoundsGEP(
-      SetValue->getType(), CurrentDst, ConstantInt::get(TypeOfCount, 1));
-  CurrentDst->addIncoming(NextDst, LoopBB);
-
-  Value *NewLoopCount =
-      LoopBuilder.CreateSub(LoopCount, ConstantInt::get(TypeOfCount, 1));
-  LoopCount->addIncoming(NewLoopCount, LoopBB);
-
-  LoopBuilder.CreateCondBr(
-      LoopBuilder.CreateICmpNE(NewLoopCount, ConstantInt::get(TypeOfCount, 0)),
-      LoopBB, NewBB);
-}
-
 static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr,
                              Value *CopyLen, Value *SetValue, Align DstAlign,
                              bool IsVolatile) {
@@ -1004,16 +962,6 @@ bool llvm::expandMemMoveAsLoop(MemMoveInst *Memmove,
 }
 
 void llvm::expandMemSetAsLoop(MemSetInst *Memset) {
-  if (isa<MemSetPatternInst>(Memset)) {
-    return createMemSetPatternLoop(
-        /* InsertBefore */ Memset,
-        /* DstAddr */ Memset->getRawDest(),
-        /* Count */ Memset->getLength(),
-        /* SetValue */ Memset->getValue(),
-        /* Alignment */ Memset->getDestAlign().valueOrOne(),
-        Memset->isVolatile());
-  }
-
   createMemSetLoop(/* InsertBefore */ Memset,
                    /* DstAddr */ Memset->getRawDest(),
                    /* CopyLen */ Memset->getLength(),
diff --git a/llvm/test/CodeGen/RISCV/memset-pattern.ll b/llvm/test/CodeGen/RISCV/memset-pattern.ll
index a112396abb74f7..f89b11c90e44c3 100644
--- a/llvm/test/CodeGen/RISCV/memset-pattern.ll
+++ b/llvm/test/CodeGen/RISCV/memset-pattern.ll
@@ -13,27 +13,33 @@
 
 define void @memset_1(ptr %a, i128 %value) nounwind {
 ; RV32-BOTH-LABEL: memset_1:
-; RV32-BOTH:       # %bb.0: # %storeloop.preheader
+; RV32-BOTH:       # %bb.0: # %loadstoreloop.preheader
 ; RV32-BOTH-NEXT:    lw a2, 0(a1)
 ; RV32-BOTH-NEXT:    lw a3, 4(a1)
 ; RV32-BOTH-NEXT:    lw a4, 8(a1)
 ; RV32-BOTH-NEXT:    lw a1, 12(a1)
-; RV32-BOTH-NEXT:    addi a5, a0, 16
-; RV32-BOTH-NEXT:  .LBB0_1: # %storeloop
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB0_1: # %loadstoreloop
 ; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    sw a2, 0(a0)
-; RV32-BOTH-NEXT:    sw a3, 4(a0)
-; RV32-BOTH-NEXT:    sw a4, 8(a0)
-; RV32-BOTH-NEXT:    sw a1, 12(a0)
-; RV32-BOTH-NEXT:    addi a0, a0, 16
-; RV32-BOTH-NEXT:    bne a0, a5, .LBB0_1
+; RV32-BOTH-NEXT:    slli a7, a5, 4
+; RV32-BOTH-NEXT:    add a7, a0, a7
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz t0, a5
+; RV32-BOTH-NEXT:    add a6, a6, t0
+; RV32-BOTH-NEXT:    or t0, a5, a6
+; RV32-BOTH-NEXT:    sw a2, 0(a7)
+; RV32-BOTH-NEXT:    sw a3, 4(a7)
+; RV32-BOTH-NEXT:    sw a4, 8(a7)
+; RV32-BOTH-NEXT:    sw a1, 12(a7)
+; RV32-BOTH-NEXT:    beqz t0, .LBB0_1
 ; RV32-BOTH-NEXT:  # %bb.2: # %split
 ; RV32-BOTH-NEXT:    ret
 ;
 ; RV64-BOTH-LABEL: memset_1:
-; RV64-BOTH:       # %bb.0: # %storeloop.preheader
+; RV64-BOTH:       # %bb.0: # %loadstoreloop.preheader
 ; RV64-BOTH-NEXT:    addi a3, a0, 16
-; RV64-BOTH-NEXT:  .LBB0_1: # %storeloop
+; RV64-BOTH-NEXT:  .LBB0_1: # %loadstoreloop
 ; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-BOTH-NEXT:    sd a1, 0(a0)
 ; RV64-BOTH-NEXT:    sd a2, 8(a0)
@@ -47,57 +53,69 @@ define void @memset_1(ptr %a, i128 %value) nounwind {
 
 define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 ; RV32-LABEL: memset_1_noalign:
-; RV32:       # %bb.0: # %storeloop.preheader
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s2, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT:    lw a2, 0(a1)
-; RV32-NEXT:    lw a3, 4(a1)
-; RV32-NEXT:    lw a4, 8(a1)
+; RV32:       # %bb.0: # %loadstoreloop.preheader
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    sw s0, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s4, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s5, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 0
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    lw a4, 4(a1)
+; RV32-NEXT:    lw a5, 0(a1)
+; RV32-NEXT:    lw a6, 8(a1)
 ; RV32-NEXT:    lw a1, 12(a1)
-; RV32-NEXT:    addi a5, a0, 16
-; RV32-NEXT:    srli a6, a2, 24
-; RV32-NEXT:    srli a7, a2, 16
-; RV32-NEXT:    srli t0, a2, 8
-; RV32-NEXT:    srli t1, a3, 24
-; RV32-NEXT:    srli t2, a3, 16
-; RV32-NEXT:    srli t3, a3, 8
-; RV32-NEXT:    srli t4, a4, 24
-; RV32-NEXT:    srli t5, a4, 16
-; RV32-NEXT:    srli t6, a4, 8
-; RV32-NEXT:    srli s0, a1, 24
-; RV32-NEXT:    srli s1, a1, 16
-; RV32-NEXT:    srli s2, a1, 8
-; RV32-NEXT:  .LBB1_1: # %storeloop
+; RV32-NEXT:    srli a7, a4, 24
+; RV32-NEXT:    srli t0, a4, 16
+; RV32-NEXT:    srli t1, a4, 8
+; RV32-NEXT:    srli t2, a5, 24
+; RV32-NEXT:    srli t3, a5, 16
+; RV32-NEXT:    srli t4, a5, 8
+; RV32-NEXT:    srli t5, a6, 24
+; RV32-NEXT:    srli t6, a6, 16
+; RV32-NEXT:    srli s0, a6, 8
+; RV32-NEXT:    srli s1, a1, 24
+; RV32-NEXT:    srli s2, a1, 16
+; RV32-NEXT:    srli s3, a1, 8
+; RV32-NEXT:  .LBB1_1: # %loadstoreloop
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    sb a2, 0(a0)
-; RV32-NEXT:    sb t0, 1(a0)
-; RV32-NEXT:    sb a7, 2(a0)
-; RV32-NEXT:    sb a6, 3(a0)
-; RV32-NEXT:    sb a3, 4(a0)
-; RV32-NEXT:    sb t3, 5(a0)
-; RV32-NEXT:    sb t2, 6(a0)
-; RV32-NEXT:    sb t1, 7(a0)
-; RV32-NEXT:    sb a4, 8(a0)
-; RV32-NEXT:    sb t6, 9(a0)
-; RV32-NEXT:    sb t5, 10(a0)
-; RV32-NEXT:    sb t4, 11(a0)
-; RV32-NEXT:    sb a1, 12(a0)
-; RV32-NEXT:    sb s2, 13(a0)
-; RV32-NEXT:    sb s1, 14(a0)
-; RV32-NEXT:    sb s0, 15(a0)
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    bne a0, a5, .LBB1_1
+; RV32-NEXT:    slli s4, a2, 4
+; RV32-NEXT:    add s4, a0, s4
+; RV32-NEXT:    sb a4, 4(s4)
+; RV32-NEXT:    sb t1, 5(s4)
+; RV32-NEXT:    sb t0, 6(s4)
+; RV32-NEXT:    sb a7, 7(s4)
+; RV32-NEXT:    sb a5, 0(s4)
+; RV32-NEXT:    sb t4, 1(s4)
+; RV32-NEXT:    sb t3, 2(s4)
+; RV32-NEXT:    sb t2, 3(s4)
+; RV32-NEXT:    sb a6, 8(s4)
+; RV32-NEXT:    sb s0, 9(s4)
+; RV32-NEXT:    sb t6, 10(s4)
+; RV32-NEXT:    sb t5, 11(s4)
+; RV32-NEXT:    addi a2, a2, 1
+; RV32-NEXT:    seqz s5, a2
+; RV32-NEXT:    add a3, a3, s5
+; RV32-NEXT:    or s5, a2, a3
+; RV32-NEXT:    sb a1, 12(s4)
+; RV32-NEXT:    sb s3, 13(s4)
+; RV32-NEXT:    sb s2, 14(s4)
+; RV32-NEXT:    sb s1, 15(s4)
+; RV32-NEXT:    beqz s5, .LBB1_1
 ; RV32-NEXT:  # %bb.2: # %split
-; RV32-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    lw s0, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s3, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s4, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s5, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: memset_1_noalign:
-; RV64:       # %bb.0: # %storeloop.preheader
+; RV64:       # %bb.0: # %loadstoreloop.preheader
 ; RV64-NEXT:    addi sp, sp, -32
 ; RV64-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
@@ -117,7 +135,7 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 ; RV64-NEXT:    srli s0, a2, 24
 ; RV64-NEXT:    srli s1, a2, 16
 ; RV64-NEXT:    srli s2, a2, 8
-; RV64-NEXT:  .LBB1_1: # %storeloop
+; RV64-NEXT:  .LBB1_1: # %loadstoreloop
 ; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-NEXT:    sb a7, 4(a0)
 ; RV64-NEXT:    sb a6, 5(a0)
@@ -145,27 +163,33 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 ; RV64-NEXT:    ret
 ;
 ; RV32-FAST-LABEL: memset_1_noalign:
-; RV32-FAST:       # %bb.0: # %storeloop.preheader
+; RV32-FAST:       # %bb.0: # %loadstoreloop.preheader
 ; RV32-FAST-NEXT:    lw a2, 0(a1)
 ; RV32-FAST-NEXT:    lw a3, 4(a1)
 ; RV32-FAST-NEXT:    lw a4, 8(a1)
 ; RV32-FAST-NEXT:    lw a1, 12(a1)
-; RV32-FAST-NEXT:    addi a5, a0, 16
-; RV32-FAST-NEXT:  .LBB1_1: # %storeloop
+; RV32-FAST-NEXT:    li a5, 0
+; RV32-FAST-NEXT:    li a6, 0
+; RV32-FAST-NEXT:  .LBB1_1: # %loadstoreloop
 ; RV32-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-FAST-NEXT:    sw a2, 0(a0)
-; RV32-FAST-NEXT:    sw a3, 4(a0)
-; RV32-FAST-NEXT:    sw a4, 8(a0)
-; RV32-FAST-NEXT:    sw a1, 12(a0)
-; RV32-FAST-NEXT:    addi a0, a0, 16
-; RV32-FAST-NEXT:    bne a0, a5, .LBB1_1
+; RV32-FAST-NEXT:    slli a7, a5, 4
+; RV32-FAST-NEXT:    add a7, a0, a7
+; RV32-FAST-NEXT:    addi a5, a5, 1
+; RV32-FAST-NEXT:    seqz t0, a5
+; RV32-FAST-NEXT:    add a6, a6, t0
+; RV32-FAST-NEXT:    or t0, a5, a6
+; RV32-FAST-NEXT:    sw a2, 0(a7)
+; RV32-FAST-NEXT:    sw a3, 4(a7)
+; RV32-FAST-NEXT:    sw a4, 8(a7)
+; RV32-FAST-NEXT:    sw a1, 12(a7)
+; RV32-FAST-NEXT:    beqz t0, .LBB1_1
 ; RV32-FAST-NEXT:  # %bb.2: # %split
 ; RV32-FAST-NEXT:    ret
 ;
 ; RV64-FAST-LABEL: memset_1_noalign:
-; RV64-FAST:       # %bb.0: # %storeloop.preheader
+; RV64-FAST:       # %bb.0: # %loadstoreloop.preheader
 ; RV64-FAST-NEXT:    addi a3, a0, 16
-; RV64-FAST-NEXT:  .LBB1_1: # %storeloop
+; RV64-FAST-NEXT:  .LBB1_1: # %loadstoreloop
 ; RV64-FAST-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-FAST-NEXT:    sd a1, 0(a0)
 ; RV64-FAST-NEXT:    sd a2, 8(a0)
@@ -179,27 +203,35 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 
 define void @memset_4(ptr %a, i128 %value) nounwind {
 ; RV32-BOTH-LABEL: memset_4:
-; RV32-BOTH:       # %bb.0: # %storeloop.preheader
+; RV32-BOTH:       # %bb.0: # %loadstoreloop.preheader
 ; RV32-BOTH-NEXT:    lw a2, 0(a1)
 ; RV32-BOTH-NEXT:    lw a3, 4(a1)
 ; RV32-BOTH-NEXT:    lw a4, 8(a1)
 ; RV32-BOTH-NEXT:    lw a1, 12(a1)
-; RV32-BOTH-NEXT:    addi a5, a0, 64
-; RV32-BOTH-NEXT:  .LBB2_1: # %storeloop
+; RV32-BOTH-NEXT:    li a5, 0
+; RV32-BOTH-NEXT:    li a6, 0
+; RV32-BOTH-NEXT:  .LBB2_1: # %loadstoreloop
 ; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    sw a2, 0(a0)
-; RV32-BOTH-NEXT:    sw a3, 4(a0)
-; RV32-BOTH-NEXT:    sw a4, 8(a0)
-; RV32-BOTH-NEXT:    sw a1, 12(a0)
-; RV32-BOTH-NEXT:    addi a0, a0, 16
-; RV32-BOTH-NEXT:    bne a0, a5, .LBB2_1
+; RV32-BOTH-NEXT:    slli a7, a5, 4
+; RV32-BOTH-NEXT:    add a7, a0, a7
+; RV32-BOTH-NEXT:    addi a5, a5, 1
+; RV32-BOTH-NEXT:    seqz t0, a5
+; RV32-BOTH-NEXT:    add a6, a6, t0
+; RV32-BOTH-NEXT:    seqz t0, a6
+; RV32-BOTH-NEXT:    sltiu t1, a5, 4
+; RV32-BOTH-NEXT:    and t0, t0, t1
+; RV32-BOTH-NEXT:    sw a2, 0(a7)
+; RV32-BOTH-NEXT:    sw a3, 4(a7)
+; RV32-BOTH-NEXT:    sw a4, 8(a7)
+; RV32-BOTH-NEXT:    sw a1, 12(a7)
+; RV32-BOTH-NEXT:    bnez t0, .LBB2_1
 ; RV32-BOTH-NEXT:  # %bb.2: # %split
 ; RV32-BOTH-NEXT:    ret
 ;
 ; RV64-BOTH-LABEL: memset_4:
-; RV64-BOTH:       # %bb.0: # %storeloop.preheader
+; RV64-BOTH:       # %bb.0: # %loadstoreloop.preheader
 ; RV64-BOTH-NEXT:    addi a3, a0, 64
-; RV64-BOTH-NEXT:  .LBB2_1: # %storeloop
+; RV64-BOTH-NEXT:  .LBB2_1: # %loadstoreloop
 ; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-BOTH-NEXT:    sd a1, 0(a0)
 ; RV64-BOTH-NEXT:    sd a2, 8(a0)
@@ -214,38 +246,50 @@ define void @memset_4(ptr %a, i128 %value) nounwind {
 define void @memset_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; RV32-BOTH-LABEL: memset_x:
 ; RV32-BOTH:       # %bb.0:
-; RV32-BOTH-NEXT:    or a3, a2, a3
-; RV32-BOTH-NEXT:    beqz a3, .LBB3_3
-; RV32-BOTH-NEXT:  # %bb.1: # %storeloop.preheader
-; RV32-BOTH-NEXT:    lw a3, 0(a1)
-; RV32-BOTH-NEXT:    lw a4, 4(a1)
-; RV32-BOTH-NEXT:    lw a5, 8(a1)
+; RV32-BOTH-NEXT:    or a4, a2, a3
+; RV32-BOTH-NEXT:    beqz a4, .LBB3_5
+; RV32-BOTH-NEXT:  # %bb.1: # %loadstoreloop.preheader
+; RV32-BOTH-NEXT:    lw a4, 0(a1)
+; RV32-BOTH-NEXT:    lw a5, 4(a1)
+; RV32-BOTH-NEXT:    lw a6, 8(a1)
 ; RV32-BOTH-NEXT:    lw a1, 12(a1)
-; RV32-BOTH-NEXT:    slli a2, a2, 4
-; RV32-BOTH-NEXT:    add a2, a0, a2
-; RV32-BOTH-NEXT:  .LBB3_2: # %storeloop
+; RV32-BOTH-NEXT:    li a7, 0
+; RV32-BOTH-NEXT:    li t0, 0
+; RV32-BOTH-NEXT:    j .LBB3_3
+; RV32-BOTH-NEXT:  .LBB3_2: # %loadstoreloop
+; RV32-BOTH-NEXT:    # in Loop: Header=BB3_3 Depth=1
+; RV32-BOTH-NEXT:    sltu t1, t0, a3
+; RV32-BOTH-NEXT:    beqz t1, .LBB3_5
+; RV32-BOTH-NEXT:  .LBB3_3: # %loadstoreloop
 ; RV32-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-BOTH-NEXT:    sw a3, 0(a0)
-; RV32-BOTH-NEXT:    sw a4, 4(a0)
-; RV32-BOTH-NEXT:    sw a5, 8(a0)
-; RV32-BOTH-NEXT:    sw a1, 12(a0)
-; RV32-BOTH-NEXT:    addi a0, a0, 16
-; RV32-BOTH-NEXT:    bne a0, a2, .LBB3_2
-; RV32-BOTH-NEXT:  .LBB3_3: # %split
+; RV32-BOTH-NEXT:    slli t1, a7, 4
+; RV32-BOTH-NEXT:    add t1, a0, t1
+; RV32-BOTH-NEXT:    addi a7, a7, 1
+; RV32-BOTH-NEXT:    seqz t2, a7
+; RV32-BOTH-NEXT:    add t0, t0, t2
+; RV32-BOTH-NEXT:    sw a4, 0(t1)
+; RV32-BOTH-NEXT:    sw a5, 4(t1)
+; RV32-BOTH-NEXT:    sw a6, 8(t1)
+; RV32-BOTH-NEXT:    sw a1, 12(t1)
+; RV32-BOTH-NEXT:    bne t0, a3, .LBB3_2
+; RV32-BOTH-NEXT:  # %bb.4: # in Loop: Header=BB3_3 Depth=1
+; RV32-BOTH-NEXT:    sltu t1, a7, a2
+; RV32-BOTH-NEXT:    bnez t1, .LBB3_3
+; RV32-BOTH-NEXT:  .LBB3_5: # %split
 ; RV32-BOTH-NEXT:    ret
 ;
 ; RV64-BOTH-LABEL: memset_x:
 ; RV64-BOTH:       # %bb.0:
 ; RV64-BOTH-NEXT:    beqz a3, .LBB3_3
-; RV64-BOTH-NEXT:  # %bb.1: # %storeloop.preheader
-; RV64-BOTH-NEXT:    slli a3, a3, 4
-; RV64-BOTH-NEXT:    add a3, a0, a3
-; RV64-BOTH-NEXT:  .LBB3_2: # %storeloop
+; RV64-BOTH-NEXT:  # %bb.1: # %loadstoreloop.preheader
+; RV64-BOTH-NEXT:    li a4, 0
+; RV64-BOTH-NEXT:  .LBB3_2: # %loadstoreloop
 ; RV64-BOTH-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV64-BOTH-NEXT:    sd a1, 0(a0)
 ; RV64-BOTH-NEXT:    sd a2, 8(a0)
+; RV64-BOTH-NEXT:    addi a4, a4, 1
 ; RV64-BOTH-NEXT:    addi a0, a0, 16
-; RV64-BOTH-NEXT:    bne a0, a3, .LBB3_2
+; RV64-BOTH-NEXT:    bltu a4, a3, .LBB3_2
 ; RV64-BOTH-NEXT:  .LBB3_3: # %split
 ; RV64-BOTH-NEXT:    ret
   tail call void @llvm.memset.pattern(ptr align 8 %a, i128 %value, i64 %x, i1 0)
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
index aa2dbb5d520725..7f9b1536cda152 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
@@ -8,15 +8,14 @@ define void @memset.pattern(ptr %a, i128 %value, i64 %x) nounwind {
 ; CHECK-LABEL: define void @memset.pattern(
 ; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
-; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP2:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP3:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP5:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[LOADSTORELOOP:.*]]
+; CHECK:       [[LOADSTORELOOP]]:
+; CHECK-NEXT:    [[TMP3:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP4:%.*]], %[[LOADSTORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i128, ptr [[A]], i64 [[TMP3]]
 ; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP2]], align 1
-; CHECK-NEXT:    [[TMP4]] = getelementptr inbounds i128, ptr [[TMP2]], i64 1
-; CHECK-NEXT:    [[TMP5]] = sub i64 [[TMP3]], 1
-; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[TMP6]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    [[TMP4]] = add i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i64 [[TMP4]], [[X]]
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[LOADSTORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
index 1ead4ab570ad86..041f1630ffd145 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
@@ -4,15 +4,14 @@
 define void @memset_pattern_i128_1(ptr %a, i128 %value) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i128_1(
 ; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 1, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[LOADSTORELOOP:.*]]
+; CHECK:       [[LOADSTORELOOP]]:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[LOADSTORELOOP]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i128, ptr [[A]], i64 [[TMP2]]
 ; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
-; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 1
-; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    [[TMP3]] = add i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 1
+; CHECK-NEXT:    br i1 [[TMP4]], label %[[LOADSTORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -23,15 +22,14 @@ define void @memset_pattern_i128_1(ptr %a, i128 %value) nounwind {
 define void @memset_pattern_i128_16(ptr %a, i128 %value) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i128_16(
 ; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP1:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 16, [[TMP0]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 false, label %[[SPLIT:.*]], label %[[LOADSTORELOOP:.*]]
+; CHECK:       [[LOADSTORELOOP]]:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP3:%.*]], %[[LOADSTORELOOP]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i128, ptr [[A]], i64 [[TMP2]]
 ; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP1]], align 1
-; CHECK-NEXT:    [[TMP3]] = getelementptr inbounds i128, ptr [[TMP1]], i64 1
-; CHECK-NEXT:    [[TMP4]] = sub i64 [[TMP2]], 1
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    [[TMP3]] = add i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 16
+; CHECK-NEXT:    br i1 [[TMP4]], label %[[LOADSTORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -43,15 +41,14 @@ define void @memset_pattern_i127_x(ptr %a, i127 %value, i64 %x) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i127_x(
 ; CHECK-SAME: ptr [[A:%.*]], i127 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
-; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP2:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP3:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP5:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[LOADSTORELOOP:.*]]
+; CHECK:       [[LOADSTORELOOP]]:
+; CHECK-NEXT:    [[TMP3:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP4:%.*]], %[[LOADSTORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i127, ptr [[A]], i64 [[TMP3]]
 ; CHECK-NEXT:    store i127 [[VALUE]], ptr [[TMP2]], align 1
-; CHECK-NEXT:    [[TMP4]] = getelementptr inbounds i127, ptr [[TMP2]], i64 1
-; CHECK-NEXT:    [[TMP5]] = sub i64 [[TMP3]], 1
-; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[TMP6]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    [[TMP4]] = add i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i64 [[TMP4]], [[X]]
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[LOADSTORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -63,15 +60,14 @@ define void @memset_pattern_i128_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i128_x(
 ; CHECK-SAME: ptr [[A:%.*]], i128 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
-; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP4:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP5:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP7:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[LOADSTORELOOP:.*]]
+; CHECK:       [[LOADSTORELOOP]]:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP6:%.*]], %[[LOADSTORELOOP]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i128, ptr [[A]], i64 [[TMP2]]
 ; CHECK-NEXT:    store i128 [[VALUE]], ptr [[TMP4]], align 1
-; CHECK-NEXT:    [[TMP6]] = getelementptr inbounds i128, ptr [[TMP4]], i64 1
-; CHECK-NEXT:    [[TMP7]] = sub i64 [[TMP5]], 1
-; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[TMP8]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    [[TMP6]] = add i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i64 [[TMP6]], [[X]]
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[LOADSTORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -83,15 +79,14 @@ define void @memset_pattern_i256_x(ptr %a, i256 %value, i64 %x) nounwind {
 ; CHECK-LABEL: define void @memset_pattern_i256_x(
 ; CHECK-SAME: ptr [[A:%.*]], i256 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
-; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP4:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP5:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP7:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[LOADSTORELOOP:.*]]
+; CHECK:       [[LOADSTORELOOP]]:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP6:%.*]], %[[LOADSTORELOOP]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i256, ptr [[A]], i64 [[TMP2]]
 ; CHECK-NEXT:    store i256 [[VALUE]], ptr [[TMP4]], align 1
-; CHECK-NEXT:    [[TMP6]] = getelementptr inbounds i256, ptr [[TMP4]], i64 1
-; CHECK-NEXT:    [[TMP7]] = sub i64 [[TMP5]], 1
-; CHECK-NEXT:    [[TMP8:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[TMP8]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    [[TMP6]] = add i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i64 [[TMP6]], [[X]]
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[LOADSTORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
@@ -105,26 +100,24 @@ define void @memset_pattern_i15_x_alignment(ptr %a, i15 %value, i64 %x) nounwind
 ; CHECK-LABEL: define void @memset_pattern_i15_x_alignment(
 ; CHECK-SAME: ptr [[A:%.*]], i15 [[VALUE:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 0, [[X]]
-; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[STORELOOP:.*]]
-; CHECK:       [[STORELOOP]]:
-; CHECK-NEXT:    [[TMP2:%.*]] = phi ptr [ [[A]], [[TMP0:%.*]] ], [ [[TMP4:%.*]], %[[STORELOOP]] ]
-; CHECK-NEXT:    [[TMP3:%.*]] = phi i64 [ [[X]], [[TMP0]] ], [ [[TMP5:%.*]], %[[STORELOOP]] ]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[SPLIT:.*]], label %[[LOADSTORELOOP:.*]]
+; CHECK:       [[LOADSTORELOOP]]:
+; CHECK-NEXT:    [[TMP3:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP4:%.*]], %[[LOADSTORELOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i15, ptr [[A]], i64 [[TMP3]]
 ; CHECK-NEXT:    store i15 [[VALUE]], ptr [[TMP2]], align 1
-; CHECK-NEXT:    [[TMP4]] = getelementptr inbounds i15, ptr [[TMP2]], i64 1
-; CHECK-NEXT:    [[TMP5]] = sub i64 [[TMP3]], 1
-; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[TMP6]], label %[[STORELOOP]], label %[[SPLIT]]
+; CHECK-NEXT:    [[TMP4]] = add i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i64 [[TMP4]], [[X]]
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[LOADSTORELOOP]], label %[[SPLIT]]
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 0, [[X]]
-; CHECK-NEXT:    br i1 [[TMP7]], label %[[SPLIT1:.*]], label %[[STORELOOP2:.*]]
-; CHECK:       [[STORELOOP2]]:
-; CHECK-NEXT:    [[TMP8:%.*]] = phi ptr [ [[A]], %[[SPLIT]] ], [ [[TMP10:%.*]], %[[STORELOOP2]] ]
-; CHECK-NEXT:    [[TMP9:%.*]] = phi i64 [ [[X]], %[[SPLIT]] ], [ [[TMP11:%.*]], %[[STORELOOP2]] ]
+; CHECK-NEXT:    br i1 [[TMP7]], label %[[SPLIT1:.*]], label %[[LOADSTORELOOP2:.*]]
+; CHECK:       [[LOADSTORELOOP2]]:
+; CHECK-NEXT:    [[TMP11:%.*]] = phi i64 [ 0, %[[SPLIT]] ], [ [[TMP9:%.*]], %[[LOADSTORELOOP2]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i15, ptr [[A]], i64 [[TMP11]]
 ; CHECK-NEXT:    store i15 [[VALUE]], ptr [[TMP8]], align 2
-; CHECK-NEXT:    [[TMP10]] = getelementptr inbounds i15, ptr [[TMP8]], i64 1
-; CHECK-NEXT:    [[TMP11]] = sub i64 [[TMP9]], 1
-; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
-; CHECK-NEXT:    br i1 [[TMP12]], label %[[STORELOOP2]], label %[[SPLIT1]]
+; CHECK-NEXT:    [[TMP9]] = add i64 [[TMP11]], 1
+; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult i64 [[TMP9]], [[X]]
+; CHECK-NEXT:    br i1 [[TMP10]], label %[[LOADSTORELOOP2]], label %[[SPLIT1]]
 ; CHECK:       [[SPLIT1]]:
 ; CHECK-NEXT:    ret void
 ;

>From a7373b7931307c8b897ada88e61fd7bd26ceef96 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Fri, 8 Nov 2024 15:24:22 +0000
Subject: [PATCH 20/24] Rename to llvm.experimental.memset.pattern

---
 llvm/docs/LangRef.rst                         | 41 ++++++++++---------
 llvm/include/llvm/IR/InstVisitor.h            |  2 +-
 llvm/include/llvm/IR/IntrinsicInst.h          | 14 +++----
 llvm/include/llvm/IR/Intrinsics.td            |  2 +-
 llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp |  4 +-
 llvm/lib/IR/Verifier.cpp                      |  2 +-
 llvm/test/CodeGen/RISCV/memset-pattern.ll     |  8 ++--
 .../PowerPC/memset-pattern.ll                 |  2 +-
 .../RISCV/memset-pattern.ll                   | 14 +++----
 llvm/test/Verifier/intrinsic-immarg.ll        |  8 ++--
 llvm/test/Verifier/memset-pattern.ll          |  4 +-
 11 files changed, 50 insertions(+), 51 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index ec9435738e5bb2..0a4407bcb14fe8 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15434,28 +15434,29 @@ The behavior of '``llvm.memset.inline.*``' is equivalent to the behavior of
 '``llvm.memset.*``', but the generated code is guaranteed not to call any
 external functions.
 
-.. _int_memset_pattern:
+.. _int_experimental_memset_pattern:
 
-'``llvm.memset.pattern``' Intrinsic
+'``llvm.experimental.memset.pattern``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Syntax:
 """""""
 
-This is an overloaded intrinsic. You can use ``llvm.memset.pattern`` on
-any integer bit width that is an integral number of bytes and for different
-address spaces. Not all targets support all bit widths however.
+This is an overloaded intrinsic. You can use
+``llvm.experimental.memset.pattern`` on any integer bit width that is an
+integral number of bytes and for different address spaces. Not all targets
+support all bit widths however.
 
 ::
 
-      declare void @llvm.memset.pattern.p0.i128.i64(ptr <dest>, i128 <val>,
-                                                    i64 <count>, i1 <isvolatile>)
+      declare void @llvm.experimental.memset.pattern.p0.i128.i64(ptr <dest>, i128 <val>,
+                                                                 i64 <count>, i1 <isvolatile>)
 
 Overview:
 """""""""
 
-The '``llvm.memset.pattern.*``' intrinsics fill a block of memory with
-a particular value. This may be expanded to an inline loop, a sequence of
+The '``llvm.experimental.memset.pattern.*``' intrinsics fill a block of memory
+with a particular value. This may be expanded to an inline loop, a sequence of
 stores, or a libcall depending on what is available for the target and the
 expected performance and code size impact.
 
@@ -15471,21 +15472,21 @@ The :ref:`align <attr_align>` parameter attribute can be provided
 for the first argument.
 
 If the ``isvolatile`` parameter is ``true``, the
-``llvm.memset.pattern`` call is a :ref:`volatile operation <volatile>`. The
-detailed access behavior is not very cleanly specified and it is unwise to
-depend on it.
+``llvm.experimental.memset.pattern`` call is a :ref:`volatile operation
+<volatile>`. The detailed access behavior is not very cleanly specified and it
+is unwise to depend on it.
 
 Semantics:
 """"""""""
 
-The '``llvm.memset.pattern*``' intrinsic fills memory starting at the
-destination location with the given pattern ``<count>`` times. If the argument
-is known to be aligned to some boundary, this can be specified as an attribute
-on the argument. The pattern fills will respect the endianness of the target:
-i.e. on little endian targets, the least significant byte of the pattern is
-first in memory, while the most significant byte is first in memory for big
-endian targets. The memory address is incremented by the allocation size of
-the type.
+The '``llvm.experimental.memset.pattern*``' intrinsic fills memory starting at
+the destination location with the given pattern ``<count>`` times. If the
+argument is known to be aligned to some boundary, this can be specified as an
+attribute on the argument. The pattern fills will respect the endianness of
+the target: i.e. on little endian targets, the least significant byte of the
+pattern is first in memory, while the most significant byte is first in memory
+for big endian targets. The memory address is incremented by the allocation
+size of the type.
 
 If ``<count>`` is 0, it is a no-op modulo the behavior of attributes attached to
 the arguments.
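
(For illustration only, not part of the patch: a minimal call of the
renamed intrinsic, written without an explicit declare in the same
style as the tests in this patch, with made-up names. Per the
semantics above, this stores the 4-byte pattern value 42 eight times,
filling 32 consecutive bytes starting at %dst.)

    define void @fill32(ptr %dst) {
      call void @llvm.experimental.memset.pattern(ptr align 4 %dst, i32 42, i64 8, i1 0)
      ret void
    }
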
diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index aa4f0f36e4ed73..c356d957939841 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -296,7 +296,7 @@ class InstVisitor {
       case Intrinsic::memset:      DELEGATE(MemSetInst);
       case Intrinsic::memset_inline:
         DELEGATE(MemSetInlineInst);
-      case Intrinsic::memset_pattern:
+      case Intrinsic::experimental_memset_pattern:
         DELEGATE(MemSetPatternInst);
       case Intrinsic::vastart:     DELEGATE(VAStartInst);
       case Intrinsic::vaend:       DELEGATE(VAEndInst);
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index fcf7c8897f7eab..3edf645df84e1b 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1222,7 +1222,7 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
     case Intrinsic::memmove:
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
-    case Intrinsic::memset_pattern:
+    case Intrinsic::experimental_memset_pattern:
     case Intrinsic::memcpy_inline:
       return true;
     default:
@@ -1235,7 +1235,7 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
 };
 
 /// This class wraps the llvm.memset, llvm.memset.inline, and
-/// llvm.memset.pattern intrinsics.
+/// llvm.experimental.memset.pattern intrinsics.
 class MemSetInst : public MemSetBase<MemIntrinsic> {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -1243,7 +1243,7 @@ class MemSetInst : public MemSetBase<MemIntrinsic> {
     switch (I->getIntrinsicID()) {
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
-    case Intrinsic::memset_pattern:
+    case Intrinsic::experimental_memset_pattern:
       return true;
     default:
       return false;
@@ -1266,12 +1266,12 @@ class MemSetInlineInst : public MemSetInst {
   }
 };
 
-/// This class wraps the llvm.memset.pattern intrinsic.
+/// This class wraps the llvm.experimental.memset.pattern intrinsic.
 class MemSetPatternInst : public MemSetInst {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const IntrinsicInst *I) {
-    return I->getIntrinsicID() == Intrinsic::memset_pattern;
+    return I->getIntrinsicID() == Intrinsic::experimental_memset_pattern;
   }
   static bool classof(const Value *V) {
     return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
@@ -1354,7 +1354,7 @@ class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
     case Intrinsic::memmove:
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
-    case Intrinsic::memset_pattern:
+    case Intrinsic::experimental_memset_pattern:
     case Intrinsic::memcpy_element_unordered_atomic:
     case Intrinsic::memmove_element_unordered_atomic:
     case Intrinsic::memset_element_unordered_atomic:
@@ -1377,7 +1377,7 @@ class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> {
     switch (I->getIntrinsicID()) {
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
-    case Intrinsic::memset_pattern:
+    case Intrinsic::experimental_memset_pattern:
     case Intrinsic::memset_element_unordered_atomic:
       return true;
     default:
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index be79e145a5fe20..e46335688065a8 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1007,7 +1007,7 @@ def int_memset_inline
        ImmArg<ArgIndex<3>>]>;
 
 // Memset variant that writes a given pattern.
-def int_memset_pattern
+def int_experimental_memset_pattern
     : Intrinsic<[],
       [llvm_anyptr_ty, // Destination.
        llvm_anyint_ty, // Pattern value.
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index f1d63b32774ac6..47c632d4052795 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -320,7 +320,7 @@ bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
       Memset->eraseFromParent();
       break;
     }
-    case Intrinsic::memset_pattern: {
+    case Intrinsic::experimental_memset_pattern: {
       auto *Memset = cast<MemSetPatternInst>(Inst);
       expandMemSetAsLoop(Memset);
       Changed = true;
@@ -346,7 +346,7 @@ bool PreISelIntrinsicLowering::lowerIntrinsics(Module &M) const {
     case Intrinsic::memmove:
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
-    case Intrinsic::memset_pattern:
+    case Intrinsic::experimental_memset_pattern:
       Changed |= expandMemIntrinsicUses(F);
       break;
     case Intrinsic::load_relative:
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index b854664fe117b9..791391698ac271 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5520,7 +5520,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
   case Intrinsic::memmove:
   case Intrinsic::memset:
   case Intrinsic::memset_inline:
-  case Intrinsic::memset_pattern: {
+  case Intrinsic::experimental_memset_pattern: {
     break;
   }
   case Intrinsic::memcpy_element_unordered_atomic:
diff --git a/llvm/test/CodeGen/RISCV/memset-pattern.ll b/llvm/test/CodeGen/RISCV/memset-pattern.ll
index f89b11c90e44c3..14bdad0a88af48 100644
--- a/llvm/test/CodeGen/RISCV/memset-pattern.ll
+++ b/llvm/test/CodeGen/RISCV/memset-pattern.ll
@@ -47,7 +47,7 @@ define void @memset_1(ptr %a, i128 %value) nounwind {
 ; RV64-BOTH-NEXT:    bne a0, a3, .LBB0_1
 ; RV64-BOTH-NEXT:  # %bb.2: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset.pattern(ptr align 8 %a, i128 %value, i64 1, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr align 8 %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
@@ -197,7 +197,7 @@ define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
 ; RV64-FAST-NEXT:    bne a0, a3, .LBB1_1
 ; RV64-FAST-NEXT:  # %bb.2: # %split
 ; RV64-FAST-NEXT:    ret
-  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 1, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
@@ -239,7 +239,7 @@ define void @memset_4(ptr %a, i128 %value) nounwind {
 ; RV64-BOTH-NEXT:    bne a0, a3, .LBB2_1
 ; RV64-BOTH-NEXT:  # %bb.2: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset.pattern(ptr align 8 %a, i128 %value, i64 4, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr align 8 %a, i128 %value, i64 4, i1 0)
   ret void
 }
 
@@ -292,6 +292,6 @@ define void @memset_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; RV64-BOTH-NEXT:    bltu a4, a3, .LBB3_2
 ; RV64-BOTH-NEXT:  .LBB3_3: # %split
 ; RV64-BOTH-NEXT:    ret
-  tail call void @llvm.memset.pattern(ptr align 8 %a, i128 %value, i64 %x, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr align 8 %a, i128 %value, i64 %x, i1 0)
   ret void
 }
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
index 7f9b1536cda152..1f77c4a6051066 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/PowerPC/memset-pattern.ll
@@ -19,6 +19,6 @@ define void @memset.pattern(ptr %a, i128 %value, i64 %x) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 %x, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr %a, i128 %value, i64 %x, i1 0)
   ret void
 }
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
index 041f1630ffd145..d3ef9fe4cefbd2 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/RISCV/memset-pattern.ll
@@ -15,7 +15,7 @@ define void @memset_pattern_i128_1(ptr %a, i128 %value) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 1, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr %a, i128 %value, i64 1, i1 0)
   ret void
 }
 
@@ -33,7 +33,7 @@ define void @memset_pattern_i128_16(ptr %a, i128 %value) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 16, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr %a, i128 %value, i64 16, i1 0)
   ret void
 }
 
@@ -52,7 +52,7 @@ define void @memset_pattern_i127_x(ptr %a, i127 %value, i64 %x) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern(ptr %a, i127 %value, i64 %x, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr %a, i127 %value, i64 %x, i1 0)
   ret void
 }
 
@@ -71,7 +71,7 @@ define void @memset_pattern_i128_x(ptr %a, i128 %value, i64 %x) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern(ptr %a, i128 %value, i64 %x, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr %a, i128 %value, i64 %x, i1 0)
   ret void
 }
 
@@ -90,7 +90,7 @@ define void @memset_pattern_i256_x(ptr %a, i256 %value, i64 %x) nounwind {
 ; CHECK:       [[SPLIT]]:
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.memset.pattern(ptr %a, i256 %value, i64 %x, i1 0)
+  tail call void @llvm.experimental.memset.pattern(ptr %a, i256 %value, i64 %x, i1 0)
   ret void
 }
 
@@ -121,7 +121,7 @@ define void @memset_pattern_i15_x_alignment(ptr %a, i15 %value, i64 %x) nounwind
 ; CHECK:       [[SPLIT1]]:
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.pattern(ptr align 1 %a, i15 %value, i64 %x, i1 0)
-  call void @llvm.memset.pattern(ptr align 2 %a, i15 %value, i64 %x, i1 0)
+  call void @llvm.experimental.memset.pattern(ptr align 1 %a, i15 %value, i64 %x, i1 0)
+  call void @llvm.experimental.memset.pattern(ptr align 2 %a, i15 %value, i64 %x, i1 0)
   ret void
 }
diff --git a/llvm/test/Verifier/intrinsic-immarg.ll b/llvm/test/Verifier/intrinsic-immarg.ll
index 105f273938aa72..ab1286e8a3d760 100644
--- a/llvm/test/Verifier/intrinsic-immarg.ll
+++ b/llvm/test/Verifier/intrinsic-immarg.ll
@@ -63,17 +63,15 @@ define void @memset_inline_is_volatile(ptr %dest, i8 %value, i1 %is.volatile) {
   ret void
 }
 
-
-declare void @llvm.memset.pattern.p0.i32.i32(ptr nocapture, i32, i32, i1)
+declare void @llvm.experimental.memset.pattern.p0.i32.i32(ptr nocapture, i32, i32, i1)
 define void @memset_pattern_is_volatile(ptr %dest, i32 %value, i1 %is.volatile) {
   ; CHECK: immarg operand has non-immediate parameter
   ; CHECK-NEXT: i1 %is.volatile
-  ; CHECK-NEXT: call void @llvm.memset.pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
-  call void @llvm.memset.pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
+  ; CHECK-NEXT: call void @llvm.experimental.memset.pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
+  call void @llvm.experimental.memset.pattern.p0.i32.i32(ptr %dest, i32 %value, i32 8, i1 %is.volatile)
   ret void
 }
 
-
 declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
 define void @objectsize(ptr %ptr, i1 %a, i1 %b, i1 %c) {
   ; CHECK: immarg operand has non-immediate parameter
diff --git a/llvm/test/Verifier/memset-pattern.ll b/llvm/test/Verifier/memset-pattern.ll
index 93d9ee1cfba109..7f5301976b7490 100644
--- a/llvm/test/Verifier/memset-pattern.ll
+++ b/llvm/test/Verifier/memset-pattern.ll
@@ -3,7 +3,7 @@
 ; CHECK: alignment is not a power of two
 
 define void @foo(ptr %P, i32 %value) {
-  call void @llvm.memset.pattern.p0.i32.i32(ptr align 3 %P, i32 %value, i32 4, i1 false)
+  call void @llvm.experimental.memset.pattern.p0.i32.i32(ptr align 3 %P, i32 %value, i32 4, i1 false)
   ret void
 }
-declare void @llvm.memset.pattern.p0.i32.i32(ptr nocapture, i32, i32, i1) nounwind
+declare void @llvm.experimental.memset.pattern.p0.i32.i32(ptr nocapture, i32, i32, i1) nounwind

>From a68aa8d375bd27cd6315700d9720a605db091983 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Fri, 8 Nov 2024 15:24:22 +0000
Subject: [PATCH 21/24] Move MemSetPattern out of the MemSet hierarchy

---
 llvm/include/llvm/IR/InstVisitor.h            |  4 +-
 llvm/include/llvm/IR/IntrinsicInst.h          | 37 +++++++++++++++----
 .../Transforms/Utils/LowerMemIntrinsics.h     |  4 ++
 llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp |  2 +-
 .../Transforms/Utils/LowerMemIntrinsics.cpp   |  9 +++++
 5 files changed, 47 insertions(+), 9 deletions(-)

diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index c356d957939841..5fc6fbfd0f28e4 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -208,7 +208,9 @@ class InstVisitor {
   RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
   RetTy visitMemSetInst(MemSetInst &I)            { DELEGATE(MemIntrinsic); }
   RetTy visitMemSetInlineInst(MemSetInlineInst &I){ DELEGATE(MemSetInst); }
-  RetTy visitMemSetPatternInst(MemSetPatternInst &I) { DELEGATE(MemSetInst); }
+  RetTy visitMemSetPatternInst(MemSetPatternInst &I) {
+    DELEGATE(IntrinsicInst);
+  }
   RetTy visitMemCpyInst(MemCpyInst &I)            { DELEGATE(MemTransferInst); }
   RetTy visitMemCpyInlineInst(MemCpyInlineInst &I){ DELEGATE(MemCpyInst); }
   RetTy visitMemMoveInst(MemMoveInst &I)          { DELEGATE(MemTransferInst); }
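
(Sketch, not from the patch: one observable effect of the new
delegation. A visitor that only overrides visitMemSetInst no longer
sees pattern memsets; they are dispatched to visitMemSetPatternInst,
which now falls back to visitIntrinsicInst. PatternCounter is a
hypothetical example visitor.)

    #include "llvm/IR/InstVisitor.h"
    using namespace llvm;

    struct PatternCounter : InstVisitor<PatternCounter> {
      unsigned Count = 0;
      // Reached only for llvm.experimental.memset.pattern; plain
      // llvm.memset / llvm.memset.inline still hit visitMemSetInst.
      void visitMemSetPatternInst(MemSetPatternInst &I) { ++Count; }
    };
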
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 3edf645df84e1b..79b89f963fe4f0 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1222,7 +1222,6 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
     case Intrinsic::memmove:
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
-    case Intrinsic::experimental_memset_pattern:
     case Intrinsic::memcpy_inline:
       return true;
     default:
@@ -1234,8 +1233,7 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
   }
 };
 
-/// This class wraps the llvm.memset, llvm.memset.inline, and
-/// llvm.experimental.memset.pattern intrinsics.
+/// This class wraps the llvm.memset and llvm.memset.inline intrinsics.
 class MemSetInst : public MemSetBase<MemIntrinsic> {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -1243,7 +1241,6 @@ class MemSetInst : public MemSetBase<MemIntrinsic> {
     switch (I->getIntrinsicID()) {
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
-    case Intrinsic::experimental_memset_pattern:
       return true;
     default:
       return false;
@@ -1266,8 +1263,36 @@ class MemSetInlineInst : public MemSetInst {
   }
 };
 
+/// This is the base class for memset.pattern
+class MemSetPatternIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
+private:
+  enum { ARG_VOLATILE = 3 };
+
+public:
+  ConstantInt *getVolatileCst() const {
+    return cast<ConstantInt>(const_cast<Value *>(getArgOperand(ARG_VOLATILE)));
+  }
+
+  bool isVolatile() const { return !getVolatileCst()->isZero(); }
+
+  void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); }
+
+  // Methods for support of type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const IntrinsicInst *I) {
+    switch (I->getIntrinsicID()) {
+    case Intrinsic::experimental_memset_pattern:
+      return true;
+    default:
+      return false;
+    }
+  }
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
 /// This class wraps the llvm.experimental.memset.pattern intrinsic.
-class MemSetPatternInst : public MemSetInst {
+class MemSetPatternInst : public MemSetBase<MemSetPatternIntrinsic> {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const IntrinsicInst *I) {
@@ -1354,7 +1379,6 @@ class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
     case Intrinsic::memmove:
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
-    case Intrinsic::experimental_memset_pattern:
     case Intrinsic::memcpy_element_unordered_atomic:
     case Intrinsic::memmove_element_unordered_atomic:
     case Intrinsic::memset_element_unordered_atomic:
@@ -1377,7 +1401,6 @@ class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> {
     switch (I->getIntrinsicID()) {
     case Intrinsic::memset:
     case Intrinsic::memset_inline:
-    case Intrinsic::experimental_memset_pattern:
     case Intrinsic::memset_element_unordered_atomic:
       return true;
     default:
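
(A hedged usage sketch, not in the patch: with the pattern intrinsic
moved out of the MemSetInst hierarchy, dyn_cast<MemSetInst> now fails
on it and callers cast to MemSetPatternInst instead. The accessors
below all exist via the base classes named in the comments; inspect()
is a made-up helper.)

    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    static void inspect(Instruction &I) {
      if (auto *MSP = dyn_cast<MemSetPatternInst>(&I)) {
        Value *Dst = MSP->getRawDest();  // from MemIntrinsicBase
        Value *Pat = MSP->getValue();    // from MemSetBase
        Value *Len = MSP->getLength();   // from MemIntrinsicBase
        bool Vol = MSP->isVolatile();    // from MemSetPatternIntrinsic
        (void)Dst; (void)Pat; (void)Len; (void)Vol;
      }
    }
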
diff --git a/llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h b/llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
index 314435324b473b..1007d282b2ac5c 100644
--- a/llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
+++ b/llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
@@ -25,6 +25,7 @@ class Instruction;
 class MemCpyInst;
 class MemMoveInst;
 class MemSetInst;
+class MemSetPatternInst;
 class ScalarEvolution;
 class TargetTransformInfo;
 class Value;
@@ -57,6 +58,9 @@ bool expandMemMoveAsLoop(MemMoveInst *MemMove, const TargetTransformInfo &TTI);
 /// Expand \p MemSet as a loop. \p MemSet is not deleted.
 void expandMemSetAsLoop(MemSetInst *MemSet);
 
+/// Expand \p MemSet as a loop. \p MemSet is not deleted.
+void expandMemSetPatternAsLoop(MemSetPatternInst *MemSet);
+
 /// Expand \p AtomicMemCpy as a loop. \p AtomicMemCpy is not deleted.
 void expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemCpy,
                               const TargetTransformInfo &TTI,
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 47c632d4052795..4a3d1673c2a7c1 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -322,7 +322,7 @@ bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
     }
     case Intrinsic::experimental_memset_pattern: {
       auto *Memset = cast<MemSetPatternInst>(Inst);
-      expandMemSetAsLoop(Memset);
+      expandMemSetPatternAsLoop(Memset);
       Changed = true;
       Memset->eraseFromParent();
       break;
diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index 546217093550a2..91291b429ea434 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -970,6 +970,15 @@ void llvm::expandMemSetAsLoop(MemSetInst *Memset) {
                    Memset->isVolatile());
 }
 
+void llvm::expandMemSetPatternAsLoop(MemSetPatternInst *Memset) {
+  createMemSetLoop(/* InsertBefore=*/Memset,
+                   /* DstAddr=*/Memset->getRawDest(),
+                   /* CopyLen=*/Memset->getLength(),
+                   /* SetValue=*/Memset->getValue(),
+                   /* Alignment=*/Memset->getDestAlign().valueOrOne(),
+                   Memset->isVolatile());
+}
+
 void llvm::expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemcpy,
                                     const TargetTransformInfo &TTI,
                                     ScalarEvolution *SE) {
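
(For reference, the loop shape createMemSetLoop emits here matches the
updated PreISelIntrinsicLowering tests earlier in this patch;
schematically, with illustrative block and value names, for %x stores
of an i128 %value to %a:)

    loadstoreloop:
      %i = phi i64 [ 0, %entry ], [ %i.next, %loadstoreloop ]
      %ptr = getelementptr inbounds i128, ptr %a, i64 %i
      store i128 %value, ptr %ptr, align 1
      %i.next = add i64 %i, 1
      %again = icmp ult i64 %i.next, %x
      br i1 %again, label %loadstoreloop, label %split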

>From 9580ab03cae66ff1e63d5ead4f694d232cf58647 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Fri, 8 Nov 2024 16:00:33 +0000
Subject: [PATCH 22/24] Fix underline length in langref

---
 llvm/docs/LangRef.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 0a4407bcb14fe8..5eabf4dc1b5ae3 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15437,7 +15437,7 @@ external functions.
 .. _int_experimental_memset_pattern:
 
 '``llvm.experimental.memset.pattern``' Intrinsic
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Syntax:
 """""""

>From 4ebc9853276ff04293a4d0260396753c5e7e7653 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Sat, 9 Nov 2024 11:40:06 +0000
Subject: [PATCH 23/24] Address review comments

---
 llvm/include/llvm/IR/IntrinsicInst.h | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 79b89f963fe4f0..4452080a76dd8c 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1263,7 +1263,7 @@ class MemSetInlineInst : public MemSetInst {
   }
 };
 
-/// This is the base class for memset.pattern
+/// This is the base class for llvm.experimental.memset.pattern
 class MemSetPatternIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
 private:
   enum { ARG_VOLATILE = 3 };
@@ -1279,12 +1279,7 @@ class MemSetPatternIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
 
   // Methods for support of type inquiry through isa, cast, and dyn_cast:
   static bool classof(const IntrinsicInst *I) {
-    switch (I->getIntrinsicID()) {
-    case Intrinsic::experimental_memset_pattern:
-      return true;
-    default:
-      return false;
-    }
+    return I->getIntrinsicID() == Intrinsic::experimental_memset_pattern;
   }
   static bool classof(const Value *V) {
     return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));

>From 78bad3b075aaf26dd909a84eb45a7bf87a4d4c8f Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Sat, 9 Nov 2024 15:29:53 +0000
Subject: [PATCH 24/24] Verify llvm.experimental.memset.pattern pattern arg is
 integral number of bytes

---
 llvm/lib/IR/Verifier.cpp                                 | 6 +++++-
 .../{memset-pattern.ll => memset-pattern-align.ll}       | 0
 llvm/test/Verifier/memset-pattern-pattern-size.ll        | 9 +++++++++
 3 files changed, 14 insertions(+), 1 deletion(-)
 rename llvm/test/Verifier/{memset-pattern.ll => memset-pattern-align.ll} (100%)
 create mode 100644 llvm/test/Verifier/memset-pattern-pattern-size.ll

diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 791391698ac271..359a55bac4b56b 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5519,8 +5519,12 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
   case Intrinsic::memcpy_inline:
   case Intrinsic::memmove:
   case Intrinsic::memset:
-  case Intrinsic::memset_inline:
+  case Intrinsic::memset_inline: {
+    break;
+  }
   case Intrinsic::experimental_memset_pattern: {
+    Check(Call.getOperand(1)->getType()->getScalarSizeInBits() % 8 == 0,
+          "pattern type must be an integral number of bytes", Call);
     break;
   }
   case Intrinsic::memcpy_element_unordered_atomic:
diff --git a/llvm/test/Verifier/memset-pattern.ll b/llvm/test/Verifier/memset-pattern-align.ll
similarity index 100%
rename from llvm/test/Verifier/memset-pattern.ll
rename to llvm/test/Verifier/memset-pattern-align.ll
diff --git a/llvm/test/Verifier/memset-pattern-pattern-size.ll b/llvm/test/Verifier/memset-pattern-pattern-size.ll
new file mode 100644
index 00000000000000..df701b09fd52ce
--- /dev/null
+++ b/llvm/test/Verifier/memset-pattern-pattern-size.ll
@@ -0,0 +1,9 @@
+; RUN: not opt -passes=verify < %s 2>&1 | FileCheck %s
+
+; CHECK: pattern type must be an integral number of bytes
+
+define void @foo(ptr %P, i31 %value) {
+  call void @llvm.experimental.memset.pattern.p0.i31.i32(ptr align 4 %P, i31 %value, i32 4, i1 false)
+  ret void
+}
+declare void @llvm.experimental.memset.pattern.p0.i31.i32(ptr nocapture, i31, i32, i1) nounwind
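
(For contrast, not part of the patch: a pattern type that is a whole
number of bytes passes the new check, e.g. the i32 form already
exercised by memset-pattern-align.ll.)

    define void @bar(ptr %P, i32 %value) {
      call void @llvm.experimental.memset.pattern.p0.i32.i32(ptr align 4 %P, i32 %value, i32 4, i1 false)
      ret void
    }
    declare void @llvm.experimental.memset.pattern.p0.i32.i32(ptr nocapture, i32, i32, i1)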


