[llvm-branch-commits] [clang] [libc] [lldb] [llvm] [mlir] [Sparc] Fix instr desc of special register stores (PR #88971)

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Apr 16 16:10:39 PDT 2024


https://github.com/darkbuck updated https://github.com/llvm/llvm-project/pull/88971

From d19bd05c79ad3b1a2c3cb439c3fc60825f66bed7 Mon Sep 17 00:00:00 2001
From: Hubert Tong <hubert-reinterpretcast at users.noreply.github.com>
Date: Tue, 16 Apr 2024 17:26:55 -0400
Subject: [PATCH 1/7] Clang Release Notes: Fix reST formatting

Fix a use of inline code markup so that a non-word character follows the ending delimiter, as required by reST.
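
For illustration, a minimal sketch of the rule involved: reST only
recognizes the end of inline markup when a non-word character follows
the closing delimiter, so a plural "s" has to be separated by an
escaped (and therefore invisible) space:

    ``TemplateArgumentList``s     <- end delimiter not recognized
    ``TemplateArgumentList``\ s   <- renders as "TemplateArgumentLists"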
---
 clang/docs/ReleaseNotes.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index e6c345a2f5c0f5..4aedfafcb26aea 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -68,7 +68,7 @@ AST Dumping Potentially Breaking Changes
 
 Clang Frontend Potentially Breaking Changes
 -------------------------------------------
-- Removed support for constructing on-stack ``TemplateArgumentList``s; interfaces should instead
+- Removed support for constructing on-stack ``TemplateArgumentList``\ s; interfaces should instead
   use ``ArrayRef<TemplateArgument>`` to pass template arguments. Transitioning internal uses to
   ``ArrayRef<TemplateArgument>`` reduces AST memory usage by 0.4% when compiling clang, and is
   expected to show similar improvements on other workloads.

From 3074060d6a1d7d2e74cb767876bd9e5192d12007 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Tue, 16 Apr 2024 14:28:45 -0700
Subject: [PATCH 2/7] [memprof] Use SizeIs (NFC) (#88984)

---
 llvm/unittests/ProfileData/MemProfTest.cpp | 30 +++++++++++-----------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp
index f596919ed039a8..7e00a80cacf933 100644
--- a/llvm/unittests/ProfileData/MemProfTest.cpp
+++ b/llvm/unittests/ProfileData/MemProfTest.cpp
@@ -183,13 +183,13 @@ TEST(MemProf, FillsValue) {
   // We expect 4 records. We attach alloc site data to foo and bar, i.e.
   // all frames bottom up until we find a non-inline frame. We attach call site
   // data to bar, xyz and abc.
-  ASSERT_EQ(Records.size(), 4U);
+  ASSERT_THAT(Records, SizeIs(4));
 
   // Check the memprof record for foo.
   const llvm::GlobalValue::GUID FooId = IndexedMemProfRecord::getGUID("foo");
   ASSERT_EQ(Records.count(FooId), 1U);
   const MemProfRecord &Foo = Records[FooId];
-  ASSERT_EQ(Foo.AllocSites.size(), 1U);
+  ASSERT_THAT(Foo.AllocSites, SizeIs(1));
   EXPECT_EQ(Foo.AllocSites[0].Info.getAllocCount(), 1U);
   EXPECT_THAT(Foo.AllocSites[0].CallStack[0],
               FrameContains("foo", 5U, 30U, true));
@@ -205,7 +205,7 @@ TEST(MemProf, FillsValue) {
   const llvm::GlobalValue::GUID BarId = IndexedMemProfRecord::getGUID("bar");
   ASSERT_EQ(Records.count(BarId), 1U);
   const MemProfRecord &Bar = Records[BarId];
-  ASSERT_EQ(Bar.AllocSites.size(), 1U);
+  ASSERT_THAT(Bar.AllocSites, SizeIs(1));
   EXPECT_EQ(Bar.AllocSites[0].Info.getAllocCount(), 1U);
   EXPECT_THAT(Bar.AllocSites[0].CallStack[0],
               FrameContains("foo", 5U, 30U, true));
@@ -216,8 +216,8 @@ TEST(MemProf, FillsValue) {
   EXPECT_THAT(Bar.AllocSites[0].CallStack[3],
               FrameContains("abc", 5U, 30U, false));
 
-  ASSERT_EQ(Bar.CallSites.size(), 1U);
-  ASSERT_EQ(Bar.CallSites[0].size(), 2U);
+  ASSERT_THAT(Bar.CallSites, SizeIs(1));
+  ASSERT_THAT(Bar.CallSites[0], SizeIs(2));
   EXPECT_THAT(Bar.CallSites[0][0], FrameContains("foo", 5U, 30U, true));
   EXPECT_THAT(Bar.CallSites[0][1], FrameContains("bar", 51U, 20U, false));
 
@@ -225,8 +225,8 @@ TEST(MemProf, FillsValue) {
   const llvm::GlobalValue::GUID XyzId = IndexedMemProfRecord::getGUID("xyz");
   ASSERT_EQ(Records.count(XyzId), 1U);
   const MemProfRecord &Xyz = Records[XyzId];
-  ASSERT_EQ(Xyz.CallSites.size(), 1U);
-  ASSERT_EQ(Xyz.CallSites[0].size(), 2U);
+  ASSERT_THAT(Xyz.CallSites, SizeIs(1));
+  ASSERT_THAT(Xyz.CallSites[0], SizeIs(2));
   // Expect the entire frame even though in practice we only need the first
   // entry here.
   EXPECT_THAT(Xyz.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
@@ -237,8 +237,8 @@ TEST(MemProf, FillsValue) {
   ASSERT_EQ(Records.count(AbcId), 1U);
   const MemProfRecord &Abc = Records[AbcId];
   EXPECT_TRUE(Abc.AllocSites.empty());
-  ASSERT_EQ(Abc.CallSites.size(), 1U);
-  ASSERT_EQ(Abc.CallSites[0].size(), 2U);
+  ASSERT_THAT(Abc.CallSites, SizeIs(1));
+  ASSERT_THAT(Abc.CallSites[0], SizeIs(2));
   EXPECT_THAT(Abc.CallSites[0][0], FrameContains("xyz", 5U, 30U, true));
   EXPECT_THAT(Abc.CallSites[0][1], FrameContains("abc", 5U, 30U, false));
 }
@@ -393,9 +393,9 @@ TEST(MemProf, SymbolizationFilter) {
     Records.push_back(KeyRecordPair.second);
   }
 
-  ASSERT_EQ(Records.size(), 1U);
-  ASSERT_EQ(Records[0].AllocSites.size(), 1U);
-  ASSERT_EQ(Records[0].AllocSites[0].CallStack.size(), 1U);
+  ASSERT_THAT(Records, SizeIs(1));
+  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
+  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(1));
   EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
               FrameContains("foo", 5U, 30U, false));
 }
@@ -427,9 +427,9 @@ TEST(MemProf, BaseMemProfReader) {
     Records.push_back(KeyRecordPair.second);
   }
 
-  ASSERT_EQ(Records.size(), 1U);
-  ASSERT_EQ(Records[0].AllocSites.size(), 1U);
-  ASSERT_EQ(Records[0].AllocSites[0].CallStack.size(), 2U);
+  ASSERT_THAT(Records, SizeIs(1));
+  ASSERT_THAT(Records[0].AllocSites, SizeIs(1));
+  ASSERT_THAT(Records[0].AllocSites[0].CallStack, SizeIs(2));
   EXPECT_THAT(Records[0].AllocSites[0].CallStack[0],
               FrameContains("foo", 20U, 5U, true));
   EXPECT_THAT(Records[0].AllocSites[0].CallStack[1],

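For context, a minimal self-contained sketch of the matcher style this
patch adopts (standard googletest/googlemock only; the test and
variable names are made up for illustration). Unlike ASSERT_EQ on
.size(), a failing SizeIs assertion also prints the container's
elements:

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"
    #include <vector>

    using ::testing::SizeIs;

    TEST(Example, SizeIsMatcher) {
      std::vector<int> Records = {1, 2, 3, 4};
      // Equivalent to ASSERT_EQ(Records.size(), 4U), but on failure the
      // matcher prints the contents of Records, not just its size.
      ASSERT_THAT(Records, SizeIs(4));
    }
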
From b1385dbd98e877a374ce303fd9d1774faf98e31b Mon Sep 17 00:00:00 2001
From: Michael Jones <michaelrj at google.com>
Date: Tue, 16 Apr 2024 14:39:43 -0700
Subject: [PATCH 3/7] [libc][NFC] fix typo in fenv type proxy headers (#88982)

libc.incude.fenv ->
libc.include.fenv
---
 libc/hdr/types/CMakeLists.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libc/hdr/types/CMakeLists.txt b/libc/hdr/types/CMakeLists.txt
index ecb952b60cc061..f53766777e7530 100644
--- a/libc/hdr/types/CMakeLists.txt
+++ b/libc/hdr/types/CMakeLists.txt
@@ -28,7 +28,7 @@ add_proxy_header_library(
     fenv_t.h
   FULL_BUILD_DEPENDS
     libc.include.llvm-libc-types.fenv_t
-    libc.incude.fenv
+    libc.include.fenv
 )
 
 add_proxy_header_library(
@@ -37,5 +37,5 @@ add_proxy_header_library(
     fexcept_t.h
   FULL_BUILD_DEPENDS
     libc.include.llvm-libc-types.fexcept_t
-    libc.incude.fenv
+    libc.include.fenv
 )

From 8aa061ffc75adfab4b3084c918e7d4a3ccd5ba43 Mon Sep 17 00:00:00 2001
From: Peiming Liu <peiming at google.com>
Date: Tue, 16 Apr 2024 15:18:47 -0700
Subject: [PATCH 4/7] [mlir][sparse][NFC] switching to using `let
 arguments/results` in td files (#88994)

Followed the same style used in "TensorOps.td".
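
For reference, a schematic sketch of the two equivalent styles (the op
and type names below are placeholders, not real ops from the dialect):

    // Old style: arguments/results attached via template parameters.
    def Foo_SomeOp : Foo_Op<"some_op", [Pure]>,
        Arguments<(ins AnyType:$source)>,
        Results<(outs AnyType:$result)> {
      let summary = "...";
    }

    // New style, as in TensorOps.td: declared with `let` in the body.
    def Foo_SomeOp : Foo_Op<"some_op", [Pure]> {
      let summary = "...";
      let arguments = (ins AnyType:$source);
      let results = (outs AnyType:$result);
    }

Both forms generate the same op; the `let` form keeps every property of
an op in one place.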
---
 .../SparseTensor/IR/SparseTensorOps.td        | 324 ++++++++++--------
 1 file changed, 174 insertions(+), 150 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index d7121e8320a4bc..4e4441c640ed95 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -27,9 +27,7 @@ class SparseTensor_Op<string mnemonic, list<Trait> traits = []>
 // Sparse Tensor Operations.
 //===----------------------------------------------------------------------===//
 
-def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]>,
-    Arguments<(ins AnyType:$source)>,
-    Results<(outs AnySparseTensor:$result)> {
+def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]> {
   string summary = "Materializes a new sparse tensor from given source";
   string description = [{
     Materializes a sparse tensor with contents taken from an opaque pointer
@@ -51,15 +49,14 @@ def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]>,
     sparse_tensor.new %source : !Source to tensor<1024x1024xf64, #CSR>
     ```
   }];
+
+  let arguments = (ins AnyType:$source);
+  let results = (outs AnySparseTensor:$result);
   let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)";
 }
 
-def SparseTensor_AssembleOp : SparseTensor_Op<"assemble", [Pure]>,
-    Arguments<(ins Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$levels,
-                   TensorOf<[AnyType]>:$values)>,
-    Results<(outs AnySparseTensor: $result)> {
+def SparseTensor_AssembleOp : SparseTensor_Op<"assemble", [Pure]> {
   let summary = "Returns a sparse tensor assembled from the given levels and values";
-
   let description = [{
     Assembles the per-level position and coordinate arrays together with
     the values arrays into a sparse tensor. The order and types of the
@@ -93,6 +90,9 @@ def SparseTensor_AssembleOp : SparseTensor_Op<"assemble", [Pure]>,
     ```
   }];
 
+  let arguments = (ins Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$levels,
+                   TensorOf<[AnyType]>:$values);
+  let results = (outs AnySparseTensor: $result);
   let assemblyFormat =
     "` ` `(` $levels       `)` `,` $values attr-dict `:`"
     "    `(` type($levels) `)` `,` type($values) `to` type($result)";
@@ -100,16 +100,8 @@ def SparseTensor_AssembleOp : SparseTensor_Op<"assemble", [Pure]>,
   let hasVerifier = 1;
 }
 
-def SparseTensor_DisassembleOp : SparseTensor_Op<"disassemble", [Pure, SameVariadicResultSize]>,
-    Arguments<(ins AnySparseTensor:$tensor,
-                   Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$out_levels,
-                   TensorOf<[AnyType]>:$out_values)>,
-    Results<(outs Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$ret_levels,
-                  TensorOf<[AnyType]>:$ret_values,
-                  Variadic<AnyIndexingScalarLike>:$lvl_lens,
-                  AnyIndexingScalarLike:$val_len)> {
+def SparseTensor_DisassembleOp : SparseTensor_Op<"disassemble", [Pure, SameVariadicResultSize]> {
   let summary = "Copies the levels and values of the given sparse tensor";
-
   let description = [{
     The disassemble operation is the inverse of `sparse_tensor::assemble`.
     It copies the per-level position and coordinate arrays together with
@@ -143,6 +135,13 @@ def SparseTensor_DisassembleOp : SparseTensor_Op<"disassemble", [Pure, SameVaria
     ```
   }];
 
+  let arguments = (ins AnySparseTensor:$tensor,
+                   Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$out_levels,
+                   TensorOf<[AnyType]>:$out_values);
+  let results = (outs Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$ret_levels,
+                  TensorOf<[AnyType]>:$ret_values,
+                  Variadic<AnyIndexingScalarLike>:$lvl_lens,
+                  AnyIndexingScalarLike:$val_len);
   let assemblyFormat =
     "$tensor attr-dict `:` type($tensor)"
     "`out_lvls` `(` $out_levels `:` type($out_levels) `)` "
@@ -154,9 +153,7 @@ def SparseTensor_DisassembleOp : SparseTensor_Op<"disassemble", [Pure, SameVaria
 }
 
 def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
-  [Pure, StageWithSortSparseOpInterface]>,
-    Arguments<(ins AnyTensor:$source)>,
-    Results<(outs AnyTensor:$dest)> {
+  [Pure, StageWithSortSparseOpInterface]> {
   string summary = "Converts between different tensor types";
   string description = [{
     Converts one sparse or dense tensor type to another tensor type. The rank
@@ -197,20 +194,22 @@ def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
 
   }];
 
+  let arguments = (ins AnyTensor:$source);
+  let results = (outs AnyTensor:$dest);
+  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
+
   let extraClassDeclaration = [{
      // Whether the convert can be done by a single step or it would require
      // an extra sort. Inherited from StageWithSortSparseOpInterface.
      bool needsExtraSort();
   }];
 
-  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
   let hasFolder = 1;
   let hasVerifier = 1;
 }
 
-def SparseTensor_ReinterpretMapOp : SparseTensor_Op<"reinterpret_map", [NoMemoryEffect]>,
-    Arguments<(ins AnySparseTensor:$source)>,
-    Results<(outs AnySparseTensor:$dest)> {
+def SparseTensor_ReinterpretMapOp : SparseTensor_Op<"reinterpret_map",
+    [NoMemoryEffect]> {
   let summary = "Reinterprets the dimension/level maps of the source tensor";
   let description = [{
     Reinterprets the dimension-to-level and level-to-dimension map specified in
@@ -248,19 +247,20 @@ def SparseTensor_ReinterpretMapOp : SparseTensor_Op<"reinterpret_map", [NoMemory
     ```
     }];
 
+  let arguments = (ins AnySparseTensor:$source);
+  let results = (outs AnySparseTensor:$dest);
+  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
+
   let builders = [
     OpBuilder<(ins "SparseTensorEncodingAttr":$dstEnc, "Value":$source)>
   ];
 
-  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
   let hasFolder = 1;
   let hasVerifier = 1;
 }
 
 def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions",
-      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
-    Arguments<(ins AnySparseTensor:$tensor, LevelAttr:$level)>,
-    Results<(outs AnyNon0RankedMemRef:$result)> {
+      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
   let summary = "Extracts the `level`-th positions array of the `tensor`";
   let description = [{
     Returns the positions array of the tensor's storage at the given
@@ -280,14 +280,16 @@ def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions",
        : tensor<64x64xf64, #CSR> to memref<?xindex>
     ```
   }];
+
+  let arguments = (ins AnySparseTensor:$tensor, LevelAttr:$level);
+  let results = (outs AnyNon0RankedMemRef:$result);
   let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
+
   let hasVerifier = 1;
 }
 
 def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates",
-      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
-    Arguments<(ins AnySparseTensor:$tensor, LevelAttr:$level)>,
-    Results<(outs AnyNon0RankedMemRef:$result)> {
+      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
   let summary = "Extracts the `level`-th coordinates array of the `tensor`";
   let description = [{
     Returns the coordinates array of the tensor's storage at the given
@@ -307,14 +309,16 @@ def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates",
        : tensor<64x64xf64, #CSR> to memref<?xindex>
     ```
   }];
+
+  let arguments = (ins AnySparseTensor:$tensor, LevelAttr:$level);
+  let results = (outs AnyNon0RankedMemRef:$result);
   let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
+
   let hasVerifier = 1;
 }
 
 def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer",
-      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
-    Arguments<(ins AnySparseTensor:$tensor)>,
-    Results<(outs AnyNon0RankedMemRef:$result)> {
+      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
   let summary = "Extracts the linear coordinates array from a tensor";
   let description = [{
     Returns the linear coordinates array for a sparse tensor with
@@ -339,14 +343,16 @@ def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer",
        : tensor<64x64xf64, #COO> to memref<?xindex>
     ```
   }];
+
+  let arguments = (ins AnySparseTensor:$tensor);
+  let results = (outs AnyNon0RankedMemRef:$result);
   let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
+
   let hasVerifier = 1;
 }
 
 def SparseTensor_ToValuesOp : SparseTensor_Op<"values",
-      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]>,
-    Arguments<(ins AnySparseTensor:$tensor)>,
-    Results<(outs AnyNon0RankedMemRef:$result)> {
+      [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
   let summary = "Extracts numerical values array from a tensor";
   let description = [{
     Returns the values array of the sparse storage format for the given
@@ -365,13 +371,15 @@ def SparseTensor_ToValuesOp : SparseTensor_Op<"values",
     %1 = sparse_tensor.values %0 : tensor<64x64xf64, #CSR> to memref<?xf64>
     ```
   }];
+
+  let arguments = (ins AnySparseTensor:$tensor);
+  let results = (outs AnyNon0RankedMemRef:$result);
   let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
+
   let hasVerifier = 1;
 }
 
-def SparseTensor_NumberOfEntriesOp : SparseTensor_Op<"number_of_entries", [Pure]>,
-    Arguments<(ins AnySparseTensor:$tensor)>,
-    Results<(outs Index:$result)> {
+def SparseTensor_NumberOfEntriesOp : SparseTensor_Op<"number_of_entries", [Pure]> {
   let summary = "Returns the number of entries that are stored in the tensor.";
   let description = [{
     Returns the number of entries that are stored in the given sparse tensor.
@@ -385,14 +393,14 @@ def SparseTensor_NumberOfEntriesOp : SparseTensor_Op<"number_of_entries", [Pure]
     %noe = sparse_tensor.number_of_entries %tensor : tensor<64x64xf64, #CSR>
     ```
   }];
+
+  let arguments = (ins AnySparseTensor:$tensor);
+  let results = (outs Index:$result);
   let assemblyFormat = "$tensor attr-dict `:` type($tensor)";
 }
 
 def SparseTensor_ConcatenateOp : SparseTensor_Op<"concatenate",
-                                 [Pure, StageWithSortSparseOpInterface]>,
-    Arguments<(ins Variadic<AnyRankedTensor>:$inputs, DimensionAttr:$dimension)>,
-    Results<(outs AnyRankedTensor:$result)> {
-
+      [Pure, StageWithSortSparseOpInterface]> {
   let summary = "Concatenates a list of tensors into a single tensor.";
   let description = [{
      Concatenates a list input tensors and the output tensor with the same
@@ -418,13 +426,14 @@ def SparseTensor_ConcatenateOp : SparseTensor_Op<"concatenate",
      bool needsExtraSort();
   }];
 
+  let arguments = (ins Variadic<AnyRankedTensor>:$inputs, DimensionAttr:$dimension);
+  let results = (outs AnyRankedTensor:$result);
   let assemblyFormat = "$inputs attr-dict `:` type($inputs) `to` type($result)";
+
   let hasVerifier = 1;
 }
 
-def SparseTensor_ToSliceOffsetOp : SparseTensor_Op<"slice.offset", [Pure]>,
-    Arguments<(ins AnySparseTensorSlice:$slice, IndexAttr:$dim)>,
-    Results<(outs Index:$offset)> {
+def SparseTensor_ToSliceOffsetOp : SparseTensor_Op<"slice.offset", [Pure]> {
   let summary = "Extracts the offset of the sparse tensor slice at the given dimension";
   let description = [{
     Extracts the offset of the sparse tensor slice at the given dimension.
@@ -445,13 +454,15 @@ def SparseTensor_ToSliceOffsetOp : SparseTensor_Op<"slice.offset", [Pure]>,
     // %2 = %v2
     ```
   }];
+
+  let arguments = (ins AnySparseTensorSlice:$slice, IndexAttr:$dim);
+  let results = (outs Index:$offset);
   let assemblyFormat = "$slice `at` $dim attr-dict `:` type($slice)";
+
   let hasVerifier = 1;
 }
 
-def SparseTensor_ToSliceStrideOp : SparseTensor_Op<"slice.stride", [Pure]>,
-    Arguments<(ins AnySparseTensorSlice:$slice, IndexAttr:$dim)>,
-    Results<(outs Index:$stride)> {
+def SparseTensor_ToSliceStrideOp : SparseTensor_Op<"slice.stride", [Pure]> {
   let summary = "Extracts the stride of the sparse tensor slice at the given dimension";
   let description = [{
     Extracts the stride of the sparse tensor slice at the given dimension.
@@ -473,7 +484,11 @@ def SparseTensor_ToSliceStrideOp : SparseTensor_Op<"slice.stride", [Pure]>,
 
     ```
   }];
+
+  let arguments = (ins AnySparseTensorSlice:$slice, IndexAttr:$dim);
+  let results = (outs Index:$stride);
   let assemblyFormat = "$slice `at` $dim attr-dict `:` type($slice)";
+
   let hasVerifier = 1;
 }
 
@@ -482,9 +497,7 @@ def SparseTensor_ToSliceStrideOp : SparseTensor_Op<"slice.stride", [Pure]>,
 //===----------------------------------------------------------------------===//
 
 def SparseTensor_StorageSpecifierInitOp : SparseTensor_Op<"storage_specifier.init",
-  [Pure]>,
-    Arguments<(ins Optional<SparseTensorStorageSpecifier>:$source)>,
-    Results<(outs SparseTensorStorageSpecifier:$result)> {
+      [Pure]> {
   let summary = "";
   let description = [{
     Returns an initial storage specifier value.  A storage specifier
@@ -515,6 +528,10 @@ def SparseTensor_StorageSpecifierInitOp : SparseTensor_Op<"storage_specifier.ini
     ```
   }];
 
+  let arguments = (ins Optional<SparseTensorStorageSpecifier>:$source);
+  let results = (outs SparseTensorStorageSpecifier:$result);
+  let assemblyFormat = "attr-dict (`with` $source^)? `:` (`from` qualified(type($source))^ `to`)?"
+                                                        " qualified(type($result))";
   let builders = [
     OpBuilder<(ins "Type":$result),
     [{
@@ -522,15 +539,10 @@ def SparseTensor_StorageSpecifierInitOp : SparseTensor_Op<"storage_specifier.ini
     }]>
   ];
 
-  let assemblyFormat = "attr-dict (`with` $source^)? `:` (`from` qualified(type($source))^ `to`)?"
-                                                        " qualified(type($result))";
+
 }
 
-def SparseTensor_GetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.get", [Pure]>,
-    Arguments<(ins SparseTensorStorageSpecifier:$specifier,
-                   SparseTensorStorageSpecifierKindAttr:$specifierKind,
-                   OptionalAttr<LevelAttr>:$level)>,
-    Results<(outs Index:$result)> {
+def SparseTensor_GetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.get", [Pure]> {
   let summary = "";
   let description = [{
     Returns the requested field of the given storage_specifier.
@@ -543,19 +555,19 @@ def SparseTensor_GetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.get"
     ```
   }];
 
+  let arguments = (ins SparseTensorStorageSpecifier:$specifier,
+                   SparseTensorStorageSpecifierKindAttr:$specifierKind,
+                   OptionalAttr<LevelAttr>:$level);
+  let results = (outs Index:$result);
   let assemblyFormat = "$specifier $specifierKind (`at` $level^)? attr-dict"
                        "`:` qualified(type($specifier))";
+
   let hasVerifier = 1;
   let hasFolder = 1;
 }
 
 def SparseTensor_SetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.set",
-    [Pure, AllTypesMatch<["result", "specifier"]>]>,
-    Arguments<(ins SparseTensorStorageSpecifier:$specifier,
-                   SparseTensorStorageSpecifierKindAttr:$specifierKind,
-                   OptionalAttr<LevelAttr>:$level,
-                   Index:$value)>,
-    Results<(outs SparseTensorStorageSpecifier:$result)> {
+    [Pure, AllTypesMatch<["result", "specifier"]>]> {
   let summary = "";
   let description = [{
     Set the field of the storage specifier to the given input value. Returns
@@ -568,8 +580,15 @@ def SparseTensor_SetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.set"
        : !sparse_tensor.storage_specifier<#COO>
     ```
   }];
+
+  let arguments = (ins SparseTensorStorageSpecifier:$specifier,
+                   SparseTensorStorageSpecifierKindAttr:$specifierKind,
+                   OptionalAttr<LevelAttr>:$level,
+                   Index:$value);
+  let results = (outs SparseTensorStorageSpecifier:$result);
   let assemblyFormat = "$specifier $specifierKind (`at` $level^)? `with` $value"
                        " attr-dict `:` qualified(type($result))";
+
   let hasVerifier = 1;
 }
 
@@ -577,9 +596,7 @@ def SparseTensor_SetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.set"
 // Sparse Tensor Coordinate Operations.
 //===----------------------------------------------------------------------===//
 
-def SparseTensor_LvlOp : SparseTensor_Op<"lvl", [ConditionallySpeculatable, NoMemoryEffect]>,
-    Arguments<(ins AnySparseTensor:$source, Index:$index)>,
-    Results<(outs Index:$result)> {
+def SparseTensor_LvlOp : SparseTensor_Op<"lvl", [ConditionallySpeculatable, NoMemoryEffect]> {
   let summary = "level index operation";
   let description = [{
     The `sparse_tensor.lvl` behaves similar to `tensor.dim` operation.
@@ -615,9 +632,9 @@ def SparseTensor_LvlOp : SparseTensor_Op<"lvl", [ConditionallySpeculatable, NoMe
     ```
   }];
 
-  let assemblyFormat = [{
-    attr-dict $source `,` $index `:` type($source)
-  }];
+  let arguments = (ins AnySparseTensor:$source, Index:$index);
+  let results = (outs Index:$result);
+  let assemblyFormat = "attr-dict $source `,` $index `:` type($source) ";
 
   let builders = [
     OpBuilder<(ins "Value":$source, "int64_t":$index)>
@@ -635,11 +652,7 @@ def SparseTensor_LvlOp : SparseTensor_Op<"lvl", [ConditionallySpeculatable, NoMe
   let hasFolder = 1;
 }
 
-def SparseTensor_CrdTranslateOp : SparseTensor_Op<"crd_translate", [Pure]>,
-    Arguments<(ins Variadic<Index>:$in_crds,
-                   SparseTensorCrdTransDirectionAttr:$direction,
-                   SparseTensorEncodingAttr:$encoder)>,
-    Results<(outs Variadic<Index>:$out_crds)> {
+def SparseTensor_CrdTranslateOp : SparseTensor_Op<"crd_translate", [Pure]> {
   string summary = "Performs coordinate translation between level and dimension coordinate space.";
   string description = [{
     Performs coordinate translation between level and dimension coordinate space according
@@ -652,7 +665,13 @@ def SparseTensor_CrdTranslateOp : SparseTensor_Op<"crd_translate", [Pure]>,
                        : index, index, index, index
     ```
   }];
+
+  let arguments = (ins Variadic<Index>:$in_crds,
+                   SparseTensorCrdTransDirectionAttr:$direction,
+                   SparseTensorEncodingAttr:$encoder);
+  let results = (outs Variadic<Index>:$out_crds);
   let assemblyFormat = "$direction `[` $in_crds `]` `as` $encoder attr-dict `:` type($out_crds)";
+
   let hasVerifier = 1;
   let hasFolder = 1;
 }
@@ -669,13 +688,7 @@ def SparseTensor_PushBackOp : SparseTensor_Op<"push_back",
     [TypesMatchWith<"value type matches element type of inBuffer",
                     "inBuffer", "value",
                     "::llvm::cast<ShapedType>($_self).getElementType()">,
-     AllTypesMatch<["inBuffer", "outBuffer"]>]>,
-    Arguments<(ins Index:$curSize,
-               StridedMemRefRankOf<[AnyType], [1]>:$inBuffer,
-               AnyType:$value, Optional<Index>:$n,
-               UnitAttr:$inbounds)>,
-    Results<(outs StridedMemRefRankOf<[AnyType], [1]>:$outBuffer,
-             Index:$newSize)>  {
+     AllTypesMatch<["inBuffer", "outBuffer"]>]> {
   string summary = "Pushes a value to the back of a given buffer";
   string description = [{
     Pushes `value` to the end of the given sparse tensor storage buffer
@@ -719,6 +732,13 @@ def SparseTensor_PushBackOp : SparseTensor_Op<"push_back",
        : xindex, memref<?xf64>, f64
     ```
   }];
+
+  let arguments = (ins Index:$curSize,
+                       StridedMemRefRankOf<[AnyType], [1]>:$inBuffer,
+                       AnyType:$value, Optional<Index>:$n,
+                       UnitAttr:$inbounds);
+  let results = (outs StridedMemRefRankOf<[AnyType], [1]>:$outBuffer,
+                      Index:$newSize);
   let assemblyFormat = "(`inbounds` $inbounds^)? $curSize `,` $inBuffer"
                        " `,` $value (`,` $n^ )?  attr-dict `:`"
                        " type($curSize) `,` type($inBuffer) `,`"
@@ -732,12 +752,7 @@ def SparseTensor_PushBackOp : SparseTensor_Op<"push_back",
   let hasVerifier = 1;
 }
 
-def SparseTensor_ExpandOp : SparseTensor_Op<"expand", []>,
-    Arguments<(ins AnySparseTensor:$tensor)>,
-    Results<(outs AnyStridedMemRefOfRank<1>:$values,
-                  StridedMemRefRankOf<[I1],[1]>:$filled,
-                  StridedMemRefRankOf<[Index],[1]>:$added,
-                  Index:$count)> {
+def SparseTensor_ExpandOp : SparseTensor_Op<"expand", []> {
   string summary = "Expands an access pattern for insertion";
   string description = [{
     Performs an access pattern expansion for the innermost levels of the
@@ -771,19 +786,19 @@ def SparseTensor_ExpandOp : SparseTensor_Op<"expand", []>,
       : tensor<4x4xf64, #CSR> to memref<?xf64>, memref<?xi1>, memref<?xindex>
     ```
   }];
+
+
+  let arguments = (ins AnySparseTensor:$tensor);
+  let results = (outs AnyStridedMemRefOfRank<1>:$values,
+                      StridedMemRefRankOf<[I1],[1]>:$filled,
+                      StridedMemRefRankOf<[Index],[1]>:$added,
+                      Index:$count);
   let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($values)"
                        " `,` type($filled) `,` type($added)";
 }
 
 def SparseTensor_CompressOp : SparseTensor_Op<"compress",
-    [AllTypesMatch<["tensor", "result"]>]>,
-    Arguments<(ins AnyStridedMemRefOfRank<1>:$values,
-                   StridedMemRefRankOf<[I1],[1]>:$filled,
-                   StridedMemRefRankOf<[Index],[1]>:$added,
-                   Index:$count,
-                   AnySparseTensor:$tensor,
-                   Variadic<Index>:$lvlCoords)>,
-    Results<(outs AnySparseTensor:$result)> {
+    [AllTypesMatch<["tensor", "result"]>]> {
   string summary = "Compressed an access pattern for insertion";
   string description = [{
     Finishes a single access pattern expansion by moving inserted elements
@@ -807,6 +822,14 @@ def SparseTensor_CompressOp : SparseTensor_Op<"compress",
       : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<4x4xf64, #CSR>
     ```
   }];
+
+  let arguments = (ins AnyStridedMemRefOfRank<1>:$values,
+                   StridedMemRefRankOf<[I1],[1]>:$filled,
+                   StridedMemRefRankOf<[Index],[1]>:$added,
+                   Index:$count,
+                   AnySparseTensor:$tensor,
+                   Variadic<Index>:$lvlCoords);
+  let results = (outs AnySparseTensor:$result);
   let assemblyFormat = "$values `,` $filled `,` $added `,` $count"
                        " `into` $tensor `[` $lvlCoords `]` attr-dict"
                        " `:` type($values) `,` type($filled) `,` type($added)"
@@ -814,9 +837,7 @@ def SparseTensor_CompressOp : SparseTensor_Op<"compress",
   let hasVerifier = 1;
 }
 
-def SparseTensor_LoadOp : SparseTensor_Op<"load", [SameOperandsAndResultType]>,
-    Arguments<(ins AnySparseTensor:$tensor, UnitAttr:$hasInserts)>,
-    Results<(outs AnyTensor:$result)> {
+def SparseTensor_LoadOp : SparseTensor_Op<"load", [SameOperandsAndResultType]> {
   let summary =
     "Rematerializes tensor from underlying sparse storage format";
   let description = [{
@@ -845,11 +866,13 @@ def SparseTensor_LoadOp : SparseTensor_Op<"load", [SameOperandsAndResultType]>,
     %1 = sparse_tensor.load %0 hasInserts : tensor<16x32xf32, #CSR>
     ```
   }];
+
+  let arguments = (ins AnySparseTensor:$tensor, UnitAttr:$hasInserts);
+  let results = (outs AnyTensor:$result);
   let assemblyFormat = "$tensor (`hasInserts` $hasInserts^)? attr-dict `:` type($tensor)";
 }
 
-def SparseTensor_OutOp : SparseTensor_Op<"out", []>,
-    Arguments<(ins AnySparseTensor:$tensor, AnyType:$dest)> {
+def SparseTensor_OutOp : SparseTensor_Op<"out", []> {
   string summary = "Outputs a sparse tensor to the given destination";
   string description = [{
     Outputs the contents of a sparse tensor to the destination defined by an
@@ -868,6 +891,8 @@ def SparseTensor_OutOp : SparseTensor_Op<"out", []>,
     sparse_tensor.out %t, %dest : tensor<1024x1024xf64, #CSR>, !Dest
     ```
   }];
+
+  let arguments = (ins AnySparseTensor:$tensor, AnyType:$dest);
   let assemblyFormat = "$tensor `,` $dest attr-dict `:` type($tensor) `,` type($dest)";
 }
 
@@ -875,11 +900,7 @@ def SparseTensor_OutOp : SparseTensor_Op<"out", []>,
 // Sparse Tensor Sorting/Ordering Operations.
 //===----------------------------------------------------------------------===//
 
-def SparseTensor_SortOp : SparseTensor_Op<"sort">,
-    Arguments<(ins Index:$n, StridedMemRefRankOf<[AnyInteger, Index], [1]>:$xy,
-               Variadic<StridedMemRefRankOf<[AnyType], [1]>>:$ys,
-               AffineMapAttr:$perm_map, OptionalAttr<IndexAttr>:$ny,
-               SparseTensorSortKindAttr:$algorithm)>  {
+def SparseTensor_SortOp : SparseTensor_Op<"sort"> {
   let summary = "Sorts the arrays in xs and ys lexicographically on the "
                 "integral values found in the xs list";
   let description = [{
@@ -904,16 +925,18 @@ def SparseTensor_SortOp : SparseTensor_Op<"sort">,
     ```
   }];
 
+  let arguments = (ins Index:$n,
+                       StridedMemRefRankOf<[AnyInteger, Index], [1]>:$xy,
+                       Variadic<StridedMemRefRankOf<[AnyType], [1]>>:$ys,
+                       AffineMapAttr:$perm_map, OptionalAttr<IndexAttr>:$ny,
+                       SparseTensorSortKindAttr:$algorithm);
   let assemblyFormat = "$algorithm $n"
                        "`,`$xy (`jointly` $ys^)? attr-dict"
                        "`:` type($xy) (`jointly` type($ys)^)?";
   let hasVerifier = 1;
 }
 
-def SparseTensor_ReorderCOOOp : SparseTensor_Op<"reorder_coo", [Pure]>,
-    Arguments<(ins AnySparseTensor: $input_coo,
-                   SparseTensorSortKindAttr:$algorithm)>,
-    Results<(outs AnySparseTensor: $result_coo)> {
+def SparseTensor_ReorderCOOOp : SparseTensor_Op<"reorder_coo", [Pure]> {
   let summary = "Reorder the input COO such that it has the the same order as "
                 "the output COO";
   let description = [{
@@ -933,6 +956,9 @@ def SparseTensor_ReorderCOOOp : SparseTensor_Op<"reorder_coo", [Pure]>,
     ```
   }];
 
+  let arguments = (ins AnySparseTensor: $input_coo,
+                       SparseTensorSortKindAttr:$algorithm);
+  let results = (outs AnySparseTensor: $result_coo);
   let assemblyFormat = "$algorithm $input_coo attr-dict"
                        "`:` type($input_coo) `to` type($result_coo)";
 
@@ -944,9 +970,7 @@ def SparseTensor_ReorderCOOOp : SparseTensor_Op<"reorder_coo", [Pure]>,
 // Sparse Tensor Syntax Operations.
 //===----------------------------------------------------------------------===//
 
-def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [Pure]>,
-    Arguments<(ins AnyType:$x, AnyType:$y, UnitAttr:$left_identity, UnitAttr:$right_identity)>,
-    Results<(outs AnyType:$output)> {
+def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [Pure]> {
   let summary = "Binary set operation utilized within linalg.generic";
   let description = [{
       Defines a computation within a `linalg.generic` operation that takes two
@@ -1054,18 +1078,24 @@ def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [Pure]>,
   }];
 
   let regions = (region AnyRegion:$overlapRegion, AnyRegion:$leftRegion, AnyRegion:$rightRegion);
+  let arguments = (ins AnyType:$x, AnyType:$y, UnitAttr:$left_identity, UnitAttr:$right_identity);
+  let results = (outs AnyType:$output);
   let assemblyFormat = [{
         $x `,` $y `:` attr-dict type($x) `,` type($y) `to` type($output) `\n`
         `overlap` `=` $overlapRegion `\n`
         `left` `=` (`identity` $left_identity^):($leftRegion)? `\n`
         `right` `=` (`identity` $right_identity^):($rightRegion)?
   }];
+
   let hasVerifier = 1;
 }
 
-def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [Pure]>,
-    Arguments<(ins AnyType:$x)>,
-    Results<(outs AnyType:$output)> {
+def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [Pure]> {
+
+  let arguments = (ins AnyType:$x);
+
+  let results = (outs AnyType:$output);
+
   let summary = "Unary set operation utilized within linalg.generic";
   let description = [{
       Defines a computation with a `linalg.generic` operation that takes a single
@@ -1162,9 +1192,7 @@ def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [Pure]>,
   let hasVerifier = 1;
 }
 
-def SparseTensor_ReduceOp : SparseTensor_Op<"reduce", [Pure, SameOperandsAndResultType]>,
-    Arguments<(ins AnyType:$x, AnyType:$y, AnyType:$identity)>,
-    Results<(outs AnyType:$output)> {
+def SparseTensor_ReduceOp : SparseTensor_Op<"reduce", [Pure, SameOperandsAndResultType]> {
   let summary = "Custom reduction operation utilized within linalg.generic";
   let description = [{
       Defines a computation with a `linalg.generic` operation that takes two
@@ -1208,16 +1236,14 @@ def SparseTensor_ReduceOp : SparseTensor_Op<"reduce", [Pure, SameOperandsAndResu
   }];
 
   let regions = (region SizedRegion<1>:$region);
+  let arguments = (ins AnyType:$x, AnyType:$y, AnyType:$identity);
+  let results = (outs AnyType:$output);
+  let assemblyFormat = "$x `,` $y `,` $identity attr-dict `:` type($output) $region";
 
-  let assemblyFormat = [{
-         $x `,` $y `,` $identity attr-dict `:` type($output) $region
-  }];
   let hasVerifier = 1;
 }
 
-def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResultType]>,
-    Arguments<(ins AnyType:$x)>,
-    Results<(outs AnyType:$output)> {
+def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResultType]> {
   let summary = "Select operation utilized within linalg.generic";
   let description = [{
       Defines an evaluation within a `linalg.generic` operation that takes a single
@@ -1269,16 +1295,16 @@ def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResu
   }];
 
   let regions = (region SizedRegion<1>:$region);
-  let assemblyFormat = [{
-         $x attr-dict `:` type($x) $region
-  }];
+  let arguments = (ins AnyType:$x);
+  let results = (outs AnyType:$output);
+  let assemblyFormat = "$x attr-dict `:` type($x) $region";
+
   let hasVerifier = 1;
 }
 
 def SparseTensor_YieldOp : SparseTensor_Op<"yield", [Pure, Terminator,
     ParentOneOf<["BinaryOp", "UnaryOp", "ReduceOp", "SelectOp",
-                 "ForeachOp"]>]>,
-    Arguments<(ins Variadic<AnyType>:$results)> {
+                 "ForeachOp"]>]> {
   let summary = "Yield from sparse_tensor set-like operations";
   let description = [{
       Yields a value from within a `binary`, `unary`, `reduce`,
@@ -1319,17 +1345,12 @@ def SparseTensor_YieldOp : SparseTensor_Op<"yield", [Pure, Terminator,
      }
   }];
 
-  let assemblyFormat = [{
-        $results attr-dict `:` type($results)
-  }];
+  let arguments = (ins Variadic<AnyType>:$results);
+  let assemblyFormat = "$results attr-dict `:` type($results)";
 }
 
 def SparseTensor_ForeachOp : SparseTensor_Op<"foreach",
-    [SingleBlockImplicitTerminator<"YieldOp">]>,
-     Arguments<(ins AnyTensor:$tensor,
-                    Variadic<AnyType>:$initArgs,
-                    OptionalAttr<AffineMapAttr>:$order)>,
-     Results<(outs Variadic<AnyType>:$results)> {
+    [SingleBlockImplicitTerminator<"YieldOp">]> {
   let summary = "Iterates over elements in a tensor";
   let description = [{
      Iterates over stored elements in a tensor (which are typically, but not always,
@@ -1424,6 +1445,10 @@ def SparseTensor_ForeachOp : SparseTensor_Op<"foreach",
   ];
 
   let regions = (region SizedRegion<1>:$region);
+  let arguments = (ins AnyTensor:$tensor,
+                       Variadic<AnyType>:$initArgs,
+                       OptionalAttr<AffineMapAttr>:$order);
+  let results = (outs Variadic<AnyType>:$results);
   let assemblyFormat = "`in` $tensor (`init``(`$initArgs^`)`)? attr-dict"
                        "    `:` type($tensor) (`,` type($initArgs)^)?"
                        "  (`->` type($results)^)?  `do` $region";
@@ -1436,13 +1461,6 @@ def SparseTensor_ForeachOp : SparseTensor_Op<"foreach",
 
 def ExtractIterSpaceOp : SparseTensor_Op<"extract_iteration_space",
     [Pure, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
-
-  let arguments = (ins AnySparseTensor:$tensor,
-                       Optional<AnySparseIterator>:$parentIter,
-                       LevelAttr:$loLvl, LevelAttr:$hiLvl);
-
-  let results = (outs AnySparseIterSpace:$resultSpace);
-
   let summary = "Extracts an iteration space from a sparse tensor between certain levels";
   let description = [{
       Extracts a `!sparse_tensor.iter_space` from a sparse tensor between
@@ -1485,17 +1503,21 @@ def ExtractIterSpaceOp : SparseTensor_Op<"extract_iteration_space",
     }
   }];
 
-  let hasVerifier = 1;
+  let arguments = (ins AnySparseTensor:$tensor,
+                       Optional<AnySparseIterator>:$parentIter,
+                       LevelAttr:$loLvl, LevelAttr:$hiLvl);
+  let results = (outs AnySparseIterSpace:$resultSpace);
   let assemblyFormat = "$tensor (`at` $parentIter^)? `lvls` `=` custom<LevelRange>($loLvl, $hiLvl) "
                        " attr-dict `:` type($tensor) (`,` type($parentIter)^)?";
+
+  let hasVerifier = 1;
 }
 
 //===----------------------------------------------------------------------===//
 // Sparse Tensor Debugging and Test-Only Operations.
 //===----------------------------------------------------------------------===//
 
-def SparseTensor_PrintOp : SparseTensor_Op<"print">,
-    Arguments<(ins AnySparseTensor:$tensor)> {
+def SparseTensor_PrintOp : SparseTensor_Op<"print"> {
   string summary = "Prints a sparse tensor (for testing and debugging)";
   string description = [{
     Prints the individual components of a sparse tensors (the positions,
@@ -1509,6 +1531,8 @@ def SparseTensor_PrintOp : SparseTensor_Op<"print">,
     sparse_tensor.print %tensor : tensor<1024x1024xf64, #CSR>
     ```
   }];
+
+  let arguments = (ins AnySparseTensor:$tensor);
   let assemblyFormat = "$tensor attr-dict `:` type($tensor)";
 }
 

From 9067070d91e9d8cdd8509ffa56a076f08a3d7281 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 16 Apr 2024 15:40:32 -0700
Subject: [PATCH 5/7] [RISCV] Re-separate unaligned scalar and vector memory
 features in the backend. (#88954)

This is largely a revert of commit
e81796671890b59c110f8e41adc7ca26f8484d20.

As #88029 shows, there exists hardware that only supports unaligned
scalar memory accesses.

I'm leaving how this gets exposed to the clang interface to a future
patch.
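
As a usage sketch (mirroring the driver and test changes below, not a
new interface): -mno-strict-align still toggles both features together,
while -mattr can now enable them independently, e.g.

    clang --target=riscv64 -mno-strict-align ...
        # "+unaligned-scalar-mem" "+unaligned-vector-mem"
    llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem ...
        # unaligned scalar loads/stores only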
---
 clang/lib/Basic/Targets/RISCV.cpp                |  3 ++-
 clang/lib/Driver/ToolChains/Arch/RISCV.cpp       | 16 +++++++++++-----
 clang/test/Driver/riscv-features.c               |  4 ++--
 llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp |  4 ++--
 llvm/lib/Target/RISCV/RISCVFeatures.td           | 13 +++++++++----
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp      | 16 ++++++++--------
 llvm/lib/Target/RISCV/RISCVProcessors.td         |  6 ++++--
 llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h |  4 ++--
 llvm/test/CodeGen/RISCV/memcpy-inline.ll         |  4 ++--
 llvm/test/CodeGen/RISCV/memcpy.ll                |  4 ++--
 llvm/test/CodeGen/RISCV/memset-inline.ll         |  4 ++--
 llvm/test/CodeGen/RISCV/pr56110.ll               |  2 +-
 .../CodeGen/RISCV/riscv-func-target-feature.ll   |  2 +-
 .../RISCV/rvv/concat-vectors-constant-stride.ll  |  4 ++--
 .../rvv/fixed-vectors-strided-load-combine.ll    |  2 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll |  4 ++--
 llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll     |  4 ++--
 llvm/test/CodeGen/RISCV/rvv/memset-inline.ll     |  4 ++--
 .../CodeGen/RISCV/rvv/unaligned-loads-stores.ll  |  4 ++--
 llvm/test/CodeGen/RISCV/unaligned-load-store.ll  |  4 ++--
 llvm/utils/TableGen/RISCVTargetDefEmitter.cpp    | 12 ++++++++++--
 21 files changed, 71 insertions(+), 49 deletions(-)

diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index f3d705e1551fe2..a7ce9dda34bdde 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -353,7 +353,8 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
   if (ISAInfo->hasExtension("zfh") || ISAInfo->hasExtension("zhinx"))
     HasLegalHalfType = true;
 
-  FastUnalignedAccess = llvm::is_contained(Features, "+fast-unaligned-access");
+  FastUnalignedAccess = llvm::is_contained(Features, "+unaligned-scalar-mem") &&
+                        llvm::is_contained(Features, "+unaligned-vector-mem");
 
   if (llvm::is_contained(Features, "+experimental"))
     HasExperimental = true;
diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index b1dd7c4372d475..96b3cc3bb8ffb1 100644
--- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -68,8 +68,10 @@ static void getRISCFeaturesFromMcpu(const Driver &D, const Arg *A,
           << A->getSpelling() << Mcpu;
   }
 
-  if (llvm::RISCV::hasFastUnalignedAccess(Mcpu))
-    Features.push_back("+fast-unaligned-access");
+  if (llvm::RISCV::hasFastUnalignedAccess(Mcpu)) {
+    Features.push_back("+unaligned-scalar-mem");
+    Features.push_back("+unaligned-vector-mem");
+  }
 }
 
 void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
@@ -168,12 +170,16 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
   }
 
   // Android requires fast unaligned access on RISCV64.
-  if (Triple.isAndroid())
-    Features.push_back("+fast-unaligned-access");
+  if (Triple.isAndroid()) {
+    Features.push_back("+unaligned-scalar-mem");
+    Features.push_back("+unaligned-vector-mem");
+  }
 
   // -mstrict-align is default, unless -mno-strict-align is specified.
   AddTargetFeature(Args, Features, options::OPT_mno_strict_align,
-                   options::OPT_mstrict_align, "fast-unaligned-access");
+                   options::OPT_mstrict_align, "unaligned-scalar-mem");
+  AddTargetFeature(Args, Features, options::OPT_mno_strict_align,
+                   options::OPT_mstrict_align, "unaligned-vector-mem");
 
   // Now add any that the user explicitly requested on the command line,
   // which may override the defaults.
diff --git a/clang/test/Driver/riscv-features.c b/clang/test/Driver/riscv-features.c
index ce4947d2bc47b4..5e1db5ba1ed3e9 100644
--- a/clang/test/Driver/riscv-features.c
+++ b/clang/test/Driver/riscv-features.c
@@ -38,8 +38,8 @@
 // RUN: %clang --target=riscv32-unknown-elf -### %s -mno-strict-align 2>&1 | FileCheck %s -check-prefix=FAST-UNALIGNED-ACCESS
 // RUN: %clang --target=riscv32-unknown-elf -### %s -mstrict-align 2>&1 | FileCheck %s -check-prefix=NO-FAST-UNALIGNED-ACCESS
 
-// FAST-UNALIGNED-ACCESS: "-target-feature" "+fast-unaligned-access"
-// NO-FAST-UNALIGNED-ACCESS: "-target-feature" "-fast-unaligned-access"
+// FAST-UNALIGNED-ACCESS: "-target-feature" "+unaligned-scalar-mem" "-target-feature" "+unaligned-vector-mem"
+// NO-FAST-UNALIGNED-ACCESS: "-target-feature" "-unaligned-scalar-mem" "-target-feature" "-unaligned-vector-mem"
 
 // RUN: %clang --target=riscv32-unknown-elf -### %s 2>&1 | FileCheck %s -check-prefix=NOUWTABLE
 // RUN: %clang --target=riscv32-unknown-elf -fasynchronous-unwind-tables -### %s 2>&1 | FileCheck %s -check-prefix=UWTABLE
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 173995f05b51cc..d93709ac03420e 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -326,8 +326,8 @@ bool RISCVExpandPseudo::expandRV32ZdinxStore(MachineBasicBlock &MBB,
       .setMemRefs(MMOLo);
 
   if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) {
-    // FIXME: Zdinx RV32 can not work on unaligned memory.
-    assert(!STI->hasFastUnalignedAccess());
+    // FIXME: Zdinx RV32 can not work on unaligned scalar memory.
+    assert(!STI->enableUnalignedScalarMem());
 
     assert(MBBI->getOperand(2).getOffset() % 8 == 0);
     MBBI->getOperand(2).setOffset(MBBI->getOperand(2).getOffset() + 4);
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 59962216e0c041..561187c39a4a04 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1183,10 +1183,15 @@ def FeatureTrailingSeqCstFence : SubtargetFeature<"seq-cst-trailing-fence",
                                           "true",
                                           "Enable trailing fence for seq-cst store.">;
 
-def FeatureFastUnalignedAccess
-   : SubtargetFeature<"fast-unaligned-access", "HasFastUnalignedAccess",
-                      "true", "Has reasonably performant unaligned "
-                      "loads and stores (both scalar and vector)">;
+def FeatureUnalignedScalarMem
+   : SubtargetFeature<"unaligned-scalar-mem", "EnableUnalignedScalarMem",
+                      "true", "Has reasonably performant unaligned scalar "
+                      "loads and stores">;
+
+def FeatureUnalignedVectorMem
+   : SubtargetFeature<"unaligned-vector-mem", "EnableUnalignedVectorMem",
+                      "true", "Has reasonably performant unaligned vector "
+                      "loads and stores">;
 
 def FeaturePostRAScheduler : SubtargetFeature<"use-postra-scheduler",
     "UsePostRAScheduler", "true", "Schedule again after register allocation">;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7b4bec2f65b741..b0deb1d2669952 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1924,7 +1924,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
   // replace. If we don't support unaligned scalar mem, prefer the constant
   // pool.
   // TODO: Can the caller pass down the alignment?
-  if (!Subtarget.hasFastUnalignedAccess())
+  if (!Subtarget.enableUnalignedScalarMem())
     return true;
 
   // Prefer to keep the load if it would require many instructions.
@@ -15837,7 +15837,7 @@ static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask,
   if (WiderElementSize > ST.getELen()/8)
     return false;
 
-  if (!ST.hasFastUnalignedAccess() && BaseAlign < WiderElementSize)
+  if (!ST.enableUnalignedVectorMem() && BaseAlign < WiderElementSize)
     return false;
 
   for (unsigned i = 0; i < Index->getNumOperands(); i++) {
@@ -20663,8 +20663,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
     unsigned *Fast) const {
   if (!VT.isVector()) {
     if (Fast)
-      *Fast = Subtarget.hasFastUnalignedAccess();
-    return Subtarget.hasFastUnalignedAccess();
+      *Fast = Subtarget.enableUnalignedScalarMem();
+    return Subtarget.enableUnalignedScalarMem();
   }
 
   // All vector implementations must support element alignment
@@ -20680,8 +20680,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
   // misaligned accesses.  TODO: Work through the codegen implications of
   // allowing such accesses to be formed, and considered fast.
   if (Fast)
-    *Fast = Subtarget.hasFastUnalignedAccess();
-  return Subtarget.hasFastUnalignedAccess();
+    *Fast = Subtarget.enableUnalignedVectorMem();
+  return Subtarget.enableUnalignedVectorMem();
 }
 
 
@@ -20716,7 +20716,7 @@ EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,
 
   // Do we have sufficient alignment for our preferred VT?  If not, revert
   // to largest size allowed by our alignment criteria.
-  if (PreferredVT != MVT::i8 && !Subtarget.hasFastUnalignedAccess()) {
+  if (PreferredVT != MVT::i8 && !Subtarget.enableUnalignedVectorMem()) {
     Align RequiredAlign(PreferredVT.getStoreSize());
     if (Op.isFixedDstAlign())
       RequiredAlign = std::min(RequiredAlign, Op.getDstAlign());
@@ -20908,7 +20908,7 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
   if (!isLegalElementTypeForRVV(ScalarType))
     return false;
 
-  if (!Subtarget.hasFastUnalignedAccess() &&
+  if (!Subtarget.enableUnalignedVectorMem() &&
       Alignment < ScalarType.getStoreSize())
     return false;
 
diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td
index 739b50749e1323..f9a557e02bfe1a 100644
--- a/llvm/lib/Target/RISCV/RISCVProcessors.td
+++ b/llvm/lib/Target/RISCV/RISCVProcessors.td
@@ -257,7 +257,8 @@ def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model,
                                        FeatureStdExtZbb,
                                        FeatureStdExtZbs,
                                        FeatureStdExtZfhmin,
-                                       FeatureFastUnalignedAccess],
+                                       FeatureUnalignedScalarMem,
+                                       FeatureUnalignedVectorMem],
                                       [TuneNoDefaultUnroll,
                                        TuneConditionalCompressedMoveFusion,
                                        TuneLUIADDIFusion,
@@ -295,7 +296,8 @@ def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model,
                                        FeatureStdExtZvkng,
                                        FeatureStdExtZvksc,
                                        FeatureStdExtZvksg,
-                                       FeatureFastUnalignedAccess],
+                                       FeatureUnalignedScalarMem,
+                                       FeatureUnalignedVectorMem],
                                       [TuneNoDefaultUnroll,
                                        TuneConditionalCompressedMoveFusion,
                                        TuneLUIADDIFusion,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index e0c0e6517b6f1f..2f9281ab892447 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -228,7 +228,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
       return false;
 
     EVT ElemType = DataTypeVT.getScalarType();
-    if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize())
+    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
       return false;
 
     return TLI->isLegalElementTypeForRVV(ElemType);
@@ -253,7 +253,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
       return false;
 
     EVT ElemType = DataTypeVT.getScalarType();
-    if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize())
+    if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
       return false;
 
     return TLI->isLegalElementTypeForRVV(ElemType);
diff --git a/llvm/test/CodeGen/RISCV/memcpy-inline.ll b/llvm/test/CodeGen/RISCV/memcpy-inline.ll
index 343695ee37da84..833e07351eec77 100644
--- a/llvm/test/CodeGen/RISCV/memcpy-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memcpy-inline.ll
@@ -3,9 +3,9 @@
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32
 ; RUN: llc < %s -mtriple=riscv64 \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 
 ; ----------------------------------------------------------------------
diff --git a/llvm/test/CodeGen/RISCV/memcpy.ll b/llvm/test/CodeGen/RISCV/memcpy.ll
index 12ec0881b20d9f..02f582339d0b78 100644
--- a/llvm/test/CodeGen/RISCV/memcpy.ll
+++ b/llvm/test/CodeGen/RISCV/memcpy.ll
@@ -3,9 +3,9 @@
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32
 ; RUN: llc < %s -mtriple=riscv64 \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
 
diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll
index cc22b77c641e27..55fe81a58805ed 100644
--- a/llvm/test/CodeGen/RISCV/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memset-inline.ll
@@ -3,9 +3,9 @@
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+unaligned-scalar-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+unaligned-scalar-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
 
diff --git a/llvm/test/CodeGen/RISCV/pr56110.ll b/llvm/test/CodeGen/RISCV/pr56110.ll
index c795b17419f564..fa441f5fc3aef4 100644
--- a/llvm/test/CodeGen/RISCV/pr56110.ll
+++ b/llvm/test/CodeGen/RISCV/pr56110.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=riscv32 | FileCheck %s
-; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem | FileCheck %s
 
 define void @foo_set(ptr nocapture noundef %a, i32 noundef %v) {
 ; CHECK-LABEL: foo_set:
diff --git a/llvm/test/CodeGen/RISCV/riscv-func-target-feature.ll b/llvm/test/CodeGen/RISCV/riscv-func-target-feature.ll
index a03dadbc1d1160..d627ae9c90394e 100644
--- a/llvm/test/CodeGen/RISCV/riscv-func-target-feature.ll
+++ b/llvm/test/CodeGen/RISCV/riscv-func-target-feature.ll
@@ -36,7 +36,7 @@ entry:
 }
 
 ; CHECK-NOT: .option push
-define void @test5() "target-features"="+fast-unaligned-access" {
+define void @test5() "target-features"="+unaligned-scalar-mem" {
 ; CHECK-LABEL: test5
 ; CHECK-NOT: .option pop
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/concat-vectors-constant-stride.ll b/llvm/test/CodeGen/RISCV/rvv/concat-vectors-constant-stride.ll
index f244810e739d93..ff35043dbd7e75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/concat-vectors-constant-stride.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/concat-vectors-constant-stride.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+fast-unaligned-access -target-abi=ilp32 \
+; RUN: llc -mtriple=riscv32 -mattr=+v,+unaligned-vector-mem -target-abi=ilp32 \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+fast-unaligned-access -target-abi=lp64 \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+unaligned-vector-mem -target-abi=lp64 \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define void @constant_forward_stride(ptr %s, ptr %d) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index 657d52354aa39f..f0fcc482e2207e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,RV64
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+fast-unaligned-access -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64,RV64-MISALIGN
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+unaligned-vector-mem -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64,RV64-MISALIGN
 
 ; RUN: llc -mtriple=riscv64 -mattr=+f,+zfh,+zve64f,+zvl128b,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,ZVE64F
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index fffc4d6c08335c..36c36a13964c92 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -3,9 +3,9 @@
 ; RUN:   | FileCheck %s --check-prefixes=SLOW,RV32-SLOW
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=SLOW,RV64-SLOW
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+unaligned-vector-mem -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=FAST,RV32-FAST
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+unaligned-vector-mem -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=FAST,RV64-FAST
 
 define <4 x i32> @load_v4i32_align1(ptr %ptr) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll
index 485f94ee2a1026..53598c609107b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll
@@ -3,9 +3,9 @@
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 
 ; ----------------------------------------------------------------------
diff --git a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
index 0e7e914cf68e8a..accc18519d6260 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
@@ -3,9 +3,9 @@
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+unaligned-scalar-mem,+unaligned-vector-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+unaligned-scalar-mem,+unaligned-vector-mem \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll
index f488baf5a9d9fe..1491bb6c337a02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll
@@ -3,9 +3,9 @@
 ; RUN:    -verify-machineinstrs | FileCheck %s
 ; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v < %s \
 ; RUN:    -verify-machineinstrs | FileCheck %s
-; RUN: llc -mtriple riscv32 -mattr=+d,+zfh,+zvfh,+v,+fast-unaligned-access < %s \
+; RUN: llc -mtriple riscv32 -mattr=+d,+zfh,+zvfh,+v,+unaligned-vector-mem < %s \
 ; RUN:    -verify-machineinstrs | FileCheck --check-prefix=FAST %s
-; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v,+fast-unaligned-access < %s \
+; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v,+unaligned-vector-mem < %s \
 ; RUN:    -verify-machineinstrs | FileCheck --check-prefix=FAST %s
 
 
diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index 599b0d08629eaf..ce0d8fedbfb88f 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -3,9 +3,9 @@
 ; RUN:   | FileCheck -check-prefixes=ALL,SLOW,RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefixes=ALL,SLOW,RV64I %s
-; RUN: llc -mtriple=riscv32 -mattr=+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefixes=ALL,FAST,RV32I-FAST %s
-; RUN: llc -mtriple=riscv64 -mattr=+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefixes=ALL,FAST,RV64I-FAST %s
 
 ; A collection of cases showing codegen for unaligned loads and stores
diff --git a/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp b/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp
index 7a6439cb94910e..e57bc6fb507e32 100644
--- a/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp
+++ b/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp
@@ -60,11 +60,19 @@ static void EmitRISCVTargetDef(RecordKeeper &RK, raw_ostream &OS) {
     if (MArch.empty())
       MArch = getMArch(*Rec);
 
-    const bool FastUnalignedAccess =
+    bool FastScalarUnalignedAccess =
         any_of(Rec->getValueAsListOfDefs("Features"), [&](auto &Feature) {
-          return Feature->getValueAsString("Name") == "fast-unaligned-access";
+          return Feature->getValueAsString("Name") == "unaligned-scalar-mem";
         });
 
+    bool FastVectorUnalignedAccess =
+        any_of(Rec->getValueAsListOfDefs("Features"), [&](auto &Feature) {
+          return Feature->getValueAsString("Name") == "unaligned-vector-mem";
+        });
+
+    bool FastUnalignedAccess =
+        FastScalarUnalignedAccess && FastVectorUnalignedAccess;
+
     OS << "PROC(" << Rec->getName() << ", "
        << "{\"" << Rec->getValueAsString("Name") << "\"}, "
        << "{\"" << MArch << "\"}, " << FastUnalignedAccess << ")\n";

>From 988ffd06722e7e056b239efe497345ac97be33db Mon Sep 17 00:00:00 2001
From: Usama Hameed <u_hameed at apple.com>
Date: Tue, 16 Apr 2024 16:00:14 -0700
Subject: [PATCH 6/7] Add asan tests for libsanitizers. (#88349) (#88962)

The previous patch was reverted because the test failed to build when
libsanitizers was not present. This patch catches the BuildError
exception and skips the test in that case.

This patch tests LLDB integration with libsanitizers for ASan.

rdar://111856681
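
For reference, a minimal standalone sketch of the skip-on-build-failure
pattern this patch adds (the class name here is a placeholder for
illustration; the real changes are in the diff below):

    from lldbsuite.test.decorators import *
    from lldbsuite.test.lldbtest import TestBase
    from lldbsuite.test_event.build_exception import BuildError

    class LibSanitizersSketch(TestBase):  # hypothetical name, not in the patch
        @skipIf(oslist=no_match(["macosx"]))
        def test_libsanitizers_asan(self):
            try:
                # Build the -fsanitize-stable-abi flavor of the test binary.
                self.build(make_targets=["libsanitizers"])
            except BuildError:
                # Hosts without libsanitizers cannot build this target;
                # skip the test instead of reporting a build failure.
                self.skipTest("failed to build with libsanitizers")
            # ...the actual ASan checks would follow, as in the tests below...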
---
 lldb/test/API/functionalities/asan/Makefile   |  6 +-
 .../functionalities/asan/TestMemoryHistory.py | 74 ++++++++++++++++++-
 .../functionalities/asan/TestReportData.py    | 21 +++++-
 3 files changed, 94 insertions(+), 7 deletions(-)

diff --git a/lldb/test/API/functionalities/asan/Makefile b/lldb/test/API/functionalities/asan/Makefile
index 4913a18d8cc6f9..d66696fed7078f 100644
--- a/lldb/test/API/functionalities/asan/Makefile
+++ b/lldb/test/API/functionalities/asan/Makefile
@@ -1,4 +1,8 @@
 C_SOURCES := main.c
-CFLAGS_EXTRAS := -fsanitize=address -g -gcolumn-info
+asan: CFLAGS_EXTRAS := -fsanitize=address -g -gcolumn-info
+asan: all
+
+libsanitizers: CFLAGS_EXTRAS := -fsanitize=address -fsanitize-stable-abi -g -gcolumn-info
+libsanitizers: all
 
 include Makefile.rules
diff --git a/lldb/test/API/functionalities/asan/TestMemoryHistory.py b/lldb/test/API/functionalities/asan/TestMemoryHistory.py
index 00162ae8822c74..41ab25823f5cc6 100644
--- a/lldb/test/API/functionalities/asan/TestMemoryHistory.py
+++ b/lldb/test/API/functionalities/asan/TestMemoryHistory.py
@@ -8,16 +8,24 @@
 from lldbsuite.test.lldbtest import *
 from lldbsuite.test import lldbplatform
 from lldbsuite.test import lldbutil
-
+from lldbsuite.test_event.build_exception import BuildError
 
 class AsanTestCase(TestBase):
     @skipIfFreeBSD  # llvm.org/pr21136 runtimes not yet available by default
     @expectedFailureNetBSD
     @skipUnlessAddressSanitizer
     def test(self):
-        self.build()
+        self.build(make_targets=["asan"])
         self.asan_tests()
 
+    @skipIf(oslist=no_match(["macosx"]))
+    def test_libsanitizers_asan(self):
+        try:
+            self.build(make_targets=["libsanitizers"])
+        except BuildError as e:
+            self.skipTest("failed to build with libsanitizers")
+        self.libsanitizer_tests()
+
     def setUp(self):
         # Call super's setUp().
         TestBase.setUp(self)
@@ -26,6 +34,68 @@ def setUp(self):
         self.line_free = line_number("main.c", "// free line")
         self.line_breakpoint = line_number("main.c", "// break line")
 
+    # Test line numbers: rdar://126237493
+    def libsanitizer_tests(self):
+        target = self.createTestTarget()
+
+        self.runCmd(
+            "env SanitizersAddress=1 MallocSanitizerZone=1 MallocSecureAllocator=0"
+        )
+
+        self.runCmd("run")
+
+        # In libsanitizers, memory history is not supported until a report has been generated
+        self.expect(
+            "thread list",
+            "Process should be stopped due to ASan report",
+            substrs=["stopped", "stop reason = Use of deallocated memory"],
+        )
+
+        # test the 'memory history' command
+        self.expect(
+            "memory history 'pointer'",
+            substrs=[
+                "Memory deallocated by Thread",
+                "a.out`f2",
+                "main.c",
+                "Memory allocated by Thread",
+                "a.out`f1",
+                "main.c",
+            ],
+        )
+
+        # do the same using SB API
+        process = self.dbg.GetSelectedTarget().process
+        val = (
+            process.GetSelectedThread().GetSelectedFrame().EvaluateExpression("pointer")
+        )
+        addr = val.GetValueAsUnsigned()
+        threads = process.GetHistoryThreads(addr)
+        self.assertEqual(threads.GetSize(), 2)
+
+        history_thread = threads.GetThreadAtIndex(0)
+        self.assertTrue(history_thread.num_frames >= 2)
+        self.assertEqual(
+            history_thread.frames[1].GetLineEntry().GetFileSpec().GetFilename(),
+            "main.c",
+        )
+
+        history_thread = threads.GetThreadAtIndex(1)
+        self.assertTrue(history_thread.num_frames >= 2)
+        self.assertEqual(
+            history_thread.frames[1].GetLineEntry().GetFileSpec().GetFilename(),
+            "main.c",
+        )
+
+        # let's free the container (SBThreadCollection) and see if the
+        # SBThreads still live
+        threads = None
+        self.assertTrue(history_thread.num_frames >= 2)
+        self.assertEqual(
+            history_thread.frames[1].GetLineEntry().GetFileSpec().GetFilename(),
+            "main.c",
+        )
+
     def asan_tests(self):
         target = self.createTestTarget()
 
diff --git a/lldb/test/API/functionalities/asan/TestReportData.py b/lldb/test/API/functionalities/asan/TestReportData.py
index 543c5fe66a208d..5e4c179e2a4819 100644
--- a/lldb/test/API/functionalities/asan/TestReportData.py
+++ b/lldb/test/API/functionalities/asan/TestReportData.py
@@ -8,7 +8,7 @@
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbtest import *
 from lldbsuite.test import lldbutil
-
+from lldbsuite.test_event.build_exception import BuildError
 
 class AsanTestReportDataCase(TestBase):
     @skipIfFreeBSD  # llvm.org/pr21136 runtimes not yet available by default
@@ -16,9 +16,17 @@ class AsanTestReportDataCase(TestBase):
     @skipUnlessAddressSanitizer
     @skipIf(archs=["i386"], bugnumber="llvm.org/PR36710")
     def test(self):
-        self.build()
+        self.build(make_targets=["asan"])
         self.asan_tests()
 
+    @skipIf(oslist=no_match(["macosx"]))
+    def test_libsanitizers_asan(self):
+        try:
+            self.build(make_targets=["libsanitizers"])
+        except BuildError as e:
+            self.skipTest("failed to build with libsanitizers")
+        self.asan_tests(libsanitizers=True)
+
     def setUp(self):
         # Call super's setUp().
         TestBase.setUp(self)
@@ -29,10 +37,15 @@ def setUp(self):
         self.line_crash = line_number("main.c", "// BOOM line")
         self.col_crash = 16
 
-    def asan_tests(self):
+    def asan_tests(self, libsanitizers=False):
         target = self.createTestTarget()
 
-        self.registerSanitizerLibrariesWithTarget(target)
+        if libsanitizers:
+            self.runCmd(
+                "env SanitizersAddress=1 MallocSanitizerZone=1 MallocSecureAllocator=0"
+            )
+        else:
+            self.registerSanitizerLibrariesWithTarget(target)
 
         self.runCmd("run")
 

>From 50a371795bcfe0731f8882e42712dff33cbbef9b Mon Sep 17 00:00:00 2001
From: darkbuck <michael.hliao at gmail.com>
Date: Tue, 16 Apr 2024 19:10:11 -0400
Subject: [PATCH 7/7] [X86] Fix instr desc of CFCMOV's 'mr' variants

- With the memory operand as the destination, the 'mr' variants of CFCMOV
  work like STORE, so their memory operands should be input operands
  instead of output ones.

Reviewers: XinWang10, arsenm

Pull Request: https://github.com/llvm/llvm-project/pull/88970
---
 llvm/lib/Target/X86/X86InstrCMovSetCC.td | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/X86/X86InstrCMovSetCC.td b/llvm/lib/Target/X86/X86InstrCMovSetCC.td
index 27a0c889a4da3e..e27aa4115990e9 100644
--- a/llvm/lib/Target/X86/X86InstrCMovSetCC.td
+++ b/llvm/lib/Target/X86/X86InstrCMovSetCC.td
@@ -58,8 +58,8 @@ let SchedRW = [WriteCMOV.Folded, WriteCMOV.ReadAfterFold] in {
 }
 let SchedRW = [WriteCMOV, ReadDefault, ReadDefault, ReadDefault, ReadDefault, ReadDefault],
     Predicates = [HasCMOV, HasCF, In64BitMode], mayStore = 1 in
-  def mr : ITy<0x40, MRMDestMemCC, t, (outs t.MemOperand:$dst),
-                (ins t.RegClass:$src1, ccode:$cond),
+  def mr : ITy<0x40, MRMDestMemCC, t, (outs),
+                (ins t.MemOperand:$dst, t.RegClass:$src1, ccode:$cond),
                 "cfcmov${cond}", unaryop_ndd_args, []>, UseEFLAGS, NF;
 }
 



More information about the llvm-branch-commits mailing list