[llvm] [AArch64] Mark umull as commutative (PR #152158)

Cullen Rhodes via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 6 04:21:15 PDT 2025


https://github.com/c-rhodes updated https://github.com/llvm/llvm-project/pull/152158

>From 05a6114fe45420ffe5e55d9efedad7b2a1f67820 Mon Sep 17 00:00:00 2001
From: Cullen Rhodes <cullen.rhodes at arm.com>
Date: Tue, 5 Aug 2025 14:14:01 +0000
Subject: [PATCH 1/2] [AArch64] Precommit test for commutable [usp]mull
 (gh-issue #61461)

---
 .../CodeGen/AArch64/arm64-neon-mul-div.ll     | 13 ++++
 llvm/test/CodeGen/AArch64/arm64-vmul.ll       | 77 +++++++++++++++++--
 2 files changed, 82 insertions(+), 8 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
index ecf3f69825c0e..e3515124a6d34 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
@@ -1608,6 +1608,19 @@ define <16 x i8> @poly_mulv16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
    ret <16 x i8> %prod
 }
 
+define <16 x i8> @commutable_poly_mul(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK-LABEL: commutable_poly_mul:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pmul v2.16b, v0.16b, v1.16b
+; CHECK-NEXT:    pmul v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    add v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    ret
+  %1 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+  %2 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %rhs, <16 x i8> %lhs)
+  %3 = add <16 x i8> %1, %2
+  ret <16 x i8> %3
+}
+
 declare <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16>, <4 x i16>)
 declare <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16>, <8 x i16>)
 declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index 07400bbb2f58c..1c8484e58fe79 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -3,6 +3,7 @@
 ; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; CHECK-GI:       warning: Instruction selection used fallback path for pmull8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for commutable_pmull8h
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqdmulh_1s
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fmls_2s
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fmls_4s
@@ -78,6 +79,21 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind {
   ret <2 x i64> %tmp3
 }
 
+define void @commutable_smull(<2 x i32> %A, <2 x i32> %B, ptr %C) {
+; CHECK-LABEL: commutable_smull:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull v2.2d, v0.2s, v1.2s
+; CHECK-NEXT:    smull v0.2d, v1.2s, v0.2s
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %1 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %B)
+  %2 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %B, <2 x i32> %A)
+  store <2 x i64> %1, ptr %C
+  %C.gep.2 = getelementptr i8, ptr %C, i64 16
+  store <2 x i64> %2, ptr %C.gep.2
+  ret void
+}
+
 declare <8 x i16>  @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
@@ -121,6 +137,21 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind {
   ret <2 x i64> %tmp3
 }
 
+define void @commutable_umull(<2 x i32> %A, <2 x i32> %B, ptr %C) {
+; CHECK-LABEL: commutable_umull:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull v2.2d, v0.2s, v1.2s
+; CHECK-NEXT:    umull v0.2d, v1.2s, v0.2s
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %1 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %B)
+  %2 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %B, <2 x i32> %A)
+  store <2 x i64> %1, ptr %C
+  %C.gep.2 = getelementptr i8, ptr %C, i64 16
+  store <2 x i64> %2, ptr %C.gep.2
+  ret void
+}
+
 declare <8 x i16>  @llvm.aarch64.neon.umull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
@@ -212,6 +243,21 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind {
   ret <8 x i16> %tmp3
 }
 
+define void @commutable_pmull8h(<8 x i8> %A, <8 x i8> %B, ptr %C) {
+; CHECK-LABEL: commutable_pmull8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pmull v2.8h, v0.8b, v1.8b
+; CHECK-NEXT:    pmull v0.8h, v1.8b, v0.8b
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %1 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %A, <8 x i8> %B)
+  %2 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %B, <8 x i8> %A)
+  store <8 x i16> %1, ptr %C
+  %C.gep.1 = getelementptr i8, ptr %C, i8 16
+  store <8 x i16> %2, ptr %C.gep.1
+  ret void
+}
+
 declare <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
 
 define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind {
@@ -487,10 +533,10 @@ define void @smlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
 ; CHECK-GI-LABEL: smlal2d_chain_with_constant:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mvn v3.8b, v2.8b
-; CHECK-GI-NEXT:    adrp x8, .LCPI27_0
+; CHECK-GI-NEXT:    adrp x8, .LCPI30_0
 ; CHECK-GI-NEXT:    smull v1.2d, v1.2s, v3.2s
 ; CHECK-GI-NEXT:    smlal v1.2d, v0.2s, v2.2s
-; CHECK-GI-NEXT:    ldr q0, [x8, :lo12:.LCPI27_0]
+; CHECK-GI-NEXT:    ldr q0, [x8, :lo12:.LCPI30_0]
 ; CHECK-GI-NEXT:    add v0.2d, v1.2d, v0.2d
 ; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
@@ -566,8 +612,8 @@ define void @smlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
 ;
 ; CHECK-GI-LABEL: smlsl2d_chain_with_constant:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    adrp x8, .LCPI31_0
-; CHECK-GI-NEXT:    ldr q3, [x8, :lo12:.LCPI31_0]
+; CHECK-GI-NEXT:    adrp x8, .LCPI34_0
+; CHECK-GI-NEXT:    ldr q3, [x8, :lo12:.LCPI34_0]
 ; CHECK-GI-NEXT:    smlsl v3.2d, v0.2s, v2.2s
 ; CHECK-GI-NEXT:    mvn v0.8b, v2.8b
 ; CHECK-GI-NEXT:    smlsl v3.2d, v1.2s, v0.2s
@@ -829,10 +875,10 @@ define void @umlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
 ; CHECK-GI-LABEL: umlal2d_chain_with_constant:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mvn v3.8b, v2.8b
-; CHECK-GI-NEXT:    adrp x8, .LCPI43_0
+; CHECK-GI-NEXT:    adrp x8, .LCPI46_0
 ; CHECK-GI-NEXT:    umull v1.2d, v1.2s, v3.2s
 ; CHECK-GI-NEXT:    umlal v1.2d, v0.2s, v2.2s
-; CHECK-GI-NEXT:    ldr q0, [x8, :lo12:.LCPI43_0]
+; CHECK-GI-NEXT:    ldr q0, [x8, :lo12:.LCPI46_0]
 ; CHECK-GI-NEXT:    add v0.2d, v1.2d, v0.2d
 ; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
@@ -908,8 +954,8 @@ define void @umlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
 ;
 ; CHECK-GI-LABEL: umlsl2d_chain_with_constant:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    adrp x8, .LCPI47_0
-; CHECK-GI-NEXT:    ldr q3, [x8, :lo12:.LCPI47_0]
+; CHECK-GI-NEXT:    adrp x8, .LCPI50_0
+; CHECK-GI-NEXT:    ldr q3, [x8, :lo12:.LCPI50_0]
 ; CHECK-GI-NEXT:    umlsl v3.2d, v0.2s, v2.2s
 ; CHECK-GI-NEXT:    mvn v0.8b, v2.8b
 ; CHECK-GI-NEXT:    umlsl v3.2d, v1.2s, v0.2s
@@ -3222,6 +3268,21 @@ define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind {
   ret <16 x i8> %val
 }
 
+define <16 x i8> @test_commutable_pmull_64(i64 %l, i64 %r) nounwind {
+; CHECK-LABEL: test_commutable_pmull_64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x1
+; CHECK-NEXT:    fmov d1, x0
+; CHECK-NEXT:    pmull v2.1q, v1.1d, v0.1d
+; CHECK-NEXT:    pmull v0.1q, v0.1d, v1.1d
+; CHECK-NEXT:    add v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    ret
+  %1 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l, i64 %r)
+  %2 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %r, i64 %l)
+  %3 = add <16 x i8> %1, %2
+  ret <16 x i8> %3
+}
+
 declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)
 
 define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind {

>From dec20b09e3c4153cb417946bbbb4a1020906705f Mon Sep 17 00:00:00 2001
From: Cullen Rhodes <cullen.rhodes at arm.com>
Date: Tue, 24 Jun 2025 09:09:45 +0000
Subject: [PATCH 2/2] [AArch64] Mark [usp]mull as commutative

Fixes #61461.
---
 llvm/include/llvm/IR/IntrinsicsAArch64.td     | 16 ++++++++-------
 .../lib/Target/AArch64/AArch64InstrFormats.td |  8 +++-----
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |  7 +++++--
 .../CodeGen/AArch64/arm64-neon-mul-div.ll     |  5 ++---
 llvm/test/CodeGen/AArch64/arm64-vmul.ll       | 20 ++++++++-----------
 5 files changed, 27 insertions(+), 29 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index ca6e2128812f7..321e20c6f5e82 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -296,13 +296,15 @@ let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
   def int_aarch64_neon_sqrdmlah : AdvSIMD_3IntArg_Intrinsic;
   def int_aarch64_neon_sqrdmlsh : AdvSIMD_3IntArg_Intrinsic;
 
-  // Vector Polynominal Multiply
-  def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
-
-  // Vector Long Multiply
-  def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
-  def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
-  def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+  let IntrProperties = [IntrNoMem, Commutative] in {
+    // Vector Polynomial Multiply
+    def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
+
+    // Vector Long Multiply
+    def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
+    def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
+    def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+  }
 
   // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
   // it with a v16i8.
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index ba7cbccc0bcd6..fe4b185b923c0 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -7359,7 +7359,9 @@ multiclass SIMDDifferentThreeVectorBD<bit U, bits<4> opc, string asm,
       [(set (v8i16 V128:$Rd), (OpNode (v8i8 V64:$Rn), (v8i8 V64:$Rm)))]>;
   def v16i8  : BaseSIMDDifferentThreeVector<U, 0b001, opc,
                                             V128, V128, V128,
-                                            asm#"2", ".8h", ".16b", ".16b", []>;
+                                            asm#"2", ".8h", ".16b", ".16b",
+      [(set (v8i16 V128:$Rd), (OpNode (v8i8 (extract_high_v16i8 (v16i8 V128:$Rn))),
+                                      (v8i8 (extract_high_v16i8 (v16i8 V128:$Rm)))))]>;
   let Predicates = [HasAES] in {
     def v1i64  : BaseSIMDDifferentThreeVector<U, 0b110, opc,
                                               V128, V64, V64,
@@ -7371,10 +7373,6 @@ multiclass SIMDDifferentThreeVectorBD<bit U, bits<4> opc, string asm,
         [(set (v16i8 V128:$Rd), (OpNode (extract_high_v2i64 (v2i64 V128:$Rn)),
                                         (extract_high_v2i64 (v2i64 V128:$Rm))))]>;
   }
-
-  def : Pat<(v8i16 (OpNode (v8i8 (extract_high_v16i8 (v16i8 V128:$Rn))),
-                          (v8i8 (extract_high_v16i8 (v16i8 V128:$Rm))))),
-      (!cast<Instruction>(NAME#"v16i8") V128:$Rn, V128:$Rm)>;
 }
 
 multiclass SIMDLongThreeVectorHS<bit U, bits<4> opc, string asm,
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index ac31236d8f2cf..34f532a384625 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6055,6 +6055,7 @@ defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
 defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;
 
 defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
+let isCommutable = 1 in
 defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
 defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
       TriOpFrag<(add node:$LHS, (abds node:$MHS, node:$RHS))> >;
@@ -6806,7 +6807,11 @@ defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>
 defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
 defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
 defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
+let isCommutable = 1 in {
 defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull", AArch64pmull>;
+defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", AArch64smull>;
+defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", AArch64umull>;
+}
 defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal", abds>;
 defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl", abds>;
 defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
@@ -6817,7 +6822,6 @@ defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
     TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
 defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
     TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
-defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", AArch64smull>;
 defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal", saddsat>;
 defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl", ssubsat>;
 defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
@@ -6835,7 +6839,6 @@ defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
     TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
 defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
     TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
-defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", AArch64umull>;
 defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                  BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
 defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
index e3515124a6d34..0d427c05e3b77 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
@@ -1611,9 +1611,8 @@ define <16 x i8> @poly_mulv16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
 define <16 x i8> @commutable_poly_mul(<16 x i8> %lhs, <16 x i8> %rhs) {
 ; CHECK-LABEL: commutable_poly_mul:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    pmul v2.16b, v0.16b, v1.16b
-; CHECK-NEXT:    pmul v0.16b, v1.16b, v0.16b
-; CHECK-NEXT:    add v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    pmul v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    add v0.16b, v0.16b, v0.16b
 ; CHECK-NEXT:    ret
   %1 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
   %2 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %rhs, <16 x i8> %lhs)
diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index 1c8484e58fe79..87a6debb40f6e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -82,9 +82,8 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind {
 define void @commutable_smull(<2 x i32> %A, <2 x i32> %B, ptr %C) {
 ; CHECK-LABEL: commutable_smull:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    smull v2.2d, v0.2s, v1.2s
-; CHECK-NEXT:    smull v0.2d, v1.2s, v0.2s
-; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
   %1 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %B)
   %2 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %B, <2 x i32> %A)
@@ -140,9 +139,8 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind {
 define void @commutable_umull(<2 x i32> %A, <2 x i32> %B, ptr %C) {
 ; CHECK-LABEL: commutable_umull:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umull v2.2d, v0.2s, v1.2s
-; CHECK-NEXT:    umull v0.2d, v1.2s, v0.2s
-; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    umull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
   %1 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %B)
   %2 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %B, <2 x i32> %A)
@@ -246,9 +244,8 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind {
 define void @commutable_pmull8h(<8 x i8> %A, <8 x i8> %B, ptr %C) {
 ; CHECK-LABEL: commutable_pmull8h:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    pmull v2.8h, v0.8b, v1.8b
-; CHECK-NEXT:    pmull v0.8h, v1.8b, v0.8b
-; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    pmull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
   %1 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %A, <8 x i8> %B)
   %2 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %B, <8 x i8> %A)
@@ -3273,9 +3270,8 @@ define <16 x i8> @test_commutable_pmull_64(i64 %l, i64 %r) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov d0, x1
 ; CHECK-NEXT:    fmov d1, x0
-; CHECK-NEXT:    pmull v2.1q, v1.1d, v0.1d
-; CHECK-NEXT:    pmull v0.1q, v0.1d, v1.1d
-; CHECK-NEXT:    add v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    pmull v0.1q, v1.1d, v0.1d
+; CHECK-NEXT:    add v0.16b, v0.16b, v0.16b
 ; CHECK-NEXT:    ret
   %1 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l, i64 %r)
   %2 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %r, i64 %l)



More information about the llvm-commits mailing list