[llvm] 879dddf - [AArch64] Add tests for umulh. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 2 10:58:38 PST 2025


Author: David Green
Date: 2025-12-02T18:58:32Z
New Revision: 879dddf2b4ede2e6474964f9e5b63545d271c733

URL: https://github.com/llvm/llvm-project/commit/879dddf2b4ede2e6474964f9e5b63545d271c733
DIFF: https://github.com/llvm/llvm-project/commit/879dddf2b4ede2e6474964f9e5b63545d271c733.diff

LOG: [AArch64] Add tests for umulh. NFC
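
For context, the new tests cover the generic high-half multiply pattern: widen both
operands, multiply at double width, shift the product right by the element width,
then truncate. Below is a minimal sketch of that pattern (illustrative only, not part
of this commit; the function name is made up), which with -mattr=+sve2 would be
expected to select a single umulh instruction, as the sve2-int-mulh.ll checks show:

define <vscale x 4 x i32> @umulh_sketch(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
  %exta = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>  ; widen both operands
  %extb = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
  %mul = mul <vscale x 4 x i64> %exta, %extb                ; double-width product
  %shr = lshr <vscale x 4 x i64> %mul, splat(i64 32)        ; keep only the high half
  %tr = trunc <vscale x 4 x i64> %shr to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %tr
}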

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll
    llvm/test/CodeGen/AArch64/sve2-int-mulh.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll b/llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll
index 32760caa524ec..146720febf486 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-mulh-pred.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
 ;
 ; SMULH
 ;
 
-define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
+define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: smulh_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
@@ -19,7 +19,7 @@ define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
   ret <vscale x 16 x i8> %tr
 }
 
-define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
+define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: smulh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
@@ -33,7 +33,7 @@ define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %
   ret <vscale x 8 x i16> %tr
 }
 
-define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
+define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: smulh_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -47,7 +47,7 @@ define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
   ret <vscale x 4 x i32> %tr
 }
 
-define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
+define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: smulh_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -65,7 +65,7 @@ define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
 ; UMULH
 ;
 
-define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
+define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: umulh_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
@@ -79,7 +79,7 @@ define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
   ret <vscale x 16 x i8> %tr
 }
 
-define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
+define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: umulh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
@@ -93,7 +93,7 @@ define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %
   ret <vscale x 8 x i16> %tr
 }
 
-define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
+define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: umulh_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -107,7 +107,7 @@ define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
   ret <vscale x 4 x i32> %tr
 }
 
-define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
+define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: umulh_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -121,4 +121,262 @@ define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
   ret <vscale x 2 x i64> %tr
 }
 
-attributes #0 = { "target-features"="+sve" }
+
+; Fixed-length 128 bits
+
+define <16 x i8> @smulh_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: smulh_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull2 v2.8h, v0.16b, v1.16b
+; CHECK-NEXT:    smull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    uzp2 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
+  %1 = sext <16 x i8> %a to <16 x i16>
+  %2 = sext <16 x i8> %b to <16 x i16>
+  %mul = mul <16 x i16> %1, %2
+  %shr = lshr <16 x i16> %mul, splat(i16 8)
+  %tr = trunc <16 x i16> %shr to <16 x i8>
+  ret <16 x i8> %tr
+}
+
+define <8 x i16> @smulh_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: smulh_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull2 v2.4s, v0.8h, v1.8h
+; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    ret
+  %1 = sext <8 x i16> %a to <8 x i32>
+  %2 = sext <8 x i16> %b to <8 x i32>
+  %mul = mul <8 x i32> %1, %2
+  %shr = lshr <8 x i32> %mul, splat(i32 16)
+  %tr = trunc <8 x i32> %shr to <8 x i16>
+  ret <8 x i16> %tr
+}
+
+define <4 x i32> @smulh_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: smulh_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull2 v2.2d, v0.4s, v1.4s
+; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    uzp2 v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    ret
+  %1 = sext <4 x i32> %a to <4 x i64>
+  %2 = sext <4 x i32> %b to <4 x i64>
+  %mul = mul <4 x i64> %1, %2
+  %shr = lshr <4 x i64> %mul, splat(i64 32)
+  %tr = trunc <4 x i64> %shr to <4 x i32>
+  ret <4 x i32> %tr
+}
+
+define <2 x i64> @smulh_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: smulh_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, v0.d[1]
+; CHECK-NEXT:    mov x9, v1.d[1]
+; CHECK-NEXT:    fmov x10, d0
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    smulh x10, x10, x11
+; CHECK-NEXT:    smulh x8, x8, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
+  %1 = sext <2 x i64> %a to <2 x i128>
+  %2 = sext <2 x i64> %b to <2 x i128>
+  %mul = mul <2 x i128> %1, %2
+  %shr = lshr <2 x i128> %mul, splat(i128 64)
+  %tr = trunc <2 x i128> %shr to <2 x i64>
+  ret <2 x i64> %tr
+}
+
+define <16 x i8> @umulh_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: umulh_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull2 v2.8h, v0.16b, v1.16b
+; CHECK-NEXT:    umull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    uzp2 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
+  %1 = zext <16 x i8> %a to <16 x i16>
+  %2 = zext <16 x i8> %b to <16 x i16>
+  %mul = mul <16 x i16> %1, %2
+  %shr = lshr <16 x i16> %mul, splat(i16 8)
+  %tr = trunc <16 x i16> %shr to <16 x i8>
+  ret <16 x i8> %tr
+}
+
+define <8 x i16> @umulh_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: umulh_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull2 v2.4s, v0.8h, v1.8h
+; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    ret
+  %1 = zext <8 x i16> %a to <8 x i32>
+  %2 = zext <8 x i16> %b to <8 x i32>
+  %mul = mul <8 x i32> %1, %2
+  %shr = lshr <8 x i32> %mul, splat(i32 16)
+  %tr = trunc <8 x i32> %shr to <8 x i16>
+  ret <8 x i16> %tr
+}
+
+define <4 x i32> @umulh_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: umulh_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull2 v2.2d, v0.4s, v1.4s
+; CHECK-NEXT:    umull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    uzp2 v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    ret
+  %1 = zext <4 x i32> %a to <4 x i64>
+  %2 = zext <4 x i32> %b to <4 x i64>
+  %mul = mul <4 x i64> %1, %2
+  %shr = lshr <4 x i64> %mul, splat(i64 32)
+  %tr = trunc <4 x i64> %shr to <4 x i32>
+  ret <4 x i32> %tr
+}
+
+define <2 x i64> @umulh_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: umulh_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, v0.d[1]
+; CHECK-NEXT:    mov x9, v1.d[1]
+; CHECK-NEXT:    fmov x10, d0
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    umulh x10, x10, x11
+; CHECK-NEXT:    umulh x8, x8, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
+  %1 = zext <2 x i64> %a to <2 x i128>
+  %2 = zext <2 x i64> %b to <2 x i128>
+  %mul = mul <2 x i128> %1, %2
+  %shr = lshr <2 x i128> %mul, splat(i128 64)
+  %tr = trunc <2 x i128> %shr to <2 x i64>
+  ret <2 x i64> %tr
+}
+
+
+
+; Fixed-length 64 bits
+
+define <8 x i8> @smulh_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: smulh_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    shrn v0.8b, v0.8h, #8
+; CHECK-NEXT:    ret
+  %1 = sext <8 x i8> %a to <8 x i16>
+  %2 = sext <8 x i8> %b to <8 x i16>
+  %mul = mul <8 x i16> %1, %2
+  %shr = lshr <8 x i16> %mul, splat(i16 8)
+  %tr = trunc <8 x i16> %shr to <8 x i8>
+  ret <8 x i8> %tr
+}
+
+define <4 x i16> @smulh_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: smulh_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    ret
+  %1 = sext <4 x i16> %a to <4 x i32>
+  %2 = sext <4 x i16> %b to <4 x i32>
+  %mul = mul <4 x i32> %1, %2
+  %shr = lshr <4 x i32> %mul, splat(i32 16)
+  %tr = trunc <4 x i32> %shr to <4 x i16>
+  ret <4 x i16> %tr
+}
+
+define <2 x i32> @smulh_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: smulh_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
+; CHECK-NEXT:    ret
+  %1 = sext <2 x i32> %a to <2 x i64>
+  %2 = sext <2 x i32> %b to <2 x i64>
+  %mul = mul <2 x i64> %1, %2
+  %shr = lshr <2 x i64> %mul, splat(i64 32)
+  %tr = trunc <2 x i64> %shr to <2 x i32>
+  ret <2 x i32> %tr
+}
+
+define <1 x i64> @smulh_v1i64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-LABEL: smulh_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    smulh x8, x8, x9
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %1 = sext <1 x i64> %a to <1 x i128>
+  %2 = sext <1 x i64> %b to <1 x i128>
+  %mul = mul <1 x i128> %1, %2
+  %shr = lshr <1 x i128> %mul, splat(i128 64)
+  %tr = trunc <1 x i128> %shr to <1 x i64>
+  ret <1 x i64> %tr
+}
+
+define <8 x i8> @umulh_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: umulh_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    shrn v0.8b, v0.8h, #8
+; CHECK-NEXT:    ret
+  %1 = zext <8 x i8> %a to <8 x i16>
+  %2 = zext <8 x i8> %b to <8 x i16>
+  %mul = mul <8 x i16> %1, %2
+  %shr = lshr <8 x i16> %mul, splat(i16 8)
+  %tr = trunc <8 x i16> %shr to <8 x i8>
+  ret <8 x i8> %tr
+}
+
+define <4 x i16> @umulh_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: umulh_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    ret
+  %1 = zext <4 x i16> %a to <4 x i32>
+  %2 = zext <4 x i16> %b to <4 x i32>
+  %mul = mul <4 x i32> %1, %2
+  %shr = lshr <4 x i32> %mul, splat(i32 16)
+  %tr = trunc <4 x i32> %shr to <4 x i16>
+  ret <4 x i16> %tr
+}
+
+define <2 x i32> @umulh_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: umulh_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
+; CHECK-NEXT:    ret
+  %1 = zext <2 x i32> %a to <2 x i64>
+  %2 = zext <2 x i32> %b to <2 x i64>
+  %mul = mul <2 x i64> %1, %2
+  %shr = lshr <2 x i64> %mul, splat(i64 32)
+  %tr = trunc <2 x i64> %shr to <2 x i32>
+  ret <2 x i32> %tr
+}
+
+define <1 x i64> @umulh_v1i64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-LABEL: umulh_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    umulh x8, x8, x9
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %1 = zext <1 x i64> %a to <1 x i128>
+  %2 = zext <1 x i64> %b to <1 x i128>
+  %mul = mul <1 x i128> %1, %2
+  %shr = lshr <1 x i128> %mul, splat(i128 64)
+  %tr = trunc <1 x i128> %shr to <1 x i64>
+  ret <1 x i64> %tr
+}
+

diff --git a/llvm/test/CodeGen/AArch64/sve2-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve2-int-mulh.ll
index bcf76d5b13d62..d7534712b53a0 100644
--- a/llvm/test/CodeGen/AArch64/sve2-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-int-mulh.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
 
 ;
 ; SMULH
 ;
 
-define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
+define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: smulh_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.b, z0.b, z1.b
@@ -18,7 +18,7 @@ define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
   ret <vscale x 16 x i8> %tr
 }
 
-define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
+define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: smulh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.h, z0.h, z1.h
@@ -31,7 +31,7 @@ define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %
   ret <vscale x 8 x i16> %tr
 }
 
-define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
+define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: smulh_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.s, z0.s, z1.s
@@ -44,7 +44,7 @@ define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
   ret <vscale x 4 x i32> %tr
 }
 
-define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
+define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: smulh_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smulh z0.d, z0.d, z1.d
@@ -61,7 +61,7 @@ define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
 ; UMULH
 ;
 
-define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
+define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: umulh_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.b, z0.b, z1.b
@@ -74,7 +74,7 @@ define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
   ret <vscale x 16 x i8> %tr
 }
 
-define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
+define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: umulh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.h, z0.h, z1.h
@@ -87,7 +87,7 @@ define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %
   ret <vscale x 8 x i16> %tr
 }
 
-define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
+define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: umulh_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.s, z0.s, z1.s
@@ -100,7 +100,7 @@ define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
   ret <vscale x 4 x i32> %tr
 }
 
-define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
+define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: umulh_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umulh z0.d, z0.d, z1.d
@@ -113,4 +113,261 @@ define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
   ret <vscale x 2 x i64> %tr
 }
 
-attributes #0 = { "target-features"="+sve2" }
+
+; Fixed-length 128 bits
+
+define <16 x i8> @smulh_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: smulh_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull2 v2.8h, v0.16b, v1.16b
+; CHECK-NEXT:    smull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    uzp2 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
+  %1 = sext <16 x i8> %a to <16 x i16>
+  %2 = sext <16 x i8> %b to <16 x i16>
+  %mul = mul <16 x i16> %1, %2
+  %shr = lshr <16 x i16> %mul, splat(i16 8)
+  %tr = trunc <16 x i16> %shr to <16 x i8>
+  ret <16 x i8> %tr
+}
+
+define <8 x i16> @smulh_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: smulh_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull2 v2.4s, v0.8h, v1.8h
+; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    ret
+  %1 = sext <8 x i16> %a to <8 x i32>
+  %2 = sext <8 x i16> %b to <8 x i32>
+  %mul = mul <8 x i32> %1, %2
+  %shr = lshr <8 x i32> %mul, splat(i32 16)
+  %tr = trunc <8 x i32> %shr to <8 x i16>
+  ret <8 x i16> %tr
+}
+
+define <4 x i32> @smulh_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: smulh_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull2 v2.2d, v0.4s, v1.4s
+; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    uzp2 v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    ret
+  %1 = sext <4 x i32> %a to <4 x i64>
+  %2 = sext <4 x i32> %b to <4 x i64>
+  %mul = mul <4 x i64> %1, %2
+  %shr = lshr <4 x i64> %mul, splat(i64 32)
+  %tr = trunc <4 x i64> %shr to <4 x i32>
+  ret <4 x i32> %tr
+}
+
+define <2 x i64> @smulh_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: smulh_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, v0.d[1]
+; CHECK-NEXT:    mov x9, v1.d[1]
+; CHECK-NEXT:    fmov x10, d0
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    smulh x10, x10, x11
+; CHECK-NEXT:    smulh x8, x8, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
+  %1 = sext <2 x i64> %a to <2 x i128>
+  %2 = sext <2 x i64> %b to <2 x i128>
+  %mul = mul <2 x i128> %1, %2
+  %shr = lshr <2 x i128> %mul, splat(i128 64)
+  %tr = trunc <2 x i128> %shr to <2 x i64>
+  ret <2 x i64> %tr
+}
+
+define <16 x i8> @umulh_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: umulh_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull2 v2.8h, v0.16b, v1.16b
+; CHECK-NEXT:    umull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    uzp2 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
+  %1 = zext <16 x i8> %a to <16 x i16>
+  %2 = zext <16 x i8> %b to <16 x i16>
+  %mul = mul <16 x i16> %1, %2
+  %shr = lshr <16 x i16> %mul, splat(i16 8)
+  %tr = trunc <16 x i16> %shr to <16 x i8>
+  ret <16 x i8> %tr
+}
+
+define <8 x i16> @umulh_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: umulh_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull2 v2.4s, v0.8h, v1.8h
+; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    ret
+  %1 = zext <8 x i16> %a to <8 x i32>
+  %2 = zext <8 x i16> %b to <8 x i32>
+  %mul = mul <8 x i32> %1, %2
+  %shr = lshr <8 x i32> %mul, splat(i32 16)
+  %tr = trunc <8 x i32> %shr to <8 x i16>
+  ret <8 x i16> %tr
+}
+
+define <4 x i32> @umulh_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: umulh_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull2 v2.2d, v0.4s, v1.4s
+; CHECK-NEXT:    umull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    uzp2 v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    ret
+  %1 = zext <4 x i32> %a to <4 x i64>
+  %2 = zext <4 x i32> %b to <4 x i64>
+  %mul = mul <4 x i64> %1, %2
+  %shr = lshr <4 x i64> %mul, splat(i64 32)
+  %tr = trunc <4 x i64> %shr to <4 x i32>
+  ret <4 x i32> %tr
+}
+
+define <2 x i64> @umulh_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: umulh_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, v0.d[1]
+; CHECK-NEXT:    mov x9, v1.d[1]
+; CHECK-NEXT:    fmov x10, d0
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    umulh x10, x10, x11
+; CHECK-NEXT:    umulh x8, x8, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ret
+  %1 = zext <2 x i64> %a to <2 x i128>
+  %2 = zext <2 x i64> %b to <2 x i128>
+  %mul = mul <2 x i128> %1, %2
+  %shr = lshr <2 x i128> %mul, splat(i128 64)
+  %tr = trunc <2 x i128> %shr to <2 x i64>
+  ret <2 x i64> %tr
+}
+
+
+
+; Fixed-length 64 bits
+
+define <8 x i8> @smulh_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: smulh_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    shrn v0.8b, v0.8h, #8
+; CHECK-NEXT:    ret
+  %1 = sext <8 x i8> %a to <8 x i16>
+  %2 = sext <8 x i8> %b to <8 x i16>
+  %mul = mul <8 x i16> %1, %2
+  %shr = lshr <8 x i16> %mul, splat(i16 8)
+  %tr = trunc <8 x i16> %shr to <8 x i8>
+  ret <8 x i8> %tr
+}
+
+define <4 x i16> @smulh_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: smulh_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    ret
+  %1 = sext <4 x i16> %a to <4 x i32>
+  %2 = sext <4 x i16> %b to <4 x i32>
+  %mul = mul <4 x i32> %1, %2
+  %shr = lshr <4 x i32> %mul, splat(i32 16)
+  %tr = trunc <4 x i32> %shr to <4 x i16>
+  ret <4 x i16> %tr
+}
+
+define <2 x i32> @smulh_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: smulh_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
+; CHECK-NEXT:    ret
+  %1 = sext <2 x i32> %a to <2 x i64>
+  %2 = sext <2 x i32> %b to <2 x i64>
+  %mul = mul <2 x i64> %1, %2
+  %shr = lshr <2 x i64> %mul, splat(i64 32)
+  %tr = trunc <2 x i64> %shr to <2 x i32>
+  ret <2 x i32> %tr
+}
+
+define <1 x i64> @smulh_v1i64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-LABEL: smulh_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    smulh x8, x8, x9
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %1 = sext <1 x i64> %a to <1 x i128>
+  %2 = sext <1 x i64> %b to <1 x i128>
+  %mul = mul <1 x i128> %1, %2
+  %shr = lshr <1 x i128> %mul, splat(i128 64)
+  %tr = trunc <1 x i128> %shr to <1 x i64>
+  ret <1 x i64> %tr
+}
+
+define <8 x i8> @umulh_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: umulh_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    shrn v0.8b, v0.8h, #8
+; CHECK-NEXT:    ret
+  %1 = zext <8 x i8> %a to <8 x i16>
+  %2 = zext <8 x i8> %b to <8 x i16>
+  %mul = mul <8 x i16> %1, %2
+  %shr = lshr <8 x i16> %mul, splat(i16 8)
+  %tr = trunc <8 x i16> %shr to <8 x i8>
+  ret <8 x i8> %tr
+}
+
+define <4 x i16> @umulh_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: umulh_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    ret
+  %1 = zext <4 x i16> %a to <4 x i32>
+  %2 = zext <4 x i16> %b to <4 x i32>
+  %mul = mul <4 x i32> %1, %2
+  %shr = lshr <4 x i32> %mul, splat(i32 16)
+  %tr = trunc <4 x i32> %shr to <4 x i16>
+  ret <4 x i16> %tr
+}
+
+define <2 x i32> @umulh_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: umulh_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
+; CHECK-NEXT:    ret
+  %1 = zext <2 x i32> %a to <2 x i64>
+  %2 = zext <2 x i32> %b to <2 x i64>
+  %mul = mul <2 x i64> %1, %2
+  %shr = lshr <2 x i64> %mul, splat(i64 32)
+  %tr = trunc <2 x i64> %shr to <2 x i32>
+  ret <2 x i32> %tr
+}
+
+define <1 x i64> @umulh_v1i64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-LABEL: umulh_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    umulh x8, x8, x9
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+  %1 = zext <1 x i64> %a to <1 x i128>
+  %2 = zext <1 x i64> %b to <1 x i128>
+  %mul = mul <1 x i128> %1, %2
+  %shr = lshr <1 x i128> %mul, splat(i128 64)
+  %tr = trunc <1 x i128> %shr to <1 x i64>
+  ret <1 x i64> %tr
+}


More information about the llvm-commits mailing list