[llvm] [AArch64] Add SVE lowering of fixed-length UABD/SABD (PR #104991)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 20 08:07:48 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-aarch64
Author: Sander de Smalen (sdesmalen-arm)
Changes
---
Full diff: https://github.com/llvm/llvm-project/pull/104991.diff
3 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+2)
- (added) llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll (+183)
- (added) llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-abd.ll (+292)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 97fb2c5f552731..6eb91ee1b26b7d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2055,6 +2055,8 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
bool PreferSVE = !PreferNEON && Subtarget->isSVEAvailable();
// Lower fixed length vector operations to scalable equivalents.
+ setOperationAction(ISD::ABDS, VT, Default);
+ setOperationAction(ISD::ABDU, VT, Default);
setOperationAction(ISD::ABS, VT, Default);
setOperationAction(ISD::ADD, VT, Default);
setOperationAction(ISD::AND, VT, Default);
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll
new file mode 100644
index 00000000000000..08a974fa2d9f40
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll
@@ -0,0 +1,183 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Don't use SVE for 128-bit vectors.
+define void @sabd_v16i8_v16i16(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: sabd_v16i8_v16i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: sabd v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+ %a.ld = load <16 x i8>, ptr %a
+ %b.ld = load <16 x i8>, ptr %b
+ %a.sext = sext <16 x i8> %a.ld to <16 x i16>
+ %b.sext = sext <16 x i8> %b.ld to <16 x i16>
+ %sub = sub <16 x i16> %a.sext, %b.sext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ store <16 x i8> %trunc, ptr %a
+ ret void
+}
+
+; Don't use SVE for 128-bit vectors.
+define void @sabd_v16i8_v16i32(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: sabd_v16i8_v16i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: sabd v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+ %a.ld = load <16 x i8>, ptr %a
+ %b.ld = load <16 x i8>, ptr %b
+ %a.sext = sext <16 x i8> %a.ld to <16 x i32>
+ %b.sext = sext <16 x i8> %b.ld to <16 x i32>
+ %sub = sub <16 x i32> %a.sext, %b.sext
+ %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
+ %trunc = trunc <16 x i32> %abs to <16 x i8>
+ store <16 x i8> %trunc, ptr %a
+ ret void
+}
+
+; Don't use SVE for 128-bit vectors.
+define void @sabd_v16i8_v16i64(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: sabd_v16i8_v16i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: sabd v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+ %a.ld = load <16 x i8>, ptr %a
+ %b.ld = load <16 x i8>, ptr %b
+ %a.sext = sext <16 x i8> %a.ld to <16 x i64>
+ %b.sext = sext <16 x i8> %b.ld to <16 x i64>
+ %sub = sub <16 x i64> %a.sext, %b.sext
+ %abs = call <16 x i64> @llvm.abs.v16i64(<16 x i64> %sub, i1 true)
+ %trunc = trunc <16 x i64> %abs to <16 x i8>
+ store <16 x i8> %trunc, ptr %a
+ ret void
+}
+
+define void @sabd_v32i8_v32i16(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: sabd_v32i8_v32i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b, vl32
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
+; CHECK-NEXT: sabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+; CHECK-NEXT: ret
+ %a.ld = load <32 x i8>, ptr %a
+ %b.ld = load <32 x i8>, ptr %b
+ %a.sext = sext <32 x i8> %a.ld to <32 x i16>
+ %b.sext = sext <32 x i8> %b.ld to <32 x i16>
+ %sub = sub <32 x i16> %a.sext, %b.sext
+ %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
+ %trunc = trunc <32 x i16> %abs to <32 x i8>
+ store <32 x i8> %trunc, ptr %a
+ ret void
+}
+
+define void @uabd_v32i8_v32i16(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: uabd_v32i8_v32i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b, vl32
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
+; CHECK-NEXT: uabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+; CHECK-NEXT: ret
+ %a.ld = load <32 x i8>, ptr %a
+ %b.ld = load <32 x i8>, ptr %b
+ %a.zext = zext <32 x i8> %a.ld to <32 x i16>
+ %b.zext = zext <32 x i8> %b.ld to <32 x i16>
+ %sub = sub <32 x i16> %a.zext, %b.zext
+ %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
+ %trunc = trunc <32 x i16> %abs to <32 x i8>
+ store <32 x i8> %trunc, ptr %a
+ ret void
+}
+
+define void @sabd_v32i8_v32i32(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: sabd_v32i8_v32i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b, vl32
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
+; CHECK-NEXT: sabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+; CHECK-NEXT: ret
+ %a.ld = load <32 x i8>, ptr %a
+ %b.ld = load <32 x i8>, ptr %b
+ %a.sext = sext <32 x i8> %a.ld to <32 x i32>
+ %b.sext = sext <32 x i8> %b.ld to <32 x i32>
+ %sub = sub <32 x i32> %a.sext, %b.sext
+ %abs = call <32 x i32> @llvm.abs.v32i32(<32 x i32> %sub, i1 true)
+ %trunc = trunc <32 x i32> %abs to <32 x i8>
+ store <32 x i8> %trunc, ptr %a
+ ret void
+}
+
+define void @sabd_v32i8_v32i64(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: sabd_v32i8_v32i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b, vl32
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
+; CHECK-NEXT: sabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: st1b { z0.b }, p0, [x0]
+; CHECK-NEXT: ret
+ %a.ld = load <32 x i8>, ptr %a
+ %b.ld = load <32 x i8>, ptr %b
+ %a.sext = sext <32 x i8> %a.ld to <32 x i64>
+ %b.sext = sext <32 x i8> %b.ld to <32 x i64>
+ %sub = sub <32 x i64> %a.sext, %b.sext
+ %abs = call <32 x i64> @llvm.abs.v32i64(<32 x i64> %sub, i1 true)
+ %trunc = trunc <32 x i64> %abs to <32 x i8>
+ store <32 x i8> %trunc, ptr %a
+ ret void
+}
+
+define void @sabd_v64i8_v64i64(ptr %a, ptr %b) #0 {
+; VBITS_GE_256-LABEL: sabd_v64i8_v64i64:
+; VBITS_GE_256: // %bb.0:
+; VBITS_GE_256-NEXT: ptrue p0.b, vl32
+; VBITS_GE_256-NEXT: mov w8, #32 // =0x20
+; VBITS_GE_256-NEXT: ld1b { z0.b }, p0/z, [x0, x8]
+; VBITS_GE_256-NEXT: ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
+; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
+; VBITS_GE_256-NEXT: sabd z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_256-NEXT: movprfx z1, z2
+; VBITS_GE_256-NEXT: sabd z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
+; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: ret
+;
+; VBITS_GE_512-LABEL: sabd_v64i8_v64i64:
+; VBITS_GE_512: // %bb.0:
+; VBITS_GE_512-NEXT: ptrue p0.b, vl64
+; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1b { z1.b }, p0/z, [x1]
+; VBITS_GE_512-NEXT: sabd z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_512-NEXT: st1b { z0.b }, p0, [x0]
+; VBITS_GE_512-NEXT: ret
+ %a.ld = load <64 x i8>, ptr %a
+ %b.ld = load <64 x i8>, ptr %b
+ %a.sext = sext <64 x i8> %a.ld to <64 x i64>
+ %b.sext = sext <64 x i8> %b.ld to <64 x i64>
+ %sub = sub <64 x i64> %a.sext, %b.sext
+ %abs = call <64 x i64> @llvm.abs.v64i64(<64 x i64> %sub, i1 true)
+ %trunc = trunc <64 x i64> %abs to <64 x i8>
+ store <64 x i8> %trunc, ptr %a
+ ret void
+}
+
+attributes #0 = { "target-features"="+neon,+sve" }
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-abd.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-abd.ll
new file mode 100644
index 00000000000000..2dd64bc7df189a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-abd.ll
@@ -0,0 +1,292 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming < %s | FileCheck %s
+; RUN: llc -force-streaming-compatible < %s | FileCheck %s --check-prefix=NONEON-NOSVE
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @uabd_v16i8_v16i16(ptr %a, ptr %b) {
+; CHECK-LABEL: uabd_v16i8_v16i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b, vl16
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: uabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: uabd_v16i8_v16i16:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: ldr q0, [x1]
+; NONEON-NOSVE-NEXT: ldr q1, [x0]
+; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #-48]!
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #31]
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #15]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #14]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #47]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #30]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #13]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #46]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #29]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #12]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #45]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #28]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #11]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #44]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #27]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #10]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #43]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #26]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #9]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #42]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #25]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #8]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #41]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #24]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #7]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #40]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #23]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #6]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #39]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #22]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #5]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #38]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #21]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #4]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #37]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #20]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #3]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #36]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #19]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #2]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #35]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #18]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp, #1]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #34]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #17]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrb w9, [sp]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #33]
+; NONEON-NOSVE-NEXT: ldrb w8, [sp, #16]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, hi
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: strb w8, [sp, #32]
+; NONEON-NOSVE-NEXT: ldr q0, [sp, #32]
+; NONEON-NOSVE-NEXT: str q0, [x0]
+; NONEON-NOSVE-NEXT: add sp, sp, #48
+; NONEON-NOSVE-NEXT: ret
+ %a.ld = load <16 x i8>, ptr %a
+ %b.ld = load <16 x i8>, ptr %b
+ %a.zext = zext <16 x i8> %a.ld to <16 x i16>
+ %b.zext = zext <16 x i8> %b.ld to <16 x i16>
+ %sub = sub <16 x i16> %a.zext, %b.zext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ store <16 x i8> %trunc, ptr %a
+ ret void
+}
+
+define void @sabd_v16i8_v16i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sabd_v16i8_v16i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b, vl16
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: sabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: str q0, [x0]
+; CHECK-NEXT: ret
+;
+; NONEON-NOSVE-LABEL: sabd_v16i8_v16i16:
+; NONEON-NOSVE: // %bb.0:
+; NONEON-NOSVE-NEXT: ldr q0, [x1]
+; NONEON-NOSVE-NEXT: ldr q1, [x0]
+; NONEON-NOSVE-NEXT: stp q1, q0, [sp, #-48]!
+; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 48
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #31]
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #15]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #14]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #47]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #30]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #13]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #46]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #29]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #12]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #45]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #28]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #11]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #44]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #27]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #10]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #43]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #26]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #9]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #42]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #25]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #8]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #41]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #24]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #7]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #40]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #23]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #6]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #39]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #22]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #5]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #38]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #21]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #4]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #37]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #20]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #3]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #36]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #19]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #2]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #35]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #18]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp, #1]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #34]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #17]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: ldrsb w9, [sp]
+; NONEON-NOSVE-NEXT: strb w8, [sp, #33]
+; NONEON-NOSVE-NEXT: ldrsb w8, [sp, #16]
+; NONEON-NOSVE-NEXT: subs w8, w9, w8
+; NONEON-NOSVE-NEXT: csetm w9, gt
+; NONEON-NOSVE-NEXT: eor w8, w8, w9
+; NONEON-NOSVE-NEXT: sub w8, w9, w8
+; NONEON-NOSVE-NEXT: strb w8, [sp, #32]
+; NONEON-NOSVE-NEXT: ldr q0, [sp, #32]
+; NONEON-NOSVE-NEXT: str q0, [x0]
+; NONEON-NOSVE-NEXT: add sp, sp, #48
+; NONEON-NOSVE-NEXT: ret
+ %a.ld = load <16 x i8>, ptr %a
+ %b.ld = load <16 x i8>, ptr %b
+ %a.sext = sext <16 x i8> %a.ld to <16 x i16>
+ %b.sext = sext <16 x i8> %b.ld to <16 x i16>
+ %sub = sub <16 x i16> %a.sext, %b.sext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ store <16 x i8> %trunc, ptr %a
+ ret void
+}
``````````
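For readers puzzling over the long NONEON-NOSVE sequences in the streaming-mode test: without NEON or SVE the absolute difference is expanded per element with a conditional-negate trick (subs/csetm/eor/sub); the signed variant is identical apart from sign-extended loads and a `gt` condition. The following is a minimal standalone C++ sketch of that identity, purely illustrative and not part of the patch:

```cpp
// Illustrative sketch (not from the patch) of what the per-element
// NONEON-NOSVE expansion computes for the unsigned case:
//   diff = a - b                // subs
//   mask = (a > b) ? ~0u : 0u   // csetm hi
//   |a - b| = mask - (diff ^ mask)  // eor; sub  (conditional negate)
#include <cassert>
#include <cstdint>

static uint8_t uabd_scalar(uint8_t a, uint8_t b) {
  uint32_t Diff = static_cast<uint32_t>(a) - static_cast<uint32_t>(b); // subs
  uint32_t Mask = a > b ? 0xFFFFFFFFu : 0u;                            // csetm hi
  return static_cast<uint8_t>(Mask - (Diff ^ Mask));                   // eor; sub
}

int main() {
  // If a > b, the mask is all-ones and the xor/sub pair returns diff unchanged;
  // otherwise the mask is zero and the pair negates diff, giving |a - b|.
  assert(uabd_scalar(200, 10) == 190);
  assert(uabd_scalar(10, 200) == 190);
  assert(uabd_scalar(42, 42) == 0);
  return 0;
}
```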
https://github.com/llvm/llvm-project/pull/104991