[llvm] [AArch64][SVE2] Generate urshr rounding shift rights (PR #78374)
David Green via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 30 11:19:26 PST 2024
================
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64 -mattr=+sve < %s -o - | FileCheck --check-prefixes=CHECK,SVE %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s -o - | FileCheck --check-prefixes=CHECK,SVE2 %s
+
+; Wrong add/shift amount. Should be 32 for shift of 6.
+define <vscale x 2 x i64> @neg_urshr_1(<vscale x 2 x i64> %x) {
+; CHECK-LABEL: neg_urshr_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add z0.d, z0.d, #16 // =0x10
+; CHECK-NEXT: lsr z0.d, z0.d, #6
+; CHECK-NEXT: ret
+ %add = add nuw nsw <vscale x 2 x i64> %x, splat (i64 16)
+ %sh = lshr <vscale x 2 x i64> %add, splat (i64 6)
+ ret <vscale x 2 x i64> %sh
+}
+
+; Vector Shift.
+define <vscale x 2 x i64> @neg_urshr_2(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
+; CHECK-LABEL: neg_urshr_2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: add z0.d, z0.d, #32 // =0x20
+; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+ %add = add nuw nsw <vscale x 2 x i64> %x, splat (i64 32)
+ %sh = lshr <vscale x 2 x i64> %add, %y
+ ret <vscale x 2 x i64> %sh
+}
+
+; Vector Add.
+define <vscale x 2 x i64> @neg_urshr_3(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
+; CHECK-LABEL: neg_urshr_3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add z0.d, z0.d, z1.d
+; CHECK-NEXT: lsr z0.d, z0.d, #6
+; CHECK-NEXT: ret
+ %add = add nuw nsw <vscale x 2 x i64> %x, %y
+ %sh = lshr <vscale x 2 x i64> %add, splat (i64 6)
+ ret <vscale x 2 x i64> %sh
+}
+
+; Add has two uses.
+define <vscale x 2 x i64> @neg_urshr_4(<vscale x 2 x i64> %x) {
+; CHECK-LABEL: neg_urshr_4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEXT: add z0.d, z0.d, #32 // =0x20
+; CHECK-NEXT: lsr z8.d, z0.d, #6
+; CHECK-NEXT: bl use
+; CHECK-NEXT: mov z0.d, z8.d
+; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %add = add nuw nsw <vscale x 2 x i64> %x, splat (i64 32)
+ %sh = lshr <vscale x 2 x i64> %add, splat (i64 6)
+ call void @use(<vscale x 2 x i64> %add)
----------------
davemgreen wrote:
Can you store the value instead, to introduce an extra use?
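
A minimal sketch of what that could look like, assuming the test gains a pointer argument (the name %p is illustrative, not taken from the patch) so the second use of the add is a store rather than a call:

; Hypothetical reshaping of neg_urshr_4: the extra use of %add is a store
; through a new pointer argument %p instead of the call to @use, so the
; call frame setup and the z8 spill/reload disappear from the checks.
define <vscale x 2 x i64> @neg_urshr_4(<vscale x 2 x i64> %x, ptr %p) {
  %add = add nuw nsw <vscale x 2 x i64> %x, splat (i64 32)
  %sh = lshr <vscale x 2 x i64> %add, splat (i64 6)
  store <vscale x 2 x i64> %add, ptr %p
  ret <vscale x 2 x i64> %sh
}

The add still has two uses, so the case being tested is unchanged: the combine should remain blocked and the output should stay an add followed by an lsr.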
https://github.com/llvm/llvm-project/pull/78374
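
For reference, a sketch of the kind of pattern the negative tests above are guarding (function name illustrative, not from the patch): an add of 2^(n-1) that cannot overflow, followed by a logical shift right by n, which is what the PR intends to turn into a rounding shift right.

; Illustrative positive case: add 32 (= 2^(6-1)) then lshr by 6.
; With +sve2 the expectation is a single rounding shift right,
; something like urshr z0.d, p0/m, z0.d, #6 (plus a ptrue);
; with plain +sve it should stay as add + lsr.
define <vscale x 2 x i64> @urshr_example(<vscale x 2 x i64> %x) {
  %add = add nuw nsw <vscale x 2 x i64> %x, splat (i64 32)
  %sh = lshr <vscale x 2 x i64> %add, splat (i64 6)
  ret <vscale x 2 x i64> %sh
}

Each neg_urshr_* function above breaks one precondition of that pattern (wrong add constant for the shift amount, non-constant shift, non-constant add, or an extra use of the add), so each must keep the plain add + lsr sequence.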