[compiler-rt] [Compiler-rt] Add AArch64 routines for __arm_agnostic("sme_za_state") (PR #120059)

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 19 05:23:53 PST 2024


================
@@ -204,6 +206,170 @@ DEFINE_COMPILERRT_FUNCTION(__arm_get_current_vg)
   ret
 END_COMPILERRT_FUNCTION(__arm_get_current_vg)
 
+// The diagram below describes the layout used in the following routines:
+// * __arm_sme_state_size
+// * __arm_sme_save
+// * __arm_sme_restore
+//
+// +---------------------------------+
+// |             ...                 |
+// |           ZA buffer             |
+// |             ...                 |
+// +---------------------------------+ <- @96
+// |         ZT0 contents            |
+// +---------------------------------+ <- @32
+// | byte 15-10: zero (reserved)     |
+// | byte   9-8: num_za_save_slices  |           TPIDR2 block
+// | byte   7-0: za_save_buffer      |
+// +---------------------------------+ <- @16
+// | bit  127-1: zero (reserved)     |           Internal state for __arm_sme_save/restore
+// | bit      0: VALID               |
+// +---------------------------------+ <- @0
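+//
+// Worked size example (hypothetical numbers, for illustration only): with
+// SME2 available and a streaming vector length of 512 bits (so SVL.B = 64),
+// __arm_sme_state_size returns 96 + 64 * 64 = 4192 bytes. Without SME2 the
+// fixed part shrinks to 32 bytes, and when ZA does not need to be preserved
+// only the 16-byte internal state block is required.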
+
+DEFINE_COMPILERRT_FUNCTION(__arm_sme_state_size)
+  .variant_pcs __arm_sme_state_size
+  BTI_C
+
+  // Test if SME is available and ZA state is 'active'.
+  adrp    x17, CPU_FEATS_SYMBOL
+  ldr     x17, [x17, CPU_FEATS_SYMBOL_OFFSET]
+  tbz     x17, #FEAT_SME_BIT, 0f
+  mrs     x16, SVCR
+  tbz     x16, #1, 0f
+  mrs     x16, TPIDR2_EL0
+  cbnz    x16, 0f
+
+  // Size = HAS_FEAT_SME2 ? 96 : 32
+  tst     x17, #FEAT_SME2_MASK
+  mov     w17, #32
+  mov     w16, #96
+  csel    x16, x17, x16, eq
+
+  // Size = Size + (SVLB * SVLB)
+  rdsvl   x17, #1
+  madd    x0, x17, x17, x16
+  ret
+
+0:
+  // Default case: return the 16-byte minimum (enough to encode the VALID
+  // bit while keeping the size a multiple of 16 bytes).
+  mov w0, #16
+  ret
+END_COMPILERRT_FUNCTION(__arm_sme_state_size)
+
+DEFINE_COMPILERRT_FUNCTION(__arm_sme_save)
+  .variant_pcs __arm_sme_save
+  BTI_C
+
+  // If PTR is not 16-byte aligned, abort.
+  tst     x0, #0xF
+  b.ne    3f
+
+  // Clear internal state bits
+  stp     xzr, xzr, [x0]
+
+  // Return early if SME is not available, PSTATE.ZA == 0, or TPIDR2_EL0 != 0.
+  adrp    x17, CPU_FEATS_SYMBOL
+  ldr     x17, [x17, CPU_FEATS_SYMBOL_OFFSET]
+  tbz     x17, #FEAT_SME_BIT, 2f
+  mrs     x16, SVCR
+  tbz     x16, #1, 2f
+  mrs     x16, TPIDR2_EL0
+  cbnz    x16, 2f
+
+  // ZA or ZT0 needs saving, so we can now set the internal VALID bit to 1.
+  mov     w16, #1
+  str     x16, [x0]
+
+  add     x18, x0, #32
+  tbz     x17, #FEAT_SME2_BIT, 1f
+
+  // Store ZT0
+  str     zt0, [x18]
+  add     x18, x18, #64
+  b       1f
----------------
paulwalker-arm wrote:

This branch is not necessary because we should fall through.
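
Something along these lines (just a sketch; it assumes the `1:` label for the
ZA save path sits immediately after this block, which isn't visible in this
hunk):

```
  // Store ZT0
  str     zt0, [x18]
  add     x18, x18, #64
  // Fall through to the ZA save at label 1.
1:
```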

https://github.com/llvm/llvm-project/pull/120059

