[llvm] [AArch64] Mark AESD and AESE instructions as commutative. (PR #83390)

David Green via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 29 01:39:08 PST 2024


https://github.com/davemgreen updated https://github.com/llvm/llvm-project/pull/83390

>From 3c2905d18338d910bafe9a6266b00b50b9078cd1 Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Thu, 29 Feb 2024 09:38:10 +0000
Subject: [PATCH] [AArch64] Mark AESD and AESE as commutative.

This comes from https://discourse.llvm.org/t/combining-aes-and-xor-can-be-improved-further/77248.

These instructions start out with:
  XOR Vd, Vn
  <some complicated math>
The initial XOR means that they can be treated as commutative, removing some of
the unnecessary movs introduced during register allocation.
---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td     | 2 ++
 llvm/test/CodeGen/AArch64/aes.ll                | 6 ++----
 llvm/test/CodeGen/AArch64/misched-fusion-aes.ll | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index b01a8cd00025f8..0fc91be1ad56d2 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -8216,8 +8216,10 @@ defm ST4 : SIMDLdSt4SingleAliases<"st4">;
 //----------------------------------------------------------------------------
 
 let Predicates = [HasAES] in {
+let isCommutable = 1 in {
 def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
 def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
+}
 def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
 def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
 }
diff --git a/llvm/test/CodeGen/AArch64/aes.ll b/llvm/test/CodeGen/AArch64/aes.ll
index 2bef28de895baf..386114f4a0d79d 100644
--- a/llvm/test/CodeGen/AArch64/aes.ll
+++ b/llvm/test/CodeGen/AArch64/aes.ll
@@ -16,8 +16,7 @@ define <16 x i8> @aese(<16 x i8> %a, <16 x i8> %b) {
 define <16 x i8> @aese_c(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: aese_c:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    aese v1.16b, v0.16b
-; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    aese v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %r = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %b, <16 x i8> %a)
   ret <16 x i8> %r
@@ -35,8 +34,7 @@ define <16 x i8> @aesd(<16 x i8> %a, <16 x i8> %b) {
 define <16 x i8> @aesd_c(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: aesd_c:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    aesd v1.16b, v0.16b
-; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    aesd v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %r = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %b, <16 x i8> %a)
   ret <16 x i8> %r
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
index bf166954d80c98..dc6fa9128e9336 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
@@ -206,7 +206,7 @@ entry:
   %aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1) #2
   %in2 = load <16 x i8>, ptr %p2, align 16
   %aesmc1= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1) #2
-  %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2) #2
+  %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %aesmc1, <16 x i8> %in2) #2
   store <16 x i8> %aesmc1, ptr %x3, align 16
   %in3 = load <16 x i8>, ptr %p3, align 16
   %aesmc2= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2) #2



More information about the llvm-commits mailing list