[llvm] r309495 - [AArch64] Tie source and destination operands for AESMC/AESIMC.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Sat Jul 29 13:35:28 PDT 2017


Author: fhahn
Date: Sat Jul 29 13:35:28 2017
New Revision: 309495

URL: http://llvm.org/viewvc/llvm-project?rev=309495&view=rev
Log:
[AArch64] Tie source and destination operands for AESMC/AESIMC. 

Summary:
Most CPUs implementing AES fusion require instruction pairs of the form
    AESE Vn, _
    AESMC Vn, Vn
and
    AESD Vn, _
    AESIMC Vn, Vn

The constraint is added to the AES(I)MC instructions that use the result of
an AES(E|D) instruction by selecting AES(I)MCrrTied pseudo instructions,
which constrain the source and destination registers to be the same.
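
For illustration, here is a minimal IR sketch of the shape that now selects
the tied pseudo, based on the intrinsics used in misched-fusion-aes.ll (the
function name here is made up):

    declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8>, <16 x i8>)
    declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8>)

    ; The aese result feeds aesmc directly, so with +fuse-aes this is
    ; selected as AESErr followed by AESMCrrTied, tying the aesmc
    ; destination to its source register.
    define <16 x i8> @aese_aesmc_pair(<16 x i8> %d, <16 x i8> %k) {
      %e = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
      %m = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e)
      ret <16 x i8> %m
    }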

A nice side effect of this change is that all possible pairs are now
scheduled back-to-back on exynos-m1 in the misched-fusion-aes.ll test
case.

I also had to update the aes_load_store test case. The version I added
initially was heavily reduced, and with the new constraint AESE/AESMC could
not be scheduled back-to-back. The updated test is more realistic and still
exposes the same scheduling problem as the initial test case.

Reviewers: t.p.northover, rengolin, evandro, kristof.beyls, silviu.baranga

Reviewed By: t.p.northover, evandro

Subscribers: aemerson, javed.absar, llvm-commits

Differential Revision: https://reviews.llvm.org/D35299

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
    llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/trunk/lib/Target/AArch64/AArch64MacroFusion.cpp
    llvm/trunk/test/CodeGen/AArch64/misched-fusion-aes.ll
    llvm/trunk/test/MC/AArch64/arm64-crypto.s

Modified: llvm/trunk/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp?rev=309495&r1=309494&r2=309495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp Sat Jul 29 13:35:28 2017
@@ -966,6 +966,18 @@ bool AArch64ExpandPseudo::expandMI(Machi
   case AArch64::CMP_SWAP_128:
     return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);
 
+  case AArch64::AESMCrrTied:
+  case AArch64::AESIMCrrTied: {
+    MachineInstrBuilder MIB =
+    BuildMI(MBB, MBBI, MI.getDebugLoc(),
+            TII->get(Opcode == AArch64::AESMCrrTied ? AArch64::AESMCrr :
+                                                      AArch64::AESIMCrr))
+      .add(MI.getOperand(0))
+      .add(MI.getOperand(1));
+    transferImpOps(MI, MIB, MIB);
+    MI.eraseFromParent();
+    return true;
+   }
   }
   return false;
 }

Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td?rev=309495&r1=309494&r2=309495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td Sat Jul 29 13:35:28 2017
@@ -37,6 +37,9 @@ def HasFullFP16      : Predicate<"Subtar
                                  AssemblerPredicate<"FeatureFullFP16", "fullfp16">;
 def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                                  AssemblerPredicate<"FeatureSPE", "spe">;
+def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
+                                 AssemblerPredicate<"FeatureFuseAES",
+                                 "fuse-aes">;
 def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                                  AssemblerPredicate<"FeatureSVE", "sve">;
 
@@ -5304,6 +5307,31 @@ def AESDrr   : AESTiedInst<0b0101, "aesd
 def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
 def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
 
+// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
+// for AES fusion on some CPUs.
+let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
+def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
+                        Sched<[WriteV]>;
+def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
+                         Sched<[WriteV]>;
+}
+
+// Only use constrained versions of AES(I)MC instructions if they are paired with
+// AESE/AESD.
+def : Pat<(v16i8 (int_aarch64_crypto_aesmc
+            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
+                                            (v16i8 V128:$src2))))),
+          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
+                                             (v16i8 V128:$src2)))))>,
+          Requires<[HasFuseAES]>;
+
+def : Pat<(v16i8 (int_aarch64_crypto_aesimc
+            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
+                                            (v16i8 V128:$src2))))),
+          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
+                                              (v16i8 V128:$src2)))))>,
+          Requires<[HasFuseAES]>;
+
 def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
 def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
 def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;

Modified: llvm/trunk/lib/Target/AArch64/AArch64MacroFusion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64MacroFusion.cpp?rev=309495&r1=309494&r2=309495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64MacroFusion.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64MacroFusion.cpp Sat Jul 29 13:35:28 2017
@@ -118,11 +118,13 @@ static bool shouldScheduleAdjacent(const
     // Fuse AES crypto operations.
     switch(SecondOpcode) {
     // AES encode.
-    case AArch64::AESMCrr :
+    case AArch64::AESMCrr:
+    case AArch64::AESMCrrTied:
       return FirstOpcode == AArch64::AESErr ||
              FirstOpcode == AArch64::INSTRUCTION_LIST_END;
     // AES decode.
     case AArch64::AESIMCrr:
+    case AArch64::AESIMCrrTied:
       return FirstOpcode == AArch64::AESDrr ||
              FirstOpcode == AArch64::INSTRUCTION_LIST_END;
     }

Modified: llvm/trunk/test/CodeGen/AArch64/misched-fusion-aes.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/misched-fusion-aes.ll?rev=309495&r1=309494&r2=309495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/misched-fusion-aes.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/misched-fusion-aes.ll Sat Jul 29 13:35:28 2017
@@ -1,10 +1,10 @@
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-aes,+crypto | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=generic -mattr=+crypto | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a53 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a73 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSEALLPAIRS
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1  | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKM1
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-aes,+crypto | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=generic -mattr=+crypto | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a53 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a73 | FileCheck %s
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1  | FileCheck %s
 
 declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
 declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
@@ -76,41 +76,23 @@ define void @aesea(<16 x i8>* %a0, <16 x
   ret void
 
 ; CHECK-LABEL: aesea:
-; CHECKFUSEALLPAIRS: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
-; CHECKFUSEALLPAIRS: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
-; CHECKFUSEALLPAIRS: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
-; CHECKFUSEALLPAIRS: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
-; CHECKFUSEALLPAIRS: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
-; CHECKFUSEALLPAIRS: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
-; CHECKFUSEALLPAIRS: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
-; CHECKFUSEALLPAIRS: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
-; CHECKFUSEALLPAIRS-NOT: aesmc
-
-; CHECKM1: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
-; CHECKM1: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
-; CHECKM1: aese {{v[0-7].16b}}, {{v[0-7].16b}}
-; CHECKM1: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
-; CHECKM1: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
-; CHECKM1: aesmc {{v[0-7].16b}}, [[VH]]
-; CHECKM1: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
-; CHECKM1: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
-; CHECKM1: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
+; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VA]], [[VA]]
+; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VB]], [[VB]]
+; CHECK: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VC]], [[VC]]
+; CHECK: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VD]], [[VD]]
+; CHECK: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VE]], [[VE]]
+; CHECK: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VF]], [[VF]]
+; CHECK: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VG]], [[VG]]
+; CHECK: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc [[VH]], [[VH]]
+; CHECK-NOT: aesmc
 }
 
 define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
@@ -178,41 +160,23 @@ define void @aesda(<16 x i8>* %a0, <16 x
   ret void
 
 ; CHECK-LABEL: aesda:
-; CHECKFUSEALLPAIRS: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
-; CHECKFUSEALLPAIRS: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
-; CHECKFUSEALLPAIRS: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
-; CHECKFUSEALLPAIRS: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
-; CHECKFUSEALLPAIRS: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
-; CHECKFUSEALLPAIRS: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
-; CHECKFUSEALLPAIRS: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
-; CHECKFUSEALLPAIRS: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKFUSEALLPAIRS-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
-; CHECKFUSEALLPAIRS-NOT: aesimc
-
-; CHECKM1: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
-; CHECKM1: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
-; CHECKM1: aesd {{v[0-7].16b}}, {{v[0-7].16b}}
-; CHECKM1: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
-; CHECKM1: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
-; CHECKM1: aesimc {{v[0-7].16b}}, [[VH]]
-; CHECKM1: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
-; CHECKM1: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
-; CHECKM1: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
+; CHECK: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VA]], [[VA]]
+; CHECK: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VB]], [[VB]]
+; CHECK: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VC]], [[VC]]
+; CHECK: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VD]], [[VD]]
+; CHECK: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VE]], [[VE]]
+; CHECK: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VF]], [[VF]]
+; CHECK: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VG]], [[VG]]
+; CHECK: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesimc [[VH]], [[VH]]
+; CHECK-NOT: aesimc
 }
 
 define void @aes_load_store(<16 x i8> *%p1, <16 x i8> *%p2 , <16 x i8> *%p3) {
@@ -225,20 +189,20 @@ entry:
   %in1 = load <16 x i8>, <16 x i8>* %p1, align 16
   store <16 x i8> %in1, <16 x i8>* %x1, align 16
   %aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1) #2
-  store <16 x i8> %aese1, <16 x i8>* %x2, align 16
   %in2 = load <16 x i8>, <16 x i8>* %p2, align 16
   %aesmc1= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1) #2
-  store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
   %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2) #2
-  store <16 x i8> %aese2, <16 x i8>* %x4, align 16
+  store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
+  %in3 = load <16 x i8>, <16 x i8>* %p3, align 16
   %aesmc2= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2) #2
-  store <16 x i8> %aesmc2, <16 x i8>* %x5, align 16
+  %aese3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %aesmc2, <16 x i8> %in3) #2
+  store <16 x i8> %aese3, <16 x i8>* %x5, align 16
   ret void
 
 ; CHECK-LABEL: aes_load_store:
 ; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECK-NEXT: aesmc [[VA]], [[VA]]
 ; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
+; CHECK-NEXT: aesmc [[VB]], [[VB]]
 ; CHECK-NOT: aesmc
 }

Modified: llvm/trunk/test/MC/AArch64/arm64-crypto.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/AArch64/arm64-crypto.s?rev=309495&r1=309494&r2=309495&view=diff
==============================================================================
--- llvm/trunk/test/MC/AArch64/arm64-crypto.s (original)
+++ llvm/trunk/test/MC/AArch64/arm64-crypto.s Sat Jul 29 13:35:28 2017
@@ -1,4 +1,5 @@
 ; RUN: llvm-mc -triple arm64-apple-darwin -mattr=crypto -show-encoding -output-asm-variant=1 < %s | FileCheck %s
+; RUN: llvm-mc -triple arm64-apple-darwin -mattr='+crypto,+fuse-aes' -show-encoding -output-asm-variant=1 < %s | FileCheck %s
 
 foo:
   aese.16b v0, v1



