[llvm] [AArch64] Generate rev16 for certain uses of __builtin_bswap16 (PR #105375)

via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 5 06:14:28 PDT 2024


https://github.com/adprasad-nvidia updated https://github.com/llvm/llvm-project/pull/105375

>From b054b1bb850d22016616eb62e01b48b2308f15fb Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Tue, 13 Aug 2024 16:40:58 +0530
Subject: [PATCH 01/12] [REV] Generate rev16 for all (srl (bswap x), (i64 16))
 instructions

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index ccef85bfaa8afc..30beae95ebe53f 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2836,8 +2836,8 @@ def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
 def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
 def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
 
-// Match (srl (bswap x), C) -> revC if the upper bswap bits are known zero.
-def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
+// Match (srl (bswap x), C) -> revC.
+def : Pat<(srl (bswap GPR32:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
 def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;
 
 def : Pat<(or (and (srl GPR64:$Rn, (i64 8)), (i64 0x00ff00ff00ff00ff)),

>From 4e18fdce4f0a3924e6fa66a50871ab756183d7c6 Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Tue, 13 Aug 2024 18:03:34 +0530
Subject: [PATCH 02/12] [REV] Update test files

---
 llvm/test/CodeGen/AArch64/arm64-rev.ll         | 15 +++++----------
 llvm/test/CodeGen/AArch64/bswap.ll             |  3 +--
 llvm/test/CodeGen/AArch64/memcmp.ll            | 15 +++++----------
 llvm/test/CodeGen/AArch64/merge-trunc-store.ll | 12 ++++--------
 4 files changed, 15 insertions(+), 30 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-rev.ll b/llvm/test/CodeGen/AArch64/arm64-rev.ll
index f548a0e01feee6..b0fd0d33f0b522 100644
--- a/llvm/test/CodeGen/AArch64/arm64-rev.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-rev.ll
@@ -27,15 +27,13 @@ entry:
 define i32 @test_rev_w_srl16(i16 %a) {
 ; CHECK-SD-LABEL: test_rev_w_srl16:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    rev w8, w0
-; CHECK-SD-NEXT:    lsr w0, w8, #16
+; CHECK-SD-NEXT:    rev16 w0, w0
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_rev_w_srl16:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    and w8, w0, #0xffff
-; CHECK-GI-NEXT:    rev w8, w8
-; CHECK-GI-NEXT:    lsr w0, w8, #16
+; CHECK-GI-NEXT:    rev16 w0, w8
 ; CHECK-GI-NEXT:    ret
 entry:
   %0 = zext i16 %a to i32
@@ -48,8 +46,7 @@ define i32 @test_rev_w_srl16_load(ptr %a) {
 ; CHECK-LABEL: test_rev_w_srl16_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrh w8, [x0]
-; CHECK-NEXT:    rev w8, w8
-; CHECK-NEXT:    lsr w0, w8, #16
+; CHECK-NEXT:    rev16 w0, w8
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i16, ptr %a
@@ -71,8 +68,7 @@ define i32 @test_rev_w_srl16_add(i8 %a, i8 %b) {
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    and w8, w1, #0xff
 ; CHECK-GI-NEXT:    add w8, w8, w0, uxtb
-; CHECK-GI-NEXT:    rev w8, w8
-; CHECK-GI-NEXT:    lsr w0, w8, #16
+; CHECK-GI-NEXT:    rev16 w0, w8
 ; CHECK-GI-NEXT:    ret
 entry:
   %0 = zext i8 %a to i32
@@ -472,8 +468,7 @@ define void @test_rev16_truncstore() {
 ; CHECK-GI-NEXT:  .LBB30_1: // %cleanup
 ; CHECK-GI-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-GI-NEXT:    ldrh w8, [x8]
-; CHECK-GI-NEXT:    rev w8, w8
-; CHECK-GI-NEXT:    lsr w8, w8, #16
+; CHECK-GI-NEXT:    rev16 w8, w8
 ; CHECK-GI-NEXT:    strh w8, [x8]
 ; CHECK-GI-NEXT:    tbz wzr, #0, .LBB30_1
 ; CHECK-GI-NEXT:  .LBB30_2: // %fail
diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index 9ee924dd2548a6..ce5f902069e4ff 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -6,8 +6,7 @@
 define i16 @bswap_i16(i16 %a){
 ; CHECK-LABEL: bswap_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rev w8, w0
-; CHECK-NEXT:    lsr w0, w8, #16
+; CHECK-NEXT:    rev16 w0, w0
 ; CHECK-NEXT:    ret
     %3 = call i16 @llvm.bswap.i16(i16 %a)
     ret i16 %3
diff --git a/llvm/test/CodeGen/AArch64/memcmp.ll b/llvm/test/CodeGen/AArch64/memcmp.ll
index 4da7c8c95a4e4f..0a6a03844128c3 100644
--- a/llvm/test/CodeGen/AArch64/memcmp.ll
+++ b/llvm/test/CodeGen/AArch64/memcmp.ll
@@ -39,9 +39,8 @@ define i32 @length2(ptr %X, ptr %Y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    ldrh w9, [x1]
-; CHECK-NEXT:    rev w8, w8
+; CHECK-NEXT:    rev16 w8, w8
 ; CHECK-NEXT:    rev w9, w9
-; CHECK-NEXT:    lsr w8, w8, #16
 ; CHECK-NEXT:    sub w0, w8, w9, lsr #16
 ; CHECK-NEXT:    ret
   %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 2) nounwind
@@ -93,9 +92,8 @@ define i1 @length2_lt(ptr %X, ptr %Y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    ldrh w9, [x1]
-; CHECK-NEXT:    rev w8, w8
+; CHECK-NEXT:    rev16 w8, w8
 ; CHECK-NEXT:    rev w9, w9
-; CHECK-NEXT:    lsr w8, w8, #16
 ; CHECK-NEXT:    sub w8, w8, w9, lsr #16
 ; CHECK-NEXT:    lsr w0, w8, #31
 ; CHECK-NEXT:    ret
@@ -109,9 +107,8 @@ define i1 @length2_gt(ptr %X, ptr %Y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    ldrh w9, [x1]
-; CHECK-NEXT:    rev w8, w8
+; CHECK-NEXT:    rev16 w8, w8
 ; CHECK-NEXT:    rev w9, w9
-; CHECK-NEXT:    lsr w8, w8, #16
 ; CHECK-NEXT:    sub w8, w8, w9, lsr #16
 ; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    cset w0, gt
@@ -536,10 +533,8 @@ define i32 @length10(ptr %X, ptr %Y) nounwind {
 ; CHECK-NEXT:  // %bb.1: // %loadbb1
 ; CHECK-NEXT:    ldrh w8, [x0, #8]
 ; CHECK-NEXT:    ldrh w9, [x1, #8]
-; CHECK-NEXT:    rev w8, w8
-; CHECK-NEXT:    rev w9, w9
-; CHECK-NEXT:    lsr w8, w8, #16
-; CHECK-NEXT:    lsr w9, w9, #16
+; CHECK-NEXT:    rev16 w8, w8
+; CHECK-NEXT:    rev16 w9, w9
 ; CHECK-NEXT:    cmp x8, x9
 ; CHECK-NEXT:    b.ne .LBB32_3
 ; CHECK-NEXT:  // %bb.2:
diff --git a/llvm/test/CodeGen/AArch64/merge-trunc-store.ll b/llvm/test/CodeGen/AArch64/merge-trunc-store.ll
index b161d746ad11d5..4fcd030db1bace 100644
--- a/llvm/test/CodeGen/AArch64/merge-trunc-store.ll
+++ b/llvm/test/CodeGen/AArch64/merge-trunc-store.ll
@@ -10,8 +10,7 @@ define void @le_i16_to_i8(i16 %x, ptr %p0) {
 ;
 ; BE-LABEL: le_i16_to_i8:
 ; BE:       // %bb.0:
-; BE-NEXT:    rev w8, w0
-; BE-NEXT:    lsr w8, w8, #16
+; BE-NEXT:    rev16 w8, w0
 ; BE-NEXT:    strh w8, [x1]
 ; BE-NEXT:    ret
   %sh1 = lshr i16 %x, 8
@@ -31,8 +30,7 @@ define void @le_i16_to_i8_order(i16 %x, ptr %p0) {
 ;
 ; BE-LABEL: le_i16_to_i8_order:
 ; BE:       // %bb.0:
-; BE-NEXT:    rev w8, w0
-; BE-NEXT:    lsr w8, w8, #16
+; BE-NEXT:    rev16 w8, w0
 ; BE-NEXT:    strh w8, [x1]
 ; BE-NEXT:    ret
   %sh1 = lshr i16 %x, 8
@@ -47,8 +45,7 @@ define void @le_i16_to_i8_order(i16 %x, ptr %p0) {
 define void @be_i16_to_i8_offset(i16 %x, ptr %p0) {
 ; LE-LABEL: be_i16_to_i8_offset:
 ; LE:       // %bb.0:
-; LE-NEXT:    rev w8, w0
-; LE-NEXT:    lsr w8, w8, #16
+; LE-NEXT:    rev16 w8, w0
 ; LE-NEXT:    sturh w8, [x1, #11]
 ; LE-NEXT:    ret
 ;
@@ -69,8 +66,7 @@ define void @be_i16_to_i8_offset(i16 %x, ptr %p0) {
 define void @be_i16_to_i8_order(i16 %x, ptr %p0) {
 ; LE-LABEL: be_i16_to_i8_order:
 ; LE:       // %bb.0:
-; LE-NEXT:    rev w8, w0
-; LE-NEXT:    lsr w8, w8, #16
+; LE-NEXT:    rev16 w8, w0
 ; LE-NEXT:    strh w8, [x1]
 ; LE-NEXT:    ret
 ;

>From a35e2bf8600199978bcd712f2001a2d13352f322 Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Thu, 29 Aug 2024 04:38:11 +0530
Subject: [PATCH 03/12] Revert "[REV] Update test files"

This reverts commit 5cda4a951123b38114e4ba2fb224aebf71981bbf.
---
 llvm/test/CodeGen/AArch64/arm64-rev.ll         | 15 ++++++++++-----
 llvm/test/CodeGen/AArch64/bswap.ll             |  3 ++-
 llvm/test/CodeGen/AArch64/memcmp.ll            | 15 ++++++++++-----
 llvm/test/CodeGen/AArch64/merge-trunc-store.ll | 12 ++++++++----
 4 files changed, 30 insertions(+), 15 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-rev.ll b/llvm/test/CodeGen/AArch64/arm64-rev.ll
index b0fd0d33f0b522..f548a0e01feee6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-rev.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-rev.ll
@@ -27,13 +27,15 @@ entry:
 define i32 @test_rev_w_srl16(i16 %a) {
 ; CHECK-SD-LABEL: test_rev_w_srl16:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    rev16 w0, w0
+; CHECK-SD-NEXT:    rev w8, w0
+; CHECK-SD-NEXT:    lsr w0, w8, #16
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_rev_w_srl16:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    and w8, w0, #0xffff
-; CHECK-GI-NEXT:    rev16 w0, w8
+; CHECK-GI-NEXT:    rev w8, w8
+; CHECK-GI-NEXT:    lsr w0, w8, #16
 ; CHECK-GI-NEXT:    ret
 entry:
   %0 = zext i16 %a to i32
@@ -46,7 +48,8 @@ define i32 @test_rev_w_srl16_load(ptr %a) {
 ; CHECK-LABEL: test_rev_w_srl16_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrh w8, [x0]
-; CHECK-NEXT:    rev16 w0, w8
+; CHECK-NEXT:    rev w8, w8
+; CHECK-NEXT:    lsr w0, w8, #16
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i16, ptr %a
@@ -68,7 +71,8 @@ define i32 @test_rev_w_srl16_add(i8 %a, i8 %b) {
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    and w8, w1, #0xff
 ; CHECK-GI-NEXT:    add w8, w8, w0, uxtb
-; CHECK-GI-NEXT:    rev16 w0, w8
+; CHECK-GI-NEXT:    rev w8, w8
+; CHECK-GI-NEXT:    lsr w0, w8, #16
 ; CHECK-GI-NEXT:    ret
 entry:
   %0 = zext i8 %a to i32
@@ -468,7 +472,8 @@ define void @test_rev16_truncstore() {
 ; CHECK-GI-NEXT:  .LBB30_1: // %cleanup
 ; CHECK-GI-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-GI-NEXT:    ldrh w8, [x8]
-; CHECK-GI-NEXT:    rev16 w8, w8
+; CHECK-GI-NEXT:    rev w8, w8
+; CHECK-GI-NEXT:    lsr w8, w8, #16
 ; CHECK-GI-NEXT:    strh w8, [x8]
 ; CHECK-GI-NEXT:    tbz wzr, #0, .LBB30_1
 ; CHECK-GI-NEXT:  .LBB30_2: // %fail
diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index ce5f902069e4ff..9ee924dd2548a6 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -6,7 +6,8 @@
 define i16 @bswap_i16(i16 %a){
 ; CHECK-LABEL: bswap_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rev16 w0, w0
+; CHECK-NEXT:    rev w8, w0
+; CHECK-NEXT:    lsr w0, w8, #16
 ; CHECK-NEXT:    ret
     %3 = call i16 @llvm.bswap.i16(i16 %a)
     ret i16 %3
diff --git a/llvm/test/CodeGen/AArch64/memcmp.ll b/llvm/test/CodeGen/AArch64/memcmp.ll
index 0a6a03844128c3..4da7c8c95a4e4f 100644
--- a/llvm/test/CodeGen/AArch64/memcmp.ll
+++ b/llvm/test/CodeGen/AArch64/memcmp.ll
@@ -39,8 +39,9 @@ define i32 @length2(ptr %X, ptr %Y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    ldrh w9, [x1]
-; CHECK-NEXT:    rev16 w8, w8
+; CHECK-NEXT:    rev w8, w8
 ; CHECK-NEXT:    rev w9, w9
+; CHECK-NEXT:    lsr w8, w8, #16
 ; CHECK-NEXT:    sub w0, w8, w9, lsr #16
 ; CHECK-NEXT:    ret
   %m = tail call i32 @memcmp(ptr %X, ptr %Y, i64 2) nounwind
@@ -92,8 +93,9 @@ define i1 @length2_lt(ptr %X, ptr %Y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    ldrh w9, [x1]
-; CHECK-NEXT:    rev16 w8, w8
+; CHECK-NEXT:    rev w8, w8
 ; CHECK-NEXT:    rev w9, w9
+; CHECK-NEXT:    lsr w8, w8, #16
 ; CHECK-NEXT:    sub w8, w8, w9, lsr #16
 ; CHECK-NEXT:    lsr w0, w8, #31
 ; CHECK-NEXT:    ret
@@ -107,8 +109,9 @@ define i1 @length2_gt(ptr %X, ptr %Y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    ldrh w9, [x1]
-; CHECK-NEXT:    rev16 w8, w8
+; CHECK-NEXT:    rev w8, w8
 ; CHECK-NEXT:    rev w9, w9
+; CHECK-NEXT:    lsr w8, w8, #16
 ; CHECK-NEXT:    sub w8, w8, w9, lsr #16
 ; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    cset w0, gt
@@ -533,8 +536,10 @@ define i32 @length10(ptr %X, ptr %Y) nounwind {
 ; CHECK-NEXT:  // %bb.1: // %loadbb1
 ; CHECK-NEXT:    ldrh w8, [x0, #8]
 ; CHECK-NEXT:    ldrh w9, [x1, #8]
-; CHECK-NEXT:    rev16 w8, w8
-; CHECK-NEXT:    rev16 w9, w9
+; CHECK-NEXT:    rev w8, w8
+; CHECK-NEXT:    rev w9, w9
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    lsr w9, w9, #16
 ; CHECK-NEXT:    cmp x8, x9
 ; CHECK-NEXT:    b.ne .LBB32_3
 ; CHECK-NEXT:  // %bb.2:
diff --git a/llvm/test/CodeGen/AArch64/merge-trunc-store.ll b/llvm/test/CodeGen/AArch64/merge-trunc-store.ll
index 4fcd030db1bace..b161d746ad11d5 100644
--- a/llvm/test/CodeGen/AArch64/merge-trunc-store.ll
+++ b/llvm/test/CodeGen/AArch64/merge-trunc-store.ll
@@ -10,7 +10,8 @@ define void @le_i16_to_i8(i16 %x, ptr %p0) {
 ;
 ; BE-LABEL: le_i16_to_i8:
 ; BE:       // %bb.0:
-; BE-NEXT:    rev16 w8, w0
+; BE-NEXT:    rev w8, w0
+; BE-NEXT:    lsr w8, w8, #16
 ; BE-NEXT:    strh w8, [x1]
 ; BE-NEXT:    ret
   %sh1 = lshr i16 %x, 8
@@ -30,7 +31,8 @@ define void @le_i16_to_i8_order(i16 %x, ptr %p0) {
 ;
 ; BE-LABEL: le_i16_to_i8_order:
 ; BE:       // %bb.0:
-; BE-NEXT:    rev16 w8, w0
+; BE-NEXT:    rev w8, w0
+; BE-NEXT:    lsr w8, w8, #16
 ; BE-NEXT:    strh w8, [x1]
 ; BE-NEXT:    ret
   %sh1 = lshr i16 %x, 8
@@ -45,7 +47,8 @@ define void @le_i16_to_i8_order(i16 %x, ptr %p0) {
 define void @be_i16_to_i8_offset(i16 %x, ptr %p0) {
 ; LE-LABEL: be_i16_to_i8_offset:
 ; LE:       // %bb.0:
-; LE-NEXT:    rev16 w8, w0
+; LE-NEXT:    rev w8, w0
+; LE-NEXT:    lsr w8, w8, #16
 ; LE-NEXT:    sturh w8, [x1, #11]
 ; LE-NEXT:    ret
 ;
@@ -66,7 +69,8 @@ define void @be_i16_to_i8_offset(i16 %x, ptr %p0) {
 define void @be_i16_to_i8_order(i16 %x, ptr %p0) {
 ; LE-LABEL: be_i16_to_i8_order:
 ; LE:       // %bb.0:
-; LE-NEXT:    rev16 w8, w0
+; LE-NEXT:    rev w8, w0
+; LE-NEXT:    lsr w8, w8, #16
 ; LE-NEXT:    strh w8, [x1]
 ; LE-NEXT:    ret
 ;

>From e732da70c32e2591fe18d8b1b9de8909002fc00d Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Thu, 29 Aug 2024 04:38:16 +0530
Subject: [PATCH 04/12] Revert "[REV] Generate rev16 for all (srl (bswap x),
 (i64 16)) instructions"

This reverts commit 7d0d37404c613be62e84536d8efd675756160867.
---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 30beae95ebe53f..ccef85bfaa8afc 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2836,8 +2836,8 @@ def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
 def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
 def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
 
-// Match (srl (bswap x), C) -> revC.
-def : Pat<(srl (bswap GPR32:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
+// Match (srl (bswap x), C) -> revC if the upper bswap bits are known zero.
+def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
 def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;
 
 def : Pat<(or (and (srl GPR64:$Rn, (i64 8)), (i64 0x00ff00ff00ff00ff)),

>From 601a5945b35fca6ffce923a0072940c31e58d0e3 Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Thu, 29 Aug 2024 04:39:11 +0530
Subject: [PATCH 05/12] [AArch64] Lower __builtin_bswap16 to rev16 if return
 value is 16-bit

Fixes #77222.
---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 16 ++++++++++++
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |  4 +++
 llvm/test/CodeGen/AArch64/bswap.ll            | 26 +++++++++++++++----
 3 files changed, 41 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5e3f9364ac3e12..de4e15dec43a25 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -22212,6 +22212,22 @@ static SDValue performExtendCombine(SDNode *N,
       N->getOperand(0)->getOpcode() == ISD::SETCC)
     return performSignExtendSetCCCombine(N, DCI, DAG);
 
+  // If we see (any_extend (bswap ...)) with bswap returning an i16, we know
+  // that the top half of the result register must be unused, due to the
+  // any_extend. This means that we can replace this pattern with (rev16
+  // (any_extend ...)). This saves a machine instruction compared to (lsr (rev
+  // ...)), which is what this pattern would otherwise be lowered to.
+  if (N->getOpcode() == ISD::ANY_EXTEND &&
+      N->getOperand(0).getOpcode() == ISD::BSWAP &&
+      N->getOperand(0).getValueType().isScalarInteger() &&
+      N->getOperand(0).getValueType().getFixedSizeInBits() == 16) {
+    SDNode *BswapNode = N->getOperand(0).getNode();
+    SDValue NewAnyExtend = DAG.getNode(ISD::ANY_EXTEND, SDLoc(BswapNode),
+                                       EVT(MVT::i32), BswapNode->getOperand(0));
+    return DAG.getNode(AArch64ISD::REV16, SDLoc(N), N->getValueType(0),
+                       NewAnyExtend);
+  }
+
   return SDValue();
 }
 
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index ccef85bfaa8afc..080209535653bd 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -758,6 +758,8 @@ def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
 def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
 def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;
 
+def AArch64rev16_scalar : SDNode<"AArch64ISD::REV16", SDTIntUnaryOp>;
+
 def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
 def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
 def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
@@ -2840,6 +2842,8 @@ def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
 def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
 def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;
 
+def : Pat<(AArch64rev16_scalar GPR32:$Rn), (REV16Wr GPR32:$Rn)>;
+
 def : Pat<(or (and (srl GPR64:$Rn, (i64 8)), (i64 0x00ff00ff00ff00ff)),
               (and (shl GPR64:$Rn, (i64 8)), (i64 0xff00ff00ff00ff00))),
           (REV16Xr GPR64:$Rn)>;
diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index 9ee924dd2548a6..0816851c4de629 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -3,16 +3,32 @@
 ; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; ====== Scalar Tests =====
-define i16 @bswap_i16(i16 %a){
-; CHECK-LABEL: bswap_i16:
+define i16 @bswap_i16_to_i16(i16 %a){
+; CHECK-SD-LABEL: bswap_i16_to_i16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    rev16 w0, w0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: bswap_i16_to_i16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    rev w8, w0
+; CHECK-GI-NEXT:    lsr w0, w8, #16
+; CHECK-GI-NEXT:    ret
+    %3 = call i16 @llvm.bswap.i16(i16 %a)
+    ret i16 %3
+}
+declare i16 @llvm.bswap.i16(i16)
+
+define i32 @bswap_i16_to_i32(i16 %a){
+; CHECK-LABEL: bswap_i16_to_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rev w8, w0
 ; CHECK-NEXT:    lsr w0, w8, #16
 ; CHECK-NEXT:    ret
-    %3 = call i16 @llvm.bswap.i16(i16 %a)
-    ret i16 %3
+  %3 = call i16 @llvm.bswap.i16(i16 %a)
+  %4 = zext i16 %3 to i32
+  ret i32 %4
 }
-declare i16 @llvm.bswap.i16(i16)
 
 define i32 @bswap_i32(i32 %a){
 ; CHECK-LABEL: bswap_i32:

>From e2fb50cdc0404fafc943acaa29f7d86da6376bc6 Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Wed, 4 Sep 2024 18:52:15 +0530
Subject: [PATCH 06/12] Simplify handling of value types

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index de4e15dec43a25..f3e06e22659454 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -22219,11 +22219,11 @@ static SDValue performExtendCombine(SDNode *N,
   // ...)), which is what this pattern would otherwise be lowered to.
   if (N->getOpcode() == ISD::ANY_EXTEND &&
       N->getOperand(0).getOpcode() == ISD::BSWAP &&
-      N->getOperand(0).getValueType().isScalarInteger() &&
-      N->getOperand(0).getValueType().getFixedSizeInBits() == 16) {
+      N->getOperand(0).getValueType() == MVT::i16) {
     SDNode *BswapNode = N->getOperand(0).getNode();
-    SDValue NewAnyExtend = DAG.getNode(ISD::ANY_EXTEND, SDLoc(BswapNode),
-                                       EVT(MVT::i32), BswapNode->getOperand(0));
+    SDValue NewAnyExtend =
+        DAG.getNode(ISD::ANY_EXTEND, SDLoc(BswapNode), N->getValueType(0),
+                    BswapNode->getOperand(0));
     return DAG.getNode(AArch64ISD::REV16, SDLoc(N), N->getValueType(0),
                        NewAnyExtend);
   }

>From 7ccd0dfd71b16d6f776e2901bf511ae7acd4745c Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Wed, 4 Sep 2024 22:03:49 +0530
Subject: [PATCH 07/12] Add check on any_extend output type

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index f3e06e22659454..ad06593be58115 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -22219,10 +22219,13 @@ static SDValue performExtendCombine(SDNode *N,
   // ...)), which is what this pattern would otherwise be lowered to.
   if (N->getOpcode() == ISD::ANY_EXTEND &&
       N->getOperand(0).getOpcode() == ISD::BSWAP &&
-      N->getOperand(0).getValueType() == MVT::i16) {
+      N->getOperand(0).getValueType() == MVT::i16 &&
+      (N->getValueType(0) == MVT::i32 ||
+       N->getValueType(0) == MVT::i64)) {
     SDNode *BswapNode = N->getOperand(0).getNode();
+    SDLoc DL(N);
     SDValue NewAnyExtend =
-        DAG.getNode(ISD::ANY_EXTEND, SDLoc(BswapNode), N->getValueType(0),
+        DAG.getNode(ISD::ANY_EXTEND, DL, N->getValueType(0),
                     BswapNode->getOperand(0));
     return DAG.getNode(AArch64ISD::REV16, SDLoc(N), N->getValueType(0),
                        NewAnyExtend);

>From c85f2d42c7d4c9e0fada298562ab2a9d54aa432b Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Wed, 4 Sep 2024 23:05:58 +0530
Subject: [PATCH 08/12] Add TD pattern for REV16Xr

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 080209535653bd..61efe1d29b8361 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2843,6 +2843,7 @@ def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
 def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;
 
 def : Pat<(AArch64rev16_scalar GPR32:$Rn), (REV16Wr GPR32:$Rn)>;
+def : Pat<(AArch64rev16_scalar GPR64:$Rn), (REV16Xr GPR64:$Rn)>;
 
 def : Pat<(or (and (srl GPR64:$Rn, (i64 8)), (i64 0x00ff00ff00ff00ff)),
               (and (shl GPR64:$Rn, (i64 8)), (i64 0xff00ff00ff00ff00))),

>From 899ef15b14ee53e9d6e4085139e193675d017a82 Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Thu, 5 Sep 2024 17:44:25 +0530
Subject: [PATCH 09/12] Add comment explaining type check

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ad06593be58115..a12584226dc139 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -22217,6 +22217,9 @@ static SDValue performExtendCombine(SDNode *N,
   // any_extend. This means that we can replace this pattern with (rev16
   // (any_extend ...)). This saves a machine instruction compared to (lsr (rev
   // ...)), which is what this pattern would otherwise be lowered to.
+  // Only apply this optimisation if any_extend in original pattern to i32 or i64,
+  // because this type will become the input type to REV16 in the new pattern, so
+  // must be a legitimate REV16 input type. 
   if (N->getOpcode() == ISD::ANY_EXTEND &&
       N->getOperand(0).getOpcode() == ISD::BSWAP &&
       N->getOperand(0).getValueType() == MVT::i16 &&

>From 63541c2ba4fbb4fd3cf6385858417e182acfd290 Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Thu, 5 Sep 2024 17:45:27 +0530
Subject: [PATCH 10/12] Add new tests for i16 to i64 and i128

---
 llvm/test/CodeGen/AArch64/bswap.ll | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index 0816851c4de629..2d6dbca0a0eb47 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -3,7 +3,9 @@
 ; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; ====== Scalar Tests =====
-define i16 @bswap_i16_to_i16(i16 %a){
+
+; ====== Scalar bswap.i16 Tests =====
+define i16 @bswap_i16_to_i16_anyext(i16 %a){
 ; CHECK-SD-LABEL: bswap_i16_to_i16:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    rev16 w0, w0
@@ -19,7 +21,23 @@ define i16 @bswap_i16_to_i16(i16 %a){
 }
 declare i16 @llvm.bswap.i16(i16)
 
-define i32 @bswap_i16_to_i32(i16 %a){
+; The zext here is optimised to an any_extend during isel.
+define i64 @bswap_i16_to_i64_anyext(i16 %a) {
+    %3 = call i16 @llvm.bswap.i16(i16 %a)
+    %4 = zext i16 %3 to i64
+    %5 = shl i64 %5, 48
+    ret i64 %5
+}
+
+; The zext here is optimised to an any_extend during isel..
+define i128 @bswap_i16_to_i128_anyext(i16 %a) {
+    %3 = call i16 @llvm.bswap.i16(i16 %a)
+    %4 = zext i16 %3 to i128
+    %5 = shl i128 %4, 112
+    ret i128 %d
+}
+
+define i32 @bswap_i16_to_i32_zext(i16 %a){
 ; CHECK-LABEL: bswap_i16_to_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rev w8, w0
@@ -30,6 +48,7 @@ define i32 @bswap_i16_to_i32(i16 %a){
   ret i32 %4
 }
 
+; ====== Other scalar bswap tests =====
 define i32 @bswap_i32(i32 %a){
 ; CHECK-LABEL: bswap_i32:
 ; CHECK:       // %bb.0:

>From 3cb271b3b4827492448b582e5b0ef85febb5d006 Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Thu, 5 Sep 2024 18:06:49 +0530
Subject: [PATCH 11/12] Update tests

---
 llvm/test/CodeGen/AArch64/bswap.ll | 43 ++++++++++++++++++++++++++----
 1 file changed, 38 insertions(+), 5 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index 2d6dbca0a0eb47..e90014be21deb3 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -6,12 +6,12 @@
 
 ; ====== Scalar bswap.i16 Tests =====
 define i16 @bswap_i16_to_i16_anyext(i16 %a){
-; CHECK-SD-LABEL: bswap_i16_to_i16:
+; CHECK-SD-LABEL: bswap_i16_to_i16_anyext:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    rev16 w0, w0
 ; CHECK-SD-NEXT:    ret
 ;
-; CHECK-GI-LABEL: bswap_i16_to_i16:
+; CHECK-GI-LABEL: bswap_i16_to_i16_anyext:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    rev w8, w0
 ; CHECK-GI-NEXT:    lsr w0, w8, #16
@@ -23,22 +23,55 @@ declare i16 @llvm.bswap.i16(i16)
 
 ; The zext here is optimised to an any_extend during isel.
 define i64 @bswap_i16_to_i64_anyext(i16 %a) {
+; CHECK-SD-LABEL: bswap_i16_to_i64_anyext:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT:    rev16 x8, x0
+; CHECK-SD-NEXT:    lsl x0, x8, #48
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: bswap_i16_to_i64_anyext:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    rev w8, w0
+; CHECK-GI-NEXT:    lsr w8, w8, #16
+; CHECK-GI-NEXT:    and x8, x8, #0xffff
+; CHECK-GI-NEXT:    lsl x0, x8, #48
+; CHECK-GI-NEXT:    ret
     %3 = call i16 @llvm.bswap.i16(i16 %a)
     %4 = zext i16 %3 to i64
-    %5 = shl i64 %5, 48
+    %5 = shl i64 %4, 48
     ret i64 %5
 }
 
 ; The zext here is optimised to an any_extend during isel..
 define i128 @bswap_i16_to_i128_anyext(i16 %a) {
+; CHECK-SD-LABEL: bswap_i16_to_i128_anyext:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, w0
+; CHECK-SD-NEXT:    mov x0, xzr
+; CHECK-SD-NEXT:    rev w8, w8
+; CHECK-SD-NEXT:    lsr w8, w8, #16
+; CHECK-SD-NEXT:    lsl x1, x8, #48
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: bswap_i16_to_i128_anyext:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, w0
+; CHECK-GI-NEXT:    mov x0, xzr
+; CHECK-GI-NEXT:    rev w8, w8
+; CHECK-GI-NEXT:    lsr w8, w8, #16
+; CHECK-GI-NEXT:    bfi x8, x8, #32, #32
+; CHECK-GI-NEXT:    and x8, x8, #0xffff
+; CHECK-GI-NEXT:    lsl x1, x8, #48
+; CHECK-GI-NEXT:    ret
     %3 = call i16 @llvm.bswap.i16(i16 %a)
     %4 = zext i16 %3 to i128
     %5 = shl i128 %4, 112
-    ret i128 %d
+    ret i128 %5
 }
 
 define i32 @bswap_i16_to_i32_zext(i16 %a){
-; CHECK-LABEL: bswap_i16_to_i32:
+; CHECK-LABEL: bswap_i16_to_i32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rev w8, w0
 ; CHECK-NEXT:    lsr w0, w8, #16

>From 2d935a9ee484400c4457cfa82efe27b2fedb98ca Mon Sep 17 00:00:00 2001
From: adprasad <adprasad at nvidia.com>
Date: Thu, 5 Sep 2024 18:07:26 +0530
Subject: [PATCH 12/12] Run clang-format

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index a12584226dc139..ff881e3ce4226d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -22217,19 +22217,17 @@ static SDValue performExtendCombine(SDNode *N,
   // any_extend. This means that we can replace this pattern with (rev16
   // (any_extend ...)). This saves a machine instruction compared to (lsr (rev
   // ...)), which is what this pattern would otherwise be lowered to.
-  // Only apply this optimisation if any_extend in original pattern to i32 or i64,
-  // because this type will become the input type to REV16 in the new pattern, so
-  // must be a legitimate REV16 input type. 
+  // Only apply this optimisation if any_extend in original pattern to i32 or
+  // i64, because this type will become the input type to REV16 in the new
+  // pattern, so must be a legitimate REV16 input type.
   if (N->getOpcode() == ISD::ANY_EXTEND &&
       N->getOperand(0).getOpcode() == ISD::BSWAP &&
       N->getOperand(0).getValueType() == MVT::i16 &&
-      (N->getValueType(0) == MVT::i32 ||
-       N->getValueType(0) == MVT::i64)) {
+      (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64)) {
     SDNode *BswapNode = N->getOperand(0).getNode();
     SDLoc DL(N);
-    SDValue NewAnyExtend =
-        DAG.getNode(ISD::ANY_EXTEND, DL, N->getValueType(0),
-                    BswapNode->getOperand(0));
+    SDValue NewAnyExtend = DAG.getNode(ISD::ANY_EXTEND, DL, N->getValueType(0),
+                                       BswapNode->getOperand(0));
     return DAG.getNode(AArch64ISD::REV16, SDLoc(N), N->getValueType(0),
                        NewAnyExtend);
   }



More information about the llvm-commits mailing list