[llvm] 8982454 - [AArch64] Add rev codegen tests for bfloat16. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sun Feb 23 23:59:12 PST 2025


Author: David Green
Date: 2025-02-24T07:59:08Z
New Revision: 89824547ecdcab53734c02621b92f901f1615393

URL: https://github.com/llvm/llvm-project/commit/89824547ecdcab53734c02621b92f901f1615393
DIFF: https://github.com/llvm/llvm-project/commit/89824547ecdcab53734c02621b92f901f1615393.diff

LOG: [AArch64] Add rev codegen tests for bfloat16. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/arm64-rev.ll
    llvm/test/CodeGen/AArch64/neon-reverseshuffle.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/arm64-rev.ll b/llvm/test/CodeGen/AArch64/arm64-rev.ll
index f548a0e01feee..14ab7b5108125 100644
--- a/llvm/test/CodeGen/AArch64/arm64-rev.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-rev.ll
@@ -213,9 +213,9 @@ define <8 x i8> @test_vrev64D8(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    rev64.8b v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-	ret <8 x i8> %tmp2
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+  ret <8 x i8> %tmp2
 }
 
 define <4 x i16> @test_vrev64D16(ptr %A) nounwind {
@@ -224,9 +224,9 @@ define <4 x i16> @test_vrev64D16(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    rev64.4h v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-	ret <4 x i16> %tmp2
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  ret <4 x i16> %tmp2
 }
 
 define <2 x i32> @test_vrev64D32(ptr %A) nounwind {
@@ -235,9 +235,9 @@ define <2 x i32> @test_vrev64D32(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    rev64.2s v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, ptr %A
-	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-	ret <2 x i32> %tmp2
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
+  ret <2 x i32> %tmp2
 }
 
 define <2 x float> @test_vrev64Df(ptr %A) nounwind {
@@ -246,9 +246,9 @@ define <2 x float> @test_vrev64Df(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    rev64.2s v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x float>, ptr %A
-	%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
-	ret <2 x float> %tmp2
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
+  ret <2 x float> %tmp2
 }
 
 define <16 x i8> @test_vrev64Q8(ptr %A) nounwind {
@@ -257,9 +257,9 @@ define <16 x i8> @test_vrev64Q8(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    rev64.16b v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
-	ret <16 x i8> %tmp2
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
+  ret <16 x i8> %tmp2
 }
 
 define <8 x i16> @test_vrev64Q16(ptr %A) nounwind {
@@ -268,9 +268,9 @@ define <8 x i16> @test_vrev64Q16(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    rev64.8h v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
-	ret <8 x i16> %tmp2
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+  ret <8 x i16> %tmp2
 }
 
 define <4 x i32> @test_vrev64Q32(ptr %A) nounwind {
@@ -279,9 +279,9 @@ define <4 x i32> @test_vrev64Q32(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    rev64.4s v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, ptr %A
-	%tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-	ret <4 x i32> %tmp2
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  ret <4 x i32> %tmp2
 }
 
 define <4 x float> @test_vrev64Qf(ptr %A) nounwind {
@@ -290,9 +290,9 @@ define <4 x float> @test_vrev64Qf(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    rev64.4s v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x float>, ptr %A
-	%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-	ret <4 x float> %tmp2
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  ret <4 x float> %tmp2
 }
 
 define <8 x i8> @test_vrev32D8(ptr %A) nounwind {
@@ -301,9 +301,9 @@ define <8 x i8> @test_vrev32D8(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    rev32.8b v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
-	ret <8 x i8> %tmp2
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+  ret <8 x i8> %tmp2
 }
 
 define <4 x i16> @test_vrev32D16(ptr %A) nounwind {
@@ -312,9 +312,9 @@ define <4 x i16> @test_vrev32D16(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    rev32.4h v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, ptr %A
-	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-	ret <4 x i16> %tmp2
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  ret <4 x i16> %tmp2
 }
 
 define <16 x i8> @test_vrev32Q8(ptr %A) nounwind {
@@ -323,9 +323,9 @@ define <16 x i8> @test_vrev32Q8(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    rev32.16b v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
-	ret <16 x i8> %tmp2
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
+  ret <16 x i8> %tmp2
 }
 
 define <8 x i16> @test_vrev32Q16(ptr %A) nounwind {
@@ -334,9 +334,9 @@ define <8 x i16> @test_vrev32Q16(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    rev32.8h v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
-	ret <8 x i16> %tmp2
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x i16> %tmp2
 }
 
 define <8 x i8> @test_vrev16D8(ptr %A) nounwind {
@@ -345,9 +345,9 @@ define <8 x i8> @test_vrev16D8(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    rev16.8b v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
-	ret <8 x i8> %tmp2
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x i8> %tmp2
 }
 
 define <16 x i8> @test_vrev16Q8(ptr %A) nounwind {
@@ -356,9 +356,81 @@ define <16 x i8> @test_vrev16Q8(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    rev16.16b v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, ptr %A
-	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
-	ret <16 x i8> %tmp2
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+  ret <16 x i8> %tmp2
+}
+
+define <4 x half> @test_vrev32Df16(<4 x half> %A) nounwind {
+; CHECK-LABEL: test_vrev32Df16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rev32.4h v0, v0
+; CHECK-NEXT:    ret
+  %tmp2 = shufflevector <4 x half> %A, <4 x half> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  ret <4 x half> %tmp2
+}
+
+define <8 x half> @test_vrev32Qf16(<8 x half> %A) nounwind {
+; CHECK-LABEL: test_vrev32Qf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rev32.8h v0, v0
+; CHECK-NEXT:    ret
+  %tmp2 = shufflevector <8 x half> %A, <8 x half> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x half> %tmp2
+}
+
+define <4 x half> @test_vrev64Df16(<4 x half> %A) nounwind {
+; CHECK-LABEL: test_vrev64Df16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rev64.4h v0, v0
+; CHECK-NEXT:    ret
+  %tmp2 = shufflevector <4 x half> %A, <4 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  ret <4 x half> %tmp2
+}
+
+define <8 x half> @test_vrev64Qf16(<8 x half> %A) nounwind {
+; CHECK-LABEL: test_vrev64Qf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rev64.8h v0, v0
+; CHECK-NEXT:    ret
+  %tmp2 = shufflevector <8 x half> %A, <8 x half> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+  ret <8 x half> %tmp2
+}
+
+define <4 x bfloat> @test_vrev32Dbf16(<4 x bfloat> %A) nounwind {
+; CHECK-LABEL: test_vrev32Dbf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rev32.4h v0, v0
+; CHECK-NEXT:    ret
+  %tmp2 = shufflevector <4 x bfloat> %A, <4 x bfloat> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  ret <4 x bfloat> %tmp2
+}
+
+define <8 x bfloat> @test_vrev32Qbf16(<8 x bfloat> %A) nounwind {
+; CHECK-LABEL: test_vrev32Qbf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rev32.8h v0, v0
+; CHECK-NEXT:    ret
+  %tmp2 = shufflevector <8 x bfloat> %A, <8 x bfloat> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x bfloat> %tmp2
+}
+
+define <4 x bfloat> @test_vrev64Dbf16(<4 x bfloat> %A) nounwind {
+; CHECK-LABEL: test_vrev64Dbf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rev64.4h v0, v0
+; CHECK-NEXT:    ret
+  %tmp2 = shufflevector <4 x bfloat> %A, <4 x bfloat> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  ret <4 x bfloat> %tmp2
+}
+
+define <8 x bfloat> @test_vrev64Qbf16(<8 x bfloat> %A) nounwind {
+; CHECK-LABEL: test_vrev64Qbf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rev64.8h v0, v0
+; CHECK-NEXT:    ret
+  %tmp2 = shufflevector <8 x bfloat> %A, <8 x bfloat> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+  ret <8 x bfloat> %tmp2
 }
 
 ; Undef shuffle indices should not prevent matching to VREV:
@@ -369,9 +441,9 @@ define <8 x i8> @test_vrev64D8_undef(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    rev64.8b v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, ptr %A
-	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
-	ret <8 x i8> %tmp2
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
+  ret <8 x i8> %tmp2
 }
 
 define <8 x i16> @test_vrev32Q16_undef(ptr %A) nounwind {
@@ -380,9 +452,9 @@ define <8 x i16> @test_vrev32Q16_undef(ptr %A) nounwind {
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    rev32.8h v0, v0
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, ptr %A
-	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
-	ret <8 x i16> %tmp2
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
+  ret <8 x i16> %tmp2
 }
 
 ; vrev <4 x i16> should use REV32 and not REV64
@@ -426,9 +498,9 @@ define void @float_vrev64(ptr nocapture %source, ptr nocapture %dest) nounwind n
 ; CHECK-GI-LABEL: float_vrev64:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    movi d0, #0000000000000000
-; CHECK-GI-NEXT:    adrp x8, .LCPI28_0
+; CHECK-GI-NEXT:    adrp x8, .LCPI36_0
 ; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI28_0]
+; CHECK-GI-NEXT:    ldr q2, [x8, :lo12:.LCPI36_0]
 ; CHECK-GI-NEXT:    tbl.16b v0, { v0, v1 }, v2
 ; CHECK-GI-NEXT:    str q0, [x1, #176]
 ; CHECK-GI-NEXT:    ret
@@ -456,27 +528,27 @@ declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) nounwind readnone
 define void @test_rev16_truncstore() {
 ; CHECK-SD-LABEL: test_rev16_truncstore:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    cbnz wzr, .LBB30_2
-; CHECK-SD-NEXT:  .LBB30_1: // %cleanup
+; CHECK-SD-NEXT:    cbnz wzr, .LBB38_2
+; CHECK-SD-NEXT:  .LBB38_1: // %cleanup
 ; CHECK-SD-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-SD-NEXT:    ldrh w8, [x8]
 ; CHECK-SD-NEXT:    rev16 w8, w8
 ; CHECK-SD-NEXT:    strh w8, [x8]
-; CHECK-SD-NEXT:    cbz wzr, .LBB30_1
-; CHECK-SD-NEXT:  .LBB30_2: // %fail
+; CHECK-SD-NEXT:    cbz wzr, .LBB38_1
+; CHECK-SD-NEXT:  .LBB38_2: // %fail
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_rev16_truncstore:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    tbnz wzr, #0, .LBB30_2
-; CHECK-GI-NEXT:  .LBB30_1: // %cleanup
+; CHECK-GI-NEXT:    tbnz wzr, #0, .LBB38_2
+; CHECK-GI-NEXT:  .LBB38_1: // %cleanup
 ; CHECK-GI-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-GI-NEXT:    ldrh w8, [x8]
 ; CHECK-GI-NEXT:    rev w8, w8
 ; CHECK-GI-NEXT:    lsr w8, w8, #16
 ; CHECK-GI-NEXT:    strh w8, [x8]
-; CHECK-GI-NEXT:    tbz wzr, #0, .LBB30_1
-; CHECK-GI-NEXT:  .LBB30_2: // %fail
+; CHECK-GI-NEXT:    tbz wzr, #0, .LBB38_1
+; CHECK-GI-NEXT:  .LBB38_2: // %fail
 ; CHECK-GI-NEXT:    ret
 entry:
   br label %body

diff --git a/llvm/test/CodeGen/AArch64/neon-reverseshuffle.ll b/llvm/test/CodeGen/AArch64/neon-reverseshuffle.ll
index db5b93282e9c4..5a2f996810b5f 100644
--- a/llvm/test/CodeGen/AArch64/neon-reverseshuffle.ll
+++ b/llvm/test/CodeGen/AArch64/neon-reverseshuffle.ll
@@ -198,3 +198,24 @@ entry:
   %V128 = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ret <4 x half> %V128
 }
+
+define <8 x bfloat> @v8bf16(<8 x bfloat> %a) {
+; CHECK-LABEL: v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    rev64 v0.8h, v0.8h
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    ret
+entry:
+  %V128 = shufflevector <8 x bfloat> %a, <8 x bfloat> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+  ret <8 x bfloat> %V128
+}
+
+define <4 x bfloat> @v4bf16(<4 x bfloat> %a) {
+; CHECK-LABEL: v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    rev64 v0.4h, v0.4h
+; CHECK-NEXT:    ret
+entry:
+  %V128 = shufflevector <4 x bfloat> %a, <4 x bfloat> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  ret <4 x bfloat> %V128
+}


        


More information about the llvm-commits mailing list