[llvm] 07bb044 - [RISCV] Add build_vector coverage when zbkb is available
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 8 14:25:12 PDT 2024
Author: Philip Reames
Date: 2024-07-08T14:24:44-07:00
New Revision: 07bb0444ddb96495905fbc13a04ad39972808dfb
URL: https://github.com/llvm/llvm-project/commit/07bb0444ddb96495905fbc13a04ad39972808dfb
DIFF: https://github.com/llvm/llvm-project/commit/07bb0444ddb96495905fbc13a04ad39972808dfb.diff
LOG: [RISCV] Add build_vector coverage when zbkb is available
An upcoming change will make much more complete use of packh, packw, and
pack when packing elements during build_vector lowering.
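For context, a minimal C sketch of what these Zbkb instructions compute (not part of this patch; it assumes RV64 with XLEN = 64 and follows the Zbkb extension's documented semantics) and how they can be chained to pack byte elements into a scalar, which is the shape the new PACK check lines exercise:

```c
#include <stdint.h>

/* packh: concatenate the low bytes of rs1 and rs2, zero-extended to XLEN. */
static inline uint64_t packh(uint64_t rs1, uint64_t rs2) {
  return (rs1 & 0xff) | ((rs2 & 0xff) << 8);
}

/* packw (RV64 only): concatenate the low halfwords of rs1 and rs2,
   sign-extending the 32-bit result to XLEN. */
static inline uint64_t packw(uint64_t rs1, uint64_t rs2) {
  uint32_t lo = (uint32_t)(rs1 & 0xffff) | ((uint32_t)(rs2 & 0xffff) << 16);
  return (uint64_t)(int64_t)(int32_t)lo;
}

/* pack: concatenate the low XLEN/2 bits of rs1 and rs2 (rs1 in the low half). */
static inline uint64_t pack(uint64_t rs1, uint64_t rs2) {
  return (rs1 & 0xffffffffULL) | (rs2 << 32);
}

/* Hypothetical helper: pack eight i8 build_vector elements e[0..7] into one
   64-bit scalar, illustrating how the three instructions compose. */
static inline uint64_t pack_v8i8(const uint8_t e[8]) {
  uint64_t lo = packw(packh(e[0], e[1]), packh(e[2], e[3]));
  uint64_t hi = packw(packh(e[4], e[5]), packh(e[6], e[7]));
  return pack(lo, hi);
}
```

The checks added below are only baseline coverage for the +zbkb run lines; the codegen shown here still builds the packed scalars with slli/or sequences, which the follow-up lowering change is expected to improve.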
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index d49929ce90c47..03ed6883b537d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -1,8 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-ONLY
; RUN: llc -mtriple=riscv32 -mattr=+v,+zba,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32VB
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zba,+zbb,+zbkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32VB-PACK
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64V,RV64V-ONLY
; RUN: llc -mtriple=riscv64 -mattr=+v,+rva22u64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64V,RVA22U64
+; RUN: llc -mtriple=riscv64 -mattr=+v,+rva22u64,+zbkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64V,RVA22U64-PACK
; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVE32
define void @buildvec_vid_v16i8(ptr %x) {
@@ -1274,6 +1276,51 @@ define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
; RV32VB-NEXT: vslide1down.vx v8, v8, a0
; RV32VB-NEXT: ret
;
+; RV32VB-PACK-LABEL: buildvec_v16i8_loads_contigous:
+; RV32VB-PACK: # %bb.0:
+; RV32VB-PACK-NEXT: lbu a1, 0(a0)
+; RV32VB-PACK-NEXT: lbu a2, 1(a0)
+; RV32VB-PACK-NEXT: lbu a3, 2(a0)
+; RV32VB-PACK-NEXT: lbu a4, 3(a0)
+; RV32VB-PACK-NEXT: packh a1, a1, a2
+; RV32VB-PACK-NEXT: slli a3, a3, 16
+; RV32VB-PACK-NEXT: slli a4, a4, 24
+; RV32VB-PACK-NEXT: or a3, a4, a3
+; RV32VB-PACK-NEXT: lbu a2, 4(a0)
+; RV32VB-PACK-NEXT: lbu a4, 5(a0)
+; RV32VB-PACK-NEXT: lbu a5, 6(a0)
+; RV32VB-PACK-NEXT: lbu a6, 7(a0)
+; RV32VB-PACK-NEXT: or a1, a1, a3
+; RV32VB-PACK-NEXT: packh a2, a2, a4
+; RV32VB-PACK-NEXT: slli a5, a5, 16
+; RV32VB-PACK-NEXT: slli a6, a6, 24
+; RV32VB-PACK-NEXT: or a3, a6, a5
+; RV32VB-PACK-NEXT: lbu a4, 8(a0)
+; RV32VB-PACK-NEXT: lbu a5, 9(a0)
+; RV32VB-PACK-NEXT: lbu a6, 10(a0)
+; RV32VB-PACK-NEXT: lbu a7, 11(a0)
+; RV32VB-PACK-NEXT: or a2, a2, a3
+; RV32VB-PACK-NEXT: packh a3, a4, a5
+; RV32VB-PACK-NEXT: slli a6, a6, 16
+; RV32VB-PACK-NEXT: slli a7, a7, 24
+; RV32VB-PACK-NEXT: or a4, a7, a6
+; RV32VB-PACK-NEXT: lbu a5, 12(a0)
+; RV32VB-PACK-NEXT: lbu a6, 13(a0)
+; RV32VB-PACK-NEXT: lbu a7, 14(a0)
+; RV32VB-PACK-NEXT: lbu a0, 15(a0)
+; RV32VB-PACK-NEXT: or a3, a3, a4
+; RV32VB-PACK-NEXT: packh a4, a5, a6
+; RV32VB-PACK-NEXT: slli a7, a7, 16
+; RV32VB-PACK-NEXT: slli a0, a0, 24
+; RV32VB-PACK-NEXT: or a0, a0, a7
+; RV32VB-PACK-NEXT: or a0, a4, a0
+; RV32VB-PACK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32VB-PACK-NEXT: vmv.v.x v8, a1
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a2
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a3
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RV32VB-PACK-NEXT: ret
+;
; RV64V-ONLY-LABEL: buildvec_v16i8_loads_contigous:
; RV64V-ONLY: # %bb.0:
; RV64V-ONLY-NEXT: addi a1, a0, 8
@@ -1366,6 +1413,55 @@ define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
; RVA22U64-NEXT: vslide1down.vx v8, v8, a0
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v16i8_loads_contigous:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: lbu a1, 0(a0)
+; RVA22U64-PACK-NEXT: lbu a2, 1(a0)
+; RVA22U64-PACK-NEXT: lbu a3, 2(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 3(a0)
+; RVA22U64-PACK-NEXT: packh a1, a1, a2
+; RVA22U64-PACK-NEXT: slli a3, a3, 16
+; RVA22U64-PACK-NEXT: slli a4, a4, 24
+; RVA22U64-PACK-NEXT: or a3, a3, a4
+; RVA22U64-PACK-NEXT: lbu a2, 4(a0)
+; RVA22U64-PACK-NEXT: or a6, a1, a3
+; RVA22U64-PACK-NEXT: lbu a3, 5(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 6(a0)
+; RVA22U64-PACK-NEXT: slli a2, a2, 32
+; RVA22U64-PACK-NEXT: lbu a5, 7(a0)
+; RVA22U64-PACK-NEXT: slli a3, a3, 40
+; RVA22U64-PACK-NEXT: or a2, a2, a3
+; RVA22U64-PACK-NEXT: slli a4, a4, 48
+; RVA22U64-PACK-NEXT: slli a5, a5, 56
+; RVA22U64-PACK-NEXT: or a4, a4, a5
+; RVA22U64-PACK-NEXT: or a2, a2, a4
+; RVA22U64-PACK-NEXT: lbu a3, 8(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 9(a0)
+; RVA22U64-PACK-NEXT: lbu a5, 10(a0)
+; RVA22U64-PACK-NEXT: lbu a1, 11(a0)
+; RVA22U64-PACK-NEXT: or a2, a6, a2
+; RVA22U64-PACK-NEXT: packh a3, a3, a4
+; RVA22U64-PACK-NEXT: slli a5, a5, 16
+; RVA22U64-PACK-NEXT: slli a1, a1, 24
+; RVA22U64-PACK-NEXT: or a1, a1, a5
+; RVA22U64-PACK-NEXT: lbu a4, 12(a0)
+; RVA22U64-PACK-NEXT: or a1, a1, a3
+; RVA22U64-PACK-NEXT: lbu a3, 13(a0)
+; RVA22U64-PACK-NEXT: lbu a5, 14(a0)
+; RVA22U64-PACK-NEXT: slli a4, a4, 32
+; RVA22U64-PACK-NEXT: lbu a0, 15(a0)
+; RVA22U64-PACK-NEXT: slli a3, a3, 40
+; RVA22U64-PACK-NEXT: or a3, a3, a4
+; RVA22U64-PACK-NEXT: slli a5, a5, 48
+; RVA22U64-PACK-NEXT: slli a0, a0, 56
+; RVA22U64-PACK-NEXT: or a0, a0, a5
+; RVA22U64-PACK-NEXT: or a0, a0, a3
+; RVA22U64-PACK-NEXT: or a0, a0, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a2
+; RVA22U64-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v16i8_loads_contigous:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: addi a1, a0, 8
@@ -1550,6 +1646,51 @@ define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
; RV32VB-NEXT: vslide1down.vx v8, v8, a0
; RV32VB-NEXT: ret
;
+; RV32VB-PACK-LABEL: buildvec_v16i8_loads_gather:
+; RV32VB-PACK: # %bb.0:
+; RV32VB-PACK-NEXT: lbu a1, 0(a0)
+; RV32VB-PACK-NEXT: lbu a2, 1(a0)
+; RV32VB-PACK-NEXT: lbu a3, 22(a0)
+; RV32VB-PACK-NEXT: lbu a4, 31(a0)
+; RV32VB-PACK-NEXT: packh a1, a1, a2
+; RV32VB-PACK-NEXT: slli a3, a3, 16
+; RV32VB-PACK-NEXT: slli a4, a4, 24
+; RV32VB-PACK-NEXT: or a3, a4, a3
+; RV32VB-PACK-NEXT: lbu a2, 44(a0)
+; RV32VB-PACK-NEXT: lbu a4, 55(a0)
+; RV32VB-PACK-NEXT: lbu a5, 623(a0)
+; RV32VB-PACK-NEXT: lbu a6, 75(a0)
+; RV32VB-PACK-NEXT: or a1, a1, a3
+; RV32VB-PACK-NEXT: packh a2, a2, a4
+; RV32VB-PACK-NEXT: slli a5, a5, 16
+; RV32VB-PACK-NEXT: slli a6, a6, 24
+; RV32VB-PACK-NEXT: or a3, a6, a5
+; RV32VB-PACK-NEXT: lbu a4, 82(a0)
+; RV32VB-PACK-NEXT: lbu a5, 93(a0)
+; RV32VB-PACK-NEXT: lbu a6, 105(a0)
+; RV32VB-PACK-NEXT: lbu a7, 161(a0)
+; RV32VB-PACK-NEXT: or a2, a2, a3
+; RV32VB-PACK-NEXT: packh a3, a4, a5
+; RV32VB-PACK-NEXT: slli a6, a6, 16
+; RV32VB-PACK-NEXT: slli a7, a7, 24
+; RV32VB-PACK-NEXT: or a4, a7, a6
+; RV32VB-PACK-NEXT: lbu a5, 124(a0)
+; RV32VB-PACK-NEXT: lbu a6, 163(a0)
+; RV32VB-PACK-NEXT: lbu a7, 144(a0)
+; RV32VB-PACK-NEXT: lbu a0, 154(a0)
+; RV32VB-PACK-NEXT: or a3, a3, a4
+; RV32VB-PACK-NEXT: packh a4, a5, a6
+; RV32VB-PACK-NEXT: slli a7, a7, 16
+; RV32VB-PACK-NEXT: slli a0, a0, 24
+; RV32VB-PACK-NEXT: or a0, a0, a7
+; RV32VB-PACK-NEXT: or a0, a4, a0
+; RV32VB-PACK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32VB-PACK-NEXT: vmv.v.x v8, a1
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a2
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a3
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RV32VB-PACK-NEXT: ret
+;
; RV64V-ONLY-LABEL: buildvec_v16i8_loads_gather:
; RV64V-ONLY: # %bb.0:
; RV64V-ONLY-NEXT: addi a1, a0, 82
@@ -1642,6 +1783,55 @@ define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
; RVA22U64-NEXT: vslide1down.vx v8, v8, a0
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v16i8_loads_gather:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: lbu a1, 0(a0)
+; RVA22U64-PACK-NEXT: lbu a2, 1(a0)
+; RVA22U64-PACK-NEXT: lbu a3, 22(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 31(a0)
+; RVA22U64-PACK-NEXT: packh a1, a1, a2
+; RVA22U64-PACK-NEXT: slli a3, a3, 16
+; RVA22U64-PACK-NEXT: slli a4, a4, 24
+; RVA22U64-PACK-NEXT: or a3, a3, a4
+; RVA22U64-PACK-NEXT: lbu a2, 44(a0)
+; RVA22U64-PACK-NEXT: or a6, a1, a3
+; RVA22U64-PACK-NEXT: lbu a3, 55(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 623(a0)
+; RVA22U64-PACK-NEXT: slli a2, a2, 32
+; RVA22U64-PACK-NEXT: lbu a5, 75(a0)
+; RVA22U64-PACK-NEXT: slli a3, a3, 40
+; RVA22U64-PACK-NEXT: or a2, a2, a3
+; RVA22U64-PACK-NEXT: slli a4, a4, 48
+; RVA22U64-PACK-NEXT: slli a5, a5, 56
+; RVA22U64-PACK-NEXT: or a4, a4, a5
+; RVA22U64-PACK-NEXT: or a2, a2, a4
+; RVA22U64-PACK-NEXT: lbu a3, 82(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 93(a0)
+; RVA22U64-PACK-NEXT: lbu a5, 105(a0)
+; RVA22U64-PACK-NEXT: lbu a1, 161(a0)
+; RVA22U64-PACK-NEXT: or a2, a6, a2
+; RVA22U64-PACK-NEXT: packh a3, a3, a4
+; RVA22U64-PACK-NEXT: slli a5, a5, 16
+; RVA22U64-PACK-NEXT: slli a1, a1, 24
+; RVA22U64-PACK-NEXT: or a1, a1, a5
+; RVA22U64-PACK-NEXT: lbu a4, 124(a0)
+; RVA22U64-PACK-NEXT: or a1, a1, a3
+; RVA22U64-PACK-NEXT: lbu a3, 163(a0)
+; RVA22U64-PACK-NEXT: lbu a5, 144(a0)
+; RVA22U64-PACK-NEXT: slli a4, a4, 32
+; RVA22U64-PACK-NEXT: lbu a0, 154(a0)
+; RVA22U64-PACK-NEXT: slli a3, a3, 40
+; RVA22U64-PACK-NEXT: or a3, a3, a4
+; RVA22U64-PACK-NEXT: slli a5, a5, 48
+; RVA22U64-PACK-NEXT: slli a0, a0, 56
+; RVA22U64-PACK-NEXT: or a0, a0, a5
+; RVA22U64-PACK-NEXT: or a0, a0, a3
+; RVA22U64-PACK-NEXT: or a0, a0, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a2
+; RVA22U64-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v16i8_loads_gather:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: addi a1, a0, 82
@@ -1785,6 +1975,33 @@ define <16 x i8> @buildvec_v16i8_undef_low_half(ptr %p) {
; RV32VB-NEXT: vslide1down.vx v8, v8, a0
; RV32VB-NEXT: ret
;
+; RV32VB-PACK-LABEL: buildvec_v16i8_undef_low_half:
+; RV32VB-PACK: # %bb.0:
+; RV32VB-PACK-NEXT: lbu a1, 82(a0)
+; RV32VB-PACK-NEXT: lbu a2, 93(a0)
+; RV32VB-PACK-NEXT: lbu a3, 105(a0)
+; RV32VB-PACK-NEXT: lbu a4, 161(a0)
+; RV32VB-PACK-NEXT: packh a1, a1, a2
+; RV32VB-PACK-NEXT: slli a3, a3, 16
+; RV32VB-PACK-NEXT: slli a4, a4, 24
+; RV32VB-PACK-NEXT: or a3, a4, a3
+; RV32VB-PACK-NEXT: lbu a2, 124(a0)
+; RV32VB-PACK-NEXT: lbu a4, 163(a0)
+; RV32VB-PACK-NEXT: lbu a5, 144(a0)
+; RV32VB-PACK-NEXT: lbu a0, 154(a0)
+; RV32VB-PACK-NEXT: or a1, a1, a3
+; RV32VB-PACK-NEXT: packh a2, a2, a4
+; RV32VB-PACK-NEXT: slli a5, a5, 16
+; RV32VB-PACK-NEXT: slli a0, a0, 24
+; RV32VB-PACK-NEXT: or a0, a0, a5
+; RV32VB-PACK-NEXT: or a0, a2, a0
+; RV32VB-PACK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32VB-PACK-NEXT: vmv.v.i v8, 0
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, zero
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a1
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RV32VB-PACK-NEXT: ret
+;
; RV64V-ONLY-LABEL: buildvec_v16i8_undef_low_half:
; RV64V-ONLY: # %bb.0:
; RV64V-ONLY-NEXT: addi a1, a0, 82
@@ -1835,6 +2052,34 @@ define <16 x i8> @buildvec_v16i8_undef_low_half(ptr %p) {
; RVA22U64-NEXT: vslide1down.vx v8, v8, a0
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v16i8_undef_low_half:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: lbu a1, 82(a0)
+; RVA22U64-PACK-NEXT: lbu a2, 93(a0)
+; RVA22U64-PACK-NEXT: lbu a3, 105(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 161(a0)
+; RVA22U64-PACK-NEXT: packh a1, a1, a2
+; RVA22U64-PACK-NEXT: slli a3, a3, 16
+; RVA22U64-PACK-NEXT: slli a4, a4, 24
+; RVA22U64-PACK-NEXT: or a3, a3, a4
+; RVA22U64-PACK-NEXT: lbu a2, 124(a0)
+; RVA22U64-PACK-NEXT: or a1, a1, a3
+; RVA22U64-PACK-NEXT: lbu a3, 163(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 144(a0)
+; RVA22U64-PACK-NEXT: slli a2, a2, 32
+; RVA22U64-PACK-NEXT: lbu a0, 154(a0)
+; RVA22U64-PACK-NEXT: slli a3, a3, 40
+; RVA22U64-PACK-NEXT: or a2, a2, a3
+; RVA22U64-PACK-NEXT: slli a4, a4, 48
+; RVA22U64-PACK-NEXT: slli a0, a0, 56
+; RVA22U64-PACK-NEXT: or a0, a0, a4
+; RVA22U64-PACK-NEXT: or a0, a0, a2
+; RVA22U64-PACK-NEXT: or a0, a0, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.i v8, 0
+; RVA22U64-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v16i8_undef_low_half:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: addi a1, a0, 82
@@ -1935,6 +2180,33 @@ define <16 x i8> @buildvec_v16i8_undef_high_half(ptr %p) {
; RV32VB-NEXT: vslide1down.vx v8, v8, zero
; RV32VB-NEXT: ret
;
+; RV32VB-PACK-LABEL: buildvec_v16i8_undef_high_half:
+; RV32VB-PACK: # %bb.0:
+; RV32VB-PACK-NEXT: lbu a1, 0(a0)
+; RV32VB-PACK-NEXT: lbu a2, 1(a0)
+; RV32VB-PACK-NEXT: lbu a3, 22(a0)
+; RV32VB-PACK-NEXT: lbu a4, 31(a0)
+; RV32VB-PACK-NEXT: packh a1, a1, a2
+; RV32VB-PACK-NEXT: slli a3, a3, 16
+; RV32VB-PACK-NEXT: slli a4, a4, 24
+; RV32VB-PACK-NEXT: or a3, a4, a3
+; RV32VB-PACK-NEXT: lbu a2, 44(a0)
+; RV32VB-PACK-NEXT: lbu a4, 55(a0)
+; RV32VB-PACK-NEXT: lbu a5, 623(a0)
+; RV32VB-PACK-NEXT: lbu a0, 75(a0)
+; RV32VB-PACK-NEXT: or a1, a1, a3
+; RV32VB-PACK-NEXT: packh a2, a2, a4
+; RV32VB-PACK-NEXT: slli a5, a5, 16
+; RV32VB-PACK-NEXT: slli a0, a0, 24
+; RV32VB-PACK-NEXT: or a0, a0, a5
+; RV32VB-PACK-NEXT: or a0, a2, a0
+; RV32VB-PACK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32VB-PACK-NEXT: vmv.v.x v8, a1
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, zero
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, zero
+; RV32VB-PACK-NEXT: ret
+;
; RV64V-ONLY-LABEL: buildvec_v16i8_undef_high_half:
; RV64V-ONLY: # %bb.0:
; RV64V-ONLY-NEXT: lbu a1, 1(a0)
@@ -1985,6 +2257,34 @@ define <16 x i8> @buildvec_v16i8_undef_high_half(ptr %p) {
; RVA22U64-NEXT: vslide1down.vx v8, v8, zero
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v16i8_undef_high_half:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: lbu a1, 0(a0)
+; RVA22U64-PACK-NEXT: lbu a2, 1(a0)
+; RVA22U64-PACK-NEXT: lbu a3, 22(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 31(a0)
+; RVA22U64-PACK-NEXT: packh a1, a1, a2
+; RVA22U64-PACK-NEXT: slli a3, a3, 16
+; RVA22U64-PACK-NEXT: slli a4, a4, 24
+; RVA22U64-PACK-NEXT: or a3, a3, a4
+; RVA22U64-PACK-NEXT: lbu a2, 44(a0)
+; RVA22U64-PACK-NEXT: or a1, a1, a3
+; RVA22U64-PACK-NEXT: lbu a3, 55(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 623(a0)
+; RVA22U64-PACK-NEXT: slli a2, a2, 32
+; RVA22U64-PACK-NEXT: lbu a0, 75(a0)
+; RVA22U64-PACK-NEXT: slli a3, a3, 40
+; RVA22U64-PACK-NEXT: or a2, a2, a3
+; RVA22U64-PACK-NEXT: slli a4, a4, 48
+; RVA22U64-PACK-NEXT: slli a0, a0, 56
+; RVA22U64-PACK-NEXT: or a0, a0, a4
+; RVA22U64-PACK-NEXT: or a0, a0, a2
+; RVA22U64-PACK-NEXT: or a0, a0, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a0
+; RVA22U64-PACK-NEXT: vslide1down.vx v8, v8, zero
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v16i8_undef_high_half:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: lbu a1, 1(a0)
@@ -2094,6 +2394,35 @@ define <16 x i8> @buildvec_v16i8_undef_edges(ptr %p) {
; RV32VB-NEXT: vslide1down.vx v8, v8, zero
; RV32VB-NEXT: ret
;
+; RV32VB-PACK-LABEL: buildvec_v16i8_undef_edges:
+; RV32VB-PACK: # %bb.0:
+; RV32VB-PACK-NEXT: lbu a1, 44(a0)
+; RV32VB-PACK-NEXT: lbu a2, 55(a0)
+; RV32VB-PACK-NEXT: lbu a3, 623(a0)
+; RV32VB-PACK-NEXT: lbu a4, 75(a0)
+; RV32VB-PACK-NEXT: lbu a5, 31(a0)
+; RV32VB-PACK-NEXT: packh a1, a1, a2
+; RV32VB-PACK-NEXT: slli a3, a3, 16
+; RV32VB-PACK-NEXT: slli a4, a4, 24
+; RV32VB-PACK-NEXT: or a3, a4, a3
+; RV32VB-PACK-NEXT: lbu a2, 82(a0)
+; RV32VB-PACK-NEXT: lbu a4, 93(a0)
+; RV32VB-PACK-NEXT: or a1, a1, a3
+; RV32VB-PACK-NEXT: lbu a3, 105(a0)
+; RV32VB-PACK-NEXT: lbu a0, 161(a0)
+; RV32VB-PACK-NEXT: packh a2, a2, a4
+; RV32VB-PACK-NEXT: slli a5, a5, 24
+; RV32VB-PACK-NEXT: slli a3, a3, 16
+; RV32VB-PACK-NEXT: slli a0, a0, 24
+; RV32VB-PACK-NEXT: or a0, a0, a3
+; RV32VB-PACK-NEXT: or a0, a2, a0
+; RV32VB-PACK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32VB-PACK-NEXT: vmv.v.x v8, a5
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a1
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, zero
+; RV32VB-PACK-NEXT: ret
+;
; RV64V-ONLY-LABEL: buildvec_v16i8_undef_edges:
; RV64V-ONLY: # %bb.0:
; RV64V-ONLY-NEXT: addi a1, a0, 31
@@ -2154,6 +2483,36 @@ define <16 x i8> @buildvec_v16i8_undef_edges(ptr %p) {
; RVA22U64-NEXT: vslide1down.vx v8, v8, a0
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v16i8_undef_edges:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: lbu a1, 31(a0)
+; RVA22U64-PACK-NEXT: lbu a2, 44(a0)
+; RVA22U64-PACK-NEXT: slli a1, a1, 24
+; RVA22U64-PACK-NEXT: lbu a3, 55(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 623(a0)
+; RVA22U64-PACK-NEXT: slli a2, a2, 32
+; RVA22U64-PACK-NEXT: lbu a5, 75(a0)
+; RVA22U64-PACK-NEXT: slli a3, a3, 40
+; RVA22U64-PACK-NEXT: or a2, a2, a3
+; RVA22U64-PACK-NEXT: slli a4, a4, 48
+; RVA22U64-PACK-NEXT: slli a5, a5, 56
+; RVA22U64-PACK-NEXT: or a4, a4, a5
+; RVA22U64-PACK-NEXT: or a2, a2, a4
+; RVA22U64-PACK-NEXT: lbu a3, 82(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 93(a0)
+; RVA22U64-PACK-NEXT: lbu a5, 105(a0)
+; RVA22U64-PACK-NEXT: lbu a0, 161(a0)
+; RVA22U64-PACK-NEXT: add.uw a1, a1, a2
+; RVA22U64-PACK-NEXT: packh a2, a3, a4
+; RVA22U64-PACK-NEXT: slli a5, a5, 16
+; RVA22U64-PACK-NEXT: slli a0, a0, 24
+; RVA22U64-PACK-NEXT: or a0, a0, a5
+; RVA22U64-PACK-NEXT: or a0, a0, a2
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a1
+; RVA22U64-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v16i8_undef_edges:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: addi a1, a0, 31
@@ -2279,6 +2638,34 @@ define <16 x i8> @buildvec_v16i8_loads_undef_scattered(ptr %p) {
; RV32VB-NEXT: vslide1down.vx v8, v8, a0
; RV32VB-NEXT: ret
;
+; RV32VB-PACK-LABEL: buildvec_v16i8_loads_undef_scattered:
+; RV32VB-PACK: # %bb.0:
+; RV32VB-PACK-NEXT: lbu a1, 0(a0)
+; RV32VB-PACK-NEXT: lbu a2, 1(a0)
+; RV32VB-PACK-NEXT: lbu a3, 44(a0)
+; RV32VB-PACK-NEXT: lbu a4, 55(a0)
+; RV32VB-PACK-NEXT: lbu a5, 75(a0)
+; RV32VB-PACK-NEXT: packh a1, a1, a2
+; RV32VB-PACK-NEXT: packh a2, a3, a4
+; RV32VB-PACK-NEXT: slli a5, a5, 24
+; RV32VB-PACK-NEXT: lbu a3, 82(a0)
+; RV32VB-PACK-NEXT: lbu a4, 93(a0)
+; RV32VB-PACK-NEXT: or a2, a2, a5
+; RV32VB-PACK-NEXT: lbu a5, 144(a0)
+; RV32VB-PACK-NEXT: lbu a6, 154(a0)
+; RV32VB-PACK-NEXT: packh a3, a3, a4
+; RV32VB-PACK-NEXT: lbu a0, 124(a0)
+; RV32VB-PACK-NEXT: slli a5, a5, 16
+; RV32VB-PACK-NEXT: slli a6, a6, 24
+; RV32VB-PACK-NEXT: or a4, a6, a5
+; RV32VB-PACK-NEXT: or a0, a0, a4
+; RV32VB-PACK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32VB-PACK-NEXT: vmv.v.x v8, a1
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a2
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a3
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RV32VB-PACK-NEXT: ret
+;
; RV64V-ONLY-LABEL: buildvec_v16i8_loads_undef_scattered:
; RV64V-ONLY: # %bb.0:
; RV64V-ONLY-NEXT: addi a1, a0, 82
@@ -2345,6 +2732,37 @@ define <16 x i8> @buildvec_v16i8_loads_undef_scattered(ptr %p) {
; RVA22U64-NEXT: vslide1down.vx v8, v8, a0
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v16i8_loads_undef_scattered:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: lbu a1, 0(a0)
+; RVA22U64-PACK-NEXT: lbu a2, 1(a0)
+; RVA22U64-PACK-NEXT: lbu a3, 44(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 55(a0)
+; RVA22U64-PACK-NEXT: packh a1, a1, a2
+; RVA22U64-PACK-NEXT: lbu a2, 75(a0)
+; RVA22U64-PACK-NEXT: slli a3, a3, 32
+; RVA22U64-PACK-NEXT: slli a4, a4, 40
+; RVA22U64-PACK-NEXT: or a3, a3, a4
+; RVA22U64-PACK-NEXT: slli a2, a2, 56
+; RVA22U64-PACK-NEXT: or a1, a1, a2
+; RVA22U64-PACK-NEXT: lbu a2, 82(a0)
+; RVA22U64-PACK-NEXT: lbu a4, 93(a0)
+; RVA22U64-PACK-NEXT: or a1, a1, a3
+; RVA22U64-PACK-NEXT: lbu a3, 144(a0)
+; RVA22U64-PACK-NEXT: lbu a5, 154(a0)
+; RVA22U64-PACK-NEXT: packh a2, a2, a4
+; RVA22U64-PACK-NEXT: lbu a0, 124(a0)
+; RVA22U64-PACK-NEXT: slli a3, a3, 48
+; RVA22U64-PACK-NEXT: slli a5, a5, 56
+; RVA22U64-PACK-NEXT: or a3, a3, a5
+; RVA22U64-PACK-NEXT: slli a0, a0, 32
+; RVA22U64-PACK-NEXT: or a0, a0, a2
+; RVA22U64-PACK-NEXT: or a0, a0, a3
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a1
+; RVA22U64-PACK-NEXT: vslide1down.vx v8, v8, a0
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v16i8_loads_undef_scattered:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: addi a1, a0, 82
@@ -2470,6 +2888,25 @@ define <8 x i8> @buildvec_v8i8_pack(i8 %e1, i8 %e2, i8 %e3, i8 %e4, i8 %e5, i8 %
; RV32VB-NEXT: vslide1down.vx v8, v8, a4
; RV32VB-NEXT: ret
;
+; RV32VB-PACK-LABEL: buildvec_v8i8_pack:
+; RV32VB-PACK: # %bb.0:
+; RV32VB-PACK-NEXT: packh a4, a4, a5
+; RV32VB-PACK-NEXT: slli a7, a7, 24
+; RV32VB-PACK-NEXT: andi a5, a6, 255
+; RV32VB-PACK-NEXT: slli a5, a5, 16
+; RV32VB-PACK-NEXT: or a4, a4, a7
+; RV32VB-PACK-NEXT: or a4, a4, a5
+; RV32VB-PACK-NEXT: packh a0, a0, a1
+; RV32VB-PACK-NEXT: slli a3, a3, 24
+; RV32VB-PACK-NEXT: andi a1, a2, 255
+; RV32VB-PACK-NEXT: slli a1, a1, 16
+; RV32VB-PACK-NEXT: or a0, a0, a3
+; RV32VB-PACK-NEXT: or a0, a0, a1
+; RV32VB-PACK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32VB-PACK-NEXT: vmv.v.x v8, a0
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a4
+; RV32VB-PACK-NEXT: ret
+;
; RV64V-ONLY-LABEL: buildvec_v8i8_pack:
; RV64V-ONLY: # %bb.0:
; RV64V-ONLY-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
@@ -2512,6 +2949,30 @@ define <8 x i8> @buildvec_v8i8_pack(i8 %e1, i8 %e2, i8 %e3, i8 %e4, i8 %e5, i8 %
; RVA22U64-NEXT: vmv.s.x v8, a0
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v8i8_pack:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: andi a4, a4, 255
+; RVA22U64-PACK-NEXT: slli a4, a4, 32
+; RVA22U64-PACK-NEXT: andi a5, a5, 255
+; RVA22U64-PACK-NEXT: slli a5, a5, 40
+; RVA22U64-PACK-NEXT: or a4, a4, a5
+; RVA22U64-PACK-NEXT: slli a7, a7, 56
+; RVA22U64-PACK-NEXT: andi a5, a6, 255
+; RVA22U64-PACK-NEXT: slli a5, a5, 48
+; RVA22U64-PACK-NEXT: or a5, a7, a5
+; RVA22U64-PACK-NEXT: or a4, a4, a5
+; RVA22U64-PACK-NEXT: packh a0, a0, a1
+; RVA22U64-PACK-NEXT: andi a1, a2, 255
+; RVA22U64-PACK-NEXT: slli a1, a1, 16
+; RVA22U64-PACK-NEXT: andi a2, a3, 255
+; RVA22U64-PACK-NEXT: slli a2, a2, 24
+; RVA22U64-PACK-NEXT: or a1, a1, a2
+; RVA22U64-PACK-NEXT: or a0, a0, a1
+; RVA22U64-PACK-NEXT: or a0, a0, a4
+; RVA22U64-PACK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v8i8_pack:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
@@ -2570,6 +3031,20 @@ define <6 x i8> @buildvec_v6i8_pack(i8 %e1, i8 %e2, i8 %e3, i8 %e4, i8 %e5, i8 %
; RV32VB-NEXT: vslide1down.vx v8, v8, a1
; RV32VB-NEXT: ret
;
+; RV32VB-PACK-LABEL: buildvec_v6i8_pack:
+; RV32VB-PACK: # %bb.0:
+; RV32VB-PACK-NEXT: packh a0, a0, a1
+; RV32VB-PACK-NEXT: slli a3, a3, 24
+; RV32VB-PACK-NEXT: andi a1, a2, 255
+; RV32VB-PACK-NEXT: slli a1, a1, 16
+; RV32VB-PACK-NEXT: or a0, a0, a3
+; RV32VB-PACK-NEXT: or a0, a0, a1
+; RV32VB-PACK-NEXT: packh a1, a4, a5
+; RV32VB-PACK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32VB-PACK-NEXT: vmv.v.x v8, a0
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a1
+; RV32VB-PACK-NEXT: ret
+;
; RV64V-ONLY-LABEL: buildvec_v6i8_pack:
; RV64V-ONLY: # %bb.0:
; RV64V-ONLY-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
@@ -2604,6 +3079,25 @@ define <6 x i8> @buildvec_v6i8_pack(i8 %e1, i8 %e2, i8 %e3, i8 %e4, i8 %e5, i8 %
; RVA22U64-NEXT: vmv.s.x v8, a0
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v6i8_pack:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: packh a0, a0, a1
+; RVA22U64-PACK-NEXT: andi a1, a2, 255
+; RVA22U64-PACK-NEXT: slli a1, a1, 16
+; RVA22U64-PACK-NEXT: andi a2, a3, 255
+; RVA22U64-PACK-NEXT: slli a2, a2, 24
+; RVA22U64-PACK-NEXT: or a1, a1, a2
+; RVA22U64-PACK-NEXT: or a0, a0, a1
+; RVA22U64-PACK-NEXT: andi a1, a4, 255
+; RVA22U64-PACK-NEXT: slli a1, a1, 32
+; RVA22U64-PACK-NEXT: andi a2, a5, 255
+; RVA22U64-PACK-NEXT: slli a2, a2, 40
+; RVA22U64-PACK-NEXT: or a1, a1, a2
+; RVA22U64-PACK-NEXT: or a0, a0, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v6i8_pack:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
@@ -2647,6 +3141,15 @@ define <4 x i16> @buildvec_v4i16_pack(i16 %e1, i16 %e2, i16 %e3, i16 %e4) {
; RV32VB-NEXT: vslide1down.vx v8, v8, a2
; RV32VB-NEXT: ret
;
+; RV32VB-PACK-LABEL: buildvec_v4i16_pack:
+; RV32VB-PACK: # %bb.0:
+; RV32VB-PACK-NEXT: pack a2, a2, a3
+; RV32VB-PACK-NEXT: pack a0, a0, a1
+; RV32VB-PACK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32VB-PACK-NEXT: vmv.v.x v8, a0
+; RV32VB-PACK-NEXT: vslide1down.vx v8, v8, a2
+; RV32VB-PACK-NEXT: ret
+;
; RV64V-ONLY-LABEL: buildvec_v4i16_pack:
; RV64V-ONLY: # %bb.0:
; RV64V-ONLY-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
@@ -2671,6 +3174,21 @@ define <4 x i16> @buildvec_v4i16_pack(i16 %e1, i16 %e2, i16 %e3, i16 %e4) {
; RVA22U64-NEXT: vmv.s.x v8, a0
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v4i16_pack:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: slli a3, a3, 48
+; RVA22U64-PACK-NEXT: zext.h a2, a2
+; RVA22U64-PACK-NEXT: slli a2, a2, 32
+; RVA22U64-PACK-NEXT: or a2, a2, a3
+; RVA22U64-PACK-NEXT: zext.h a0, a0
+; RVA22U64-PACK-NEXT: zext.h a1, a1
+; RVA22U64-PACK-NEXT: slli a1, a1, 16
+; RVA22U64-PACK-NEXT: or a0, a0, a1
+; RVA22U64-PACK-NEXT: or a0, a0, a2
+; RVA22U64-PACK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v4i16_pack:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
@@ -2709,6 +3227,13 @@ define <2 x i32> @buildvec_v2i32_pack(i32 %e1, i32 %e2) {
; RVA22U64-NEXT: vmv.s.x v8, a0
; RVA22U64-NEXT: ret
;
+; RVA22U64-PACK-LABEL: buildvec_v2i32_pack:
+; RVA22U64-PACK: # %bb.0:
+; RVA22U64-PACK-NEXT: pack a0, a0, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: ret
+;
; RV64ZVE32-LABEL: buildvec_v2i32_pack:
; RV64ZVE32: # %bb.0:
; RV64ZVE32-NEXT: vsetivli zero, 2, e32, m1, ta, ma