[llvm] 7c652fe - [RISCV] Add tests for vnsrl.vx where shift amount is truncated

Luke Lau via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jul 26 12:26:50 PDT 2023


Author: Luke Lau
Date: 2023-07-26T20:26:27+01:00
New Revision: 7c652feb95d7806a249f84540d23c62a828d438d

URL: https://github.com/llvm/llvm-project/commit/7c652feb95d7806a249f84540d23c62a828d438d
DIFF: https://github.com/llvm/llvm-project/commit/7c652feb95d7806a249f84540d23c62a828d438d.diff

LOG: [RISCV] Add tests for vnsrl.vx where shift amount is truncated

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D155927
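
For context: each new test splats an i64 shift amount, truncates the splat
to the element width of the source vector, shifts with lshr, and then
truncates the result to the next narrower element type, which is the
trunc-of-lshr shape that corresponds to vnsrl. Since vnsrl reads only the
low log2(2*SEW) bits of its shift operand, truncating the i64 splat does
not change the effective shift amount. A distilled, commented copy of the
first added test (shown here for orientation; it is not part of the commit
message itself):

    define <vscale x 1 x i16> @vnsrl_wx_i64_nxv1i16(<vscale x 1 x i32> %va, i64 %b) {
      ; Splat the 64-bit scalar shift amount across the vector.
      %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
      %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
      ; Truncate the splat to the 32-bit source element width.
      %vb = trunc <vscale x 1 x i64> %splat to <vscale x 1 x i32>
      ; Shift, then narrow: lshr followed by trunc is the vnsrl pattern.
      %x = lshr <vscale x 1 x i32> %va, %vb
      %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
      ret <vscale x 1 x i16> %y
    }

As the CHECK lines below show, the current lowering does not yet exploit
this: both RV32 and RV64 materialize the truncated splat and shift with
vsrl.vv followed by vnsrl.wi 0.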

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
index 30eaabfb6c5a81..1a80e0a7c65795 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s
 
 define <vscale x 1 x i32> @vnsrl_wx_i64_nxv1i32(<vscale x 1 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vnsrl_wx_i64_nxv1i32:
@@ -632,3 +632,77 @@ define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %y
 }
+
+define <vscale x 1 x i16> @vnsrl_wx_i64_nxv1i16(<vscale x 1 x i32> %va, i64 %b) {
+; RV32-LABEL: vnsrl_wx_i64_nxv1i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vnsrl.wi v9, v9, 0
+; RV32-NEXT:    vsrl.vv v8, v8, v9
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vnsrl_wx_i64_nxv1i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v9, a0
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vnsrl.wi v9, v9, 0
+; RV64-NEXT:    vsrl.vv v8, v8, v9
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = trunc <vscale x 1 x i64> %splat to <vscale x 1 x i32>
+  %x = lshr <vscale x 1 x i32> %va, %vb
+  %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %y
+}
+
+define <vscale x 1 x i8> @vnsrl_wx_i64_nxv1i8(<vscale x 1 x i16> %va, i64 %b) {
+; RV32-LABEL: vnsrl_wx_i64_nxv1i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vnsrl.wi v9, v9, 0
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vnsrl.wi v9, v9, 0
+; RV32-NEXT:    vsrl.vv v8, v8, v9
+; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vnsrl.wi v8, v8, 0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vnsrl_wx_i64_nxv1i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v9, a0
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vnsrl.wi v9, v9, 0
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; RV64-NEXT:    vnsrl.wi v9, v9, 0
+; RV64-NEXT:    vsrl.vv v8, v8, v9
+; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = trunc <vscale x 1 x i64> %splat to <vscale x 1 x i16>
+  %x = lshr <vscale x 1 x i16> %va, %vb
+  %y = trunc <vscale x 1 x i16> %x to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %y
+}

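
If a later combine learns to look through the truncated splat, one might
expect output along these lines for the nxv1i16 case (a hedged sketch of a
possible future lowering; this patch only adds the tests and does not emit
this):

    vsetvli a1, zero, e16, mf4, ta, ma
    vnsrl.wx v8, v8, a0
    ret

On RV32 the low half of the i64 shift amount is already in a0, and because
vnsrl consults only the low log2(2*SEW) = 5 bits at SEW=16, passing a0
unmodified would be correct there as well.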