[llvm] c31a810 - [RISCV] Add -mattr=+v to intrinsic-cttz-elts.ll. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 15 12:05:38 PDT 2024


Author: Craig Topper
Date: 2024-04-15T12:01:41-07:00
New Revision: c31a8104d1173d79f8b71518829046b441d98d59

URL: https://github.com/llvm/llvm-project/commit/c31a8104d1173d79f8b71518829046b441d98d59
DIFF: https://github.com/llvm/llvm-project/commit/c31a8104d1173d79f8b71518829046b441d98d59.diff

LOG: [RISCV] Add -mattr=+v to intrinsic-cttz-elts.ll. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
index 15abc9b75883c8..49d4760a2e9abf 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
@@ -1,38 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -check-prefix=RV32
-; RUN: llc -mtriple=riscv64 < %s | FileCheck %s -check-prefix=RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v < %s | FileCheck %s -check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s -check-prefix=RV64
 
 ; FIXED WIDTH
 
 define i16 @ctz_v4i32(<4 x i32> %a) {
 ; RV32-LABEL: ctz_v4i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lw a3, 0(a0)
-; RV32-NEXT:    lw a1, 4(a0)
-; RV32-NEXT:    lw a2, 12(a0)
-; RV32-NEXT:    lw a4, 8(a0)
-; RV32-NEXT:    seqz a0, a3
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    andi a0, a0, 4
-; RV32-NEXT:    seqz a3, a4
-; RV32-NEXT:    addi a3, a3, -1
-; RV32-NEXT:    andi a3, a3, 2
-; RV32-NEXT:    bltu a3, a0, .LBB0_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    mv a0, a3
-; RV32-NEXT:  .LBB0_2:
-; RV32-NEXT:    snez a2, a2
-; RV32-NEXT:    seqz a1, a1
-; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    andi a1, a1, 3
-; RV32-NEXT:    bltu a2, a1, .LBB0_4
-; RV32-NEXT:  # %bb.3:
-; RV32-NEXT:    mv a1, a2
-; RV32-NEXT:  .LBB0_4:
-; RV32-NEXT:    bltu a1, a0, .LBB0_6
-; RV32-NEXT:  # %bb.5:
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:  .LBB0_6:
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; RV32-NEXT:    vmv.v.i v8, 0
+; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
+; RV32-NEXT:    vid.v v9
+; RV32-NEXT:    vrsub.vi v9, v9, 4
+; RV32-NEXT:    vand.vv v8, v8, v9
+; RV32-NEXT:    vredmaxu.vs v8, v8, v8
+; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 4
 ; RV32-NEXT:    sub a1, a1, a0
 ; RV32-NEXT:    andi a0, a1, 255
@@ -40,32 +24,16 @@ define i16 @ctz_v4i32(<4 x i32> %a) {
 ;
 ; RV64-LABEL: ctz_v4i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lw a3, 0(a0)
-; RV64-NEXT:    lw a1, 8(a0)
-; RV64-NEXT:    lw a2, 24(a0)
-; RV64-NEXT:    lw a4, 16(a0)
-; RV64-NEXT:    seqz a0, a3
-; RV64-NEXT:    addi a0, a0, -1
-; RV64-NEXT:    andi a0, a0, 4
-; RV64-NEXT:    seqz a3, a4
-; RV64-NEXT:    addi a3, a3, -1
-; RV64-NEXT:    andi a3, a3, 2
-; RV64-NEXT:    bltu a3, a0, .LBB0_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a0, a3
-; RV64-NEXT:  .LBB0_2:
-; RV64-NEXT:    snez a2, a2
-; RV64-NEXT:    seqz a1, a1
-; RV64-NEXT:    addi a1, a1, -1
-; RV64-NEXT:    andi a1, a1, 3
-; RV64-NEXT:    bltu a2, a1, .LBB0_4
-; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a1, a2
-; RV64-NEXT:  .LBB0_4:
-; RV64-NEXT:    bltu a1, a0, .LBB0_6
-; RV64-NEXT:  # %bb.5:
-; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB0_6:
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vmsne.vi v0, v8, 0
+; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; RV64-NEXT:    vmv.v.i v8, 0
+; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
+; RV64-NEXT:    vid.v v9
+; RV64-NEXT:    vrsub.vi v9, v9, 4
+; RV64-NEXT:    vand.vv v8, v8, v9
+; RV64-NEXT:    vredmaxu.vs v8, v8, v8
+; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    li a1, 4
 ; RV64-NEXT:    subw a1, a1, a0
 ; RV64-NEXT:    andi a0, a1, 255
@@ -79,14 +47,14 @@ define i16 @ctz_v4i32(<4 x i32> %a) {
 define i32 @ctz_v2i1_poison(<2 x i1> %a) {
 ; RV32-LABEL: ctz_v2i1_poison:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    andi a1, a1, 1
-; RV32-NEXT:    slli a0, a0, 31
-; RV32-NEXT:    srai a0, a0, 31
-; RV32-NEXT:    andi a0, a0, 2
-; RV32-NEXT:    bltu a1, a0, .LBB1_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:  .LBB1_2:
+; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT:    vmv.v.i v8, 0
+; RV32-NEXT:    vmerge.vim v8, v8, -1, v0
+; RV32-NEXT:    vid.v v9
+; RV32-NEXT:    vrsub.vi v9, v9, 2
+; RV32-NEXT:    vand.vv v8, v8, v9
+; RV32-NEXT:    vredmaxu.vs v8, v8, v8
+; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 2
 ; RV32-NEXT:    sub a1, a1, a0
 ; RV32-NEXT:    andi a0, a1, 255
@@ -94,14 +62,14 @@ define i32 @ctz_v2i1_poison(<2 x i1> %a) {
 ;
 ; RV64-LABEL: ctz_v2i1_poison:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    andi a1, a1, 1
-; RV64-NEXT:    slli a0, a0, 63
-; RV64-NEXT:    srai a0, a0, 63
-; RV64-NEXT:    andi a0, a0, 2
-; RV64-NEXT:    bltu a1, a0, .LBB1_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a0, a1
-; RV64-NEXT:  .LBB1_2:
+; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT:    vmv.v.i v8, 0
+; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
+; RV64-NEXT:    vid.v v9
+; RV64-NEXT:    vrsub.vi v9, v9, 2
+; RV64-NEXT:    vand.vv v8, v8, v9
+; RV64-NEXT:    vredmaxu.vs v8, v8, v8
+; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    li a1, 2
 ; RV64-NEXT:    subw a1, a1, a0
 ; RV64-NEXT:    andi a0, a1, 255


        


More information about the llvm-commits mailing list