[llvm] b8b756c - [RISCV] Add missing check prefixes to vreductions-mask.ll. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 17 12:58:04 PST 2023


Author: Craig Topper
Date: 2023-01-17T12:57:51-08:00
New Revision: b8b756c6f12d0f5e63335cb0621f755ddd72cdee

URL: https://github.com/llvm/llvm-project/commit/b8b756c6f12d0f5e63335cb0621f755ddd72cdee
DIFF: https://github.com/llvm/llvm-project/commit/b8b756c6f12d0f5e63335cb0621f755ddd72cdee.diff

LOG: [RISCV] Add missing check prefixes to vreductions-mask.ll. NFC

The riscv32 and riscv64 output conflicts for some tests (the
sign-extension shifts use slli/srai by 31 on riscv32 but by 63 on
riscv64), which caused update_llc_test_checks.py to drop the check
lines for those tests entirely.

Add target-specific RV32/RV64 check prefixes for these cases.
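
For reference, the assertions can be regenerated after editing the RUN
lines with the script named in the test header. A minimal sketch of the
usual invocation from an llvm-project checkout (the build path here is
an assumption; --llc-binary points at a locally built llc):

  python llvm/utils/update_llc_test_checks.py \
      --llc-binary build/bin/llc \
      llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll

With distinct --check-prefixes on each RUN line, the script can emit
RV32- and RV64-specific check bodies where the output diverges instead
of silently dropping the conflicting checks.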

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
index ed1f65adab56d..7ec32f395732d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare i1 @llvm.vector.reduce.or.nxv1i1(<vscale x 1 x i1>)
 
@@ -19,6 +19,21 @@ define signext i1 @vreduce_or_nxv1i1(<vscale x 1 x i1> %v) {
 declare i1 @llvm.vector.reduce.xor.nxv1i1(<vscale x 1 x i1>)
 
 define signext i1 @vreduce_xor_nxv1i1(<vscale x 1 x i1> %v) {
+; RV32-LABEL: vreduce_xor_nxv1i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv1i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.xor.nxv1i1(<vscale x 1 x i1> %v)
   ret i1 %red
 }
@@ -113,6 +128,21 @@ define signext i1 @vreduce_or_nxv2i1(<vscale x 2 x i1> %v) {
 declare i1 @llvm.vector.reduce.xor.nxv2i1(<vscale x 2 x i1>)
 
 define signext i1 @vreduce_xor_nxv2i1(<vscale x 2 x i1> %v) {
+; RV32-LABEL: vreduce_xor_nxv2i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv2i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.xor.nxv2i1(<vscale x 2 x i1> %v)
   ret i1 %red
 }
@@ -207,6 +237,21 @@ define signext i1 @vreduce_or_nxv4i1(<vscale x 4 x i1> %v) {
 declare i1 @llvm.vector.reduce.xor.nxv4i1(<vscale x 4 x i1>)
 
 define signext i1 @vreduce_xor_nxv4i1(<vscale x 4 x i1> %v) {
+; RV32-LABEL: vreduce_xor_nxv4i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv4i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.xor.nxv4i1(<vscale x 4 x i1> %v)
   ret i1 %red
 }
@@ -301,6 +346,21 @@ define signext i1 @vreduce_or_nxv8i1(<vscale x 8 x i1> %v) {
 declare i1 @llvm.vector.reduce.xor.nxv8i1(<vscale x 8 x i1>)
 
 define signext i1 @vreduce_xor_nxv8i1(<vscale x 8 x i1> %v) {
+; RV32-LABEL: vreduce_xor_nxv8i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv8i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.xor.nxv8i1(<vscale x 8 x i1> %v)
   ret i1 %red
 }
@@ -395,6 +455,21 @@ define signext i1 @vreduce_or_nxv16i1(<vscale x 16 x i1> %v) {
 declare i1 @llvm.vector.reduce.xor.nxv16i1(<vscale x 16 x i1>)
 
 define signext i1 @vreduce_xor_nxv16i1(<vscale x 16 x i1> %v) {
+; RV32-LABEL: vreduce_xor_nxv16i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv16i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.xor.nxv16i1(<vscale x 16 x i1> %v)
   ret i1 %red
 }
@@ -489,6 +564,21 @@ define signext i1 @vreduce_or_nxv32i1(<vscale x 32 x i1> %v) {
 declare i1 @llvm.vector.reduce.xor.nxv32i1(<vscale x 32 x i1>)
 
 define signext i1 @vreduce_xor_nxv32i1(<vscale x 32 x i1> %v) {
+; RV32-LABEL: vreduce_xor_nxv32i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv32i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.xor.nxv32i1(<vscale x 32 x i1> %v)
   ret i1 %red
 }
@@ -583,6 +673,21 @@ define signext i1 @vreduce_or_nxv64i1(<vscale x 64 x i1> %v) {
 declare i1 @llvm.vector.reduce.xor.nxv64i1(<vscale x 64 x i1>)
 
 define signext i1 @vreduce_xor_nxv64i1(<vscale x 64 x i1> %v) {
+; RV32-LABEL: vreduce_xor_nxv64i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv64i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.xor.nxv64i1(<vscale x 64 x i1> %v)
   ret i1 %red
 }
@@ -663,6 +768,21 @@ define signext i1 @vreduce_smin_nxv64i1(<vscale x 64 x i1> %v) {
 declare i1 @llvm.vector.reduce.add.nxv1i1(<vscale x 1 x i1>)
 
 define signext i1 @vreduce_add_nxv1i1(<vscale x 1 x i1> %v) {
+; RV32-LABEL: vreduce_add_nxv1i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv1i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.add.nxv1i1(<vscale x 1 x i1> %v)
   ret i1 %red
 }
@@ -670,6 +790,21 @@ define signext i1 @vreduce_add_nxv1i1(<vscale x 1 x i1> %v) {
 declare i1 @llvm.vector.reduce.add.nxv2i1(<vscale x 2 x i1>)
 
 define signext i1 @vreduce_add_nxv2i1(<vscale x 2 x i1> %v) {
+; RV32-LABEL: vreduce_add_nxv2i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv2i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.add.nxv2i1(<vscale x 2 x i1> %v)
   ret i1 %red
 }
@@ -677,6 +812,21 @@ define signext i1 @vreduce_add_nxv2i1(<vscale x 2 x i1> %v) {
 declare i1 @llvm.vector.reduce.add.nxv4i1(<vscale x 4 x i1>)
 
 define signext i1 @vreduce_add_nxv4i1(<vscale x 4 x i1> %v) {
+; RV32-LABEL: vreduce_add_nxv4i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv4i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.add.nxv4i1(<vscale x 4 x i1> %v)
   ret i1 %red
 }
@@ -684,6 +834,21 @@ define signext i1 @vreduce_add_nxv4i1(<vscale x 4 x i1> %v) {
 declare i1 @llvm.vector.reduce.add.nxv8i1(<vscale x 8 x i1>)
 
 define signext i1 @vreduce_add_nxv8i1(<vscale x 8 x i1> %v) {
+; RV32-LABEL: vreduce_add_nxv8i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv8i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.add.nxv8i1(<vscale x 8 x i1> %v)
   ret i1 %red
 }
@@ -691,6 +856,21 @@ define signext i1 @vreduce_add_nxv8i1(<vscale x 8 x i1> %v) {
 declare i1 @llvm.vector.reduce.add.nxv16i1(<vscale x 16 x i1>)
 
 define signext i1 @vreduce_add_nxv16i1(<vscale x 16 x i1> %v) {
+; RV32-LABEL: vreduce_add_nxv16i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv16i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.add.nxv16i1(<vscale x 16 x i1> %v)
   ret i1 %red
 }
@@ -698,6 +878,21 @@ define signext i1 @vreduce_add_nxv16i1(<vscale x 16 x i1> %v) {
 declare i1 @llvm.vector.reduce.add.nxv32i1(<vscale x 32 x i1>)
 
 define signext i1 @vreduce_add_nxv32i1(<vscale x 32 x i1> %v) {
+; RV32-LABEL: vreduce_add_nxv32i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv32i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.add.nxv32i1(<vscale x 32 x i1> %v)
   ret i1 %red
 }
@@ -705,6 +900,21 @@ define signext i1 @vreduce_add_nxv32i1(<vscale x 32 x i1> %v) {
 declare i1 @llvm.vector.reduce.add.nxv64i1(<vscale x 64 x i1>)
 
 define signext i1 @vreduce_add_nxv64i1(<vscale x 64 x i1> %v) {
+; RV32-LABEL: vreduce_add_nxv64i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv64i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.add.nxv64i1(<vscale x 64 x i1> %v)
   ret i1 %red
 }
