[llvm] d994231 - [RISCV] Adjust check lines to reduce duplication

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 25 11:25:43 PDT 2023


Author: Philip Reames
Date: 2023-09-25T11:25:36-07:00
New Revision: d9942319d703be158e7090f4e6aba640e15d6a19

URL: https://github.com/llvm/llvm-project/commit/d9942319d703be158e7090f4e6aba640e15d6a19
DIFF: https://github.com/llvm/llvm-project/commit/d9942319d703be158e7090f4e6aba640e15d6a19.diff

LOG: [RISCV] Adjust check lines to reduce duplication

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll

Removed: 
    

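For context: the change adds a shared CHECK prefix to both RUN lines so that functions whose RV32 and RV64 codegen is identical (fadd_constant_rhs and fdiv_constant_rhs below) need only one block of check lines instead of two. A minimal sketch of the idiom, using a placeholder function and placeholder instructions rather than anything taken from this test:

    ; RUN: llc -mtriple=riscv32 ... | FileCheck %s --check-prefixes=CHECK,RV32
    ; RUN: llc -mtriple=riscv64 ... | FileCheck %s --check-prefixes=CHECK,RV64

    ; Output common to both targets is matched once via the shared prefix:
    ; CHECK-LABEL: some_fn:
    ; CHECK:       # %bb.0:
    ;
    ; Output that differs keeps a per-target prefix:
    ; RV32:        <rv32-only sequence>
    ; RV64:        <rv64-only sequence>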

################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
index a03633d63832eac..37a43c3550a5282 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <4 x i32> @add_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
 ; RV32-LABEL: add_constant_rhs:
@@ -259,43 +259,24 @@ define <4 x i32> @udiv_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
 
 
 define <4 x float> @fadd_constant_rhs(float %a, float %b, float %c, float %d) {
-; RV32-LABEL: fadd_constant_rhs:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lui a0, 269184
-; RV32-NEXT:    fmv.w.x fa5, a0
-; RV32-NEXT:    fadd.s fa4, fa0, fa5
-; RV32-NEXT:    lui a0, 269440
-; RV32-NEXT:    fmv.w.x fa0, a0
-; RV32-NEXT:    fadd.s fa1, fa1, fa0
-; RV32-NEXT:    lui a0, 262144
-; RV32-NEXT:    fmv.w.x fa0, a0
-; RV32-NEXT:    fadd.s fa2, fa2, fa0
-; RV32-NEXT:    fadd.s fa5, fa3, fa5
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vfslide1down.vf v8, v8, fa4
-; RV32-NEXT:    vfslide1down.vf v8, v8, fa1
-; RV32-NEXT:    vfslide1down.vf v8, v8, fa2
-; RV32-NEXT:    vfslide1down.vf v8, v8, fa5
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: fadd_constant_rhs:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, 269184
-; RV64-NEXT:    fmv.w.x fa5, a0
-; RV64-NEXT:    fadd.s fa4, fa0, fa5
-; RV64-NEXT:    lui a0, 269440
-; RV64-NEXT:    fmv.w.x fa0, a0
-; RV64-NEXT:    fadd.s fa1, fa1, fa0
-; RV64-NEXT:    lui a0, 262144
-; RV64-NEXT:    fmv.w.x fa0, a0
-; RV64-NEXT:    fadd.s fa2, fa2, fa0
-; RV64-NEXT:    fadd.s fa5, fa3, fa5
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vfslide1down.vf v8, v8, fa4
-; RV64-NEXT:    vfslide1down.vf v8, v8, fa1
-; RV64-NEXT:    vfslide1down.vf v8, v8, fa2
-; RV64-NEXT:    vfslide1down.vf v8, v8, fa5
-; RV64-NEXT:    ret
+; CHECK-LABEL: fadd_constant_rhs:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 269184
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    fadd.s fa4, fa0, fa5
+; CHECK-NEXT:    lui a0, 269440
+; CHECK-NEXT:    fmv.w.x fa0, a0
+; CHECK-NEXT:    fadd.s fa1, fa1, fa0
+; CHECK-NEXT:    lui a0, 262144
+; CHECK-NEXT:    fmv.w.x fa0, a0
+; CHECK-NEXT:    fadd.s fa2, fa2, fa0
+; CHECK-NEXT:    fadd.s fa5, fa3, fa5
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa4
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa5
+; CHECK-NEXT:    ret
   %e0 = fadd float %a, 23.0
   %e1 = fadd float %b, 25.0
   %e2 = fadd float %c, 2.0
@@ -308,43 +289,24 @@ define <4 x float> @fadd_constant_rhs(float %a, float %b, float %c, float %d) {
 }
 
 define <4 x float> @fdiv_constant_rhs(float %a, float %b, float %c, float %d) {
-; RV32-LABEL: fdiv_constant_rhs:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lui a0, 269184
-; RV32-NEXT:    fmv.w.x fa5, a0
-; RV32-NEXT:    fdiv.s fa4, fa0, fa5
-; RV32-NEXT:    lui a0, 269440
-; RV32-NEXT:    fmv.w.x fa0, a0
-; RV32-NEXT:    fdiv.s fa1, fa1, fa0
-; RV32-NEXT:    lui a0, 266752
-; RV32-NEXT:    fmv.w.x fa0, a0
-; RV32-NEXT:    fdiv.s fa2, fa2, fa0
-; RV32-NEXT:    fdiv.s fa5, fa3, fa5
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vfslide1down.vf v8, v8, fa4
-; RV32-NEXT:    vfslide1down.vf v8, v8, fa1
-; RV32-NEXT:    vfslide1down.vf v8, v8, fa2
-; RV32-NEXT:    vfslide1down.vf v8, v8, fa5
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: fdiv_constant_rhs:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, 269184
-; RV64-NEXT:    fmv.w.x fa5, a0
-; RV64-NEXT:    fdiv.s fa4, fa0, fa5
-; RV64-NEXT:    lui a0, 269440
-; RV64-NEXT:    fmv.w.x fa0, a0
-; RV64-NEXT:    fdiv.s fa1, fa1, fa0
-; RV64-NEXT:    lui a0, 266752
-; RV64-NEXT:    fmv.w.x fa0, a0
-; RV64-NEXT:    fdiv.s fa2, fa2, fa0
-; RV64-NEXT:    fdiv.s fa5, fa3, fa5
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vfslide1down.vf v8, v8, fa4
-; RV64-NEXT:    vfslide1down.vf v8, v8, fa1
-; RV64-NEXT:    vfslide1down.vf v8, v8, fa2
-; RV64-NEXT:    vfslide1down.vf v8, v8, fa5
-; RV64-NEXT:    ret
+; CHECK-LABEL: fdiv_constant_rhs:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 269184
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    fdiv.s fa4, fa0, fa5
+; CHECK-NEXT:    lui a0, 269440
+; CHECK-NEXT:    fmv.w.x fa0, a0
+; CHECK-NEXT:    fdiv.s fa1, fa1, fa0
+; CHECK-NEXT:    lui a0, 266752
+; CHECK-NEXT:    fmv.w.x fa0, a0
+; CHECK-NEXT:    fdiv.s fa2, fa2, fa0
+; CHECK-NEXT:    fdiv.s fa5, fa3, fa5
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa4
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa5
+; CHECK-NEXT:    ret
   %e0 = fdiv float %a, 23.0
   %e1 = fdiv float %b, 25.0
   %e2 = fdiv float %c, 10.0

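The check lines themselves are regenerated rather than written by hand, per the NOTE line at the top of the test. After changing the RUN lines, a typical way to refresh the checks would be an invocation along these lines (the build-directory path is an assumption, not something recorded in this commit):

    $ llvm/utils/update_llc_test_checks.py \
          --llc-binary build/bin/llc \
          llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll

With --check-prefixes=CHECK,RV32 and --check-prefixes=CHECK,RV64 on the RUN lines, the script emits a single CHECK block wherever both targets produce the same assembly and falls back to separate RV32/RV64 blocks where they differ, which is what collapses the duplicated bodies above.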
