[llvm] c26510a - [RISCV] Fix intrinsic names in sf_vfwmacc_4x4x4.ll. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Dec 16 14:55:07 PST 2023


Author: Craig Topper
Date: 2023-12-16T14:54:50-08:00
New Revision: c26510a2bf369a0031a6757dedc1fe9f901b3975

URL: https://github.com/llvm/llvm-project/commit/c26510a2bf369a0031a6757dedc1fe9f901b3975
DIFF: https://github.com/llvm/llvm-project/commit/c26510a2bf369a0031a6757dedc1fe9f901b3975.diff

LOG: [RISCV] Fix intrinsic names in sf_vfwmacc_4x4x4.ll. NFC

The type strings in the intrinsic names used f16 instead of bf16 for
the bfloat operand types. Nothing actually verifies these strings, so
everything still worked.

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll
index 0ba92c5f70e6ee..180155139b57b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll
@@ -4,7 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfmin,+xsfvfwmaccqqq \
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
 
-declare <vscale x 1 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv1f32.nxv4f16.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv1f32.nxv4bf16.nxv1bf16.iXLen(
   <vscale x 1 x float>,
   <vscale x 4 x bfloat>,
   <vscale x 1 x bfloat>,
@@ -17,7 +17,7 @@ define <vscale x 1 x float> @intrinsic_vfwmacc_4x4x4_tu_f32mf2(<vscale x 1 x flo
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv1f32.nxv4f16.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv1f32.nxv4bf16.nxv1bf16.iXLen(
     <vscale x 1 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 1 x bfloat> %2,
@@ -33,7 +33,7 @@ define <vscale x 1 x float> @intrinsic_vfwmacc_4x4x4_ta_f32mf2(<vscale x 1 x flo
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv1f32.nxv4f16.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv1f32.nxv4bf16.nxv1bf16.iXLen(
     <vscale x 1 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 1 x bfloat> %2,
@@ -42,7 +42,7 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv2f32.nxv4f16.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv2f32.nxv4bf16.nxv2bf16.iXLen(
   <vscale x 2 x float>,
   <vscale x 4 x bfloat>,
   <vscale x 2 x bfloat>,
@@ -55,7 +55,7 @@ define <vscale x 2 x float> @intrinsic_vfwmacc_4x4x4_tu_f32m1(<vscale x 2 x floa
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv2f32.nxv4f16.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv2f32.nxv4bf16.nxv2bf16.iXLen(
     <vscale x 2 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 2 x bfloat> %2,
@@ -71,7 +71,7 @@ define <vscale x 2 x float> @intrinsic_vfwmacc_4x4x4_ta_f32m1(<vscale x 2 x floa
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv2f32.nxv4f16.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv2f32.nxv4bf16.nxv2bf16.iXLen(
     <vscale x 2 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 2 x bfloat> %2,
@@ -80,7 +80,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv4f32.nxv4f16.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv4f32.nxv4bf16.nxv4bf16.iXLen(
   <vscale x 4 x float>,
   <vscale x 4 x bfloat>,
   <vscale x 4 x bfloat>,
@@ -93,7 +93,7 @@ define <vscale x 4 x float> @intrinsic_vfwmacc_4x4x4_tu_f32m2(<vscale x 4 x floa
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v10, v11
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv4f32.nxv4f16.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv4f32.nxv4bf16.nxv4bf16.iXLen(
     <vscale x 4 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 4 x bfloat> %2,
@@ -109,7 +109,7 @@ define <vscale x 4 x float> @intrinsic_vfwmacc_4x4x4_ta_f32m2(<vscale x 4 x floa
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v10, v11
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv4f32.nxv4f16.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv4f32.nxv4bf16.nxv4bf16.iXLen(
     <vscale x 4 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 4 x bfloat> %2,
@@ -118,7 +118,7 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv8f32.nxv4f16.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv8f32.nxv4bf16.nxv8bf16.iXLen(
   <vscale x 8 x float>,
   <vscale x 4 x bfloat>,
   <vscale x 8 x bfloat>,
@@ -131,7 +131,7 @@ define <vscale x 8 x float> @intrinsic_vfwmacc_4x4x4_tu_f32m4(<vscale x 8 x floa
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v12, v14
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv8f32.nxv4f16.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv8f32.nxv4bf16.nxv8bf16.iXLen(
     <vscale x 8 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 8 x bfloat> %2,
@@ -147,7 +147,7 @@ define <vscale x 8 x float> @intrinsic_vfwmacc_4x4x4_ta_f32m4(<vscale x 8 x floa
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v12, v14
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv8f32.nxv4f16.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv8f32.nxv4bf16.nxv8bf16.iXLen(
     <vscale x 8 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 8 x bfloat> %2,
@@ -156,7 +156,7 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv16f32.nxv4f16.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv16f32.nxv4bf16.nxv16bf16.iXLen(
   <vscale x 16 x float>,
   <vscale x 4 x bfloat>,
   <vscale x 16 x bfloat>,
@@ -169,7 +169,7 @@ define <vscale x 16 x float> @intrinsic_vfwmacc_4x4x4_tu_f32m8(<vscale x 16 x fl
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v16, v20
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv16f32.nxv4f16.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv16f32.nxv4bf16.nxv16bf16.iXLen(
     <vscale x 16 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 16 x bfloat> %2,
@@ -185,7 +185,7 @@ define <vscale x 16 x float> @intrinsic_vfwmacc_4x4x4_ta_f32m8(<vscale x 16 x fl
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v16, v20
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv16f32.nxv4f16.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv16f32.nxv4bf16.nxv16bf16.iXLen(
     <vscale x 16 x float> %0,
     <vscale x 4 x bfloat> %1,
     <vscale x 16 x bfloat> %2,


        


More information about the llvm-commits mailing list