[llvm] bc2fe4a - [RISCV] Add basic fault-first load coverage for VSETVLI insertion

Philip Reames via llvm-commits <llvm-commits at lists.llvm.org>
Mon May 23 10:29:07 PDT 2022


Author: Philip Reames
Date: 2022-05-23T10:11:07-07:00
New Revision: bc2fe4a0d675e7559de5734df0a4a40170389acc

URL: https://github.com/llvm/llvm-project/commit/bc2fe4a0d675e7559de5734df0a4a40170389acc
DIFF: https://github.com/llvm/llvm-project/commit/bc2fe4a0d675e7559de5734df0a4a40170389acc.diff

LOG: [RISCV] Add basic fault-first load coverage for VSETVLI insertion

Simplified version of a test taken from D123581.
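
For context, vle64ff.v is the RVV fault-only-first load: it loads up to
VL elements, but if any element other than the first would fault, it
truncates the access and writes the number of elements actually loaded
back to the vl CSR. The VSETVLI insertion pass therefore has to treat
the instruction as a def of VL, which is what the csrr/vsetvli pair in
the test below exercises. An annotated sketch of the generated sequence
(the comments are explanatory and not part of the test):

    vsetvli   zero, a1, e64, m1, ta, mu   # request VL = a1, SEW=64, LMUL=1
    vle64ff.v v8, (a0)                    # may stop early; updates the vl CSR
    csrr      a0, vl                      # read back the VL actually used
    vsetvli   zero, a0, e64, m1, tu, mu   # re-establish state with the new VL
    vadd.vx   v8, v8, a2                  # operate only on the loaded elements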

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 267a57c41c1ec..4e0364e41eb74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -493,6 +493,34 @@ entry:
   ret i64 %vl
 }
 
+; Fault-first loads can modify VL.
+; TODO: The first and third VSETVLIs are redundant here.
+define <vscale x 1 x i64> @vleNff(i64* %str, i64 %n, i64 %x) {
+; CHECK-LABEL: vleNff:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vle64ff.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vl
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vadd.vx v8, v8, a2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 0, i64 2)
+  %1 = bitcast i64* %str to <vscale x 1 x i64>*
+  %2 = tail call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %1, i64 %0)
+  %3 = extractvalue { <vscale x 1 x i64>, i64 } %2, 0
+  %4 = extractvalue { <vscale x 1 x i64>, i64 } %2, 1
+  %5 = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> %3, <vscale x 1 x i64> %3, i64 %x, i64 %4)
+  ret <vscale x 1 x i64> %5
+}
+
+declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(
+  <vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64)
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(
+  <vscale x 1 x i64>, i64, i64)
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -501,6 +529,12 @@ declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   i64,
   i64);
 
+declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
 declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,

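On the TODO above: the first vsetvli comes from the explicit
@llvm.riscv.vsetvli call in the IR, and its e8/m4 state is immediately
clobbered by the vsetvli emitted for the vle64ff, while the third merely
re-requests the VL that vle64ff already left in the vl CSR. A hedged
sketch of one shape the output could take once the pass learns to elide
them (not output produced by this commit):

    vleNff:
        vsetvli   zero, a1, e64, m1, tu, mu
        vle64ff.v v8, (a0)
        vadd.vx   v8, v8, a2
        ret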

        

