[llvm] 189ca69 - [RISCV] Use the new chain when converting a fixed RVV load

Roger Ferrer Ibanez via llvm-commits llvm-commits at lists.llvm.org
Fri May 13 15:21:53 PDT 2022


Author: Roger Ferrer Ibanez
Date: 2022-05-13T22:21:08Z
New Revision: 189ca6958e84272ced8b91011fc9b9b10ddbb99f

URL: https://github.com/llvm/llvm-project/commit/189ca6958e84272ced8b91011fc9b9b10ddbb99f
DIFF: https://github.com/llvm/llvm-project/commit/189ca6958e84272ced8b91011fc9b9b10ddbb99f.diff

LOG: [RISCV] Use the new chain when converting a fixed RVV load

When building the final merged node, we were using the original chain
rather than the output chain of the new operation. After some collapsing
of the chain, this could cause the loads to be incorrectly scheduled with
respect to later stores.

This was uncovered by SingleSource/Regression/C/gcc-c-torture/execute/pr36038.c
of the LLVM test-suite.
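
The hazard is the classic overlapping-copy one. A minimal illustration (a
sketch, not the actual pr36038.c; the wrong-chain-fixed-load.ll test below
exercises the equivalent pattern):

    #include <cstring>

    long c[5];

    void do_memmove() {
      // Destination overlaps the source shifted by one element, so each
      // element of c[0..3] must be loaded before the store into its
      // destination slot c[1..4] overwrites it. With the stale chain, the
      // vector load at offset 16 could be scheduled after the store at
      // offset 8, reading already-clobbered data.
      std::memmove(&c[1], &c[0], sizeof(c[0]) * 4);
    }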

Differential Revision: https://reviews.llvm.org/D125560

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 4754e8edc3a80..392594c27a91c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5721,7 +5721,7 @@ RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
                               Load->getMemoryVT(), Load->getMemOperand());
 
   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
-  return DAG.getMergeValues({Result, Load->getChain()}, DL);
+  return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
 }
 
 SDValue

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index 2804ea1886aae..1789f5dc55e76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -1138,10 +1138,10 @@ define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) {
 ; LMULMAX1-NEXT:    vle32.v v8, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vle32.v v13, (a1)
-; LMULMAX1-NEXT:    addi a0, a1, 16
-; LMULMAX1-NEXT:    vle32.v v14, (a0)
 ; LMULMAX1-NEXT:    addi a0, a1, 32
 ; LMULMAX1-NEXT:    vle32.v v15, (a0)
+; LMULMAX1-NEXT:    addi a0, a1, 16
+; LMULMAX1-NEXT:    vle32.v v14, (a0)
 ; LMULMAX1-NEXT:    addi a0, a1, 48
 ; LMULMAX1-NEXT:    vle32.v v16, (a0)
 ; LMULMAX1-NEXT:    addi a0, a1, 64

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
index 4bc19a7ddd020..68d1505aa882c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
@@ -54,19 +54,19 @@ define void @gather_const_v2f64(<2 x double>* %x) {
 define void @gather_const_v64f16(<64 x half>* %x) {
 ; LMULMAX8-LABEL: gather_const_v64f16:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, a0, 94
-; LMULMAX8-NEXT:    li a2, 64
-; LMULMAX8-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; LMULMAX8-NEXT:    vlse16.v v8, (a1), zero
+; LMULMAX8-NEXT:    li a1, 64
+; LMULMAX8-NEXT:    addi a2, a0, 94
+; LMULMAX8-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; LMULMAX8-NEXT:    vlse16.v v8, (a2), zero
 ; LMULMAX8-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v64f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    addi a1, a0, 80
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a3, a0, 48
+; LMULMAX1-NEXT:    addi a4, a0, 32
 ; LMULMAX1-NEXT:    addi a5, a0, 94
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vlse16.v v8, (a5), zero
@@ -76,11 +76,11 @@ define void @gather_const_v64f16(<64 x half>* %x) {
 ; LMULMAX1-NEXT:    vse16.v v8, (a7)
 ; LMULMAX1-NEXT:    vse16.v v8, (a6)
 ; LMULMAX1-NEXT:    vse16.v v8, (a5)
+; LMULMAX1-NEXT:    vse16.v v8, (a1)
 ; LMULMAX1-NEXT:    vse16.v v8, (a4)
 ; LMULMAX1-NEXT:    vse16.v v8, (a3)
-; LMULMAX1-NEXT:    vse16.v v8, (a2)
 ; LMULMAX1-NEXT:    vse16.v v8, (a0)
-; LMULMAX1-NEXT:    vse16.v v8, (a1)
+; LMULMAX1-NEXT:    vse16.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
   %b = extractelement <64 x half> %a, i32 47
@@ -93,33 +93,33 @@ define void @gather_const_v64f16(<64 x half>* %x) {
 define void @gather_const_v32f32(<32 x float>* %x) {
 ; LMULMAX8-LABEL: gather_const_v32f32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, a0, 68
-; LMULMAX8-NEXT:    li a2, 32
-; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; LMULMAX8-NEXT:    vlse32.v v8, (a1), zero
+; LMULMAX8-NEXT:    li a1, 32
+; LMULMAX8-NEXT:    addi a2, a0, 68
+; LMULMAX8-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; LMULMAX8-NEXT:    vlse32.v v8, (a2), zero
 ; LMULMAX8-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v32f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    addi a1, a0, 64
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a3, a0, 48
+; LMULMAX1-NEXT:    addi a4, a0, 32
 ; LMULMAX1-NEXT:    addi a5, a0, 68
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vlse32.v v8, (a5), zero
-; LMULMAX1-NEXT:    addi a5, a0, 64
+; LMULMAX1-NEXT:    addi a5, a0, 80
 ; LMULMAX1-NEXT:    addi a6, a0, 112
 ; LMULMAX1-NEXT:    addi a7, a0, 96
 ; LMULMAX1-NEXT:    vse32.v v8, (a7)
 ; LMULMAX1-NEXT:    vse32.v v8, (a6)
+; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    vse32.v v8, (a5)
 ; LMULMAX1-NEXT:    vse32.v v8, (a4)
 ; LMULMAX1-NEXT:    vse32.v v8, (a3)
-; LMULMAX1-NEXT:    vse32.v v8, (a2)
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
-; LMULMAX1-NEXT:    vse32.v v8, (a1)
+; LMULMAX1-NEXT:    vse32.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
   %b = extractelement <32 x float> %a, i32 17
@@ -140,23 +140,23 @@ define void @gather_const_v16f64(<16 x double>* %x) {
 ;
 ; LMULMAX1-LABEL: gather_const_v16f64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    addi a1, a0, 80
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a3, a0, 48
+; LMULMAX1-NEXT:    addi a4, a0, 32
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vlse64.v v8, (a4), zero
+; LMULMAX1-NEXT:    vlse64.v v8, (a1), zero
 ; LMULMAX1-NEXT:    addi a5, a0, 64
 ; LMULMAX1-NEXT:    addi a6, a0, 112
 ; LMULMAX1-NEXT:    addi a7, a0, 96
 ; LMULMAX1-NEXT:    vse64.v v8, (a7)
 ; LMULMAX1-NEXT:    vse64.v v8, (a6)
 ; LMULMAX1-NEXT:    vse64.v v8, (a5)
+; LMULMAX1-NEXT:    vse64.v v8, (a1)
 ; LMULMAX1-NEXT:    vse64.v v8, (a4)
 ; LMULMAX1-NEXT:    vse64.v v8, (a3)
-; LMULMAX1-NEXT:    vse64.v v8, (a2)
 ; LMULMAX1-NEXT:    vse64.v v8, (a0)
-; LMULMAX1-NEXT:    vse64.v v8, (a1)
+; LMULMAX1-NEXT:    vse64.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
   %b = extractelement <16 x double> %a, i32 10

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
index c2637891d42b8..b5fad01917e46 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -71,10 +71,10 @@ define void @gather_const_v2i64(<2 x i64>* %x) {
 define void @gather_const_v64i8(<64 x i8>* %x) {
 ; LMULMAX4-LABEL: gather_const_v64i8:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, a0, 32
-; LMULMAX4-NEXT:    li a2, 64
-; LMULMAX4-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
-; LMULMAX4-NEXT:    vlse8.v v8, (a1), zero
+; LMULMAX4-NEXT:    li a1, 64
+; LMULMAX4-NEXT:    addi a2, a0, 32
+; LMULMAX4-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; LMULMAX4-NEXT:    vlse8.v v8, (a2), zero
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
@@ -101,10 +101,10 @@ define void @gather_const_v64i8(<64 x i8>* %x) {
 define void @gather_const_v16i16(<32 x i16>* %x) {
 ; LMULMAX4-LABEL: gather_const_v16i16:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, a0, 50
-; LMULMAX4-NEXT:    li a2, 32
-; LMULMAX4-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; LMULMAX4-NEXT:    vlse16.v v8, (a1), zero
+; LMULMAX4-NEXT:    li a1, 32
+; LMULMAX4-NEXT:    addi a2, a0, 50
+; LMULMAX4-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; LMULMAX4-NEXT:    vlse16.v v8, (a2), zero
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
@@ -113,13 +113,13 @@ define void @gather_const_v16i16(<32 x i16>* %x) {
 ; LMULMAX1-NEXT:    addi a1, a0, 50
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vlse16.v v8, (a1), zero
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
+; LMULMAX1-NEXT:    addi a1, a0, 48
+; LMULMAX1-NEXT:    addi a2, a0, 16
 ; LMULMAX1-NEXT:    addi a3, a0, 32
 ; LMULMAX1-NEXT:    vse16.v v8, (a3)
-; LMULMAX1-NEXT:    vse16.v v8, (a2)
-; LMULMAX1-NEXT:    vse16.v v8, (a0)
 ; LMULMAX1-NEXT:    vse16.v v8, (a1)
+; LMULMAX1-NEXT:    vse16.v v8, (a0)
+; LMULMAX1-NEXT:    vse16.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
   %b = extractelement <32 x i16> %a, i32 25
@@ -143,13 +143,13 @@ define void @gather_const_v16i32(<16 x i32>* %x) {
 ; LMULMAX1-NEXT:    addi a1, a0, 36
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vlse32.v v8, (a1), zero
-; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    addi a2, a0, 48
-; LMULMAX1-NEXT:    addi a3, a0, 32
+; LMULMAX1-NEXT:    addi a1, a0, 32
+; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    addi a3, a0, 48
+; LMULMAX1-NEXT:    vse32.v v8, (a1)
 ; LMULMAX1-NEXT:    vse32.v v8, (a3)
-; LMULMAX1-NEXT:    vse32.v v8, (a2)
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
-; LMULMAX1-NEXT:    vse32.v v8, (a1)
+; LMULMAX1-NEXT:    vse32.v v8, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
   %b = extractelement <16 x i32> %a, i32 9

diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll b/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll
index 3cf5f99314fbc..cbeb0ca18e8fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll
@@ -11,12 +11,12 @@ define void @do.memmove() nounwind {
 ; CHECK-NEXT:    addi a0, a0, %lo(c)
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    addi a1, a0, 16
+; CHECK-NEXT:    vle64.v v9, (a1)
 ; CHECK-NEXT:    addi a1, a0, 8
 ; CHECK-NEXT:    vse64.v v8, (a1)
-; CHECK-NEXT:    addi a1, a0, 16
-; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    addi a0, a0, 24
-; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v9, (a0)
 ; CHECK-NEXT:    ret
 entry:
   ; this thing is "__builtin_memmove(&c[1], &c[0], sizeof(c[0]) * 4);"

