[llvm] 542a83c - [RISCV] Correct load/store alignments in sink-splat-operands.ll. NFC

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Fri May 27 14:44:04 PDT 2022


Author: Craig Topper
Date: 2022-05-27T14:39:31-07:00
New Revision: 542a83c3622225f27c72d86c7af1fe1c56621116

URL: https://github.com/llvm/llvm-project/commit/542a83c3622225f27c72d86c7af1fe1c56621116
DIFF: https://github.com/llvm/llvm-project/commit/542a83c3622225f27c72d86c7af1fe1c56621116.diff

LOG: [RISCV] Correct load/store alignments in sink-splat-operands.ll. NFC

These should be aligned to the natural alignment of the element type,
i.e. 8 bytes for i64. Probably a copy/paste mistake from the i32 tests.

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D126567
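
For context, a minimal hypothetical sketch of the pattern being corrected
(reduced by hand, not taken verbatim from sink-splat-operands.ll): when a
<4 x i64> access only promises align 4, it is underaligned for its element
type, so the RISC-V backend must lower it as a byte-wise access (vle8.v/vse8.v
with VL=32); with the natural align 8 it can use the element-width form
(vle64.v/vse64.v with VL=4), which is what the updated CHECK lines below show.
The function names here are illustrative only.

    ; Underaligned: align 4 is less than the 8-byte natural alignment of
    ; i64, so the backend assumes only 4-byte alignment and emits e8
    ; loads/stores covering the same 32 bytes.
    define <4 x i64> @load_underaligned(<4 x i64>* %p) {
      %v = load <4 x i64>, <4 x i64>* %p, align 4   ; lowers to vle8.v, VL=32
      ret <4 x i64> %v
    }

    ; Naturally aligned: align 8 matches the element size, so the backend
    ; can use the element-typed access directly.
    define <4 x i64> @load_aligned(<4 x i64>* %p) {
      %v = load <4 x i64>, <4 x i64>* %p, align 8   ; lowers to vle64.v, VL=4
      ret <4 x i64> %v
    }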

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index fb69d3cca636..b3669c283e76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -3775,15 +3775,12 @@ define void @sink_splat_mul_lmul2(i64* nocapture %a, i64 signext %x) {
 ; CHECK-LABEL: sink_splat_mul_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB67_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmul.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB67_1
@@ -3798,10 +3795,10 @@ vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = mul <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3814,15 +3811,12 @@ define void @sink_splat_add_lmul2(i64* nocapture %a, i64 signext %x) {
 ; CHECK-LABEL: sink_splat_add_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB68_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vadd.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB68_1
@@ -3837,10 +3831,10 @@ vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = add <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3853,15 +3847,12 @@ define void @sink_splat_sub_lmul2(i64* nocapture %a, i64 signext %x) {
 ; CHECK-LABEL: sink_splat_sub_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB69_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vsub.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB69_1
@@ -3876,10 +3867,10 @@ vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = sub <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3892,15 +3883,12 @@ define void @sink_splat_rsub_lmul2(i64* nocapture %a, i64 signext %x) {
 ; CHECK-LABEL: sink_splat_rsub_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB70_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vrsub.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB70_1
@@ -3915,10 +3903,10 @@ vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = sub <4 x i64> %broadcast.splat, %wide.load
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3931,15 +3919,12 @@ define void @sink_splat_and_lmul2(i64* nocapture %a, i64 signext %x) {
 ; CHECK-LABEL: sink_splat_and_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB71_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vand.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB71_1
@@ -3954,10 +3939,10 @@ vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = and <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3970,15 +3955,12 @@ define void @sink_splat_or_lmul2(i64* nocapture %a, i64 signext %x) {
 ; CHECK-LABEL: sink_splat_or_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB72_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vor.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB72_1
@@ -3993,10 +3975,10 @@ vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = or <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -4009,15 +3991,12 @@ define void @sink_splat_xor_lmul2(i64* nocapture %a, i64 signext %x) {
 ; CHECK-LABEL: sink_splat_xor_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB73_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vxor.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB73_1
@@ -4032,10 +4011,10 @@ vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = xor <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body