[llvm] 6190d40 - [Hexagon] vect-vshifts.ll - regenerate test checks

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 7 06:35:49 PDT 2025


Author: Simon Pilgrim
Date: 2025-07-07T14:33:17+01:00
New Revision: 6190d407e09c01a85be860d82ca39fa8509e473d

URL: https://github.com/llvm/llvm-project/commit/6190d407e09c01a85be860d82ca39fa8509e473d
DIFF: https://github.com/llvm/llvm-project/commit/6190d407e09c01a85be860d82ca39fa8509e473d.diff

LOG: [Hexagon] vect-vshifts.ll - regenerate test checks
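
For reference, the new assertions below were produced by utils/update_llc_test_checks.py
(the NOTE line records UTC_ARGS: --version 5). A typical regeneration run, assuming a
locally built llc (the build/bin path here is only an example), would look like:

    # regenerate the CHECK lines in place, pointing at the freshly built llc
    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll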

Added: 
    

Modified: 
    llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll b/llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll
index 568d596266b0c..f9092a83a628c 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-vshifts.ll
@@ -1,12 +1,203 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
 
-; Check that store is post-incremented.
-; CHECK: r{{[0-9]+:[0-9]+}} = vasrw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
-; CHECK: r{{[0-9]+:[0-9]+}} = vaslw(r{{[0-9]+:[0-9]+}},r{{[0-9]+}})
 target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
 target triple = "hexagon"
 
+; Check that store is post-incremented.
 define void @foo(ptr nocapture %buf, ptr nocapture %dest, i32 %offset, i32 %oddBlock, i32 %gb) #0 {
+; CHECK-LABEL: foo:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r2 = sub(r2,r3)
+; CHECK-NEXT:     r3 = sub(#31,r4)
+; CHECK-NEXT:     p0 = cmp.eq(r3,#0)
+; CHECK-NEXT:     r5 = memw(r0+#0)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = asr(r5,r4)
+; CHECK-NEXT:     r29 = add(r29,#-24)
+; CHECK-NEXT:     r9:8 = combine(#-1,#-1)
+; CHECK-NEXT:     memw(r0+#0) = r5.new
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r5 = and(r2,#7)
+; CHECK-NEXT:     r2 = r3
+; CHECK-NEXT:     memd(r29+#8) = r19:18
+; CHECK-NEXT:     memd(r29+#16) = r17:16
+; CHECK-NEXT:    } // 8-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = vaslw(r3:2,#1)
+; CHECK-NEXT:     if (p0) r5 = add(r5,#1)
+; CHECK-NEXT:     memd(r29+#0) = r21:20
+; CHECK-NEXT:    } // 8-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = addasl(r1,r5,#2)
+; CHECK-NEXT:     r7:6 = vaddw(r7:6,r9:8)
+; CHECK-NEXT:     r14 = memw(r1+r5<<#2)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r15 = memw(r0+#256)
+; CHECK-NEXT:     r10 = memw(r0+#512)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r9:8 = vasrw(r15:14,#31)
+; CHECK-NEXT:     r19:18 = vasrw(r15:14,r3)
+; CHECK-NEXT:     r12 = memw(r0+#1024)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r9:8,r19:18)
+; CHECK-NEXT:     r17:16 = xor(r9:8,r7:6)
+; CHECK-NEXT:     r11 = memw(r0+#768)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r15:14 = vmux(p0,r15:14,r17:16)
+; CHECK-NEXT:     r19:18 = vasrw(r11:10,#31)
+; CHECK-NEXT:     r13 = memw(r0+#1280)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r21:20 = vasrw(r11:10,r3)
+; CHECK-NEXT:     r15:14 = vaslw(r15:14,r4)
+; CHECK-NEXT:     r8 = memw(r0+#1536)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p1 = vcmpw.eq(r19:18,r21:20)
+; CHECK-NEXT:     r19:18 = xor(r19:18,r7:6)
+; CHECK-NEXT:     r9 = memw(r0+#1792)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r21:20 = vasrw(r13:12,#31)
+; CHECK-NEXT:     r11:10 = vmux(p1,r11:10,r19:18)
+; CHECK-NEXT:     memw(r0+#32) = r14
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r17:16 = vasrw(r13:12,r3)
+; CHECK-NEXT:     r11:10 = vaslw(r11:10,r4)
+; CHECK-NEXT:     r14 = memw(r0+#2048)
+; CHECK-NEXT:     memw(r1+r5<<#2) = r14
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r21:20,r17:16)
+; CHECK-NEXT:     r19:18 = xor(r21:20,r7:6)
+; CHECK-NEXT:     memw(r0+#288) = r15
+; CHECK-NEXT:     memw(r0+#256) = r15
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r17:16 = vasrw(r9:8,#31)
+; CHECK-NEXT:     r19:18 = vmux(p0,r13:12,r19:18)
+; CHECK-NEXT:     r15 = memw(r0+#2304)
+; CHECK-NEXT:     memw(r0+#544) = r10
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r21:20 = vasrw(r9:8,r3)
+; CHECK-NEXT:     memw(r0+#800) = r11
+; CHECK-NEXT:     memw(r0+#512) = r10
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r13:12 = vasrw(r15:14,#31)
+; CHECK-NEXT:     r11:10 = vasrw(r15:14,r3)
+; CHECK-NEXT:     memw(r0+#768) = r11
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r17:16,r21:20)
+; CHECK-NEXT:     p1 = vcmpw.eq(r13:12,r11:10)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r21:20 = xor(r13:12,r7:6)
+; CHECK-NEXT:     r17:16 = xor(r17:16,r7:6)
+; CHECK-NEXT:     r12 = memw(r0+#2560)
+; CHECK-NEXT:     r13 = memw(r0+#2816)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r11:10 = vaslw(r19:18,r4)
+; CHECK-NEXT:     r17:16 = vmux(p0,r9:8,r17:16)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r9:8 = vmux(p1,r15:14,r21:20)
+; CHECK-NEXT:     r19:18 = vasrw(r13:12,#31)
+; CHECK-NEXT:     r14 = memw(r0+#3072)
+; CHECK-NEXT:     memw(r0+#1056) = r10
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r21:20 = vasrw(r13:12,r3)
+; CHECK-NEXT:     r9:8 = vaslw(r9:8,r4)
+; CHECK-NEXT:     memw(r0+#1312) = r11
+; CHECK-NEXT:     memw(r0+#1024) = r10
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r11:10 = vaslw(r17:16,r4)
+; CHECK-NEXT:     p0 = vcmpw.eq(r19:18,r21:20)
+; CHECK-NEXT:     r15 = memw(r0+#3328)
+; CHECK-NEXT:     memw(r0+#1280) = r11
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r17:16 = xor(r19:18,r7:6)
+; CHECK-NEXT:     r21:20 = vasrw(r15:14,#31)
+; CHECK-NEXT:     memw(r0+#1568) = r10
+; CHECK-NEXT:     memw(r0+#1824) = r11
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r11:10 = vasrw(r15:14,r3)
+; CHECK-NEXT:     r13:12 = vmux(p0,r13:12,r17:16)
+; CHECK-NEXT:     memw(r0+#1536) = r10
+; CHECK-NEXT:     memw(r0+#1792) = r11
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = vcmpw.eq(r21:20,r11:10)
+; CHECK-NEXT:     r19:18 = xor(r21:20,r7:6)
+; CHECK-NEXT:     r10 = memw(r0+#3584)
+; CHECK-NEXT:     memw(r0+#2080) = r8
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r19:18 = vmux(p0,r15:14,r19:18)
+; CHECK-NEXT:     r13:12 = vaslw(r13:12,r4)
+; CHECK-NEXT:     r11 = memw(r0+#3840)
+; CHECK-NEXT:     memw(r0+#2336) = r9
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r21:20 = vasrw(r11:10,#31)
+; CHECK-NEXT:     r3:2 = vasrw(r11:10,r3)
+; CHECK-NEXT:     memw(r0+#2048) = r8
+; CHECK-NEXT:     memw(r0+#2304) = r9
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r7:6 = xor(r21:20,r7:6)
+; CHECK-NEXT:     p0 = vcmpw.eq(r21:20,r3:2)
+; CHECK-NEXT:     r17:16 = memd(r29+#16)
+; CHECK-NEXT:     memw(r0+#2592) = r12
+; CHECK-NEXT:    } // 8-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r3:2 = vmux(p0,r11:10,r7:6)
+; CHECK-NEXT:     r9:8 = vaslw(r19:18,r4)
+; CHECK-NEXT:     r19:18 = memd(r29+#8)
+; CHECK-NEXT:     memw(r0+#2848) = r13
+; CHECK-NEXT:    } // 8-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r3:2 = vaslw(r3:2,r4)
+; CHECK-NEXT:     r29 = add(r29,#24)
+; CHECK-NEXT:     r21:20 = memd(r29+#0)
+; CHECK-NEXT:     memw(r0+#2560) = r12
+; CHECK-NEXT:    } // 8-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     memw(r0+#2816) = r13
+; CHECK-NEXT:     memw(r0+#3104) = r8
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     memw(r0+#3360) = r9
+; CHECK-NEXT:     memw(r0+#3072) = r8
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     memw(r0+#3328) = r9
+; CHECK-NEXT:     memw(r0+#3616) = r2
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     memw(r0+#3872) = r3
+; CHECK-NEXT:     memw(r0+#3584) = r2
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:     memw(r0+#3840) = r3
+; CHECK-NEXT:    }
 entry:
   %0 = load i32, ptr %buf, align 4, !tbaa !0
   %shr = ashr i32 %0, %gb


        

