[llvm] 482a3bc - [SLP][X86] Add test case for #176906 (#178386)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 28 02:14:55 PST 2026


Author: Simon Pilgrim
Date: 2026-01-28T10:14:50Z
New Revision: 482a3bc861759ada060415be135dc5ef2a1ec8d0

URL: https://github.com/llvm/llvm-project/commit/482a3bc861759ada060415be135dc5ef2a1ec8d0
DIFF: https://github.com/llvm/llvm-project/commit/482a3bc861759ada060415be135dc5ef2a1ec8d0.diff

LOG: [SLP][X86] Add test case for #176906 (#178386)

Added: 
    llvm/test/Transforms/SLPVectorizer/X86/pr176906.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr176906.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr176906.ll
new file mode 100644
index 0000000000000..6f0f6296f45a8
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr176906.ll
@@ -0,0 +1,262 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-- -mcpu=x86-64    | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
+
+define i1 @PR176906(ptr %p) {
+; SSE-LABEL: define i1 @PR176906(
+; SSE-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; SSE-NEXT:    [[V:%.*]] = load <32 x i8>, ptr [[P]], align 1
+; SSE-NEXT:    [[TMP1:%.*]] = icmp sgt <32 x i8> [[V]], splat (i8 -1)
+; SSE-NEXT:    [[TMP2:%.*]] = zext <32 x i1> [[TMP1]] to <32 x i8>
+; SSE-NEXT:    [[TMP3:%.*]] = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> [[TMP2]])
+; SSE-NEXT:    [[OK:%.*]] = icmp eq i8 [[TMP3]], 32
+; SSE-NEXT:    ret i1 [[OK]]
+;
+; AVX2-LABEL: define i1 @PR176906(
+; AVX2-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; AVX2-NEXT:    [[V:%.*]] = load <32 x i8>, ptr [[P]], align 1
+; AVX2-NEXT:    [[TMP1:%.*]] = icmp sgt <32 x i8> [[V]], splat (i8 -1)
+; AVX2-NEXT:    [[TMP2:%.*]] = bitcast <32 x i1> [[TMP1]] to i32
+; AVX2-NEXT:    [[TMP3:%.*]] = call i32 @llvm.ctpop.i32(i32 [[TMP2]])
+; AVX2-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
+; AVX2-NEXT:    [[OK:%.*]] = icmp eq i8 [[TMP4]], 32
+; AVX2-NEXT:    ret i1 [[OK]]
+;
+; AVX512-LABEL: define i1 @PR176906(
+; AVX512-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; AVX512-NEXT:    [[V:%.*]] = load <32 x i8>, ptr [[P]], align 1
+; AVX512-NEXT:    [[TMP1:%.*]] = icmp sgt <32 x i8> [[V]], splat (i8 -1)
+; AVX512-NEXT:    [[TMP2:%.*]] = extractelement <32 x i1> [[TMP1]], i32 0
+; AVX512-NEXT:    [[Z:%.*]] = zext i1 [[TMP2]] to i8
+; AVX512-NEXT:    [[TMP3:%.*]] = extractelement <32 x i1> [[TMP1]], i32 1
+; AVX512-NEXT:    [[Z_1:%.*]] = zext i1 [[TMP3]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_1:%.*]] = add nuw nsw i8 [[Z]], [[Z_1]]
+; AVX512-NEXT:    [[TMP4:%.*]] = extractelement <32 x i1> [[TMP1]], i32 2
+; AVX512-NEXT:    [[Z_2:%.*]] = zext i1 [[TMP4]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_2:%.*]] = add nuw nsw i8 [[ACC_NEXT_1]], [[Z_2]]
+; AVX512-NEXT:    [[TMP5:%.*]] = extractelement <32 x i1> [[TMP1]], i32 3
+; AVX512-NEXT:    [[Z_3:%.*]] = zext i1 [[TMP5]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_3:%.*]] = add nuw nsw i8 [[ACC_NEXT_2]], [[Z_3]]
+; AVX512-NEXT:    [[TMP6:%.*]] = extractelement <32 x i1> [[TMP1]], i32 4
+; AVX512-NEXT:    [[Z_4:%.*]] = zext i1 [[TMP6]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_4:%.*]] = add nuw nsw i8 [[ACC_NEXT_3]], [[Z_4]]
+; AVX512-NEXT:    [[TMP7:%.*]] = extractelement <32 x i1> [[TMP1]], i32 5
+; AVX512-NEXT:    [[Z_5:%.*]] = zext i1 [[TMP7]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_5:%.*]] = add nuw nsw i8 [[ACC_NEXT_4]], [[Z_5]]
+; AVX512-NEXT:    [[TMP8:%.*]] = extractelement <32 x i1> [[TMP1]], i32 6
+; AVX512-NEXT:    [[Z_6:%.*]] = zext i1 [[TMP8]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_6:%.*]] = add nuw nsw i8 [[ACC_NEXT_5]], [[Z_6]]
+; AVX512-NEXT:    [[TMP9:%.*]] = extractelement <32 x i1> [[TMP1]], i32 7
+; AVX512-NEXT:    [[Z_7:%.*]] = zext i1 [[TMP9]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_7:%.*]] = add nuw nsw i8 [[ACC_NEXT_6]], [[Z_7]]
+; AVX512-NEXT:    [[TMP10:%.*]] = extractelement <32 x i1> [[TMP1]], i32 8
+; AVX512-NEXT:    [[Z_8:%.*]] = zext i1 [[TMP10]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_8:%.*]] = add nuw nsw i8 [[ACC_NEXT_7]], [[Z_8]]
+; AVX512-NEXT:    [[TMP11:%.*]] = extractelement <32 x i1> [[TMP1]], i32 9
+; AVX512-NEXT:    [[Z_9:%.*]] = zext i1 [[TMP11]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_9:%.*]] = add nuw nsw i8 [[ACC_NEXT_8]], [[Z_9]]
+; AVX512-NEXT:    [[TMP12:%.*]] = extractelement <32 x i1> [[TMP1]], i32 10
+; AVX512-NEXT:    [[Z_10:%.*]] = zext i1 [[TMP12]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_10:%.*]] = add nuw nsw i8 [[ACC_NEXT_9]], [[Z_10]]
+; AVX512-NEXT:    [[TMP13:%.*]] = extractelement <32 x i1> [[TMP1]], i32 11
+; AVX512-NEXT:    [[Z_11:%.*]] = zext i1 [[TMP13]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_11:%.*]] = add nuw nsw i8 [[ACC_NEXT_10]], [[Z_11]]
+; AVX512-NEXT:    [[TMP14:%.*]] = extractelement <32 x i1> [[TMP1]], i32 12
+; AVX512-NEXT:    [[Z_12:%.*]] = zext i1 [[TMP14]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_12:%.*]] = add nuw nsw i8 [[ACC_NEXT_11]], [[Z_12]]
+; AVX512-NEXT:    [[TMP15:%.*]] = extractelement <32 x i1> [[TMP1]], i32 13
+; AVX512-NEXT:    [[Z_13:%.*]] = zext i1 [[TMP15]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_13:%.*]] = add nuw nsw i8 [[ACC_NEXT_12]], [[Z_13]]
+; AVX512-NEXT:    [[TMP16:%.*]] = extractelement <32 x i1> [[TMP1]], i32 14
+; AVX512-NEXT:    [[Z_14:%.*]] = zext i1 [[TMP16]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_14:%.*]] = add nuw nsw i8 [[ACC_NEXT_13]], [[Z_14]]
+; AVX512-NEXT:    [[TMP17:%.*]] = extractelement <32 x i1> [[TMP1]], i32 15
+; AVX512-NEXT:    [[Z_15:%.*]] = zext i1 [[TMP17]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_15:%.*]] = add nuw nsw i8 [[ACC_NEXT_14]], [[Z_15]]
+; AVX512-NEXT:    [[TMP18:%.*]] = extractelement <32 x i1> [[TMP1]], i32 16
+; AVX512-NEXT:    [[Z_16:%.*]] = zext i1 [[TMP18]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_16:%.*]] = add nuw nsw i8 [[ACC_NEXT_15]], [[Z_16]]
+; AVX512-NEXT:    [[TMP19:%.*]] = extractelement <32 x i1> [[TMP1]], i32 17
+; AVX512-NEXT:    [[Z_17:%.*]] = zext i1 [[TMP19]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_17:%.*]] = add nuw nsw i8 [[ACC_NEXT_16]], [[Z_17]]
+; AVX512-NEXT:    [[TMP20:%.*]] = extractelement <32 x i1> [[TMP1]], i32 18
+; AVX512-NEXT:    [[Z_18:%.*]] = zext i1 [[TMP20]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_18:%.*]] = add nuw nsw i8 [[ACC_NEXT_17]], [[Z_18]]
+; AVX512-NEXT:    [[TMP21:%.*]] = extractelement <32 x i1> [[TMP1]], i32 19
+; AVX512-NEXT:    [[Z_19:%.*]] = zext i1 [[TMP21]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_19:%.*]] = add nuw nsw i8 [[ACC_NEXT_18]], [[Z_19]]
+; AVX512-NEXT:    [[TMP22:%.*]] = extractelement <32 x i1> [[TMP1]], i32 20
+; AVX512-NEXT:    [[Z_20:%.*]] = zext i1 [[TMP22]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_20:%.*]] = add nuw nsw i8 [[ACC_NEXT_19]], [[Z_20]]
+; AVX512-NEXT:    [[TMP23:%.*]] = extractelement <32 x i1> [[TMP1]], i32 21
+; AVX512-NEXT:    [[Z_21:%.*]] = zext i1 [[TMP23]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_21:%.*]] = add nuw nsw i8 [[ACC_NEXT_20]], [[Z_21]]
+; AVX512-NEXT:    [[TMP24:%.*]] = extractelement <32 x i1> [[TMP1]], i32 22
+; AVX512-NEXT:    [[Z_22:%.*]] = zext i1 [[TMP24]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_22:%.*]] = add nuw nsw i8 [[ACC_NEXT_21]], [[Z_22]]
+; AVX512-NEXT:    [[TMP25:%.*]] = extractelement <32 x i1> [[TMP1]], i32 23
+; AVX512-NEXT:    [[Z_23:%.*]] = zext i1 [[TMP25]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_23:%.*]] = add nuw nsw i8 [[ACC_NEXT_22]], [[Z_23]]
+; AVX512-NEXT:    [[TMP26:%.*]] = extractelement <32 x i1> [[TMP1]], i32 24
+; AVX512-NEXT:    [[Z_24:%.*]] = zext i1 [[TMP26]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_24:%.*]] = add nuw nsw i8 [[ACC_NEXT_23]], [[Z_24]]
+; AVX512-NEXT:    [[TMP27:%.*]] = extractelement <32 x i1> [[TMP1]], i32 25
+; AVX512-NEXT:    [[Z_25:%.*]] = zext i1 [[TMP27]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_25:%.*]] = add nuw nsw i8 [[ACC_NEXT_24]], [[Z_25]]
+; AVX512-NEXT:    [[TMP28:%.*]] = extractelement <32 x i1> [[TMP1]], i32 26
+; AVX512-NEXT:    [[Z_26:%.*]] = zext i1 [[TMP28]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_26:%.*]] = add nuw nsw i8 [[ACC_NEXT_25]], [[Z_26]]
+; AVX512-NEXT:    [[TMP29:%.*]] = extractelement <32 x i1> [[TMP1]], i32 27
+; AVX512-NEXT:    [[Z_27:%.*]] = zext i1 [[TMP29]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_27:%.*]] = add nuw nsw i8 [[ACC_NEXT_26]], [[Z_27]]
+; AVX512-NEXT:    [[TMP30:%.*]] = extractelement <32 x i1> [[TMP1]], i32 28
+; AVX512-NEXT:    [[Z_28:%.*]] = zext i1 [[TMP30]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_28:%.*]] = add nuw nsw i8 [[ACC_NEXT_27]], [[Z_28]]
+; AVX512-NEXT:    [[TMP31:%.*]] = extractelement <32 x i1> [[TMP1]], i32 29
+; AVX512-NEXT:    [[Z_29:%.*]] = zext i1 [[TMP31]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_29:%.*]] = add nuw nsw i8 [[ACC_NEXT_28]], [[Z_29]]
+; AVX512-NEXT:    [[TMP32:%.*]] = extractelement <32 x i1> [[TMP1]], i32 30
+; AVX512-NEXT:    [[Z_30:%.*]] = zext i1 [[TMP32]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_30:%.*]] = add nuw nsw i8 [[ACC_NEXT_29]], [[Z_30]]
+; AVX512-NEXT:    [[TMP33:%.*]] = extractelement <32 x i1> [[TMP1]], i32 31
+; AVX512-NEXT:    [[Z_31:%.*]] = zext i1 [[TMP33]] to i8
+; AVX512-NEXT:    [[ACC_NEXT_31:%.*]] = add nuw nsw i8 [[ACC_NEXT_30]], [[Z_31]]
+; AVX512-NEXT:    [[OK:%.*]] = icmp eq i8 [[ACC_NEXT_31]], 32
+; AVX512-NEXT:    ret i1 [[OK]]
+;
+  %v = load <32 x i8>, ptr %p, align 1
+  %i = extractelement <32 x i8> %v, i64 0
+  %b = icmp sgt i8 %i, -1
+  %z = zext i1 %b to i8
+  %i1 = extractelement <32 x i8> %v, i64 1
+  %b.1 = icmp sgt i8 %i1, -1
+  %z.1 = zext i1 %b.1 to i8
+  %acc.next.1 = add nuw nsw i8 %z, %z.1
+  %i2 = extractelement <32 x i8> %v, i64 2
+  %b.2 = icmp sgt i8 %i2, -1
+  %z.2 = zext i1 %b.2 to i8
+  %acc.next.2 = add nuw nsw i8 %acc.next.1, %z.2
+  %i3 = extractelement <32 x i8> %v, i64 3
+  %b.3 = icmp sgt i8 %i3, -1
+  %z.3 = zext i1 %b.3 to i8
+  %acc.next.3 = add nuw nsw i8 %acc.next.2, %z.3
+  %i4 = extractelement <32 x i8> %v, i64 4
+  %b.4 = icmp sgt i8 %i4, -1
+  %z.4 = zext i1 %b.4 to i8
+  %acc.next.4 = add nuw nsw i8 %acc.next.3, %z.4
+  %i5 = extractelement <32 x i8> %v, i64 5
+  %b.5 = icmp sgt i8 %i5, -1
+  %z.5 = zext i1 %b.5 to i8
+  %acc.next.5 = add nuw nsw i8 %acc.next.4, %z.5
+  %i6 = extractelement <32 x i8> %v, i64 6
+  %b.6 = icmp sgt i8 %i6, -1
+  %z.6 = zext i1 %b.6 to i8
+  %acc.next.6 = add nuw nsw i8 %acc.next.5, %z.6
+  %i7 = extractelement <32 x i8> %v, i64 7
+  %b.7 = icmp sgt i8 %i7, -1
+  %z.7 = zext i1 %b.7 to i8
+  %acc.next.7 = add nuw nsw i8 %acc.next.6, %z.7
+  %i8 = extractelement <32 x i8> %v, i64 8
+  %b.8 = icmp sgt i8 %i8, -1
+  %z.8 = zext i1 %b.8 to i8
+  %acc.next.8 = add nuw nsw i8 %acc.next.7, %z.8
+  %i9 = extractelement <32 x i8> %v, i64 9
+  %b.9 = icmp sgt i8 %i9, -1
+  %z.9 = zext i1 %b.9 to i8
+  %acc.next.9 = add nuw nsw i8 %acc.next.8, %z.9
+  %i10 = extractelement <32 x i8> %v, i64 10
+  %b.10 = icmp sgt i8 %i10, -1
+  %z.10 = zext i1 %b.10 to i8
+  %acc.next.10 = add nuw nsw i8 %acc.next.9, %z.10
+  %i11 = extractelement <32 x i8> %v, i64 11
+  %b.11 = icmp sgt i8 %i11, -1
+  %z.11 = zext i1 %b.11 to i8
+  %acc.next.11 = add nuw nsw i8 %acc.next.10, %z.11
+  %i12 = extractelement <32 x i8> %v, i64 12
+  %b.12 = icmp sgt i8 %i12, -1
+  %z.12 = zext i1 %b.12 to i8
+  %acc.next.12 = add nuw nsw i8 %acc.next.11, %z.12
+  %i13 = extractelement <32 x i8> %v, i64 13
+  %b.13 = icmp sgt i8 %i13, -1
+  %z.13 = zext i1 %b.13 to i8
+  %acc.next.13 = add nuw nsw i8 %acc.next.12, %z.13
+  %i14 = extractelement <32 x i8> %v, i64 14
+  %b.14 = icmp sgt i8 %i14, -1
+  %z.14 = zext i1 %b.14 to i8
+  %acc.next.14 = add nuw nsw i8 %acc.next.13, %z.14
+  %i15 = extractelement <32 x i8> %v, i64 15
+  %b.15 = icmp sgt i8 %i15, -1
+  %z.15 = zext i1 %b.15 to i8
+  %acc.next.15 = add nuw nsw i8 %acc.next.14, %z.15
+  %i16 = extractelement <32 x i8> %v, i64 16
+  %b.16 = icmp sgt i8 %i16, -1
+  %z.16 = zext i1 %b.16 to i8
+  %acc.next.16 = add nuw nsw i8 %acc.next.15, %z.16
+  %i17 = extractelement <32 x i8> %v, i64 17
+  %b.17 = icmp sgt i8 %i17, -1
+  %z.17 = zext i1 %b.17 to i8
+  %acc.next.17 = add nuw nsw i8 %acc.next.16, %z.17
+  %i18 = extractelement <32 x i8> %v, i64 18
+  %b.18 = icmp sgt i8 %i18, -1
+  %z.18 = zext i1 %b.18 to i8
+  %acc.next.18 = add nuw nsw i8 %acc.next.17, %z.18
+  %i19 = extractelement <32 x i8> %v, i64 19
+  %b.19 = icmp sgt i8 %i19, -1
+  %z.19 = zext i1 %b.19 to i8
+  %acc.next.19 = add nuw nsw i8 %acc.next.18, %z.19
+  %i20 = extractelement <32 x i8> %v, i64 20
+  %b.20 = icmp sgt i8 %i20, -1
+  %z.20 = zext i1 %b.20 to i8
+  %acc.next.20 = add nuw nsw i8 %acc.next.19, %z.20
+  %i21 = extractelement <32 x i8> %v, i64 21
+  %b.21 = icmp sgt i8 %i21, -1
+  %z.21 = zext i1 %b.21 to i8
+  %acc.next.21 = add nuw nsw i8 %acc.next.20, %z.21
+  %i22 = extractelement <32 x i8> %v, i64 22
+  %b.22 = icmp sgt i8 %i22, -1
+  %z.22 = zext i1 %b.22 to i8
+  %acc.next.22 = add nuw nsw i8 %acc.next.21, %z.22
+  %i23 = extractelement <32 x i8> %v, i64 23
+  %b.23 = icmp sgt i8 %i23, -1
+  %z.23 = zext i1 %b.23 to i8
+  %acc.next.23 = add nuw nsw i8 %acc.next.22, %z.23
+  %i24 = extractelement <32 x i8> %v, i64 24
+  %b.24 = icmp sgt i8 %i24, -1
+  %z.24 = zext i1 %b.24 to i8
+  %acc.next.24 = add nuw nsw i8 %acc.next.23, %z.24
+  %i25 = extractelement <32 x i8> %v, i64 25
+  %b.25 = icmp sgt i8 %i25, -1
+  %z.25 = zext i1 %b.25 to i8
+  %acc.next.25 = add nuw nsw i8 %acc.next.24, %z.25
+  %i26 = extractelement <32 x i8> %v, i64 26
+  %b.26 = icmp sgt i8 %i26, -1
+  %z.26 = zext i1 %b.26 to i8
+  %acc.next.26 = add nuw nsw i8 %acc.next.25, %z.26
+  %i27 = extractelement <32 x i8> %v, i64 27
+  %b.27 = icmp sgt i8 %i27, -1
+  %z.27 = zext i1 %b.27 to i8
+  %acc.next.27 = add nuw nsw i8 %acc.next.26, %z.27
+  %i28 = extractelement <32 x i8> %v, i64 28
+  %b.28 = icmp sgt i8 %i28, -1
+  %z.28 = zext i1 %b.28 to i8
+  %acc.next.28 = add nuw nsw i8 %acc.next.27, %z.28
+  %i29 = extractelement <32 x i8> %v, i64 29
+  %b.29 = icmp sgt i8 %i29, -1
+  %z.29 = zext i1 %b.29 to i8
+  %acc.next.29 = add nuw nsw i8 %acc.next.28, %z.29
+  %i30 = extractelement <32 x i8> %v, i64 30
+  %b.30 = icmp sgt i8 %i30, -1
+  %z.30 = zext i1 %b.30 to i8
+  %acc.next.30 = add nuw nsw i8 %acc.next.29, %z.30
+  %i31 = extractelement <32 x i8> %v, i64 31
+  %b.31 = icmp sgt i8 %i31, -1
+  %z.31 = zext i1 %b.31 to i8
+  %acc.next.31 = add nuw nsw i8 %acc.next.30, %z.31
+  %ok = icmp eq i8 %acc.next.31, 32
+  ret i1 %ok
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX: {{.*}}
+; CHECK: {{.*}}


        


More information about the llvm-commits mailing list