[llvm] 63f3a5b - [PhaseOrdering][X86] Add test coverage for #48223

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon May 12 01:16:33 PDT 2025


Author: Simon Pilgrim
Date: 2025-05-12T09:16:13+01:00
New Revision: 63f3a5babdc97992549748afb9d02747185032d3

URL: https://github.com/llvm/llvm-project/commit/63f3a5babdc97992549748afb9d02747185032d3
DIFF: https://github.com/llvm/llvm-project/commit/63f3a5babdc97992549748afb9d02747185032d3.diff

LOG: [PhaseOrdering][X86] Add test coverage for #48223

The X86 backend's shuffle combining is currently saving us from some poor vectorised IR.
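
For reference, a minimal sketch (hypothetical name @compute_min_ideal, same x86_64 target assumptions as the RUN lines) of the vectorised IR one would hope the middle end could produce here: a single @llvm.smin.v8i16 feeding straight into the two i64 return lanes, instead of the four duplicate smin calls and two-element shuffles visible in the SSE4/AVX checks below. This is an illustrative target shape, not output from any current pipeline.

declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)

define { i64, i64 } @compute_min_ideal(ptr %x, ptr %y) {
entry:
  ; One vector smin instead of four identical calls plus per-lane shuffles.
  %a = load <8 x i16>, ptr %y, align 2
  %b = load <8 x i16>, ptr %x, align 2
  %m = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %a, <8 x i16> %b)
  ; Repack the <8 x i16> result directly into the { i64, i64 } return value,
  ; avoiding the scalar zext/shl/or insert chains in the SSE2 checks.
  %v = bitcast <8 x i16> %m to <2 x i64>
  %lo = extractelement <2 x i64> %v, i64 0
  %hi = extractelement <2 x i64> %v, i64 1
  %r0 = insertvalue { i64, i64 } poison, i64 %lo, 0
  %r1 = insertvalue { i64, i64 } %r0, i64 %hi, 1
  ret { i64, i64 } %r1
}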

Added: 
    llvm/test/Transforms/PhaseOrdering/X86/pr48223.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/PhaseOrdering/X86/pr48223.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr48223.ll
new file mode 100644
index 0000000000000..415089ae213bb
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr48223.ll
@@ -0,0 +1,163 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64    < %s | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v2 < %s | FileCheck %s --check-prefixes=CHECK,SSE,SSE4
+; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v3 < %s | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v4 < %s | FileCheck %s --check-prefixes=CHECK,AVX
+
+%"struct.std::array" = type { [8 x i16] }
+
+define { i64, i64 } @compute_min(ptr noundef nonnull align 2 dereferenceable(16) %x, ptr noundef nonnull align 2 dereferenceable(16) %y) {
+; SSE2-LABEL: @compute_min(
+; SSE2-NEXT:  entry:
+; SSE2-NEXT:    [[LD0:%.*]] = load i16, ptr [[Y:%.*]], align 2
+; SSE2-NEXT:    [[LD1:%.*]] = load i16, ptr [[X:%.*]], align 2
+; SSE2-NEXT:    [[LD2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0]], i16 [[LD1]])
+; SSE2-NEXT:    [[PT1_1:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 2
+; SSE2-NEXT:    [[PT0_1:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 2
+; SSE2-NEXT:    [[LD0_1:%.*]] = load i16, ptr [[PT0_1]], align 2
+; SSE2-NEXT:    [[LD1_1:%.*]] = load i16, ptr [[PT1_1]], align 2
+; SSE2-NEXT:    [[LD2_1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_1]], i16 [[LD1_1]])
+; SSE2-NEXT:    [[PT1_2:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 4
+; SSE2-NEXT:    [[PT0_2:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 4
+; SSE2-NEXT:    [[LD0_2:%.*]] = load i16, ptr [[PT0_2]], align 2
+; SSE2-NEXT:    [[LD1_2:%.*]] = load i16, ptr [[PT1_2]], align 2
+; SSE2-NEXT:    [[LD2_2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_2]], i16 [[LD1_2]])
+; SSE2-NEXT:    [[PT1_3:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 6
+; SSE2-NEXT:    [[PT0_3:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 6
+; SSE2-NEXT:    [[LD0_3:%.*]] = load i16, ptr [[PT0_3]], align 2
+; SSE2-NEXT:    [[LD1_3:%.*]] = load i16, ptr [[PT1_3]], align 2
+; SSE2-NEXT:    [[LD2_3:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_3]], i16 [[LD1_3]])
+; SSE2-NEXT:    [[PT1_4:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 8
+; SSE2-NEXT:    [[PT0_4:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 8
+; SSE2-NEXT:    [[LD0_4:%.*]] = load i16, ptr [[PT0_4]], align 2
+; SSE2-NEXT:    [[LD1_4:%.*]] = load i16, ptr [[PT1_4]], align 2
+; SSE2-NEXT:    [[LD2_4:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_4]], i16 [[LD1_4]])
+; SSE2-NEXT:    [[PT1_5:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 10
+; SSE2-NEXT:    [[PT0_5:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 10
+; SSE2-NEXT:    [[LD0_5:%.*]] = load i16, ptr [[PT0_5]], align 2
+; SSE2-NEXT:    [[LD1_5:%.*]] = load i16, ptr [[PT1_5]], align 2
+; SSE2-NEXT:    [[LD2_5:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_5]], i16 [[LD1_5]])
+; SSE2-NEXT:    [[PT1_6:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 12
+; SSE2-NEXT:    [[PT0_6:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 12
+; SSE2-NEXT:    [[LD0_6:%.*]] = load i16, ptr [[PT0_6]], align 2
+; SSE2-NEXT:    [[LD1_6:%.*]] = load i16, ptr [[PT1_6]], align 2
+; SSE2-NEXT:    [[LD2_6:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_6]], i16 [[LD1_6]])
+; SSE2-NEXT:    [[PT1_7:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 14
+; SSE2-NEXT:    [[PT0_7:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 14
+; SSE2-NEXT:    [[LD0_7:%.*]] = load i16, ptr [[PT0_7]], align 2
+; SSE2-NEXT:    [[LD1_7:%.*]] = load i16, ptr [[PT1_7]], align 2
+; SSE2-NEXT:    [[LD2_7:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_7]], i16 [[LD1_7]])
+; SSE2-NEXT:    [[RETVAL_SROA_4_0_INSERT_EXT:%.*]] = zext i16 [[LD2_3]] to i64
+; SSE2-NEXT:    [[RETVAL_SROA_4_0_INSERT_SHIFT:%.*]] = shl nuw i64 [[RETVAL_SROA_4_0_INSERT_EXT]], 48
+; SSE2-NEXT:    [[RETVAL_SROA_3_0_INSERT_EXT:%.*]] = zext i16 [[LD2_2]] to i64
+; SSE2-NEXT:    [[RETVAL_SROA_3_0_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_3_0_INSERT_EXT]], 32
+; SSE2-NEXT:    [[RETVAL_SROA_3_0_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_4_0_INSERT_SHIFT]], [[RETVAL_SROA_3_0_INSERT_SHIFT]]
+; SSE2-NEXT:    [[RETVAL_SROA_2_0_INSERT_EXT:%.*]] = zext i16 [[LD2_1]] to i64
+; SSE2-NEXT:    [[RETVAL_SROA_2_0_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_2_0_INSERT_EXT]], 16
+; SSE2-NEXT:    [[RETVAL_SROA_2_0_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_3_0_INSERT_INSERT]], [[RETVAL_SROA_2_0_INSERT_SHIFT]]
+; SSE2-NEXT:    [[RETVAL_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[LD2]] to i64
+; SSE2-NEXT:    [[TMP20:%.*]] = or disjoint i64 [[RETVAL_SROA_2_0_INSERT_INSERT]], [[RETVAL_SROA_0_0_INSERT_EXT]]
+; SSE2-NEXT:    [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[TMP20]], 0
+; SSE2-NEXT:    [[RETVAL_SROA_9_8_INSERT_EXT:%.*]] = zext i16 [[LD2_7]] to i64
+; SSE2-NEXT:    [[RETVAL_SROA_9_8_INSERT_SHIFT:%.*]] = shl nuw i64 [[RETVAL_SROA_9_8_INSERT_EXT]], 48
+; SSE2-NEXT:    [[RETVAL_SROA_8_8_INSERT_EXT:%.*]] = zext i16 [[LD2_6]] to i64
+; SSE2-NEXT:    [[RETVAL_SROA_8_8_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_8_8_INSERT_EXT]], 32
+; SSE2-NEXT:    [[RETVAL_SROA_8_8_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_9_8_INSERT_SHIFT]], [[RETVAL_SROA_8_8_INSERT_SHIFT]]
+; SSE2-NEXT:    [[RETVAL_SROA_7_8_INSERT_EXT:%.*]] = zext i16 [[LD2_5]] to i64
+; SSE2-NEXT:    [[RETVAL_SROA_7_8_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_7_8_INSERT_EXT]], 16
+; SSE2-NEXT:    [[RETVAL_SROA_7_8_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_8_8_INSERT_INSERT]], [[RETVAL_SROA_7_8_INSERT_SHIFT]]
+; SSE2-NEXT:    [[RETVAL_SROA_5_8_INSERT_EXT:%.*]] = zext i16 [[LD2_4]] to i64
+; SSE2-NEXT:    [[TMP21:%.*]] = or disjoint i64 [[RETVAL_SROA_7_8_INSERT_INSERT]], [[RETVAL_SROA_5_8_INSERT_EXT]]
+; SSE2-NEXT:    [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[TMP21]], 1
+; SSE2-NEXT:    ret { i64, i64 } [[DOTFCA_1_INSERT]]
+;
+; SSE4-LABEL: @compute_min(
+; SSE4-NEXT:  entry:
+; SSE4-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[Y:%.*]], align 2
+; SSE4-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[X:%.*]], align 2
+; SSE4-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; SSE4-NEXT:    [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> poison, <2 x i32> <i32 0, i32 4>
+; SSE4-NEXT:    [[TMP4:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; SSE4-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i16> [[TMP4]], <8 x i16> poison, <2 x i32> <i32 1, i32 5>
+; SSE4-NEXT:    [[TMP6:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; SSE4-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i16> [[TMP6]], <8 x i16> poison, <2 x i32> <i32 3, i32 6>
+; SSE4-NEXT:    [[TMP8:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; SSE4-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i16> [[TMP8]], <8 x i16> poison, <2 x i32> <i32 2, i32 7>
+; SSE4-NEXT:    [[TMP10:%.*]] = zext <2 x i16> [[TMP9]] to <2 x i64>
+; SSE4-NEXT:    [[TMP11:%.*]] = shl nuw <2 x i64> [[TMP10]], <i64 32, i64 48>
+; SSE4-NEXT:    [[TMP12:%.*]] = zext <2 x i16> [[TMP7]] to <2 x i64>
+; SSE4-NEXT:    [[TMP13:%.*]] = shl nuw <2 x i64> [[TMP12]], <i64 48, i64 32>
+; SSE4-NEXT:    [[TMP14:%.*]] = or disjoint <2 x i64> [[TMP11]], [[TMP13]]
+; SSE4-NEXT:    [[TMP15:%.*]] = zext <2 x i16> [[TMP5]] to <2 x i64>
+; SSE4-NEXT:    [[TMP16:%.*]] = shl nuw nsw <2 x i64> [[TMP15]], splat (i64 16)
+; SSE4-NEXT:    [[TMP17:%.*]] = or disjoint <2 x i64> [[TMP14]], [[TMP16]]
+; SSE4-NEXT:    [[TMP18:%.*]] = zext <2 x i16> [[TMP3]] to <2 x i64>
+; SSE4-NEXT:    [[TMP19:%.*]] = or disjoint <2 x i64> [[TMP17]], [[TMP18]]
+; SSE4-NEXT:    [[TMP20:%.*]] = extractelement <2 x i64> [[TMP19]], i64 0
+; SSE4-NEXT:    [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[TMP20]], 0
+; SSE4-NEXT:    [[TMP21:%.*]] = extractelement <2 x i64> [[TMP19]], i64 1
+; SSE4-NEXT:    [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[TMP21]], 1
+; SSE4-NEXT:    ret { i64, i64 } [[DOTFCA_1_INSERT]]
+;
+; AVX-LABEL: @compute_min(
+; AVX-NEXT:  entry:
+; AVX-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[Y:%.*]], align 2
+; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[X:%.*]], align 2
+; AVX-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; AVX-NEXT:    [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> poison, <2 x i32> <i32 0, i32 4>
+; AVX-NEXT:    [[TMP4:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; AVX-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i16> [[TMP4]], <8 x i16> poison, <2 x i32> <i32 1, i32 5>
+; AVX-NEXT:    [[TMP6:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; AVX-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i16> [[TMP6]], <8 x i16> poison, <2 x i32> <i32 3, i32 6>
+; AVX-NEXT:    [[TMP8:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+; AVX-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i16> [[TMP8]], <8 x i16> poison, <2 x i32> <i32 2, i32 7>
+; AVX-NEXT:    [[TMP10:%.*]] = zext <2 x i16> [[TMP9]] to <2 x i64>
+; AVX-NEXT:    [[TMP11:%.*]] = shl nuw <2 x i64> [[TMP10]], <i64 32, i64 48>
+; AVX-NEXT:    [[TMP12:%.*]] = zext <2 x i16> [[TMP7]] to <2 x i64>
+; AVX-NEXT:    [[TMP13:%.*]] = shl nuw <2 x i64> [[TMP12]], <i64 48, i64 32>
+; AVX-NEXT:    [[TMP14:%.*]] = or disjoint <2 x i64> [[TMP11]], [[TMP13]]
+; AVX-NEXT:    [[TMP15:%.*]] = zext <2 x i16> [[TMP5]] to <2 x i64>
+; AVX-NEXT:    [[TMP16:%.*]] = shl nuw nsw <2 x i64> [[TMP15]], splat (i64 16)
+; AVX-NEXT:    [[TMP17:%.*]] = or disjoint <2 x i64> [[TMP14]], [[TMP16]]
+; AVX-NEXT:    [[TMP18:%.*]] = zext <2 x i16> [[TMP3]] to <2 x i64>
+; AVX-NEXT:    [[TMP19:%.*]] = or disjoint <2 x i64> [[TMP17]], [[TMP18]]
+; AVX-NEXT:    [[TMP20:%.*]] = extractelement <2 x i64> [[TMP19]], i64 0
+; AVX-NEXT:    [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[TMP20]], 0
+; AVX-NEXT:    [[TMP21:%.*]] = extractelement <2 x i64> [[TMP19]], i64 1
+; AVX-NEXT:    [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[TMP21]], 1
+; AVX-NEXT:    ret { i64, i64 } [[DOTFCA_1_INSERT]]
+;
+entry:
+  %retval = alloca %"struct.std::array", align 2
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.body, %entry
+  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %cmp.not = icmp eq i32 %i.0, 8
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.cond
+  %.fca.0.load = load i64, ptr %retval, align 2
+  %.fca.0.insert = insertvalue { i64, i64 } poison, i64 %.fca.0.load, 0
+  %.fca.1.gep = getelementptr inbounds nuw i8, ptr %retval, i64 8
+  %.fca.1.load = load i64, ptr %.fca.1.gep, align 2
+  %.fca.1.insert = insertvalue { i64, i64 } %.fca.0.insert, i64 %.fca.1.load, 1
+  ret { i64, i64 } %.fca.1.insert
+
+for.body:                                         ; preds = %for.cond
+  %conv = zext nneg i32 %i.0 to i64
+  %pt1 = getelementptr inbounds nuw [8 x i16], ptr %x, i64 0, i64 %conv
+  %pt0 = getelementptr inbounds nuw [8 x i16], ptr %y, i64 0, i64 %conv
+  %ld0 = load i16, ptr %pt0, align 2
+  %ld1 = load i16, ptr %pt1, align 2
+  %cmp.i = icmp slt i16 %ld0, %ld1
+  %sel = select i1 %cmp.i, ptr %pt0, ptr %pt1
+  %ld2 = load i16, ptr %sel, align 2
+  %pt2 = getelementptr inbounds nuw [8 x i16], ptr %retval, i64 0, i64 %conv
+  store i16 %ld2, ptr %pt2, align 2
+  %inc = add nuw nsw i32 %i.0, 1
+  br label %for.cond
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
+; SSE: {{.*}}
