[llvm] r330368 - [Reassociate] add baseline tests for binop swapping; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 19 14:56:17 PDT 2018


Author: spatel
Date: Thu Apr 19 14:56:17 2018
New Revision: 330368

URL: http://llvm.org/viewvc/llvm-project?rev=330368&view=rev
Log:
[Reassociate] add baseline tests for binop swapping; NFC

Similar to rL330086, I don't know if we want to do these
transforms in this pass, but either way the tests belong here
to show that the pass is missing potential functionality
(intentionally or not).
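
For reference, a sketch of the reassociation these tests would like
to see (not performed yet; my reading of PR37098): when the outer
binops match and the inner shifts share a shift amount, group the
shifted values next to each other so instcombine can factor out the
common shift:

  %sx = shl i8 %x, %shamt
  %sy = shl i8 %y, %shamt
  %a  = and i8 %sx, %z
  %r  = and i8 %sy, %a
  -->
  %s  = and i8 %sx, %sy    ; shifts by a common amount are now adjacent
  %r  = and i8 %s, %z      ; ...and can fold to ((x & y) << shamt) & z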

Added:
    llvm/trunk/test/Transforms/Reassociate/matching-binops.ll

Added: llvm/trunk/test/Transforms/Reassociate/matching-binops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Reassociate/matching-binops.ll?rev=330368&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/Reassociate/matching-binops.ll (added)
+++ llvm/trunk/test/Transforms/Reassociate/matching-binops.ll Thu Apr 19 14:56:17 2018
@@ -0,0 +1,286 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -reassociate -S | FileCheck %s
+
+; PR37098 - https://bugs.llvm.org/show_bug.cgi?id=37098
+; In all positive tests, we should reassociate binops 
+; to allow more factoring folds.
+
+; There are 5 associative integer binops * 
+;           13 integer binops * 
+;           4 operand commutes =
+;           260 potential variations of this fold 
+; for integer binops. There are another 40 for FP.
+; Mix the commutation options to provide coverage with fewer tests.
+
+define i8 @and_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @and_shl(
+; CHECK-NEXT:    [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = and i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = shl i8 %x, %shamt
+  %sy = shl i8 %y, %shamt
+  %a = and i8 %sx, %z
+  %r = and i8 %sy, %a
+  ret i8 %r
+}
+
+define i8 @or_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @or_shl(
+; CHECK-NEXT:    [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = or i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = shl i8 %x, %shamt
+  %sy = shl i8 %y, %shamt
+  %a = or i8 %sx, %z
+  %r = or i8 %a, %sy
+  ret i8 %r
+}
+
+define i8 @xor_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @xor_shl(
+; CHECK-NEXT:    [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = shl i8 %x, %shamt
+  %sy = shl i8 %y, %shamt
+  %a = xor i8 %z, %sx
+  %r = xor i8 %a, %sy
+  ret i8 %r
+}
+
+define i8 @and_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @and_lshr(
+; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = and i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = lshr i8 %x, %shamt
+  %sy = lshr i8 %y, %shamt
+  %a = and i8 %z, %sx
+  %r = and i8 %sy, %a
+  ret i8 %r
+}
+
+define i8 @or_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @or_lshr(
+; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = or i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = lshr i8 %x, %shamt
+  %sy = lshr i8 %y, %shamt
+  %a = or i8 %sx, %z
+  %r = or i8 %sy, %a
+  ret i8 %r
+}
+
+define i8 @xor_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @xor_lshr(
+; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = lshr i8 %x, %shamt
+  %sy = lshr i8 %y, %shamt
+  %a = xor i8 %sx, %z
+  %r = xor i8 %a, %sy
+  ret i8 %r
+}
+
+define i8 @and_ashr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @and_ashr(
+; CHECK-NEXT:    [[SX:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = and i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = ashr i8 %x, %shamt
+  %sy = ashr i8 %y, %shamt
+  %a = and i8 %z, %sx
+  %r = and i8 %a, %sy
+  ret i8 %r
+}
+
+define i8 @or_ashr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @or_ashr(
+; CHECK-NEXT:    [[SX:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = or i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = ashr i8 %x, %shamt
+  %sy = ashr i8 %y, %shamt
+  %a = or i8 %z, %sx
+  %r = or i8 %sy, %a
+  ret i8 %r
+}
+
+; Vectors work too.
+
+define <2 x i8> @xor_ashr(<2 x i8> %x, <2 x i8> %y, <2 x i8> %z, <2 x i8> %shamt) {
+; CHECK-LABEL: @xor_ashr(
+; CHECK-NEXT:    [[SX:%.*]] = ashr <2 x i8> [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = ashr <2 x i8> [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = xor <2 x i8> [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[A]], [[SY]]
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %sx = ashr <2 x i8> %x, %shamt
+  %sy = ashr <2 x i8> %y, %shamt
+  %a = xor <2 x i8> %sx, %z
+  %r = xor <2 x i8> %a, %sy
+  ret <2 x i8> %r
+}
+
+; Negative test - different logic ops
+
+define i8 @or_and_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @or_and_shl(
+; CHECK-NEXT:    [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = shl i8 %x, %shamt
+  %sy = shl i8 %y, %shamt
+  %a = or i8 %sx, %z
+  %r = and i8 %sy, %a
+  ret i8 %r
+}
+
+; Negative test - different shift ops
+
+define i8 @or_lshr_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @or_lshr_shl(
+; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = or i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = lshr i8 %x, %shamt
+  %sy = shl i8 %y, %shamt
+  %a = or i8 %sx, %z
+  %r = or i8 %a, %sy
+  ret i8 %r
+}
+
+; Negative test - multi-use
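+; (The intermediate %a has a second use, so reassociating the chain
+; would require creating an extra instruction.)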
+
+define i8 @xor_lshr_multiuse(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @xor_lshr_multiuse(
+; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], [[SY]]
+; CHECK-NEXT:    [[R2:%.*]] = sdiv i8 [[A]], [[R]]
+; CHECK-NEXT:    ret i8 [[R2]]
+;
+  %sx = lshr i8 %x, %shamt
+  %sy = lshr i8 %y, %shamt
+  %a = xor i8 %sx, %z
+  %r = xor i8 %a, %sy
+  %r2 = sdiv i8 %a, %r
+  ret i8 %r2
+}
+
+; Math ops work too. The instruction positions are also changed to verify placement.
+
+define i8 @add_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: @add_lshr(
+; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
+; CHECK-NEXT:    [[A:%.*]] = add i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
+; CHECK-NEXT:    [[R:%.*]] = add i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = lshr i8 %x, %shamt
+  %a = add i8 %sx, %z
+  %sy = lshr i8 %y, %shamt
+  %r = add i8 %a, %sy
+  ret i8 %r
+}
+
+; Make sure wrapping flags are cleared.
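+; (Reassociation regroups the operands, which invalidates the nsw/nuw
+; proofs, so a correct fold has to drop those flags.)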
+
+define i8 @mul_sub(i8 %x, i8 %y, i8 %z, i8 %m) {
+; CHECK-LABEL: @mul_sub(
+; CHECK-NEXT:    [[SX:%.*]] = sub i8 [[X:%.*]], [[M:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = sub i8 [[Y:%.*]], [[M]]
+; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = mul nuw i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = sub i8 %x, %m
+  %sy = sub i8 %y, %m
+  %a = mul nsw i8 %sx, %z
+  %r = mul nuw i8 %a, %sy
+  ret i8 %r
+}
+
+define i8 @add_mul(i8 %x, i8 %y, i8 %z, i8 %m) {
+; CHECK-LABEL: @add_mul(
+; CHECK-NEXT:    [[SX:%.*]] = mul nuw i8 [[X:%.*]], 42
+; CHECK-NEXT:    [[A:%.*]] = add nuw i8 [[Z:%.*]], [[SX]]
+; CHECK-NEXT:    [[SY:%.*]] = mul nsw i8 [[M:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A]], [[SY]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %sx = mul nuw i8 %x, 42
+  %a = add nuw i8 %sx, %z
+  %sy = mul nsw i8 %y, %m
+  %r = add nsw i8 %sy, %a
+  ret i8 %r
+}
+
+; Floating-point works too if it's not strict.
+; TODO: These should not require the full 'fast' FMF.
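+; (Presumably the 'reassoc' flag, and maybe 'nsz', should be enough.)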
+
+define float @fadd_fmul(float %x, float %y, float %z, float %m) {
+; CHECK-LABEL: @fadd_fmul(
+; CHECK-NEXT:    [[SX:%.*]] = fmul float [[X:%.*]], [[M:%.*]]
+; CHECK-NEXT:    [[A:%.*]] = fadd fast float [[SX]], [[Z:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = fmul float [[Y:%.*]], [[M]]
+; CHECK-NEXT:    [[R:%.*]] = fadd fast float [[A]], [[SY]]
+; CHECK-NEXT:    ret float [[R]]
+;
+  %sx = fmul float %x, %m
+  %a = fadd fast float %sx, %z
+  %sy = fmul float %y, %m
+  %r = fadd fast float %sy, %a
+  ret float %r
+}
+
+define float @fmul_fdiv(float %x, float %y, float %z, float %m) {
+; CHECK-LABEL: @fmul_fdiv(
+; CHECK-NEXT:    [[SX:%.*]] = fdiv float [[X:%.*]], [[M:%.*]]
+; CHECK-NEXT:    [[SY:%.*]] = fdiv float [[Y:%.*]], 4.200000e+01
+; CHECK-NEXT:    [[A:%.*]] = fmul fast float [[SY]], [[Z:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = fmul fast float [[A]], [[SX]]
+; CHECK-NEXT:    ret float [[R]]
+;
+  %sx = fdiv float %x, %m
+  %sy = fdiv float %y, 42.0
+  %a = fmul fast float %z, %sx
+  %r = fmul fast float %sy, %a
+  ret float %r
+}
+
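
For reference, the RUN line expands to roughly this when run by hand
(paths assumed relative to an LLVM checkout and build tree):

  opt < test/Transforms/Reassociate/matching-binops.ll -reassociate -S \
    | FileCheck test/Transforms/Reassociate/matching-binops.ll

and the autogenerated CHECK lines can be refreshed after a pass change
with:

  utils/update_test_checks.py --opt-binary=<build>/bin/opt \
    test/Transforms/Reassociate/matching-binops.ll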