[llvm] r366801 - [InstSimplify][NFC] Tests for skipping 'div-by-0' checks before inverted @llvm.umul.with.overflow
Roman Lebedev via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 23 05:42:49 PDT 2019
Author: lebedevri
Date: Tue Jul 23 05:42:49 2019
New Revision: 366801
URL: http://llvm.org/viewvc/llvm-project?rev=366801&view=rev
Log:
[InstSimplify][NFC] Tests for skipping 'div-by-0' checks before inverted @llvm.umul.with.overflow
It would already be handled by the non-inverted case if we were hoisting
the `not` in InstCombine, but we don't (granted, we don't sink it
in this case either), so this is a separate case.
Added:
llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-smul_ov-not.ll
llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-umul_ov-not.ll
Added: llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-smul_ov-not.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-smul_ov-not.ll?rev=366801&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-smul_ov-not.ll (added)
+++ llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-smul_ov-not.ll Tue Jul 23 05:42:49 2019
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instsimplify -S | FileCheck %s
+
+declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4) #1
+
+; Baseline pattern: %or = (%size == 0) | !smul.ov(%size, %nmemb).
+; The div-by-0 guard is redundant (0 * %nmemb cannot overflow), so
+; InstSimplify should eventually drop the icmp; the CHECK lines record
+; that the fold does not happen yet (NFC test, autogenerated checks).
+; NOTE(review): named @t0_umul although it exercises
+; @llvm.smul.with.overflow — presumably mirrors its umul twin test.
+define i1 @t0_umul(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @t0_umul(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size, 0
+ %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %smul.ov = extractvalue { i4, i1 } %smul, 1
+ %phitmp = xor i1 %smul.ov, true ; inverted overflow bit
+ %or = or i1 %cmp, %phitmp
+ ret i1 %or
+}
+
+; Same pattern as @t0_umul but with the 'or' operands swapped; the
+; eventual fold must match the guard commutatively.
+define i1 @t1_commutative(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @t1_commutative(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[PHITMP]], [[CMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size, 0
+ %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %smul.ov = extractvalue { i4, i1 } %smul, 1
+ %phitmp = xor i1 %smul.ov, true
+ %or = or i1 %phitmp, %cmp ; swapped
+ ret i1 %or
+}
+
+; Negative test: the icmp guards %size1 but the multiply uses %size0,
+; so the guard is not redundant and must not be removed.
+define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
+; CHECK-LABEL: @n2_wrong_size(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE1:%.*]], 0
+; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size1, 0 ; not %size0
+ %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size0, i4 %nmemb)
+ %smul.ov = extractvalue { i4, i1 } %smul, 1
+ %phitmp = xor i1 %smul.ov, true
+ %or = or i1 %cmp, %phitmp
+ ret i1 %or
+}
+
+; Negative test: predicate is 'ne' instead of 'eq', so this is not the
+; div-by-0 guard shape and must not be folded.
+define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n3_wrong_pred(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
+; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp ne i4 %size, 0 ; not 'eq'
+ %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %smul.ov = extractvalue { i4, i1 } %smul, 1
+ %phitmp = xor i1 %smul.ov, true
+ %or = or i1 %cmp, %phitmp
+ ret i1 %or
+}
+
+; Negative test: combining opcode is 'and' instead of 'or'; the guard
+; pattern requires 'or', so no simplification may happen.
+define i1 @n4_not_and(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n4_not_and(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = and i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size, 0
+ %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %smul.ov = extractvalue { i4, i1 } %smul, 1
+ %phitmp = xor i1 %smul.ov, true
+ %or = and i1 %cmp, %phitmp ; not 'or'
+ ret i1 %or
+}
+
+; Negative test: guard compares against 1, not 0; %size == 1 does not
+; preclude overflow, so the icmp must stay.
+define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n5_not_zero(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 1
+; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size, 1 ; should be '0'
+ %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %smul.ov = extractvalue { i4, i1 } %smul, 1
+ %phitmp = xor i1 %smul.ov, true
+ %or = or i1 %cmp, %phitmp
+ ret i1 %or
+}
Added: llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-umul_ov-not.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-umul_ov-not.ll?rev=366801&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-umul_ov-not.ll (added)
+++ llvm/trunk/test/Transforms/InstSimplify/div-by-0-guard-before-umul_ov-not.ll Tue Jul 23 05:42:49 2019
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instsimplify -S | FileCheck %s
+
+declare { i4, i1 } @llvm.umul.with.overflow.i4(i4, i4) #1
+
+; Baseline pattern: %or = (%size == 0) | !umul.ov(%size, %nmemb).
+; The div-by-0 guard is redundant (0 * %nmemb cannot overflow), so
+; InstSimplify should eventually drop the icmp; the CHECK lines record
+; that the fold does not happen yet (NFC test, autogenerated checks).
+define i1 @t0_umul(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @t0_umul(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size, 0
+ %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %umul.ov = extractvalue { i4, i1 } %umul, 1
+ %phitmp = xor i1 %umul.ov, true ; inverted overflow bit
+ %or = or i1 %cmp, %phitmp
+ ret i1 %or
+}
+
+; Same pattern as @t0_umul but with the 'or' operands swapped; the
+; eventual fold must match the guard commutatively.
+define i1 @t1_commutative(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @t1_commutative(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[PHITMP]], [[CMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size, 0
+ %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %umul.ov = extractvalue { i4, i1 } %umul, 1
+ %phitmp = xor i1 %umul.ov, true
+ %or = or i1 %phitmp, %cmp ; swapped
+ ret i1 %or
+}
+
+; Negative test: the icmp guards %size1 but the multiply uses %size0,
+; so the guard is not redundant and must not be removed.
+define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
+; CHECK-LABEL: @n2_wrong_size(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE1:%.*]], 0
+; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size1, 0 ; not %size0
+ %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size0, i4 %nmemb)
+ %umul.ov = extractvalue { i4, i1 } %umul, 1
+ %phitmp = xor i1 %umul.ov, true
+ %or = or i1 %cmp, %phitmp
+ ret i1 %or
+}
+
+; Negative test: predicate is 'ne' instead of 'eq', so this is not the
+; div-by-0 guard shape and must not be folded.
+define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n3_wrong_pred(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
+; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp ne i4 %size, 0 ; not 'eq'
+ %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %umul.ov = extractvalue { i4, i1 } %umul, 1
+ %phitmp = xor i1 %umul.ov, true
+ %or = or i1 %cmp, %phitmp
+ ret i1 %or
+}
+
+; Negative test: combining opcode is 'and' instead of 'or'; the guard
+; pattern requires 'or', so no simplification may happen.
+define i1 @n4_not_and(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n4_not_and(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
+; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = and i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size, 0
+ %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %umul.ov = extractvalue { i4, i1 } %umul, 1
+ %phitmp = xor i1 %umul.ov, true
+ %or = and i1 %cmp, %phitmp ; not 'or'
+ ret i1 %or
+}
+
+; Negative test: guard compares against 1, not 0; %size == 1 does not
+; preclude overflow, so the icmp must stay.
+define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
+; CHECK-LABEL: @n5_not_zero(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 1
+; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
+; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
+; CHECK-NEXT: ret i1 [[OR]]
+;
+ %cmp = icmp eq i4 %size, 1 ; should be '0'
+ %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
+ %umul.ov = extractvalue { i4, i1 } %umul, 1
+ %phitmp = xor i1 %umul.ov, true
+ %or = or i1 %cmp, %phitmp
+ ret i1 %or
+}
More information about the llvm-commits
mailing list