[llvm] [msan] Handle llvm.[us]cmp (starship operator) (PR #125804)

Thurston Dang via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 4 21:40:46 PST 2025


https://github.com/thurstond created https://github.com/llvm/llvm-project/pull/125804

Apply handleShadowOr to llvm.[us]cmp. Previously, llvm.[us]cmp was correctly handled heuristically when each parameter type is the same as the return type (e.g., `call i8 @llvm.ucmp.i8.i8(i8 %x, i8 %y)`) but handled incorrectly by visitInstruction when the return type is different, e.g., `call i8 @llvm.ucmp.i8.i62(i62 %x, i62 %y)` or `call <4 x i8> @llvm.ucmp.v4i8.v4i32(<4 x i32> %x, <4 x i32> %y)`.

Updates the tests from https://github.com/llvm/llvm-project/pull/125790

>From d62faa46c2f32476fceee1785c648233fadb42c0 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 5 Feb 2025 05:31:22 +0000
Subject: [PATCH] [msan] Handle llvm.[us]cmp (starship operator)

Apply handleShadowOr to llvm.[us]cmp. Previously, llvm.[su]cmp was
correctly handled heuristically when each parameter type is the same as
the return type (e.g., call i8 @llvm.ucmp.i8.i8(i8 %x, i8 %y)) but
handled incorrectly by visitInstruction when the return type is
different e.g., (call i8 @llvm.ucmp.i8.i62(i62 %x, i62 %y),
call <4 x i8> @llvm.ucmp.v4i8.v4i32(<4 x i32> %x, <4 x i32> %y))

Updates the tests in https://github.com/llvm/llvm-project/pull/125790
---
 .../Instrumentation/MemorySanitizer.cpp       |   6 +
 .../Instrumentation/MemorySanitizer/scmp.ll   | 273 +++++-------------
 .../Instrumentation/MemorySanitizer/ucmp.ll   | 228 +++++----------
 3 files changed, 154 insertions(+), 353 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index f3f2e5041fb1d3..7f714d0b3d1b0a 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -4800,6 +4800,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       break;
     }
 
+    case Intrinsic::scmp:
+    case Intrinsic::ucmp: {
+      handleShadowOr(I);
+      break;
+    }
+
     default:
       if (!handleUnknownIntrinsic(I))
         visitInstruction(I);
diff --git a/llvm/test/Instrumentation/MemorySanitizer/scmp.ll b/llvm/test/Instrumentation/MemorySanitizer/scmp.ll
index 89c5b283b25103..5c94c216106a2c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/scmp.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/scmp.ll
@@ -1,14 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
 ; RUN: opt < %s -passes=msan -S | FileCheck %s
 ;
-; llvm.scmp is correctly handled heuristically when each parameter is the same
-; type as the return type e.g.,
-;   call i8 @llvm.scmp.i8.i8(i8 %x, i8 %y)
-; but handled incorrectly by visitInstruction when the return type is different
-; e.g.,
-;   call i8 @llvm.scmp.i8.i62(i62 %x, i62 %y)
-;   call <4 x i8> @llvm.scmp.v4i8.v4i32(<4 x i32> %x, <4 x i32> %y)
-;
 ; Forked from llvm/test/CodeGen/X86/scmp.ll
 
 target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
@@ -21,8 +13,9 @@ define i8 @scmp.8.8(i8 %x, i8 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or i8 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i8 [[_MSPROP]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i8 @llvm.scmp.i8.i8(i8 [[X]], i8 [[Y]])
-; CHECK-NEXT:    store i8 [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP3]]
 ;
   %1 = call i8 @llvm.scmp(i8 %x, i8 %y)
@@ -35,16 +28,11 @@ define i8 @scmp.8.16(i16 %x, i16 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i16 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i16 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i16 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i16 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i16 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.scmp.i8.i16(i16 [[X]], i16 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.scmp(i16 %x, i16 %y)
@@ -57,16 +45,11 @@ define i8 @scmp.8.32(i32 %x, i32 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
@@ -79,16 +62,11 @@ define i8 @scmp.8.64(i64 %x, i64 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.scmp.i8.i64(i64 [[X]], i64 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.scmp(i64 %x, i64 %y)
@@ -101,16 +79,11 @@ define i8 @scmp.8.128(i128 %x, i128 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i128, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i128 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i128 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i128 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.scmp.i8.i128(i128 [[X]], i128 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.scmp(i128 %x, i128 %y)
@@ -124,8 +97,9 @@ define i32 @scmp.32.32(i32 %x, i32 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.scmp.i32.i32(i32 [[X]], i32 [[Y]])
-; CHECK-NEXT:    store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
   %1 = call i32 @llvm.scmp(i32 %x, i32 %y)
@@ -138,16 +112,11 @@ define i32 @scmp.32.64(i64 %x, i64 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[_MSPROP1]] to i32
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.scmp.i32.i64(i64 [[X]], i64 [[Y]])
-; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[TMP5]]
 ;
   %1 = call i32 @llvm.scmp(i64 %x, i64 %y)
@@ -161,8 +130,9 @@ define i64 @scmp.64.64(i64 %x, i64 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.scmp.i64.i64(i64 [[X]], i64 [[Y]])
-; CHECK-NEXT:    store i64 [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i64 [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i64 [[TMP3]]
 ;
   %1 = call i64 @llvm.scmp(i64 %x, i64 %y)
@@ -175,16 +145,11 @@ define i4 @scmp_narrow_result(i32 %x, i32 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[_MSPROP1]] to i4
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i4 @llvm.scmp.i4.i32(i32 [[X]], i32 [[Y]])
-; CHECK-NEXT:    store i4 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i4 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i4 [[TMP5]]
 ;
   %1 = call i4 @llvm.scmp(i32 %x, i32 %y)
@@ -197,16 +162,11 @@ define i8 @scmp_narrow_op(i62 %x, i62 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i62, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i62, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i62 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i62 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i62 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i62 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i62 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.scmp.i8.i62(i62 [[X]], i62 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.scmp(i62 %x, i62 %y)
@@ -219,16 +179,11 @@ define i141 @scmp_wide_result(i32 %x, i32 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[_MSPROP1]] to i141
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i141 @llvm.scmp.i141.i32(i32 [[X]], i32 [[Y]])
-; CHECK-NEXT:    store i141 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i141 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i141 [[TMP5]]
 ;
   %1 = call i141 @llvm.scmp(i32 %x, i32 %y)
@@ -241,16 +196,11 @@ define i8 @scmp_wide_op(i109 %x, i109 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i109, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i109, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i109 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i109 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i109 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i109 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i109 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.scmp.i8.i109(i109 [[X]], i109 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.scmp(i109 %x, i109 %y)
@@ -263,16 +213,11 @@ define i41 @scmp_uncommon_types(i7 %x, i7 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i7, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i7, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i7 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i7 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i7 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i7 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i7 [[_MSPROP1]] to i41
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i41 @llvm.scmp.i41.i7(i7 [[X]], i7 [[Y]])
-; CHECK-NEXT:    store i41 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i41 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i41 [[TMP5]]
 ;
   %1 = call i41 @llvm.scmp(i7 %x, i7 %y)
@@ -286,8 +231,9 @@ define <4 x i32> @scmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i32> @llvm.scmp.v4i32.v4i32(<4 x i32> [[X]], <4 x i32> [[Y]])
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[TMP3]]
 ;
   %1 = call <4 x i32> @llvm.scmp(<4 x i32> %x, <4 x i32> %y)
@@ -300,18 +246,11 @@ define <4 x i8> @scmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind #0
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc <4 x i32> [[_MSPROP1]] to <4 x i8>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <4 x i8> @llvm.scmp.v4i8.v4i32(<4 x i32> [[X]], <4 x i32> [[Y]])
-; CHECK-NEXT:    store <4 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <4 x i8> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i8> [[TMP7]]
 ;
   %1 = call <4 x i8> @llvm.scmp(<4 x i32> %x, <4 x i32> %y)
@@ -324,18 +263,11 @@ define <4 x i32> @scmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i8> [[TMP1]] to i32
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i8> [[TMP2]] to i32
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i32 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i8> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <4 x i8> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <4 x i8> [[_MSPROP1]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <4 x i32> @llvm.scmp.v4i32.v4i8(<4 x i8> [[X]], <4 x i8> [[Y]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[TMP7]]
 ;
   %1 = call <4 x i32> @llvm.scmp(<4 x i8> %x, <4 x i8> %y)
@@ -348,18 +280,11 @@ define <16 x i32> @scmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind #0
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <16 x i8> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <16 x i8> [[_MSPROP1]] to <16 x i32>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <16 x i32> @llvm.scmp.v16i32.v16i8(<16 x i8> [[X]], <16 x i8> [[Y]])
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <16 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i32> [[TMP7]]
 ;
   %1 = call <16 x i32> @llvm.scmp(<16 x i8> %x, <16 x i8> %y)
@@ -372,18 +297,11 @@ define <16 x i8> @scmp_wide_vec_op(<16 x i64> %x, <16 x i64> %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i64> [[TMP1]] to i1024
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i1024 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i64> [[TMP2]] to i1024
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i1024 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i64> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <16 x i64> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc <16 x i64> [[_MSPROP1]] to <16 x i8>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <16 x i8> @llvm.scmp.v16i8.v16i64(<16 x i64> [[X]], <16 x i64> [[Y]])
-; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <16 x i8> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i8> [[TMP7]]
 ;
   %1 = call <16 x i8> @llvm.scmp(<16 x i64> %x, <16 x i64> %y)
@@ -396,18 +314,11 @@ define <7 x i117> @scmp_uncommon_vectors(<7 x i7> %x, <7 x i7> %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <7 x i7>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <7 x i7>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <7 x i7> [[TMP1]] to i49
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i49 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <7 x i7> [[TMP2]] to i49
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i49 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <7 x i7> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <7 x i7> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <7 x i7> [[_MSPROP1]] to <7 x i117>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <7 x i117> @llvm.scmp.v7i117.v7i7(<7 x i7> [[X]], <7 x i7> [[Y]])
-; CHECK-NEXT:    store <7 x i117> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <7 x i117> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <7 x i117> [[TMP7]]
 ;
   %1 = call <7 x i117> @llvm.scmp(<7 x i7> %x, <7 x i7> %y)
@@ -420,18 +331,11 @@ define <1 x i3> @scmp_scalarize(<1 x i33> %x, <1 x i33> %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i33>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <1 x i33>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <1 x i33> [[TMP1]] to i33
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i33 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <1 x i33> [[TMP2]] to i33
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i33 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <1 x i33> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <1 x i33> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc <1 x i33> [[_MSPROP1]] to <1 x i3>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <1 x i3> @llvm.scmp.v1i3.v1i33(<1 x i33> [[X]], <1 x i33> [[Y]])
-; CHECK-NEXT:    store <1 x i3> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <1 x i3> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <1 x i3> [[TMP7]]
 ;
   %1 = call <1 x i3> @llvm.scmp(<1 x i33> %x, <1 x i33> %y)
@@ -444,18 +348,11 @@ define <2 x i8> @scmp_bool_operands(<2 x i1> %x, <2 x i1> %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i1>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i1> [[TMP1]] to i2
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i2 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i1> [[TMP2]] to i2
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i2 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <2 x i1> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <2 x i1> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <2 x i1> [[_MSPROP1]] to <2 x i8>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <2 x i8> @llvm.scmp.v2i8.v2i1(<2 x i1> [[X]], <2 x i1> [[Y]])
-; CHECK-NEXT:    store <2 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <2 x i8> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i8> [[TMP7]]
 ;
   %1 = call <2 x i8> @llvm.scmp(<2 x i1> %x, <2 x i1> %y)
@@ -468,18 +365,11 @@ define <2 x i16> @scmp_ret_wider_than_operands(<2 x i8> %x, <2 x i8> %y) nounwin
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i8> [[TMP1]] to i16
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i16 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i8> [[TMP2]] to i16
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i16 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <2 x i8> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <2 x i8> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <2 x i8> [[_MSPROP1]] to <2 x i16>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <2 x i16> @llvm.scmp.v2i16.v2i8(<2 x i8> [[X]], <2 x i8> [[Y]])
-; CHECK-NEXT:    store <2 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <2 x i16> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i16> [[TMP7]]
 ;
   %1 = call <2 x i16> @llvm.scmp(<2 x i8> %x, <2 x i8> %y)
@@ -487,6 +377,3 @@ define <2 x i16> @scmp_ret_wider_than_operands(<2 x i8> %x, <2 x i8> %y) nounwin
 }
 
 attributes #0 = { sanitize_memory }
-;.
-; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
-;.
diff --git a/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll b/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll
index 5e0a248afe7489..1b70242dae2b5a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll
@@ -1,14 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
 ; RUN: opt < %s -passes=msan -S | FileCheck %s
 ;
-; llvm.ucmp is correctly handled heuristically when each parameter is the same
-; type as the return type e.g.,
-;   call i8 @llvm.ucmp.i8.i8(i8 %x, i8 %y)
-; but handled incorrectly by visitInstruction when the return type is different  
-; e.g.,
-;   call i8 @llvm.ucmp.i8.i62(i62 %x, i62 %y)
-;   call <4 x i8> @llvm.ucmp.v4i8.v4i32(<4 x i32> %x, <4 x i32> %y)
-
 ; Forked from llvm/test/CodeGen/X86/ucmp.ll
 
 target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
@@ -21,8 +13,9 @@ define i8 @ucmp.8.8(i8 %x, i8 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or i8 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i8 [[_MSPROP]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i8 @llvm.ucmp.i8.i8(i8 [[X]], i8 [[Y]])
-; CHECK-NEXT:    store i8 [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP3]]
 ;
   %1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
@@ -35,16 +28,11 @@ define i8 @ucmp.8.16(i16 %x, i16 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i16 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i16 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i16 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i16 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i16 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.ucmp.i8.i16(i16 [[X]], i16 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
@@ -57,16 +45,11 @@ define i8 @ucmp.8.32(i32 %x, i32 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
@@ -79,16 +62,11 @@ define i8 @ucmp.8.64(i64 %x, i64 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.ucmp.i8.i64(i64 [[X]], i64 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.ucmp(i64 %x, i64 %y)
@@ -101,16 +79,11 @@ define i8 @ucmp.8.128(i128 %x, i128 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i128, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i128 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i128 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i128 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.ucmp.i8.i128(i128 [[X]], i128 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.ucmp(i128 %x, i128 %y)
@@ -124,8 +97,9 @@ define i32 @ucmp.32.32(i32 %x, i32 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.ucmp.i32.i32(i32 [[X]], i32 [[Y]])
-; CHECK-NEXT:    store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
   %1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
@@ -138,16 +112,11 @@ define i32 @ucmp.32.64(i64 %x, i64 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[_MSPROP1]] to i32
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.ucmp.i32.i64(i64 [[X]], i64 [[Y]])
-; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[TMP5]]
 ;
   %1 = call i32 @llvm.ucmp(i64 %x, i64 %y)
@@ -161,8 +130,9 @@ define i64 @ucmp.64.64(i64 %x, i64 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.ucmp.i64.i64(i64 [[X]], i64 [[Y]])
-; CHECK-NEXT:    store i64 [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i64 [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i64 [[TMP3]]
 ;
   %1 = call i64 @llvm.ucmp(i64 %x, i64 %y)
@@ -175,16 +145,11 @@ define i4 @ucmp_narrow_result(i32 %x, i32 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[_MSPROP1]] to i4
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i4 @llvm.ucmp.i4.i32(i32 [[X]], i32 [[Y]])
-; CHECK-NEXT:    store i4 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i4 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i4 [[TMP5]]
 ;
   %1 = call i4 @llvm.ucmp(i32 %x, i32 %y)
@@ -197,16 +162,11 @@ define i8 @ucmp_narrow_op(i62 %x, i62 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i62, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i62, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i62 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i62 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i62 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i62 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i62 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.ucmp.i8.i62(i62 [[X]], i62 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.ucmp(i62 %x, i62 %y)
@@ -219,16 +179,11 @@ define i141 @ucmp_wide_result(i32 %x, i32 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[_MSPROP1]] to i141
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i141 @llvm.ucmp.i141.i32(i32 [[X]], i32 [[Y]])
-; CHECK-NEXT:    store i141 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i141 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i141 [[TMP5]]
 ;
   %1 = call i141 @llvm.ucmp(i32 %x, i32 %y)
@@ -241,16 +196,11 @@ define i8 @ucmp_wide_op(i109 %x, i109 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i109, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i109, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i109 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i109 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i109 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i109 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i109 [[_MSPROP1]] to i8
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.ucmp.i8.i109(i109 [[X]], i109 [[Y]])
-; CHECK-NEXT:    store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i8 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i8 [[TMP5]]
 ;
   %1 = call i8 @llvm.ucmp(i109 %x, i109 %y)
@@ -263,16 +213,11 @@ define i41 @ucmp_uncommon_types(i7 %x, i7 %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i7, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i7, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i7 [[TMP1]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i7 [[TMP2]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       4:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or i7 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i7 [[_MSPROP]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i7 [[_MSPROP1]] to i41
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i41 @llvm.ucmp.i41.i7(i7 [[X]], i7 [[Y]])
-; CHECK-NEXT:    store i41 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i41 [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i41 [[TMP5]]
 ;
   %1 = call i41 @llvm.ucmp(i7 %x, i7 %y)
@@ -286,8 +231,9 @@ define <4 x i32> @ucmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i32> @llvm.ucmp.v4i32.v4i32(<4 x i32> [[X]], <4 x i32> [[Y]])
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[TMP3]]
 ;
   %1 = call <4 x i32> @llvm.ucmp(<4 x i32> %x, <4 x i32> %y)
@@ -300,18 +246,11 @@ define <4 x i8> @ucmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind #0
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc <4 x i32> [[_MSPROP1]] to <4 x i8>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <4 x i8> @llvm.ucmp.v4i8.v4i32(<4 x i32> [[X]], <4 x i32> [[Y]])
-; CHECK-NEXT:    store <4 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <4 x i8> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i8> [[TMP7]]
 ;
   %1 = call <4 x i8> @llvm.ucmp(<4 x i32> %x, <4 x i32> %y)
@@ -324,18 +263,11 @@ define <4 x i32> @ucmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i8> [[TMP1]] to i32
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i8> [[TMP2]] to i32
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i32 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i8> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <4 x i8> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <4 x i8> [[_MSPROP1]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <4 x i32> @llvm.ucmp.v4i32.v4i8(<4 x i8> [[X]], <4 x i8> [[Y]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[TMP7]]
 ;
   %1 = call <4 x i32> @llvm.ucmp(<4 x i8> %x, <4 x i8> %y)
@@ -348,18 +280,11 @@ define <16 x i32> @ucmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind #0
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <16 x i8> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <16 x i8> [[_MSPROP1]] to <16 x i32>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <16 x i32> @llvm.ucmp.v16i32.v16i8(<16 x i8> [[X]], <16 x i8> [[Y]])
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <16 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i32> [[TMP7]]
 ;
   %1 = call <16 x i32> @llvm.ucmp(<16 x i8> %x, <16 x i8> %y)
@@ -372,18 +297,11 @@ define <16 x i8> @ucmp_wide_vec_op(<16 x i32> %x, <16 x i32> %y) nounwind #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i512 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc <16 x i32> [[_MSPROP1]] to <16 x i8>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <16 x i8> @llvm.ucmp.v16i8.v16i32(<16 x i32> [[X]], <16 x i32> [[Y]])
-; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <16 x i8> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i8> [[TMP7]]
 ;
   %1 = call <16 x i8> @llvm.ucmp(<16 x i32> %x, <16 x i32> %y)
@@ -396,18 +314,11 @@ define <17 x i2> @ucmp_uncommon_vectors(<17 x i71> %x, <17 x i71> %y) nounwind #
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <17 x i71>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load <17 x i71>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <17 x i71> [[TMP1]] to i1207
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i1207 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <17 x i71> [[TMP2]] to i1207
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i1207 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       6:
+; CHECK-NEXT:    [[_MSPROP:%.*]] = or <17 x i71> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <17 x i71> [[_MSPROP]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc <17 x i71> [[_MSPROP1]] to <17 x i2>
 ; CHECK-NEXT:    [[TMP7:%.*]] = call <17 x i2> @llvm.ucmp.v17i2.v17i71(<17 x i71> [[X]], <17 x i71> [[Y]])
-; CHECK-NEXT:    store <17 x i2> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store <17 x i2> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <17 x i2> [[TMP7]]
 ;
   %1 = call <17 x i2> @llvm.ucmp(<17 x i71> %x, <17 x i71> %y)
@@ -415,6 +326,3 @@ define <17 x i2> @ucmp_uncommon_vectors(<17 x i71> %x, <17 x i71> %y) nounwind #
 }
 
 attributes #0 = { sanitize_memory }
-;.
-; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
-;.



More information about the llvm-commits mailing list