[llvm] 784d293 - AtomicExpand: Switch test to generated checks

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 20 13:51:15 PDT 2022


Author: Matt Arsenault
Date: 2022-09-20T16:51:05-04:00
New Revision: 784d2930c0faefd542229057fd71a1b723e6fac9

URL: https://github.com/llvm/llvm-project/commit/784d2930c0faefd542229057fd71a1b723e6fac9
DIFF: https://github.com/llvm/llvm-project/commit/784d2930c0faefd542229057fd71a1b723e6fac9.diff

LOG: AtomicExpand: Switch test to generated checks

Added: 
    

Modified: 
    llvm/test/Transforms/AtomicExpand/SPARC/partword.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
index 999fa1541f565..c186ac18a432a 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S %s -atomic-expand | FileCheck %s
 
 ;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
@@ -8,183 +9,281 @@
 target datalayout = "E-m:e-i64:64-n32:64-S128"
 target triple = "sparcv9-unknown-unknown"
 
-; CHECK-LABEL: @test_cmpxchg_i8(
-; CHECK:  fence seq_cst
-; CHECK:  %0 = ptrtoint i8* %arg to i64
-; CHECK:  %1 = and i64 %0, -4
-; CHECK:  %AlignedAddr = inttoptr i64 %1 to i32*
-; CHECK:  %PtrLSB = and i64 %0, 3
-; CHECK:  %2 = xor i64 %PtrLSB, 3
-; CHECK:  %3 = shl i64 %2, 3
-; CHECK:  %ShiftAmt = trunc i64 %3 to i32
-; CHECK:  %Mask = shl i32 255, %ShiftAmt
-; CHECK:  %Inv_Mask = xor i32 %Mask, -1
-; CHECK:  %4 = zext i8 %new to i32
-; CHECK:  %5 = shl i32 %4, %ShiftAmt
-; CHECK:  %6 = zext i8 %old to i32
-; CHECK:  %7 = shl i32 %6, %ShiftAmt
-; CHECK:  %8 = load i32, i32* %AlignedAddr
-; CHECK:  %9 = and i32 %8, %Inv_Mask
-; CHECK:  br label %partword.cmpxchg.loop
-; CHECK:partword.cmpxchg.loop:
-; CHECK:  %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
-; CHECK:  %11 = or i32 %10, %5
-; CHECK:  %12 = or i32 %10, %7
-; CHECK:  %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
-; CHECK:  %14 = extractvalue { i32, i1 } %13, 0
-; CHECK:  %15 = extractvalue { i32, i1 } %13, 1
-; CHECK:  br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
-; CHECK:partword.cmpxchg.failure:
-; CHECK:  %16 = and i32 %14, %Inv_Mask
-; CHECK:  %17 = icmp ne i32 %10, %16
-; CHECK:  br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
-; CHECK:partword.cmpxchg.end:
-; CHECK:  %shifted = lshr i32 %14, %ShiftAmt
-; CHECK:  %extracted = trunc i32 %shifted to i8
-; CHECK:  %18 = insertvalue { i8, i1 } undef, i8 %extracted, 0
-; CHECK:  %19 = insertvalue { i8, i1 } %18, i1 %15, 1
-; CHECK:  fence seq_cst
-; CHECK:  %ret = extractvalue { i8, i1 } %19, 0
-; CHECK:  ret i8 %ret
 define i8 @test_cmpxchg_i8(i8* %arg, i8 %old, i8 %new) {
+; CHECK-LABEL: @test_cmpxchg_i8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i8* [[ARG:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], -4
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP1]] to i32*
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[PTRLSB]], 3
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i8 [[NEW:%.*]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP6:%.*]] = zext i8 [[OLD:%.*]] to i32
+; CHECK-NEXT:    [[TMP7:%.*]] = shl i32 [[TMP6]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = and i32 [[TMP8]], [[INV_MASK]]
+; CHECK-NEXT:    br label [[PARTWORD_CMPXCHG_LOOP:%.*]]
+; CHECK:       partword.cmpxchg.loop:
+; CHECK-NEXT:    [[TMP10:%.*]] = phi i32 [ [[TMP9]], [[ENTRY:%.*]] ], [ [[TMP16:%.*]], [[PARTWORD_CMPXCHG_FAILURE:%.*]] ]
+; CHECK-NEXT:    [[TMP11:%.*]] = or i32 [[TMP10]], [[TMP5]]
+; CHECK-NEXT:    [[TMP12:%.*]] = or i32 [[TMP10]], [[TMP7]]
+; CHECK-NEXT:    [[TMP13:%.*]] = cmpxchg i32* [[ALIGNEDADDR]], i32 [[TMP12]], i32 [[TMP11]] monotonic monotonic, align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = extractvalue { i32, i1 } [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = extractvalue { i32, i1 } [[TMP13]], 1
+; CHECK-NEXT:    br i1 [[TMP15]], label [[PARTWORD_CMPXCHG_END:%.*]], label [[PARTWORD_CMPXCHG_FAILURE]]
+; CHECK:       partword.cmpxchg.failure:
+; CHECK-NEXT:    [[TMP16]] = and i32 [[TMP14]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP10]], [[TMP16]]
+; CHECK-NEXT:    br i1 [[TMP17]], label [[PARTWORD_CMPXCHG_LOOP]], label [[PARTWORD_CMPXCHG_END]]
+; CHECK:       partword.cmpxchg.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP14]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; CHECK-NEXT:    [[TMP18:%.*]] = insertvalue { i8, i1 } undef, i8 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = insertvalue { i8, i1 } [[TMP18]], i1 [[TMP15]], 1
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    [[RET:%.*]] = extractvalue { i8, i1 } [[TMP19]], 0
+; CHECK-NEXT:    ret i8 [[RET]]
+;
 entry:
   %ret_succ = cmpxchg i8* %arg, i8 %old, i8 %new seq_cst monotonic
   %ret = extractvalue { i8, i1 } %ret_succ, 0
   ret i8 %ret
 }
 
-; CHECK-LABEL: @test_cmpxchg_i16(
-; CHECK:  fence seq_cst
-; CHECK:  %0 = ptrtoint i16* %arg to i64
-; CHECK:  %1 = and i64 %0, -4
-; CHECK:  %AlignedAddr = inttoptr i64 %1 to i32*
-; CHECK:  %PtrLSB = and i64 %0, 3
-; CHECK:  %2 = xor i64 %PtrLSB, 2
-; CHECK:  %3 = shl i64 %2, 3
-; CHECK:  %ShiftAmt = trunc i64 %3 to i32
-; CHECK:  %Mask = shl i32 65535, %ShiftAmt
-; CHECK:  %Inv_Mask = xor i32 %Mask, -1
-; CHECK:  %4 = zext i16 %new to i32
-; CHECK:  %5 = shl i32 %4, %ShiftAmt
-; CHECK:  %6 = zext i16 %old to i32
-; CHECK:  %7 = shl i32 %6, %ShiftAmt
-; CHECK:  %8 = load i32, i32* %AlignedAddr
-; CHECK:  %9 = and i32 %8, %Inv_Mask
-; CHECK:  br label %partword.cmpxchg.loop
-; CHECK:partword.cmpxchg.loop:
-; CHECK:  %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
-; CHECK:  %11 = or i32 %10, %5
-; CHECK:  %12 = or i32 %10, %7
-; CHECK:  %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
-; CHECK:  %14 = extractvalue { i32, i1 } %13, 0
-; CHECK:  %15 = extractvalue { i32, i1 } %13, 1
-; CHECK:  br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
-; CHECK:partword.cmpxchg.failure:
-; CHECK:  %16 = and i32 %14, %Inv_Mask
-; CHECK:  %17 = icmp ne i32 %10, %16
-; CHECK:  br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
-; CHECK:partword.cmpxchg.end:
-; CHECK:  %shifted = lshr i32 %14, %ShiftAmt
-; CHECK:  %extracted = trunc i32 %shifted to i16
-; CHECK:  %18 = insertvalue { i16, i1 } undef, i16 %extracted, 0
-; CHECK:  %19 = insertvalue { i16, i1 } %18, i1 %15, 1
-; CHECK:  fence seq_cst
-; CHECK:  %ret = extractvalue { i16, i1 } %19, 0
-; CHECK:  ret i16 %ret
 define i16 @test_cmpxchg_i16(i16* %arg, i16 %old, i16 %new) {
+; CHECK-LABEL: @test_cmpxchg_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[ARG:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], -4
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP1]] to i32*
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[PTRLSB]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[NEW:%.*]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP6:%.*]] = zext i16 [[OLD:%.*]] to i32
+; CHECK-NEXT:    [[TMP7:%.*]] = shl i32 [[TMP6]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = and i32 [[TMP8]], [[INV_MASK]]
+; CHECK-NEXT:    br label [[PARTWORD_CMPXCHG_LOOP:%.*]]
+; CHECK:       partword.cmpxchg.loop:
+; CHECK-NEXT:    [[TMP10:%.*]] = phi i32 [ [[TMP9]], [[ENTRY:%.*]] ], [ [[TMP16:%.*]], [[PARTWORD_CMPXCHG_FAILURE:%.*]] ]
+; CHECK-NEXT:    [[TMP11:%.*]] = or i32 [[TMP10]], [[TMP5]]
+; CHECK-NEXT:    [[TMP12:%.*]] = or i32 [[TMP10]], [[TMP7]]
+; CHECK-NEXT:    [[TMP13:%.*]] = cmpxchg i32* [[ALIGNEDADDR]], i32 [[TMP12]], i32 [[TMP11]] monotonic monotonic, align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = extractvalue { i32, i1 } [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = extractvalue { i32, i1 } [[TMP13]], 1
+; CHECK-NEXT:    br i1 [[TMP15]], label [[PARTWORD_CMPXCHG_END:%.*]], label [[PARTWORD_CMPXCHG_FAILURE]]
+; CHECK:       partword.cmpxchg.failure:
+; CHECK-NEXT:    [[TMP16]] = and i32 [[TMP14]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP10]], [[TMP16]]
+; CHECK-NEXT:    br i1 [[TMP17]], label [[PARTWORD_CMPXCHG_LOOP]], label [[PARTWORD_CMPXCHG_END]]
+; CHECK:       partword.cmpxchg.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP14]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP18:%.*]] = insertvalue { i16, i1 } undef, i16 [[EXTRACTED]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = insertvalue { i16, i1 } [[TMP18]], i1 [[TMP15]], 1
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    [[RET:%.*]] = extractvalue { i16, i1 } [[TMP19]], 0
+; CHECK-NEXT:    ret i16 [[RET]]
+;
 entry:
   %ret_succ = cmpxchg i16* %arg, i16 %old, i16 %new seq_cst monotonic
   %ret = extractvalue { i16, i1 } %ret_succ, 0
   ret i16 %ret
 }
 
-
-; CHECK-LABEL: @test_add_i16(
-; CHECK:  fence seq_cst
-; CHECK:  %0 = ptrtoint i16* %arg to i64
-; CHECK:  %1 = and i64 %0, -4
-; CHECK:  %AlignedAddr = inttoptr i64 %1 to i32*
-; CHECK:  %PtrLSB = and i64 %0, 3
-; CHECK:  %2 = xor i64 %PtrLSB, 2
-; CHECK:  %3 = shl i64 %2, 3
-; CHECK:  %ShiftAmt = trunc i64 %3 to i32
-; CHECK:  %Mask = shl i32 65535, %ShiftAmt
-; CHECK:  %Inv_Mask = xor i32 %Mask, -1
-; CHECK:  %4 = zext i16 %val to i32
-; CHECK:  %ValOperand_Shifted = shl i32 %4, %ShiftAmt
-; CHECK:  %5 = load i32, i32* %AlignedAddr, align 4
-; CHECK:  br label %atomicrmw.start
-; CHECK:atomicrmw.start:
-; CHECK:  %loaded = phi i32 [ %5, %entry ], [ %newloaded, %atomicrmw.start ]
-; CHECK:  %new = add i32 %loaded, %ValOperand_Shifted
-; CHECK:  %6 = and i32 %new, %Mask
-; CHECK:  %7 = and i32 %loaded, %Inv_Mask
-; CHECK:  %8 = or i32 %7, %6
-; CHECK:  %9 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %8 monotonic monotonic
-; CHECK:  %success = extractvalue { i32, i1 } %9, 1
-; CHECK:  %newloaded = extractvalue { i32, i1 } %9, 0
-; CHECK:  br i1 %success, label %atomicrmw.end, label %atomicrmw.start
-; CHECK:atomicrmw.end:
-; CHECK:  %shifted = lshr i32 %newloaded, %ShiftAmt
-; CHECK:  %extracted = trunc i32 %shifted to i16
-; CHECK:  fence seq_cst
-; CHECK:  ret i16 %extracted
 define i16 @test_add_i16(i16* %arg, i16 %val) {
+; CHECK-LABEL: @test_add_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[ARG:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], -4
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP1]] to i32*
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[PTRLSB]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[VAL:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP5]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = add i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT:    [[TMP7:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[TMP9:%.*]] = cmpxchg i32* [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP8]] monotonic monotonic, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP9]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
 entry:
   %ret = atomicrmw add i16* %arg, i16 %val seq_cst
   ret i16 %ret
 }
 
-; CHECK-LABEL: @test_xor_i16(
-; (I'm going to just assert on the bits that differ from add, above.)
-; CHECK:atomicrmw.start:
-; CHECK:  %new = xor i32 %loaded, %ValOperand_Shifted
-; CHECK:  %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
-; CHECK:atomicrmw.end:
 define i16 @test_xor_i16(i16* %arg, i16 %val) {
+; CHECK-LABEL: @test_xor_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[ARG:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], -4
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP1]] to i32*
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[PTRLSB]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[VAL:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP5]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = xor i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg i32* [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] monotonic monotonic, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
 entry:
   %ret = atomicrmw xor i16* %arg, i16 %val seq_cst
   ret i16 %ret
 }
 
-; CHECK-LABEL: @test_or_i16(
-; (I'm going to just assert on the bits that differ from add, above.)
-; CHECK:atomicrmw.start:
-; CHECK:  %new = or i32 %loaded, %ValOperand_Shifted
-; CHECK:  %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
-; CHECK:atomicrmw.end:
 define i16 @test_or_i16(i16* %arg, i16 %val) {
+; CHECK-LABEL: @test_or_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[ARG:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], -4
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP1]] to i32*
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[PTRLSB]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[VAL:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP5]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = or i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg i32* [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] monotonic monotonic, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
 entry:
   %ret = atomicrmw or i16* %arg, i16 %val seq_cst
   ret i16 %ret
 }
 
-; CHECK-LABEL: @test_and_i16(
-; (I'm going to just assert on the bits that differ from add, above.)
-; CHECK:  %AndOperand = or i32 %Inv_Mask, %ValOperand_Shifted
-; CHECK:atomicrmw.start:
-; CHECK:  %new = and i32 %loaded, %AndOperand
-; CHECK:  %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
-; CHECK:atomicrmw.end:
 define i16 @test_and_i16(i16* %arg, i16 %val) {
+; CHECK-LABEL: @test_and_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[ARG:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], -4
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP1]] to i32*
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[PTRLSB]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[VAL:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[INV_MASK]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP5]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; CHECK-NEXT:    [[TMP6:%.*]] = cmpxchg i32* [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] monotonic monotonic, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    ret i16 [[EXTRACTED]]
+;
 entry:
   %ret = atomicrmw and i16* %arg, i16 %val seq_cst
   ret i16 %ret
 }
 
-; CHECK-LABEL: @test_min_i16(
-; CHECK:atomicrmw.start:
-; CHECK:  %shifted = lshr i32 %loaded, %ShiftAmt
-; CHECK:  %extracted = trunc i32 %shifted to i16
-; CHECK:  %6 = icmp sle i16 %extracted, %val
-; CHECK:  %new = select i1 %6, i16 %extracted, i16 %val
-; CHECK:  %extended = zext i16 %new to i32
-; CHECK:  %shifted1 = shl nuw i32 %extended, %ShiftAmt
-; CHECK:  %unmasked = and i32 %loaded, %Inv_Mask
-; CHECK:  %inserted = or i32 %unmasked, %shifted1
-; CHECK:  %7 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %inserted monotonic monotonic
-; CHECK:atomicrmw.end:
 define i16 @test_min_i16(i16* %arg, i16 %val) {
+; CHECK-LABEL: @test_min_i16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i16* [[ARG:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], -4
+; CHECK-NEXT:    [[ALIGNEDADDR:%.*]] = inttoptr i64 [[TMP1]] to i32*
+; CHECK-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[PTRLSB]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 3
+; CHECK-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
+; CHECK-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[VAL:%.*]] to i32
+; CHECK-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ALIGNEDADDR]], align 4
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
+; CHECK:       atomicrmw.start:
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i32 [ [[TMP5]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp sle i16 [[EXTRACTED]], [[VAL]]
+; CHECK-NEXT:    [[NEW:%.*]] = select i1 [[TMP6]], i16 [[EXTRACTED]], i16 [[VAL]]
+; CHECK-NEXT:    [[EXTENDED:%.*]] = zext i16 [[NEW]] to i32
+; CHECK-NEXT:    [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[UNMASKED:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT:    [[INSERTED:%.*]] = or i32 [[UNMASKED]], [[SHIFTED1]]
+; CHECK-NEXT:    [[TMP7:%.*]] = cmpxchg i32* [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[INSERTED]] monotonic monotonic, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK:       atomicrmw.end:
+; CHECK-NEXT:    [[SHIFTED2:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT:    [[EXTRACTED3:%.*]] = trunc i32 [[SHIFTED2]] to i16
+; CHECK-NEXT:    fence seq_cst
+; CHECK-NEXT:    ret i16 [[EXTRACTED3]]
+;
 entry:
   %ret = atomicrmw min i16* %arg, i16 %val seq_cst
   ret i16 %ret


        


More information about the llvm-commits mailing list