[llvm] r361241 - [NFC][X86][AArch64] Add some more tests for shift amount masking

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Tue May 21 04:14:01 PDT 2019


Author: lebedevri
Date: Tue May 21 04:14:01 2019
New Revision: 361241

URL: http://llvm.org/viewvc/llvm-project?rev=361241&view=rev
Log:
[NFC][X86][AArch64] Add some more tests for shift amount masking

The negation should be created more eagerly:
https://bugs.llvm.org/show_bug.cgi?id=41952
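
For context: both targets mask a variable shift amount modulo the bit width
(LSLV/LSRV/ASRV on AArch64, the %cl-based shifts on x86), so a shift amount of
"bitwidth - x" is equivalent to "-x". That is why the simple cases below lower
`sub i32 32, %shamt` to a single `neg` (or `negb %cl`). The bug asks for that
negation to be formed more eagerly so that compound amounts fold too: for
example, (32 - a) - b == -(a + b) (mod 32), which needs only an add and a neg.
A minimal IR sketch of that compound case (an illustrative copy of the
reg32_lshr_by_sub_from_negated pattern tested below, under a hypothetical name):

define i32 @sketch_lshr_by_sub_from_negated(i32 %val, i32 %a, i32 %b) {
  %nega     = sub i32 32, %a     ; 32 - a == -a (mod 32)
  %negasubb = sub i32 %nega, %b  ; (32 - a) - b == -(a + b) (mod 32)
  %shifted  = lshr i32 %val, %negasubb
  ret i32 %shifted
}

Ideally this would compile to "add w8, w1, w2; neg w8, w8; lsr w0, w0, w8" on
AArch64; as the autogenerated CHECK lines record, current codegen instead
materializes the constant ("mov w8, #32" followed by two subs, and likewise
"movl $32, %ecx" on x86).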

Added:
    llvm/trunk/test/CodeGen/AArch64/shift-amount-mod.ll
    llvm/trunk/test/CodeGen/X86/shift-amount-mod.ll

Added: llvm/trunk/test/CodeGen/AArch64/shift-amount-mod.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/shift-amount-mod.ll?rev=361241&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/shift-amount-mod.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/shift-amount-mod.ll Tue May 21 04:14:01 2019
@@ -0,0 +1,458 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s
+
+;==============================================================================;
+; the shift amount is negated (shiftbitwidth - shiftamt)
+;==============================================================================;
+
+; shift left
+;------------------------------------------------------------------------------;
+
+define i32 @reg32_shl_by_negated(i32 %val, i32 %shamt) nounwind {
+; CHECK-LABEL: reg32_shl_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    lsl w0, w0, w8
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 32, %shamt
+  %shifted = shl i32 %val, %negshamt
+  ret i32 %shifted
+}
+define i32 @load32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: load32_shl_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    neg w9, w1
+; CHECK-NEXT:    lsl w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = shl i32 %val, %negshamt
+  ret i32 %shifted
+}
+define void @store32_shl_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+; CHECK-LABEL: store32_shl_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w2
+; CHECK-NEXT:    lsl w8, w0, w8
+; CHECK-NEXT:    str w8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 32, %shamt
+  %shifted = shl i32 %val, %negshamt
+  store i32 %shifted, i32* %dstptr
+  ret void
+}
+define void @modify32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: modify32_shl_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    neg w9, w1
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = shl i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  ret void
+}
+
+define i64 @reg64_shl_by_negated(i64 %val, i64 %shamt) nounwind {
+; CHECK-LABEL: reg64_shl_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    lsl x0, x0, x8
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 64, %shamt
+  %shifted = shl i64 %val, %negshamt
+  ret i64 %shifted
+}
+define i64 @load64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: load64_shl_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    neg x9, x1
+; CHECK-NEXT:    lsl x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = shl i64 %val, %negshamt
+  ret i64 %shifted
+}
+define void @store64_shl_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+; CHECK-LABEL: store64_shl_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x2
+; CHECK-NEXT:    lsl x8, x0, x8
+; CHECK-NEXT:    str x8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 64, %shamt
+  %shifted = shl i64 %val, %negshamt
+  store i64 %shifted, i64* %dstptr
+  ret void
+}
+define void @modify64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: modify64_shl_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    neg x9, x1
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = shl i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  ret void
+}
+
+; logical shift right
+;------------------------------------------------------------------------------;
+
+define i32 @reg32_lshr_by_negated(i32 %val, i32 %shamt) nounwind {
+; CHECK-LABEL: reg32_lshr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    lsr w0, w0, w8
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 32, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define i32 @load32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: load32_lshr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    neg w9, w1
+; CHECK-NEXT:    lsr w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define void @store32_lshr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+; CHECK-LABEL: store32_lshr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w2
+; CHECK-NEXT:    lsr w8, w0, w8
+; CHECK-NEXT:    str w8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 32, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  store i32 %shifted, i32* %dstptr
+  ret void
+}
+define void @modify32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: modify32_lshr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    neg w9, w1
+; CHECK-NEXT:    lsr w8, w8, w9
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  ret void
+}
+
+define i64 @reg64_lshr_by_negated(i64 %val, i64 %shamt) nounwind {
+; CHECK-LABEL: reg64_lshr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    lsr x0, x0, x8
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 64, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define i64 @load64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: load64_lshr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    neg x9, x1
+; CHECK-NEXT:    lsr x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define void @store64_lshr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+; CHECK-LABEL: store64_lshr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x2
+; CHECK-NEXT:    lsr x8, x0, x8
+; CHECK-NEXT:    str x8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 64, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  store i64 %shifted, i64* %dstptr
+  ret void
+}
+define void @modify64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: modify64_lshr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    neg x9, x1
+; CHECK-NEXT:    lsr x8, x8, x9
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  ret void
+}
+
+; arithmetic shift right
+;------------------------------------------------------------------------------;
+
+define i32 @reg32_ashr_by_negated(i32 %val, i32 %shamt) nounwind {
+; CHECK-LABEL: reg32_ashr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w1
+; CHECK-NEXT:    asr w0, w0, w8
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 32, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define i32 @load32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: load32_ashr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    neg w9, w1
+; CHECK-NEXT:    asr w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define void @store32_ashr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+; CHECK-LABEL: store32_ashr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w8, w2
+; CHECK-NEXT:    asr w8, w0, w8
+; CHECK-NEXT:    str w8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 32, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  store i32 %shifted, i32* %dstptr
+  ret void
+}
+define void @modify32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: modify32_ashr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    neg w9, w1
+; CHECK-NEXT:    asr w8, w8, w9
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  ret void
+}
+
+define i64 @reg64_ashr_by_negated(i64 %val, i64 %shamt) nounwind {
+; CHECK-LABEL: reg64_ashr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x1
+; CHECK-NEXT:    asr x0, x0, x8
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 64, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define i64 @load64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: load64_ashr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    neg x9, x1
+; CHECK-NEXT:    asr x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define void @store64_ashr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+; CHECK-LABEL: store64_ashr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x8, x2
+; CHECK-NEXT:    asr x8, x0, x8
+; CHECK-NEXT:    str x8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 64, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  store i64 %shifted, i64* %dstptr
+  ret void
+}
+define void @modify64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: modify64_ashr_by_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    neg x9, x1
+; CHECK-NEXT:    asr x8, x8, x9
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  ret void
+}
+
+;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;
+; next, let's test only the simple reg pattern, and only lshr.
+;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;
+
+;==============================================================================;
+; subtraction from negated shift amount
+
+define i32 @reg32_lshr_by_sub_from_negated(i32 %val, i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: reg32_lshr_by_sub_from_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    sub w8, w8, w2
+; CHECK-NEXT:    lsr w0, w0, w8
+; CHECK-NEXT:    ret
+  %nega = sub i32 32, %a
+  %negasubb = sub i32 %nega, %b
+  %shifted = lshr i32 %val, %negasubb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_sub_from_negated(i64 %val, i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: reg64_lshr_by_sub_from_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #64
+; CHECK-NEXT:    sub x8, x8, x1
+; CHECK-NEXT:    sub x8, x8, x2
+; CHECK-NEXT:    lsr x0, x0, x8
+; CHECK-NEXT:    ret
+  %nega = sub i64 64, %a
+  %negasubb = sub i64 %nega, %b
+  %shifted = lshr i64 %val, %negasubb
+  ret i64 %shifted
+}
+
+;==============================================================================;
+; subtraction of negated shift amount
+
+define i32 @reg32_lshr_by_sub_of_negated(i32 %val, i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: reg32_lshr_by_sub_of_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w1, w2
+; CHECK-NEXT:    lsr w0, w0, w8
+; CHECK-NEXT:    ret
+  %nega = sub i32 32, %a
+  %negasubb = sub i32 %b, %nega
+  %shifted = lshr i32 %val, %negasubb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_sub_of_negated(i64 %val, i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: reg64_lshr_by_sub_of_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x1, x2
+; CHECK-NEXT:    lsr x0, x0, x8
+; CHECK-NEXT:    ret
+  %nega = sub i64 64, %a
+  %negasubb = sub i64 %b, %nega
+  %shifted = lshr i64 %val, %negasubb
+  ret i64 %shifted
+}
+
+;==============================================================================;
+; add to negated shift amount
+;
+
+define i32 @reg32_lshr_by_add_to_negated(i32 %val, i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: reg32_lshr_by_add_to_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    add w8, w8, w2
+; CHECK-NEXT:    lsr w0, w0, w8
+; CHECK-NEXT:    ret
+  %nega = sub i32 32, %a
+  %negasubb = add i32 %nega, %b
+  %shifted = lshr i32 %val, %negasubb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_add_to_negated(i64 %val, i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: reg64_lshr_by_add_to_negated:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #64
+; CHECK-NEXT:    sub x8, x8, x1
+; CHECK-NEXT:    add x8, x8, x2
+; CHECK-NEXT:    lsr x0, x0, x8
+; CHECK-NEXT:    ret
+  %nega = sub i64 64, %a
+  %negasubb = add i64 %nega, %b
+  %shifted = lshr i64 %val, %negasubb
+  ret i64 %shifted
+}
+
+;==============================================================================;
+; subtraction of negated shift amounts
+
+define i32 @reg32_lshr_by_sub_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: reg32_lshr_by_sub_of_negated_amts:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w2, w1
+; CHECK-NEXT:    lsr w0, w0, w8
+; CHECK-NEXT:    ret
+  %nega = sub i32 32, %a
+  %negb = sub i32 32, %b
+  %negasubnegb = sub i32 %nega, %negb
+  %shifted = lshr i32 %val, %negasubnegb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_sub_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: reg64_lshr_by_sub_of_negated_amts:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x2, x1
+; CHECK-NEXT:    lsr x0, x0, x8
+; CHECK-NEXT:    ret
+  %nega = sub i64 64, %a
+  %negb = sub i64 64, %b
+  %negasubnegb = sub i64 %nega, %negb
+  %shifted = lshr i64 %val, %negasubnegb
+  ret i64 %shifted
+}
+
+;==============================================================================;
+; addition of negated shift amounts
+
+define i32 @reg32_lshr_by_add_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: reg32_lshr_by_add_of_negated_amts:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w1, w2
+; CHECK-NEXT:    neg w8, w8
+; CHECK-NEXT:    lsr w0, w0, w8
+; CHECK-NEXT:    ret
+  %nega = sub i32 32, %a
+  %negb = sub i32 32, %b
+  %negasubnegb = add i32 %nega, %negb
+  %shifted = lshr i32 %val, %negasubnegb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_add_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: reg64_lshr_by_add_of_negated_amts:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x1, x2
+; CHECK-NEXT:    neg x8, x8
+; CHECK-NEXT:    lsr x0, x0, x8
+; CHECK-NEXT:    ret
+  %nega = sub i64 64, %a
+  %negb = sub i64 64, %b
+  %negasubnegb = add i64 %nega, %negb
+  %shifted = lshr i64 %val, %negasubnegb
+  ret i64 %shifted
+}

Added: llvm/trunk/test/CodeGen/X86/shift-amount-mod.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-amount-mod.ll?rev=361241&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-amount-mod.ll (added)
+++ llvm/trunk/test/CodeGen/X86/shift-amount-mod.ll Tue May 21 04:14:01 2019
@@ -0,0 +1,1048 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown   | FileCheck %s --check-prefixes=ALL,X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=ALL,X64
+
+;==============================================================================;
+; the shift amount is negated (shiftbitwidth - shiftamt)
+;==============================================================================;
+
+; shift left
+;------------------------------------------------------------------------------;
+
+define i32 @reg32_shl_by_negated(i32 %val, i32 %shamt) nounwind {
+; X32-LABEL: reg32_shl_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shll %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg32_shl_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shll %cl, %eax
+; X64-NEXT:    retq
+  %negshamt = sub i32 32, %shamt
+  %shifted = shl i32 %val, %negshamt
+  ret i32 %shifted
+}
+define i32 @load32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; X32-LABEL: load32_shl_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %eax
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shll %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: load32_shl_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shll %cl, %eax
+; X64-NEXT:    retq
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = shl i32 %val, %negshamt
+  ret i32 %shifted
+}
+define void @store32_shl_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+; X32-LABEL: store32_shl_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shll %cl, %edx
+; X32-NEXT:    movl %edx, (%eax)
+; X32-NEXT:    retl
+;
+; X64-LABEL: store32_shl_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shll %cl, %edi
+; X64-NEXT:    movl %edi, (%rsi)
+; X64-NEXT:    retq
+  %negshamt = sub i32 32, %shamt
+  %shifted = shl i32 %val, %negshamt
+  store i32 %shifted, i32* %dstptr
+  ret void
+}
+define void @modify32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; X32-LABEL: modify32_shl_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movb $32, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    shll %cl, (%eax)
+; X32-NEXT:    retl
+;
+; X64-LABEL: modify32_shl_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movb $32, %cl
+; X64-NEXT:    subb %sil, %cl
+; X64-NEXT:    shll %cl, (%rdi)
+; X64-NEXT:    retq
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = shl i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  ret void
+}
+
+define i64 @reg64_shl_by_negated(i64 %val, i64 %shamt) nounwind {
+; X32-LABEL: reg64_shl_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %esi, %eax
+; X32-NEXT:    shll %cl, %eax
+; X32-NEXT:    shldl %cl, %esi, %edx
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB4_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %eax, %edx
+; X32-NEXT:    xorl %eax, %eax
+; X32-NEXT:  .LBB4_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg64_shl_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rcx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    shlq %cl, %rax
+; X64-NEXT:    retq
+  %negshamt = sub i64 64, %shamt
+  %shifted = shl i64 %val, %negshamt
+  ret i64 %shifted
+}
+define i64 @load64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; X32-LABEL: load64_shl_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %esi
+; X32-NEXT:    movl 4(%eax), %edx
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %esi, %eax
+; X32-NEXT:    shll %cl, %eax
+; X32-NEXT:    shldl %cl, %esi, %edx
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB5_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %eax, %edx
+; X32-NEXT:    xorl %eax, %eax
+; X32-NEXT:  .LBB5_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: load64_shl_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rcx
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    shlq %cl, %rax
+; X64-NEXT:    retq
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = shl i64 %val, %negshamt
+  ret i64 %shifted
+}
+define void @store64_shl_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+; X32-LABEL: store64_shl_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %edi, %esi
+; X32-NEXT:    shll %cl, %esi
+; X32-NEXT:    shldl %cl, %edi, %edx
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB6_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    xorl %esi, %esi
+; X32-NEXT:  .LBB6_2:
+; X32-NEXT:    movl %edx, 4(%eax)
+; X32-NEXT:    movl %esi, (%eax)
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+;
+; X64-LABEL: store64_shl_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdx, %rcx
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    shlq %cl, %rdi
+; X64-NEXT:    movq %rdi, (%rsi)
+; X64-NEXT:    retq
+  %negshamt = sub i64 64, %shamt
+  %shifted = shl i64 %val, %negshamt
+  store i64 %shifted, i64* %dstptr
+  ret void
+}
+define void @modify64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; X32-LABEL: modify64_shl_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %edi
+; X32-NEXT:    movl 4(%eax), %edx
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %edi, %esi
+; X32-NEXT:    shll %cl, %esi
+; X32-NEXT:    shldl %cl, %edi, %edx
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB7_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    xorl %esi, %esi
+; X32-NEXT:  .LBB7_2:
+; X32-NEXT:    movl %esi, (%eax)
+; X32-NEXT:    movl %edx, 4(%eax)
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+;
+; X64-LABEL: modify64_shl_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movb $64, %cl
+; X64-NEXT:    subb %sil, %cl
+; X64-NEXT:    shlq %cl, (%rdi)
+; X64-NEXT:    retq
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = shl i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  ret void
+}
+
+; logical shift right
+;------------------------------------------------------------------------------;
+
+define i32 @reg32_lshr_by_negated(i32 %val, i32 %shamt) nounwind {
+; X32-LABEL: reg32_lshr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shrl %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg32_lshr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %eax
+; X64-NEXT:    retq
+  %negshamt = sub i32 32, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define i32 @load32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; X32-LABEL: load32_lshr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %eax
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shrl %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: load32_lshr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %eax
+; X64-NEXT:    retq
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define void @store32_lshr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+; X32-LABEL: store32_lshr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shrl %cl, %edx
+; X32-NEXT:    movl %edx, (%eax)
+; X32-NEXT:    retl
+;
+; X64-LABEL: store32_lshr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %edi
+; X64-NEXT:    movl %edi, (%rsi)
+; X64-NEXT:    retq
+  %negshamt = sub i32 32, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  store i32 %shifted, i32* %dstptr
+  ret void
+}
+define void @modify32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; X32-LABEL: modify32_lshr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movb $32, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    shrl %cl, (%eax)
+; X32-NEXT:    retl
+;
+; X64-LABEL: modify32_lshr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movb $32, %cl
+; X64-NEXT:    subb %sil, %cl
+; X64-NEXT:    shrl %cl, (%rdi)
+; X64-NEXT:    retq
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  ret void
+}
+
+define i64 @reg64_lshr_by_negated(i64 %val, i64 %shamt) nounwind {
+; X32-LABEL: reg64_lshr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    shrl %cl, %edx
+; X32-NEXT:    shrdl %cl, %esi, %eax
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB12_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:  .LBB12_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg64_lshr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rcx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    shrq %cl, %rax
+; X64-NEXT:    retq
+  %negshamt = sub i64 64, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define i64 @load64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; X32-LABEL: load64_lshr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl (%ecx), %eax
+; X32-NEXT:    movl 4(%ecx), %esi
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    shrl %cl, %edx
+; X32-NEXT:    shrdl %cl, %esi, %eax
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB13_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:  .LBB13_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: load64_lshr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rcx
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    shrq %cl, %rax
+; X64-NEXT:    retq
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define void @store64_lshr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+; X32-LABEL: store64_lshr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %edi, %esi
+; X32-NEXT:    shrl %cl, %esi
+; X32-NEXT:    shrdl %cl, %edi, %edx
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB14_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    xorl %esi, %esi
+; X32-NEXT:  .LBB14_2:
+; X32-NEXT:    movl %esi, 4(%eax)
+; X32-NEXT:    movl %edx, (%eax)
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+;
+; X64-LABEL: store64_lshr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdx, %rcx
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    shrq %cl, %rdi
+; X64-NEXT:    movq %rdi, (%rsi)
+; X64-NEXT:    retq
+  %negshamt = sub i64 64, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  store i64 %shifted, i64* %dstptr
+  ret void
+}
+define void @modify64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; X32-LABEL: modify64_lshr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %edx
+; X32-NEXT:    movl 4(%eax), %edi
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %edi, %esi
+; X32-NEXT:    shrl %cl, %esi
+; X32-NEXT:    shrdl %cl, %edi, %edx
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB15_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    xorl %esi, %esi
+; X32-NEXT:  .LBB15_2:
+; X32-NEXT:    movl %edx, (%eax)
+; X32-NEXT:    movl %esi, 4(%eax)
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+;
+; X64-LABEL: modify64_lshr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movb $64, %cl
+; X64-NEXT:    subb %sil, %cl
+; X64-NEXT:    shrq %cl, (%rdi)
+; X64-NEXT:    retq
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  ret void
+}
+
+; arithmetic shift right
+;------------------------------------------------------------------------------;
+
+define i32 @reg32_ashr_by_negated(i32 %val, i32 %shamt) nounwind {
+; X32-LABEL: reg32_ashr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    sarl %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg32_ashr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    sarl %cl, %eax
+; X64-NEXT:    retq
+  %negshamt = sub i32 32, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define i32 @load32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; X32-LABEL: load32_ashr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %eax
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    sarl %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: load32_ashr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    sarl %cl, %eax
+; X64-NEXT:    retq
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define void @store32_ashr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+; X32-LABEL: store32_ashr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    sarl %cl, %edx
+; X32-NEXT:    movl %edx, (%eax)
+; X32-NEXT:    retl
+;
+; X64-LABEL: store32_ashr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    sarl %cl, %edi
+; X64-NEXT:    movl %edi, (%rsi)
+; X64-NEXT:    retq
+  %negshamt = sub i32 32, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  store i32 %shifted, i32* %dstptr
+  ret void
+}
+define void @modify32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+; X32-LABEL: modify32_ashr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movb $32, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    sarl %cl, (%eax)
+; X32-NEXT:    retl
+;
+; X64-LABEL: modify32_ashr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movb $32, %cl
+; X64-NEXT:    subb %sil, %cl
+; X64-NEXT:    sarl %cl, (%rdi)
+; X64-NEXT:    retq
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 32, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  ret void
+}
+
+define i64 @reg64_ashr_by_negated(i64 %val, i64 %shamt) nounwind {
+; X32-LABEL: reg64_ashr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    sarl %cl, %edx
+; X32-NEXT:    shrdl %cl, %esi, %eax
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB20_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    sarl $31, %esi
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:  .LBB20_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg64_ashr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rcx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    sarq %cl, %rax
+; X64-NEXT:    retq
+  %negshamt = sub i64 64, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define i64 @load64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; X32-LABEL: load64_ashr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl (%ecx), %eax
+; X32-NEXT:    movl 4(%ecx), %esi
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    sarl %cl, %edx
+; X32-NEXT:    shrdl %cl, %esi, %eax
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB21_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    sarl $31, %esi
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:  .LBB21_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: load64_ashr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rcx
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    sarq %cl, %rax
+; X64-NEXT:    retq
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define void @store64_ashr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+; X32-LABEL: store64_ashr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %edi, %esi
+; X32-NEXT:    sarl %cl, %esi
+; X32-NEXT:    shrdl %cl, %edi, %edx
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB22_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    sarl $31, %edi
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    movl %edi, %esi
+; X32-NEXT:  .LBB22_2:
+; X32-NEXT:    movl %esi, 4(%eax)
+; X32-NEXT:    movl %edx, (%eax)
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+;
+; X64-LABEL: store64_ashr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdx, %rcx
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    sarq %cl, %rdi
+; X64-NEXT:    movq %rdi, (%rsi)
+; X64-NEXT:    retq
+  %negshamt = sub i64 64, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  store i64 %shifted, i64* %dstptr
+  ret void
+}
+define void @modify64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+; X32-LABEL: modify64_ashr_by_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %edx
+; X32-NEXT:    movl 4(%eax), %edi
+; X32-NEXT:    movb $64, %cl
+; X32-NEXT:    subb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl %edi, %esi
+; X32-NEXT:    sarl %cl, %esi
+; X32-NEXT:    shrdl %cl, %edi, %edx
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB23_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    sarl $31, %edi
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    movl %edi, %esi
+; X32-NEXT:  .LBB23_2:
+; X32-NEXT:    movl %edx, (%eax)
+; X32-NEXT:    movl %esi, 4(%eax)
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+;
+; X64-LABEL: modify64_ashr_by_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movb $64, %cl
+; X64-NEXT:    subb %sil, %cl
+; X64-NEXT:    sarq %cl, (%rdi)
+; X64-NEXT:    retq
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 64, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  ret void
+}
+
+;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;
+; next, let's test only the simple reg pattern, and only lshr.
+;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;
+
+;==============================================================================;
+; subtraction from negated shift amount
+
+define i32 @reg32_lshr_by_sub_from_negated(i32 %val, i32 %a, i32 %b) nounwind {
+; X32-LABEL: reg32_lshr_by_sub_from_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl $32, %ecx
+; X32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shrl %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg32_lshr_by_sub_from_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl $32, %ecx
+; X64-NEXT:    subl %esi, %ecx
+; X64-NEXT:    subl %edx, %ecx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %eax
+; X64-NEXT:    retq
+  %nega = sub i32 32, %a
+  %negasubb = sub i32 %nega, %b
+  %shifted = lshr i32 %val, %negasubb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_sub_from_negated(i64 %val, i64 %a, i64 %b) nounwind {
+; X32-LABEL: reg64_lshr_by_sub_from_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movl $64, %ecx
+; X32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    shrl %cl, %edx
+; X32-NEXT:    shrdl %cl, %esi, %eax
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB25_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:  .LBB25_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg64_lshr_by_sub_from_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movl $64, %ecx
+; X64-NEXT:    subl %esi, %ecx
+; X64-NEXT:    subl %edx, %ecx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrq %cl, %rax
+; X64-NEXT:    retq
+  %nega = sub i64 64, %a
+  %negasubb = sub i64 %nega, %b
+  %shifted = lshr i64 %val, %negasubb
+  ret i64 %shifted
+}
+
+;==============================================================================;
+; subtraction of negated shift amount
+
+define i32 @reg32_lshr_by_sub_of_negated(i32 %val, i32 %a, i32 %b) nounwind {
+; X32-LABEL: reg32_lshr_by_sub_of_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shrl %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg32_lshr_by_sub_of_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-NEXT:    # kill: def $esi killed $esi def $rsi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    leal (%rsi,%rdx), %ecx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %eax
+; X64-NEXT:    retq
+  %nega = sub i32 32, %a
+  %negasubb = sub i32 %b, %nega
+  %shifted = lshr i32 %val, %negasubb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_sub_of_negated(i64 %val, i64 %a, i64 %b) nounwind {
+; X32-LABEL: reg64_lshr_by_sub_of_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    addb $-64, %cl
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    shrl %cl, %edx
+; X32-NEXT:    shrdl %cl, %esi, %eax
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB27_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:  .LBB27_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg64_lshr_by_sub_of_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    leal (%rdx,%rsi), %ecx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrq %cl, %rax
+; X64-NEXT:    retq
+  %nega = sub i64 64, %a
+  %negasubb = sub i64 %b, %nega
+  %shifted = lshr i64 %val, %negasubb
+  ret i64 %shifted
+}
+
+;==============================================================================;
+; add to negated shift amount
+;
+
+define i32 @reg32_lshr_by_add_to_negated(i32 %val, i32 %a, i32 %b) nounwind {
+; X32-LABEL: reg32_lshr_by_add_to_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl $32, %ecx
+; X32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shrl %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg32_lshr_by_add_to_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl $32, %ecx
+; X64-NEXT:    subl %esi, %ecx
+; X64-NEXT:    addl %edx, %ecx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %eax
+; X64-NEXT:    retq
+  %nega = sub i32 32, %a
+  %negasubb = add i32 %nega, %b
+  %shifted = lshr i32 %val, %negasubb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_add_to_negated(i64 %val, i64 %a, i64 %b) nounwind {
+; X32-LABEL: reg64_lshr_by_add_to_negated:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movl $64, %ecx
+; X32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    shrl %cl, %edx
+; X32-NEXT:    shrdl %cl, %esi, %eax
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB29_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:  .LBB29_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg64_lshr_by_add_to_negated:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movl $64, %ecx
+; X64-NEXT:    subl %esi, %ecx
+; X64-NEXT:    addl %edx, %ecx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrq %cl, %rax
+; X64-NEXT:    retq
+  %nega = sub i64 64, %a
+  %negasubb = add i64 %nega, %b
+  %shifted = lshr i64 %val, %negasubb
+  ret i64 %shifted
+}
+
+;==============================================================================;
+; subtraction of negated shift amounts
+
+define i32 @reg32_lshr_by_sub_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
+; X32-LABEL: reg32_lshr_by_sub_of_negated_amts:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shrl %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg32_lshr_by_sub_of_negated_amts:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    subl %esi, %ecx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %eax
+; X64-NEXT:    retq
+  %nega = sub i32 32, %a
+  %negb = sub i32 32, %b
+  %negasubnegb = sub i32 %nega, %negb
+  %shifted = lshr i32 %val, %negasubnegb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_sub_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
+; X32-LABEL: reg64_lshr_by_sub_of_negated_amts:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    shrl %cl, %edx
+; X32-NEXT:    shrdl %cl, %esi, %eax
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB31_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:  .LBB31_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg64_lshr_by_sub_of_negated_amts:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdx, %rcx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    subl %esi, %ecx
+; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
+; X64-NEXT:    shrq %cl, %rax
+; X64-NEXT:    retq
+  %nega = sub i64 64, %a
+  %negb = sub i64 64, %b
+  %negasubnegb = sub i64 %nega, %negb
+  %shifted = lshr i64 %val, %negasubnegb
+  ret i64 %shifted
+}
+
+;==============================================================================;
+; addition of negated shift amounts
+
+define i32 @reg32_lshr_by_add_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
+; X32-LABEL: reg32_lshr_by_add_of_negated_amts:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    negb %cl
+; X32-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X32-NEXT:    shrl %cl, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg32_lshr_by_add_of_negated_amts:
+; X64:       # %bb.0:
+; X64-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-NEXT:    # kill: def $esi killed $esi def $rsi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    leal (%rsi,%rdx), %ecx
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrl %cl, %eax
+; X64-NEXT:    retq
+  %nega = sub i32 32, %a
+  %negb = sub i32 32, %b
+  %negasubnegb = add i32 %nega, %negb
+  %shifted = lshr i32 %val, %negasubnegb
+  ret i32 %shifted
+}
+define i64 @reg64_lshr_by_add_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
+; X32-LABEL: reg64_lshr_by_add_of_negated_amts:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movb $-128, %cl
+; X32-NEXT:    subb %dl, %cl
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:    shrl %cl, %edx
+; X32-NEXT:    shrdl %cl, %esi, %eax
+; X32-NEXT:    testb $32, %cl
+; X32-NEXT:    je .LBB33_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:  .LBB33_2:
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: reg64_lshr_by_add_of_negated_amts:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    leal (%rdx,%rsi), %ecx
+; X64-NEXT:    negb %cl
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shrq %cl, %rax
+; X64-NEXT:    retq
+  %nega = sub i64 64, %a
+  %negb = sub i64 64, %b
+  %negasubnegb = add i64 %nega, %negb
+  %shifted = lshr i64 %val, %negasubnegb
+  ret i64 %shifted
+}
