[llvm] 9a05547 - [AArch64] Precommit tests for D77316

Jay Foad via llvm-commits llvm-commits at lists.llvm.org
Sat May 16 09:10:32 PDT 2020


Author: Jay Foad
Date: 2020-05-16T16:00:02+01:00
New Revision: 9a05547954a571cbe118d9cc9c0cb8a849afa1fd

URL: https://github.com/llvm/llvm-project/commit/9a05547954a571cbe118d9cc9c0cb8a849afa1fd
DIFF: https://github.com/llvm/llvm-project/commit/9a05547954a571cbe118d9cc9c0cb8a849afa1fd.diff

LOG: [AArch64] Precommit tests for D77316

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/shift-amount-mod.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/shift-amount-mod.ll b/llvm/test/CodeGen/AArch64/shift-amount-mod.ll
index 403839044cdf..966f3c6e7aab 100644
--- a/llvm/test/CodeGen/AArch64/shift-amount-mod.ll
+++ b/llvm/test/CodeGen/AArch64/shift-amount-mod.ll
@@ -308,6 +308,337 @@ define void @modify64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
   ret void
 }
 
+;==============================================================================;
+; the shift amount is complemented (shiftbitwidth - 1 - shiftamt)
+;==============================================================================;
+
+; shift left
+;------------------------------------------------------------------------------;
+
+define i32 @reg32_shl_by_complemented(i32 %val, i32 %shamt) nounwind {
+; CHECK-LABEL: reg32_shl_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    lsl w0, w0, w8
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 31, %shamt
+  %shifted = shl i32 %val, %negshamt
+  ret i32 %shifted
+}
+define i32 @load32_shl_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: load32_shl_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov w9, #31
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    lsl w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 31, %shamt
+  %shifted = shl i32 %val, %negshamt
+  ret i32 %shifted
+}
+define void @store32_shl_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+; CHECK-LABEL: store32_shl_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    sub w8, w8, w2
+; CHECK-NEXT:    lsl w8, w0, w8
+; CHECK-NEXT:    str w8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 31, %shamt
+  %shifted = shl i32 %val, %negshamt
+  store i32 %shifted, i32* %dstptr
+  ret void
+}
+define void @modify32_shl_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: modify32_shl_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov w9, #31
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    lsl w8, w8, w9
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 31, %shamt
+  %shifted = shl i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  ret void
+}
+
+define i64 @reg64_shl_by_complemented(i64 %val, i64 %shamt) nounwind {
+; CHECK-LABEL: reg64_shl_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    sub x8, x8, x1
+; CHECK-NEXT:    lsl x0, x0, x8
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 63, %shamt
+  %shifted = shl i64 %val, %negshamt
+  ret i64 %shifted
+}
+define i64 @load64_shl_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: load64_shl_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #63
+; CHECK-NEXT:    sub x9, x9, x1
+; CHECK-NEXT:    lsl x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 63, %shamt
+  %shifted = shl i64 %val, %negshamt
+  ret i64 %shifted
+}
+define void @store64_shl_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+; CHECK-LABEL: store64_shl_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    sub x8, x8, x2
+; CHECK-NEXT:    lsl x8, x0, x8
+; CHECK-NEXT:    str x8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 63, %shamt
+  %shifted = shl i64 %val, %negshamt
+  store i64 %shifted, i64* %dstptr
+  ret void
+}
+define void @modify64_shl_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: modify64_shl_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #63
+; CHECK-NEXT:    sub x9, x9, x1
+; CHECK-NEXT:    lsl x8, x8, x9
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 63, %shamt
+  %shifted = shl i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  ret void
+}
+
+; logical shift right
+;------------------------------------------------------------------------------;
+
+define i32 @reg32_lshr_by_complemented(i32 %val, i32 %shamt) nounwind {
+; CHECK-LABEL: reg32_lshr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    lsr w0, w0, w8
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 31, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define i32 @load32_lshr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: load32_lshr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov w9, #31
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    lsr w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 31, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define void @store32_lshr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+; CHECK-LABEL: store32_lshr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    sub w8, w8, w2
+; CHECK-NEXT:    lsr w8, w0, w8
+; CHECK-NEXT:    str w8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 31, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  store i32 %shifted, i32* %dstptr
+  ret void
+}
+define void @modify32_lshr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: modify32_lshr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov w9, #31
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    lsr w8, w8, w9
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 31, %shamt
+  %shifted = lshr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  ret void
+}
+
+define i64 @reg64_lshr_by_complemented(i64 %val, i64 %shamt) nounwind {
+; CHECK-LABEL: reg64_lshr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    sub x8, x8, x1
+; CHECK-NEXT:    lsr x0, x0, x8
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 63, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define i64 @load64_lshr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: load64_lshr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #63
+; CHECK-NEXT:    sub x9, x9, x1
+; CHECK-NEXT:    lsr x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 63, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define void @store64_lshr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+; CHECK-LABEL: store64_lshr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    sub x8, x8, x2
+; CHECK-NEXT:    lsr x8, x0, x8
+; CHECK-NEXT:    str x8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 63, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  store i64 %shifted, i64* %dstptr
+  ret void
+}
+define void @modify64_lshr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: modify64_lshr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #63
+; CHECK-NEXT:    sub x9, x9, x1
+; CHECK-NEXT:    lsr x8, x8, x9
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 63, %shamt
+  %shifted = lshr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  ret void
+}
+
+; arithmetic shift right
+;------------------------------------------------------------------------------;
+
+define i32 @reg32_ashr_by_complemented(i32 %val, i32 %shamt) nounwind {
+; CHECK-LABEL: reg32_ashr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    asr w0, w0, w8
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 31, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define i32 @load32_ashr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: load32_ashr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov w9, #31
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    asr w0, w8, w9
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 31, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  ret i32 %shifted
+}
+define void @store32_ashr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+; CHECK-LABEL: store32_ashr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    sub w8, w8, w2
+; CHECK-NEXT:    asr w8, w0, w8
+; CHECK-NEXT:    str w8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i32 31, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  store i32 %shifted, i32* %dstptr
+  ret void
+}
+define void @modify32_ashr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+; CHECK-LABEL: modify32_ashr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov w9, #31
+; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    asr w8, w8, w9
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i32, i32* %valptr
+  %negshamt = sub i32 31, %shamt
+  %shifted = ashr i32 %val, %negshamt
+  store i32 %shifted, i32* %valptr
+  ret void
+}
+
+define i64 @reg64_ashr_by_complemented(i64 %val, i64 %shamt) nounwind {
+; CHECK-LABEL: reg64_ashr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    sub x8, x8, x1
+; CHECK-NEXT:    asr x0, x0, x8
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 63, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define i64 @load64_ashr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: load64_ashr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #63
+; CHECK-NEXT:    sub x9, x9, x1
+; CHECK-NEXT:    asr x0, x8, x9
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 63, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  ret i64 %shifted
+}
+define void @store64_ashr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+; CHECK-LABEL: store64_ashr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    sub x8, x8, x2
+; CHECK-NEXT:    asr x8, x0, x8
+; CHECK-NEXT:    str x8, [x1]
+; CHECK-NEXT:    ret
+  %negshamt = sub i64 63, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  store i64 %shifted, i64* %dstptr
+  ret void
+}
+define void @modify64_ashr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+; CHECK-LABEL: modify64_ashr_by_complemented:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov w9, #63
+; CHECK-NEXT:    sub x9, x9, x1
+; CHECK-NEXT:    asr x8, x8, x9
+; CHECK-NEXT:    str x8, [x0]
+; CHECK-NEXT:    ret
+  %val = load i64, i64* %valptr
+  %negshamt = sub i64 63, %shamt
+  %shifted = ashr i64 %val, %negshamt
+  store i64 %shifted, i64* %valptr
+  ret void
+}
+
 ;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;
 ; next let's only test simple reg pattern, and only lshr.
 ;||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||;


        


More information about the llvm-commits mailing list