[llvm] 1c4cb51 - [SystemZ] Don't use libcall for 128 bit shifts.

Jonas Paulsson via llvm-commits llvm-commits at lists.llvm.org
Thu May 6 09:18:14 PDT 2021


Author: Jonas Paulsson
Date: 2021-05-06T18:14:41+02:00
New Revision: 1c4cb510b4daccc0f4763958567affc2b442f317

URL: https://github.com/llvm/llvm-project/commit/1c4cb510b4daccc0f4763958567affc2b442f317
DIFF: https://github.com/llvm/llvm-project/commit/1c4cb510b4daccc0f4763958567affc2b442f317.diff

LOG: [SystemZ] Don't use libcall for 128 bit shifts.

Expand 128 bit shifts instead of using a libcall.

This patch removes the 128 bit shift libcalls and thereby causes
ExpandShiftWithUnknownAmountBit() to be called.

Review: Ulrich Weigand

Differential Revision: https://reviews.llvm.org/D101993

Added: 
    

Modified: 
    llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
    llvm/test/CodeGen/SystemZ/shift-12.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 59c747d060c1c..5c1f3dbc9afec 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -285,10 +285,13 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
   // Give LowerOperation the chance to replace 64-bit ORs with subregs.
   setOperationAction(ISD::OR, MVT::i64, Custom);
 
-  // FIXME: Can we support these natively?
+  // Expand 128 bit shifts without using a libcall.
   setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
   setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
   setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
+  setLibcallName(RTLIB::SRL_I128, nullptr);
+  setLibcallName(RTLIB::SHL_I128, nullptr);
+  setLibcallName(RTLIB::SRA_I128, nullptr);
 
   // We have native instructions for i8, i16 and i32 extensions, but not i1.
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

diff --git a/llvm/test/CodeGen/SystemZ/shift-12.ll b/llvm/test/CodeGen/SystemZ/shift-12.ll
index 7559602aa2565..421928f286985 100644
--- a/llvm/test/CodeGen/SystemZ/shift-12.ll
+++ b/llvm/test/CodeGen/SystemZ/shift-12.ll
@@ -2,7 +2,7 @@
 ; Test removal of AND operations that don't affect last 6 bits of shift amount
 ; operand.
 ;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
 
 ; Test that AND is not removed when some lower 6 bits are not set.
 define i32 @f1(i32 %a, i32 %sh) {
@@ -119,31 +119,28 @@ define i32 @f10(i32 %a, i32 %sh) {
   ret i32 %reuse
 }
 
-; Test that AND is not removed for i128 (which calls __ashlti3)
 define i128 @f11(i128 %a, i32 %sh) {
 ; CHECK-LABEL: f11:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    stmg %r13, %r15, 104(%r15)
-; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    stmg %r14, %r15, 112(%r15)
 ; CHECK-NEXT:    .cfi_offset %r14, -48
 ; CHECK-NEXT:    .cfi_offset %r15, -40
-; CHECK-NEXT:    aghi %r15, -192
-; CHECK-NEXT:    .cfi_def_cfa_offset 352
 ; CHECK-NEXT:    lg %r0, 8(%r3)
-; CHECK-NEXT:    # kill: def $r4l killed $r4l def $r4d
-; CHECK-NEXT:    lgr %r13, %r2
 ; CHECK-NEXT:    lg %r1, 0(%r3)
-; CHECK-NEXT:    stg %r0, 168(%r15)
-; CHECK-NEXT:    risbg %r4, %r4, 57, 191, 0
-; CHECK-NEXT:    la %r2, 176(%r15)
-; CHECK-NEXT:    la %r3, 160(%r15)
-; CHECK-NEXT:    stg %r1, 160(%r15)
-; CHECK-NEXT:    brasl %r14, __ashlti3@PLT
-; CHECK-NEXT:    lg %r0, 184(%r15)
-; CHECK-NEXT:    lg %r1, 176(%r15)
-; CHECK-NEXT:    stg %r0, 8(%r13)
-; CHECK-NEXT:    stg %r1, 0(%r13)
-; CHECK-NEXT:    lmg %r13, %r15, 296(%r15)
+; CHECK-NEXT:    risblg %r3, %r4, 25, 159, 0
+; CHECK-NEXT:    lcr %r14, %r3
+; CHECK-NEXT:    sllg %r5, %r1, 0(%r4)
+; CHECK-NEXT:    srlg %r14, %r0, 0(%r14)
+; CHECK-NEXT:    ogr %r5, %r14
+; CHECK-NEXT:    sllg %r3, %r0, -64(%r3)
+; CHECK-NEXT:    tmll %r4, 127
+; CHECK-NEXT:    locgrle %r3, %r5
+; CHECK-NEXT:    sllg %r0, %r0, 0(%r4)
+; CHECK-NEXT:    locgre %r3, %r1
+; CHECK-NEXT:    locghinle %r0, 0
+; CHECK-NEXT:    stg %r0, 8(%r2)
+; CHECK-NEXT:    stg %r3, 0(%r2)
+; CHECK-NEXT:    lmg %r14, %r15, 112(%r15)
 ; CHECK-NEXT:    br %r14
   %and = and i32 %sh, 127
   %ext = zext i32 %and to i128
@@ -151,3 +148,62 @@ define i128 @f11(i128 %a, i32 %sh) {
   ret i128 %shift
 }
 
+define i128 @f12(i128 %a, i32 %sh) {
+; CHECK-LABEL: f12:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r14, %r15, 112(%r15)
+; CHECK-NEXT:    .cfi_offset %r14, -48
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 0(%r3)
+; CHECK-NEXT:    lg %r1, 8(%r3)
+; CHECK-NEXT:    risblg %r3, %r4, 25, 159, 0
+; CHECK-NEXT:    lcr %r14, %r3
+; CHECK-NEXT:    srlg %r5, %r1, 0(%r4)
+; CHECK-NEXT:    sllg %r14, %r0, 0(%r14)
+; CHECK-NEXT:    ogr %r5, %r14
+; CHECK-NEXT:    srlg %r3, %r0, -64(%r3)
+; CHECK-NEXT:    tmll %r4, 127
+; CHECK-NEXT:    locgrle %r3, %r5
+; CHECK-NEXT:    srlg %r0, %r0, 0(%r4)
+; CHECK-NEXT:    locgre %r3, %r1
+; CHECK-NEXT:    locghinle %r0, 0
+; CHECK-NEXT:    stg %r0, 0(%r2)
+; CHECK-NEXT:    stg %r3, 8(%r2)
+; CHECK-NEXT:    lmg %r14, %r15, 112(%r15)
+; CHECK-NEXT:    br %r14
+  %and = and i32 %sh, 127
+  %ext = zext i32 %and to i128
+  %shift = lshr i128 %a, %ext
+  ret i128 %shift
+}
+
+define i128 @f13(i128 %a, i32 %sh) {
+; CHECK-LABEL: f13:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r14, %r15, 112(%r15)
+; CHECK-NEXT:    .cfi_offset %r14, -48
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 0(%r3)
+; CHECK-NEXT:    lg %r1, 8(%r3)
+; CHECK-NEXT:    risblg %r3, %r4, 25, 159, 0
+; CHECK-NEXT:    lcr %r14, %r3
+; CHECK-NEXT:    srlg %r5, %r1, 0(%r4)
+; CHECK-NEXT:    sllg %r14, %r0, 0(%r14)
+; CHECK-NEXT:    ogr %r5, %r14
+; CHECK-NEXT:    srag %r14, %r0, 0(%r4)
+; CHECK-NEXT:    srag %r3, %r0, -64(%r3)
+; CHECK-NEXT:    srag %r0, %r0, 63
+; CHECK-NEXT:    tmll %r4, 127
+; CHECK-NEXT:    locgrle %r3, %r5
+; CHECK-NEXT:    locgre %r3, %r1
+; CHECK-NEXT:    locgrle %r0, %r14
+; CHECK-NEXT:    stg %r0, 0(%r2)
+; CHECK-NEXT:    stg %r3, 8(%r2)
+; CHECK-NEXT:    lmg %r14, %r15, 112(%r15)
+; CHECK-NEXT:    br %r14
+  %and = and i32 %sh, 127
+  %ext = zext i32 %and to i128
+  %shift = ashr i128 %a, %ext
+  ret i128 %shift
+}
+


        


More information about the llvm-commits mailing list