[llvm] r357481 - [SystemZ] Improve instruction selection of 64-bit shifts and rotates.

Jonas Paulsson via llvm-commits <llvm-commits at lists.llvm.org>
Tue Apr 2 08:36:30 PDT 2019


Author: jonpa
Date: Tue Apr  2 08:36:30 2019
New Revision: 357481

URL: http://llvm.org/viewvc/llvm-project?rev=357481&view=rev
Log:
[SystemZ] Improve instruction selection of 64-bit shifts and rotates.

For shift and rotate instructions that use only the last 6 bits of the shift
amount, a shift amount of (x*64 - s) can be replaced with (-s). This saves
one instruction and a register:

  lhi     %r1, 64
  sr      %r1, %r3
  sllg    %r2, %r2, 0(%r1)
  =>
  lcr     %r1, %r3
  sllg    %r2, %r2, 0(%r1)
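
The substitution is sound because the hardware reads only the low 6 bits of
the shift-amount register, and x*64 is congruent to 0 modulo 64. A minimal
standalone C check of that equivalence (an illustration, not part of the
commit):

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    /* Any multiple of 64 is congruent to 0 mod 64, so the low 6 bits of
       (k*64 - s) equal the low 6 bits of (-s) -- and the low 6 bits are
       all that SLLG/SRAG/SRLG/RLLG read from the shift-amount register. */
    for (uint32_t k = 1; k <= 16; k++)
      for (uint32_t s = 0; s < 256; s++)
        assert(((k * 64 - s) & 63) == ((0u - s) & 63));
    return 0;
  }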

Review: Ulrich Weigand

Added:
    llvm/trunk/test/CodeGen/SystemZ/rot-shift-64-sub-amt.ll
Modified:
    llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td
    llvm/trunk/lib/Target/SystemZ/SystemZOperators.td

Modified: llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td?rev=357481&r1=357480&r2=357481&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td Tue Apr  2 08:36:30 2019
@@ -2182,6 +2182,22 @@ let AddedComplexity = 4 in {
             (RLLG GR64:$val, (NILL GR32:$shift, imm32zx16trunc:$imm), 0)>;
 }
 
+// Substitute (x*64-s) with (-s), since shift/rotate instructions only
+// use the last 6 bits of the second operand register (making it modulo 64).
+let AddedComplexity = 4 in {
+  def : Pat<(shl GR64:$val, (sub imm32mod64,  GR32:$shift)),
+            (SLLG GR64:$val, (LCR GR32:$shift), 0)>;
+
+  def : Pat<(sra GR64:$val, (sub imm32mod64,  GR32:$shift)),
+            (SRAG GR64:$val, (LCR GR32:$shift), 0)>;
+
+  def : Pat<(srl GR64:$val, (sub imm32mod64,  GR32:$shift)),
+            (SRLG GR64:$val, (LCR GR32:$shift), 0)>;
+
+  def : Pat<(rotl GR64:$val, (sub imm32mod64,  GR32:$shift)),
+            (RLLG GR64:$val, (LCR GR32:$shift), 0)>;
+}
+
 // Peepholes for turning scalar operations into block operations.
 defm : BlockLoadStore<anyextloadi8, i32, MVCSequence, NCSequence, OCSequence,
                       XCSequence, 1>;

Modified: llvm/trunk/lib/Target/SystemZ/SystemZOperators.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZOperators.td?rev=357481&r1=357480&r2=357481&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZOperators.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZOperators.td Tue Apr  2 08:36:30 2019
@@ -708,6 +708,10 @@ class shiftop<SDPatternOperator operator
              [(operator node:$val, node:$count),
               (operator node:$val, (and node:$count, imm32bottom6set))]>;
 
+def imm32mod64  : PatLeaf<(i32 imm), [{
+  return (N->getZExtValue() % 64 == 0);
+}]>;
+
 // Load a scalar and replicate it in all elements of a vector.
 class z_replicate_load<ValueType scalartype, SDPatternOperator load>
   : PatFrag<(ops node:$addr),

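The new imm32mod64 PatLeaf admits any i32 immediate whose zero-extended value
is a multiple of 64 (64, 128, 192, ...). Conceptually the predicate reduces
to the following check (a C sketch of the C++ predicate body above;
isImm32Mod64 is an illustrative name, not from the commit):

  #include <stdbool.h>
  #include <stdint.h>

  /* True exactly when the 32-bit immediate, zero-extended, is a
     multiple of 64 -- the condition the PatLeaf predicate tests. */
  static bool isImm32Mod64(uint64_t zextValue) {
    return zextValue % 64 == 0;
  }
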
Added: llvm/trunk/test/CodeGen/SystemZ/rot-shift-64-sub-amt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/rot-shift-64-sub-amt.ll?rev=357481&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/rot-shift-64-sub-amt.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/rot-shift-64-sub-amt.ll Tue Apr  2 08:36:30 2019
@@ -0,0 +1,82 @@
+; Test that the case of (64 - shift) used by a shift/rotate instruction is
+; implemented with an lcr. This should also work for any multiple of 64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+define i64 @f1(i64 %in, i64 %sh) {
+; CHECK-LABEL: f1:
+; CHECK: lcr %r1, %r3
+; CHECK: sllg %r2, %r2, 0(%r1)
+  %sub = sub i64 64, %sh
+  %shl = shl i64 %in, %sub
+  ret i64 %shl
+}
+
+define i64 @f2(i64 %in, i64 %sh) {
+; CHECK-LABEL: f2:
+; CHECK: lcr %r1, %r3
+; CHECK: srag %r2, %r2, 0(%r1)
+  %sub = sub i64 64, %sh
+  %shl = ashr i64 %in, %sub
+  ret i64 %shl
+}
+
+define i64 @f3(i64 %in, i64 %sh) {
+; CHECK-LABEL: f3:
+; CHECK: lcr %r1, %r3
+; CHECK: srlg %r2, %r2, 0(%r1)
+  %sub = sub i64 64, %sh
+  %shl = lshr i64 %in, %sub
+  ret i64 %shl
+}
+
+define i64 @f4(i64 %in, i64 %sh) {
+; CHECK-LABEL: f4:
+; CHECK: lcr %r1, %r3
+; CHECK: rllg %r2, %r2, 0(%r1)
+  %shr = lshr i64 %in, %sh
+  %sub = sub i64 64, %sh
+  %shl = shl i64 %in, %sub
+  %or = or i64 %shl, %shr
+  ret i64 %or
+}
+
+define i64 @f5(i64 %in, i64 %sh) {
+; CHECK-LABEL: f5:
+; CHECK: lcr %r1, %r3
+; CHECK: sllg %r2, %r2, 0(%r1)
+  %sub = sub i64 128, %sh
+  %shl = shl i64 %in, %sub
+  ret i64 %shl
+}
+
+define i64 @f6(i64 %in, i64 %sh) {
+; CHECK-LABEL: f6:
+; CHECK: lcr %r1, %r3
+; CHECK: srag %r2, %r2, 0(%r1)
+  %sub = sub i64 256, %sh
+  %shl = ashr i64 %in, %sub
+  ret i64 %shl
+}
+
+define i64 @f7(i64 %in, i64 %sh) {
+; CHECK-LABEL: f7:
+; CHECK: lcr %r1, %r3
+; CHECK: srlg %r2, %r2, 0(%r1)
+  %sub = sub i64 512, %sh
+  %shl = lshr i64 %in, %sub
+  ret i64 %shl
+}
+
+define i64 @f8(i64 %in, i64 %sh) {
+; CHECK-LABEL: f8:
+; CHECK: lcr %r1, %r3
+; CHECK: srlg %r0, %r2, 0(%r3)
+; CHECK: sllg %r2, %r2, 0(%r1)
+; CHECK: ogr %r2, %r0
+  %shr = lshr i64 %in, %sh
+  %sub = sub i64 1024, %sh
+  %shl = shl i64 %in, %sub
+  %or = or i64 %shl, %shr
+  ret i64 %or
+}
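
The f4/f8 shape above is the classic rotate idiom as it commonly appears in
C source; a hedged example of code that lowers to that IR shape (rotr64 is an
illustrative name, not from the commit):

  #include <stdint.h>

  /* Rotate right by n via a (64 - n) left shift; the new patterns fold
     the (64 - n) computation into a single LCR. Assumes 1 <= n <= 63,
     matching the shift-amount preconditions implicit in the IR above. */
  uint64_t rotr64(uint64_t x, uint64_t n) {
    return (x >> n) | (x << (64 - n));
  }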
