[llvm] e27e8e0 - [Hexagon] Use V6_vmpyuhvs for 16-bit mulhu on HVX v69+

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 22 11:31:43 PST 2022


Author: Krzysztof Parzyszek
Date: 2022-11-22T11:31:22-08:00
New Revision: e27e8e0541320ec207773c933d430738b5a73bc8

URL: https://github.com/llvm/llvm-project/commit/e27e8e0541320ec207773c933d430738b5a73bc8
DIFF: https://github.com/llvm/llvm-project/commit/e27e8e0541320ec207773c933d430738b5a73bc8.diff

LOG: [Hexagon] Use V6_vmpyuhvs for 16-bit mulhu on HVX v69+

Added: 
    

Modified: 
    llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
    llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
    llvm/test/CodeGen/Hexagon/autohvx/mulh.ll

Removed: 
    

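For context, "mulhu" here is the unsigned high-multiply: each 16-bit lane of the result is the upper half of the 32-bit product of the corresponding input lanes. A minimal scalar sketch of the per-lane operation (illustration only, not part of this commit):

    #include <cstdint>

    // High half of a 16x16 -> 32 unsigned multiply. On HVX v69+,
    // V6_vmpyuhvs computes this for every 16-bit lane in one instruction.
    uint16_t mulhu16(uint16_t a, uint16_t b) {
      return static_cast<uint16_t>((uint32_t(a) * uint32_t(b)) >> 16);
    }

Before v69, the same result takes a widening multiply (V6_vmpyuhv) plus a shuffle of the odd halves (V6_vshufoh), as in the existing pattern below; the new pattern is restricted to UseHVXV69 and given a higher AddedComplexity so it is preferred when available.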

################################################################################
diff --git a/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td b/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
index 4a086beccb7c6..5b8386416a5f0 100644
--- a/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
@@ -925,6 +925,10 @@ def: Pat<(VecI8  (mulhu  HVI8:$Vu,  HVI8:$Vv)),
 def: Pat<(VecI16 (mulhu HVI16:$Vu, HVI16:$Vv)),
          (V6_vshufoh (HiVec (V6_vmpyuhv $Vu, $Vv)),
                      (LoVec (V6_vmpyuhv $Vu, $Vv)))>;
+let Predicates = [UseHVXV69], AddedComplexity = 20 in {
+  def: Pat<(VecI16 (mulhu HVI16:$Vu, HVI16:$Vv)),
+           (V6_vmpyuhvs $Vu, $Vv)>;
+}
 
 let Predicates = [UseHVXV60] in {
   // V60 doesn't have vabsb or byte shifts.

diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index 59076ca004f9d..44fb0d7338663 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -379,6 +379,8 @@ class HvxIdioms {
                       Value *CarryIn = nullptr) const
       -> std::pair<Value *, Value *>;
   auto createMul16(IRBuilderBase &Builder, SValue X, SValue Y) const -> Value *;
+  auto createMulH16(IRBuilderBase &Builder, SValue X, SValue Y) const
+      -> Value *;
   auto createMul32(IRBuilderBase &Builder, SValue X, SValue Y) const
       -> std::pair<Value *, Value *>;
   auto createAddLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
@@ -1420,7 +1422,14 @@ auto HvxIdioms::processFxpMulChopped(IRBuilderBase &Builder, Instruction &In,
     // Getting here with Op.Frac == 0 isn't wrong, but suboptimal: here we
     // generate a full precision products, which is unnecessary if there is
     // no shift.
+    assert(Width == 16);
     assert(Op.Frac != 0 && "Unshifted mul should have been skipped");
+    if (Op.Frac == 16) {
+      // Multiply high
+      if (Value *MulH = createMulH16(Builder, Op.X, Op.Y))
+        return MulH;
+    }
+    // Do full-precision multiply and shift.
     Value *Prod32 = createMul16(Builder, Op.X, Op.Y);
     if (Rounding) {
       Value *RoundVal = HVC.getConstSplat(Prod32->getType(), 1 << *Op.RoundAt);
@@ -1579,6 +1588,30 @@ auto HvxIdioms::createMul16(IRBuilderBase &Builder, SValue X, SValue Y) const
   return HVC.vdeal(Builder, HVC.sublo(Builder, P), HVC.subhi(Builder, P));
 }
 
+auto HvxIdioms::createMulH16(IRBuilderBase &Builder, SValue X, SValue Y) const
+    -> Value * {
+  Type *HvxI16Ty = HVC.getHvxTy(HVC.getIntTy(16), /*Pair=*/false);
+
+  if (HVC.HST.useHVXV69Ops()) {
+    if (X.Sgn != Signed && Y.Sgn != Signed) {
+      auto V6_vmpyuhvs = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyuhvs);
+      return HVC.createHvxIntrinsic(Builder, V6_vmpyuhvs, HvxI16Ty,
+                                    {X.Val, Y.Val});
+    }
+  }
+
+  Type *HvxP16Ty = HVC.getHvxTy(HVC.getIntTy(16), /*Pair=*/true);
+  Value *Pair16 = Builder.CreateBitCast(createMul16(Builder, X, Y), HvxP16Ty);
+  unsigned Len = HVC.length(HvxP16Ty) / 2;
+
+  SmallVector<int, 128> PickOdd(Len);
+  for (int i = 0; i != static_cast<int>(Len); ++i)
+    PickOdd[i] = 2 * i + 1;
+
+  return Builder.CreateShuffleVector(HVC.sublo(Builder, Pair16),
+                                     HVC.subhi(Builder, Pair16), PickOdd);
+}
+
 auto HvxIdioms::createMul32(IRBuilderBase &Builder, SValue X, SValue Y) const
     -> std::pair<Value *, Value *> {
   assert(X.Val->getType() == Y.Val->getType());

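A note on the code above: processFxpMulChopped now treats a 16-bit fixed-point multiply with Op.Frac == 16 as a plain high multiply, and createMulH16 emits V6_vmpyuhvs directly when targeting v69+ and neither operand is signed (the instruction is an unsigned multiply). Otherwise it falls back to the full 16x16 -> 32 products from createMul16 and keeps every odd 16-bit half (PickOdd[i] = 2*i + 1), which in the little-endian lane layout is the high half of each product. A rough scalar model of that fallback, with illustrative names only (the actual lane bookkeeping uses vdeal and the sub-vector shuffles shown above):

    #include <cstdint>
    #include <cstring>

    // Conceptual model of the pre-v69 fallback: form the full 32-bit
    // products, then keep the odd 16-bit halves, which in little-endian
    // order are the high halves.
    void mulh16_fallback(const uint16_t *x, const uint16_t *y,
                         uint16_t *out, int n) {
      for (int i = 0; i != n; ++i) {
        uint32_t prod = uint32_t(x[i]) * uint32_t(y[i]); // full product
        uint16_t halves[2];
        std::memcpy(halves, &prod, sizeof prod);         // split into 16-bit halves
        out[i] = halves[1];                              // odd half == high half
      }
    }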
diff --git a/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll b/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll
index 58e8cac9dfe5c..561ab4e459ce5 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll
@@ -1,9 +1,125 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon -mattr=+hvxv60,+hvx-length128b,-packets < %s | FileCheck --check-prefix=V60 %s
 ; RUN: llc -march=hexagon -mattr=+hvxv65,+hvx-length128b,-packets < %s | FileCheck --check-prefix=V65 %s
+; RUN: llc -march=hexagon -mattr=+hvxv69,+hvx-length128b,-packets < %s | FileCheck --check-prefix=V69 %s
 
-define <32 x i32> @mulhs(<32 x i32> %a0, <32 x i32> %a1) #0 {
-; V60-LABEL: mulhs:
+define <64 x i16> @mulhs16(<64 x i16> %a0, <64 x i16> %a1) #0 {
+; V60-LABEL: mulhs16:
+; V60:       // %bb.0:
+; V60-NEXT:    {
+; V60-NEXT:     v1:0.w = vmpy(v0.h,v1.h)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     r7 = #-4
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v1:0 = vdeal(v1,v0,r7)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v0.h = vpacko(v1.w,v0.w)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     jumpr r31
+; V60-NEXT:    }
+;
+; V65-LABEL: mulhs16:
+; V65:       // %bb.0:
+; V65-NEXT:    {
+; V65-NEXT:     v1:0.w = vmpy(v0.h,v1.h)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     r7 = #-4
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v1:0 = vdeal(v1,v0,r7)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v0.h = vpacko(v1.w,v0.w)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     jumpr r31
+; V65-NEXT:    }
+;
+; V69-LABEL: mulhs16:
+; V69:       // %bb.0:
+; V69-NEXT:    {
+; V69-NEXT:     v1:0.w = vmpy(v0.h,v1.h)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     r7 = #-4
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     v1:0 = vdeal(v1,v0,r7)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     v0.h = vpacko(v1.w,v0.w)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     jumpr r31
+; V69-NEXT:    }
+  %v0 = sext <64 x i16> %a0 to <64 x i32>
+  %v1 = sext <64 x i16> %a1 to <64 x i32>
+  %v2 = mul <64 x i32> %v0, %v1
+  %v3 = lshr <64 x i32> %v2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %v4 = trunc <64 x i32> %v3 to <64 x i16>
+  ret <64 x i16> %v4
+}
+
+define <64 x i16> @mulhu16(<64 x i16> %a0, <64 x i16> %a1) #0 {
+; V60-LABEL: mulhu16:
+; V60:       // %bb.0:
+; V60-NEXT:    {
+; V60-NEXT:     v1:0.uw = vmpy(v0.uh,v1.uh)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     r7 = #-4
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v1:0 = vdeal(v1,v0,r7)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v0.h = vpacko(v1.w,v0.w)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     jumpr r31
+; V60-NEXT:    }
+;
+; V65-LABEL: mulhu16:
+; V65:       // %bb.0:
+; V65-NEXT:    {
+; V65-NEXT:     v1:0.uw = vmpy(v0.uh,v1.uh)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     r7 = #-4
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v1:0 = vdeal(v1,v0,r7)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v0.h = vpacko(v1.w,v0.w)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     jumpr r31
+; V65-NEXT:    }
+;
+; V69-LABEL: mulhu16:
+; V69:       // %bb.0:
+; V69-NEXT:    {
+; V69-NEXT:     v0.uh = vmpy(v0.uh,v1.uh):>>16
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     jumpr r31
+; V69-NEXT:    }
+  %v0 = zext <64 x i16> %a0 to <64 x i32>
+  %v1 = zext <64 x i16> %a1 to <64 x i32>
+  %v2 = mul <64 x i32> %v0, %v1
+  %v3 = lshr <64 x i32> %v2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %v4 = trunc <64 x i32> %v3 to <64 x i16>
+  ret <64 x i16> %v4
+}
+
+define <32 x i32> @mulhs32(<32 x i32> %a0, <32 x i32> %a1) #0 {
+; V60-LABEL: mulhs32:
 ; V60:       // %bb.0:
 ; V60-NEXT:    {
 ; V60-NEXT:     r0 = #16
@@ -39,7 +155,7 @@ define <32 x i32> @mulhs(<32 x i32> %a0, <32 x i32> %a1) #0 {
 ; V60-NEXT:     jumpr r31
 ; V60-NEXT:    }
 ;
-; V65-LABEL: mulhs:
+; V65-LABEL: mulhs32:
 ; V65:       // %bb.0:
 ; V65-NEXT:    {
 ; V65-NEXT:     v3:2 = vmpye(v0.w,v1.uh)
@@ -53,6 +169,21 @@ define <32 x i32> @mulhs(<32 x i32> %a0, <32 x i32> %a1) #0 {
 ; V65-NEXT:    {
 ; V65-NEXT:     jumpr r31
 ; V65-NEXT:    }
+;
+; V69-LABEL: mulhs32:
+; V69:       // %bb.0:
+; V69-NEXT:    {
+; V69-NEXT:     v3:2 = vmpye(v0.w,v1.uh)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     v3:2 += vmpyo(v0.w,v1.h)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     v0 = v3
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     jumpr r31
+; V69-NEXT:    }
   %v0 = sext <32 x i32> %a0 to <32 x i64>
   %v1 = sext <32 x i32> %a1 to <32 x i64>
   %v2 = mul <32 x i64> %v0, %v1
@@ -61,8 +192,8 @@ define <32 x i32> @mulhs(<32 x i32> %a0, <32 x i32> %a1) #0 {
   ret <32 x i32> %v4
 }
 
-define <32 x i32> @mulhu(<32 x i32> %a0, <32 x i32> %a1) #0 {
-; V60-LABEL: mulhu:
+define <32 x i32> @mulhu32(<32 x i32> %a0, <32 x i32> %a1) #0 {
+; V60-LABEL: mulhu32:
 ; V60:       // %bb.0:
 ; V60-NEXT:    {
 ; V60-NEXT:     r0 = ##33686018
@@ -101,7 +232,7 @@ define <32 x i32> @mulhu(<32 x i32> %a0, <32 x i32> %a1) #0 {
 ; V60-NEXT:     jumpr r31
 ; V60-NEXT:    }
 ;
-; V65-LABEL: mulhu:
+; V65-LABEL: mulhu32:
 ; V65:       // %bb.0:
 ; V65-NEXT:    {
 ; V65-NEXT:     v2 = vxor(v2,v2)
@@ -130,6 +261,36 @@ define <32 x i32> @mulhu(<32 x i32> %a0, <32 x i32> %a1) #0 {
 ; V65-NEXT:    {
 ; V65-NEXT:     jumpr r31
 ; V65-NEXT:    }
+;
+; V69-LABEL: mulhu32:
+; V69:       // %bb.0:
+; V69-NEXT:    {
+; V69-NEXT:     v2 = vxor(v2,v2)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     v5:4 = vmpye(v0.w,v1.uh)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     q0 = vcmp.gt(v2.w,v0.w)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     q1 = vcmp.gt(v2.w,v1.w)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     v5:4 += vmpyo(v0.w,v1.h)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     v31 = vand(q0,v1)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     if (q1) v31.w += v0.w
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     v0.w = vadd(v5.w,v31.w)
+; V69-NEXT:    }
+; V69-NEXT:    {
+; V69-NEXT:     jumpr r31
+; V69-NEXT:    }
   %v0 = zext <32 x i32> %a0 to <32 x i64>
   %v1 = zext <32 x i32> %a1 to <32 x i64>
   %v2 = mul <32 x i64> %v0, %v1
@@ -138,4 +299,4 @@ define <32 x i32> @mulhu(<32 x i32> %a0, <32 x i32> %a1) #0 {
   ret <32 x i32> %v4
 }
 
-attributes #0 = { nounwind }
+attributes #0 = { nounwind memory(none) }
