[llvm] 9fde8e9 - [Hexagon] Fix MULHS lowering for HVX v60

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 18 07:54:56 PDT 2022


Author: Krzysztof Parzyszek
Date: 2022-10-18T07:54:38-07:00
New Revision: 9fde8e907b5386e4738f873090be8def35d508e5

URL: https://github.com/llvm/llvm-project/commit/9fde8e907b5386e4738f873090be8def35d508e5
DIFF: https://github.com/llvm/llvm-project/commit/9fde8e907b5386e4738f873090be8def35d508e5.diff

LOG: [Hexagon] Fix MULHS lowering for HVX v60

The carry bit from an intermediate addition was not properly propagated.
For example, mulhs(0x7fffffff, 0x7fffffff) was evaluated as 0x3ffeffff, while
the correct result is 0x3fffffff.
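
A minimal scalar sketch of the same halfword decomposition (illustration only,
not the HVX lowering itself; the helper names are made up here): truncating the
middle sum to 32 bits before the arithmetic shift reproduces the wrong value
above, while keeping the carry yields the correct one.

// Scalar model of mulhs (the high 32 bits of a signed 32x32->64 multiply)
// built from 16-bit halves.  Sketch only.
#include <cstdint>
#include <cstdio>

static int32_t mulhs_ref(int32_t a, int32_t b) {
  return (int32_t)(((int64_t)a * b) >> 32);
}

// a*b = HiA*HiB*2^32 + (HiA*LoB + LoA*HiB)*2^16 + LoA*LoB.  With KeepCarry
// false, the middle sum is truncated to 32 bits before the arithmetic shift,
// dropping the carry out of bit 31 -- the bug fixed by this commit.
static int32_t mulhs_halves(int32_t a, int32_t b, bool KeepCarry) {
  int32_t HiA = a >> 16, HiB = b >> 16;        // signed high halves
  int64_t LoA = a & 0xffff, LoB = b & 0xffff;  // unsigned low halves
  int64_t Mid = HiA * LoB + LoA * HiB + ((LoA * LoB) >> 16);
  if (!KeepCarry)
    Mid = (int32_t)Mid;                        // truncate: the carry is lost
  return (int32_t)((int64_t)HiA * HiB + (Mid >> 16));  // arithmetic shift
}

int main() {
  int32_t A = 0x7fffffff;
  printf("%08x\n", (uint32_t)mulhs_ref(A, A));           // 3fffffff
  printf("%08x\n", (uint32_t)mulhs_halves(A, A, true));  // 3fffffff
  printf("%08x\n", (uint32_t)mulhs_halves(A, A, false)); // 3ffeffff
  return 0;
}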

Added: 
    llvm/test/CodeGen/Hexagon/autohvx/mulh.ll

Modified: 
    llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
index 0fb707cf73404..e8e86d71b83b6 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
@@ -1916,9 +1916,18 @@ HexagonTargetLowering::LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const {
     return DAG.getBitcast(ResTy, BS);
   }
 
+  MVT PairTy = typeJoin({ResTy, ResTy});
+
   assert(ElemTy == MVT::i32);
   SDValue S16 = DAG.getConstant(16, dl, MVT::i32);
 
+  auto LoVec = [&DAG,ResTy,dl] (SDValue Pair) {
+    return DAG.getTargetExtractSubreg(Hexagon::vsub_lo, dl, ResTy, Pair);
+  };
+  auto HiVec = [&DAG,ResTy,dl] (SDValue Pair) {
+    return DAG.getTargetExtractSubreg(Hexagon::vsub_hi, dl, ResTy, Pair);
+  };
+
   auto MulHS_V60 = [&](SDValue Vs, SDValue Vt) {
     // mulhs(Vs,Vt) =
     //   = [(Hi(Vs)*2^16 + Lo(Vs)) *s (Hi(Vt)*2^16 + Lo(Vt))] >> 32
@@ -1931,21 +1940,42 @@ HexagonTargetLowering::LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const {
     // so everything in [] can be shifted by 16 without loss of precision.
     //   = [Hi(Vs) *s Hi(Vt)*2^16 + Hi(Vs)*su Lo(Vt) + Lo(Vs)*Vt >> 16] >> 16
     //   = [Hi(Vs) *s Hi(Vt)*2^16 + Hi(Vs)*su Lo(Vt) + V6_vmpyewuh(Vs,Vt)] >> 16
-    // Denote Hi(Vs) = Vs':
-    //   = [Vs'*s Hi(Vt)*2^16 + Vs' *su Lo(Vt) + V6_vmpyewuh(Vt,Vs)] >> 16
-    //   = Vs'*s Hi(Vt) + (V6_vmpyiewuh(Vs',Vt) + V6_vmpyewuh(Vt,Vs)) >> 16
+    // The final additions need to make sure to properly maintain any
+    // carry-out bits.
+    //
+    //                Hi(Vt) Lo(Vt)
+    //                Hi(Vs) Lo(Vs)
+    //               --------------
+    //                Lo(Vt)*Lo(Vs)  | T0 = V6_vmpyewuh(Vt,Vs) does this,
+    //         Hi(Vt)*Lo(Vs)         |      + dropping the low 16 bits
+    //         Hi(Vs)*Lo(Vt)   | T2
+    //  Hi(Vt)*Hi(Vs)
+
     SDValue T0 = getInstr(Hexagon::V6_vmpyewuh, dl, ResTy, {Vt, Vs}, DAG);
-    // Get Vs':
-    SDValue S0 = getInstr(Hexagon::V6_vasrw, dl, ResTy, {Vs, S16}, DAG);
-    SDValue T1 = getInstr(Hexagon::V6_vmpyiewuh_acc, dl, ResTy,
-                          {T0, S0, Vt}, DAG);
-    // Shift by 16:
-    SDValue S2 = getInstr(Hexagon::V6_vasrw, dl, ResTy, {T1, S16}, DAG);
-    // Get Vs'*Hi(Vt):
-    SDValue T2 = getInstr(Hexagon::V6_vmpyiowh, dl, ResTy, {S0, Vt}, DAG);
+    // T1 = get Hi(Vs) into low halves.
+    SDValue T1 = getInstr(Hexagon::V6_vasrw, dl, ResTy, {Vs, S16}, DAG);
+    // P0 = interleaved T1.h*Vt.uh (full precision product)
+    SDValue P0 = getInstr(Hexagon::V6_vmpyhus, dl, PairTy, {T1, Vt}, DAG);
+    // T2 = T1.even(h) * Vt.even(uh), i.e. Hi(Vs)*Lo(Vt)
+    SDValue T2 = LoVec(P0);
+    // We need to add T0+T2, recording the carry-out, which will be 1<<16
+    // added to the final sum.
+    // P1 = interleaved even/odd 32-bit (unsigned) sums of 16-bit halves
+    SDValue P1 = getInstr(Hexagon::V6_vadduhw, dl, PairTy, {T0, T2}, DAG);
+    // P2 = interleaved even/odd 32-bit (signed) sums of 16-bit halves
+    SDValue P2 = getInstr(Hexagon::V6_vaddhw, dl, PairTy, {T0, T2}, DAG);
+    // T3 = full-precision(T0+T2) >> 16
+    // The low halves are added-unsigned, the high ones are added-signed.
+    SDValue T3 = getInstr(Hexagon::V6_vasrw_acc, dl, ResTy,
+                          {HiVec(P2), LoVec(P1), S16}, DAG);
+    SDValue T4 = getInstr(Hexagon::V6_vasrw, dl, ResTy, {Vt, S16}, DAG);
+    // P3 = interleaved Hi(Vt)*Hi(Vs) (full precision),
+    // which is now Lo(T1)*Lo(T4), so we want to keep the even product.
+    SDValue P3 = getInstr(Hexagon::V6_vmpyhv, dl, PairTy, {T1, T4}, DAG);
+    SDValue T5 = LoVec(P3);
     // Add:
-    SDValue T3 = DAG.getNode(ISD::ADD, dl, ResTy, {S2, T2});
-    return T3;
+    SDValue T6 = DAG.getNode(ISD::ADD, dl, ResTy, {T3, T5});
+    return T6;
   };
 
   auto MulHS_V62 = [&](SDValue Vs, SDValue Vt) {
@@ -1962,16 +1992,8 @@ HexagonTargetLowering::LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const {
     return MulHS_V60(Vs, Vt);
   }
 
-  // Unsigned mulhw. (Would expansion using signed mulhw be better?)
+  // Unsigned mulhw.
 
-  auto LoVec = [&DAG,ResTy,dl] (SDValue Pair) {
-    return DAG.getTargetExtractSubreg(Hexagon::vsub_lo, dl, ResTy, Pair);
-  };
-  auto HiVec = [&DAG,ResTy,dl] (SDValue Pair) {
-    return DAG.getTargetExtractSubreg(Hexagon::vsub_hi, dl, ResTy, Pair);
-  };
-
-  MVT PairTy = typeJoin({ResTy, ResTy});
   SDValue P = getInstr(Hexagon::V6_lvsplatw, dl, ResTy,
                        {DAG.getConstant(0x02020202, dl, MVT::i32)}, DAG);
   // Multiply-unsigned halfwords:
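
As a scalar sanity check on the carry-preserving addition introduced above
(per the comments in MulHS_V60: add the low 16-bit halves as unsigned, the
high halves as signed, then fold the low sum back in under the shift), the
same arithmetic can be written in plain C++.  This sketches the arithmetic
only, not the individual HVX instructions.

// Full-precision (T0 + T2) >> 16 using only 32-bit values: the low halves
// are added as unsigned so the carry into bit 16 survives, the high halves
// as signed, and the two partial sums are then recombined.  Sketch only.
#include <cassert>
#include <cstdint>

static int32_t addShr16(int32_t T0, int32_t T2) {
  uint32_t LoSum = (uint32_t)(T0 & 0xffff) + (uint32_t)(T2 & 0xffff);
  int32_t HiSum = (T0 >> 16) + (T2 >> 16);
  return HiSum + (int32_t)(LoSum >> 16);  // LoSum >> 16 is the carried-out bit
}

int main() {
  // Operands chosen so the low halves carry; compare against 64-bit math.
  int32_t T0 = 0x7ffeffff, T2 = 0x7ffe8001;
  assert(addShr16(T0, T2) == (int32_t)(((int64_t)T0 + (int64_t)T2) >> 16));
  return 0;
}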

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll b/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll
new file mode 100644
index 0000000000000..8c9f6a1ad4701
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll
@@ -0,0 +1,156 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=hexagon -mattr=+hvxv60,+hvx-length128b,-packets < %s | FileCheck --check-prefix=V60 %s
+; RUN: llc -march=hexagon -mattr=+hvxv65,+hvx-length128b,-packets < %s | FileCheck --check-prefix=V65 %s
+
+define <32 x i32> @mulhs(<32 x i32> %a0, <32 x i32> %a1) #0 {
+; V60-LABEL: mulhs:
+; V60:       // %bb.0:
+; V60-NEXT:    {
+; V60-NEXT:     r0 = #16
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v2.w = vmpye(v1.w,v0.uh)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v0.w = vasr(v0.w,r0)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v3.w = vasr(v1.w,r0)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v5:4.w = vmpy(v0.h,v1.uh)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v31:30.w = vmpy(v0.h,v3.h)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v7:6.w = vadd(v2.uh,v4.uh)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v29:28.w = vadd(v2.h,v4.h)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v29.w += vasr(v6.w,r0)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v0.w = vadd(v29.w,v30.w)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     jumpr r31
+; V60-NEXT:    }
+;
+; V65-LABEL: mulhs:
+; V65:       // %bb.0:
+; V65-NEXT:    {
+; V65-NEXT:     v3:2 = vmpye(v0.w,v1.uh)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v3:2 += vmpyo(v0.w,v1.h)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v0 = v3
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     jumpr r31
+; V65-NEXT:    }
+  %v0 = sext <32 x i32> %a0 to <32 x i64>
+  %v1 = sext <32 x i32> %a1 to <32 x i64>
+  %v2 = mul <32 x i64> %v0, %v1
+  %v3 = lshr <32 x i64> %v2, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  %v4 = trunc <32 x i64> %v3 to <32 x i32>
+  ret <32 x i32> %v4
+}
+
+define <32 x i32> @mulhu(<32 x i32> %a0, <32 x i32> %a1) #0 {
+; V60-LABEL: mulhu:
+; V60:       // %bb.0:
+; V60-NEXT:    {
+; V60-NEXT:     r0 = ##33686018
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v3:2.uw = vmpy(v0.uh,v1.uh)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     r2 = #16
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v4 = vsplat(r0)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v2.uw = vlsr(v2.uw,r2)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v1 = vdelta(v1,v4)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v1:0.uw = vmpy(v0.uh,v1.uh)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v1:0.w = vadd(v0.uh,v1.uh)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v0.w = vadd(v2.w,v0.w)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v1.w = vadd(v3.w,v1.w)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v0.uw = vlsr(v0.uw,r2)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     v0.w = vadd(v0.w,v1.w)
+; V60-NEXT:    }
+; V60-NEXT:    {
+; V60-NEXT:     jumpr r31
+; V60-NEXT:    }
+;
+; V65-LABEL: mulhu:
+; V65:       // %bb.0:
+; V65-NEXT:    {
+; V65-NEXT:     r0 = ##33686018
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v3:2.uw = vmpy(v0.uh,v1.uh)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     r2 = #16
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v4 = vsplat(r0)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v2.uw = vlsr(v2.uw,r2)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v1 = vdelta(v1,v4)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v1:0.uw = vmpy(v0.uh,v1.uh)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v1:0.w = vadd(v0.uh,v1.uh)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v0.w = vadd(v2.w,v0.w)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v1.w = vadd(v3.w,v1.w)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v0.uw = vlsr(v0.uw,r2)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     v0.w = vadd(v0.w,v1.w)
+; V65-NEXT:    }
+; V65-NEXT:    {
+; V65-NEXT:     jumpr r31
+; V65-NEXT:    }
+  %v0 = zext <32 x i32> %a0 to <32 x i64>
+  %v1 = zext <32 x i32> %a1 to <32 x i64>
+  %v2 = mul <32 x i64> %v0, %v1
+  %v3 = lshr <32 x i64> %v2, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  %v4 = trunc <32 x i64> %v3 to <32 x i32>
+  ret <32 x i32> %v4
+}
+
+attributes #0 = { nounwind }
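
Should the lowering change again, the CHECK lines in this test can be
regenerated and the test re-run; a typical invocation from an llvm-project
checkout (assuming llc and llvm-lit from a local build are on PATH) would be:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/Hexagon/autohvx/mulh.ll
  llvm-lit -v llvm/test/CodeGen/Hexagon/autohvx/mulh.ll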


        

