[llvm] ebec077 - [X86][FP16] Change the order of the operands in complex FMA intrinsics to allow swapping the mul operands.

via llvm-commits (llvm-commits at lists.llvm.org)
Wed Sep 22 20:03:15 PDT 2021


Author: Wang, Pengfei
Date: 2021-09-23T11:02:48+08:00
New Revision: ebec077e07f5d35a870f075fb665c006978d49ea

URL: https://github.com/llvm/llvm-project/commit/ebec077e07f5d35a870f075fb665c006978d49ea
DIFF: https://github.com/llvm/llvm-project/commit/ebec077e07f5d35a870f075fb665c006978d49ea.diff

LOG: [X86][FP16] Change the order of the operands in complex FMA intrinsics to allow swapping the mul operands.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D109658
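
For readers skimming the patch, a minimal user-level sketch of what the reordering means follows. It is illustrative only: the wrapper names (fma_pch_demo, fcmadd_pch_demo) and the compile flags are assumptions for a typical AVX512FP16+VL-enabled clang, not part of this commit.

/* Build with something like: clang -O2 -mavx512fp16 -mavx512vl demo.c */
#include <immintrin.h>

/* Non-conjugated complex FMA: roughly a*b + c on packed _Float16 complex
   pairs. After this patch the underlying builtin takes (a, b, c, mask, ...)
   with the addend last, and the corresponding X86 nodes are marked
   commutative, so the backend may swap a and b (a*b == b*a) when folding
   loads or assigning registers. */
__m128h fma_pch_demo(__m128h a, __m128h b, __m128h c) {
  return _mm_fmadd_pch(a, b, c);
}

/* Conjugated complex FMA: one mul operand is complex-conjugated, so
   swapping a and b would change the result; these variants stay
   non-commutable (see the new "not commutable" test added below). */
__m128h fcmadd_pch_demo(__m128h a, __m128h b, __m128h c) {
  return _mm_fcmadd_pch(a, b, c);
}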

Added: 
    

Modified: 
    clang/lib/Headers/avx512fp16intrin.h
    clang/lib/Headers/avx512vlfp16intrin.h
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/lib/Target/X86/X86InstrAVX512.td
    llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
    llvm/lib/Target/X86/X86InstrInfo.cpp
    llvm/lib/Target/X86/X86IntrinsicsInfo.h
    llvm/test/CodeGen/X86/avx512cfma-intrinsics.ll
    llvm/test/CodeGen/X86/avx512cfmul-intrinsics.ll
    llvm/test/CodeGen/X86/avx512cfmulsh-instrinsics.ll
    llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
    llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
    llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll
    llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16vl.ll

Removed: 
    


################################################################################
diff --git a/clang/lib/Headers/avx512fp16intrin.h b/clang/lib/Headers/avx512fp16intrin.h
index 80f22ab997243..a65247916be85 100644
--- a/clang/lib/Headers/avx512fp16intrin.h
+++ b/clang/lib/Headers/avx512fp16intrin.h
@@ -2934,8 +2934,8 @@ _mm_mask3_fnmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmadd_sch(__m128h __A,
                                                                __m128h __B,
                                                                __m128h __C) {
-  return (__m128h)__builtin_ia32_vfcmaddcsh_mask((__v4sf)__C, (__v4sf)__A,
-                                                 (__v4sf)__B, (__mmask8)-1,
+  return (__m128h)__builtin_ia32_vfcmaddcsh_mask((__v4sf)__A, (__v4sf)__B,
+                                                 (__v4sf)__C, (__mmask8)-1,
                                                  _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -2943,15 +2943,15 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_fcmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectps_128(
       __U,
-      __builtin_ia32_vfcmaddcsh_mask((__v4sf)__C, (__v4sf)__A, (__v4sf)__B,
+      __builtin_ia32_vfcmaddcsh_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C,
                                      (__mmask8)__U, _MM_FROUND_CUR_DIRECTION),
       (__v4sf)__A);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fcmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfcmaddcsh_maskz((__v4sf)__C, (__v4sf)__A,
-                                                  (__v4sf)__B, (__mmask8)__U,
+  return (__m128h)__builtin_ia32_vfcmaddcsh_maskz((__v4sf)__A, (__v4sf)__B,
+                                                  (__v4sf)__C, (__mmask8)__U,
                                                   _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -2959,38 +2959,38 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fcmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
   return (__m128h)_mm_move_ss((__m128)__C,
                               (__m128)__builtin_ia32_vfcmaddcsh_mask(
-                                  (__v4sf)__C, (__v4sf)__A, (__v4sf)__B, __U,
+                                  (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U,
                                   _MM_FROUND_CUR_DIRECTION));
 }
 
 #define _mm_fcmadd_round_sch(A, B, C, R)                                       \
   ((__m128h)__builtin_ia32_vfcmaddcsh_mask(                                    \
-      (__v4sf)(__m128h)(C), (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),        \
+      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
       (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_fcmadd_round_sch(A, U, B, C, R)                               \
   ((__m128h)__builtin_ia32_selectps_128(                                       \
       (__mmask8)(U & 1),                                                       \
       __builtin_ia32_vfcmaddcsh_mask(                                          \
-          (__v4sf)(__m128h)(C), (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),    \
+          (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),    \
           (__mmask8)(U), (int)(R)),                                            \
       (__v4sf)(__m128h)(A)))
 
 #define _mm_maskz_fcmadd_round_sch(U, A, B, C, R)                              \
   ((__m128h)__builtin_ia32_vfcmaddcsh_maskz(                                   \
-      (__v4sf)(__m128h)(C), (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),        \
+      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
       (__mmask8)(U), (int)(R)))
 
 #define _mm_mask3_fcmadd_round_sch(A, B, C, U, R)                              \
   ((__m128h)_mm_move_ss((__m128)(C),                                           \
                         (__m128)__builtin_ia32_vfcmaddcsh_mask(                \
-                            (__v4sf)(C), (__v4sf)(A), (__v4sf)(B), (U), (R))))
+                            (__v4sf)(A), (__v4sf)(B), (__v4sf)(C), (U), (R))))
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_sch(__m128h __A,
                                                               __m128h __B,
                                                               __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddcsh_mask((__v4sf)__C, (__v4sf)__A,
-                                                (__v4sf)__B, (__mmask8)-1,
+  return (__m128h)__builtin_ia32_vfmaddcsh_mask((__v4sf)__A, (__v4sf)__B,
+                                                (__v4sf)__C, (__mmask8)-1,
                                                 _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -2998,34 +2998,34 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_fmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectps_128(
       __U,
-      __builtin_ia32_vfmaddcsh_mask((__v4sf)__C, (__v4sf)__A, (__v4sf)__B,
+      __builtin_ia32_vfmaddcsh_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C,
                                     (__mmask8)__U, _MM_FROUND_CUR_DIRECTION),
       (__v4sf)__A);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddcsh_maskz((__v4sf)__C, (__v4sf)__A,
-                                                 (__v4sf)__B, (__mmask8)__U,
+  return (__m128h)__builtin_ia32_vfmaddcsh_maskz((__v4sf)__A, (__v4sf)__B,
+                                                 (__v4sf)__C, (__mmask8)__U,
                                                  _MM_FROUND_CUR_DIRECTION);
 }
 
 #define _mm_fmadd_round_sch(A, B, C, R)                                        \
   ((__m128h)__builtin_ia32_vfmaddcsh_mask(                                     \
-      (__v4sf)(__m128h)(C), (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),        \
+      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
       (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_fmadd_round_sch(A, U, B, C, R)                                \
   ((__m128h)__builtin_ia32_selectps_128(                                       \
       (__mmask8)(U & 1),                                                       \
       __builtin_ia32_vfmaddcsh_mask(                                           \
-          (__v4sf)(__m128h)(C), (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),    \
+          (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),    \
           (__mmask8)(U), (int)(R)),                                            \
       (__v4sf)(__m128h)(A)))
 
 #define _mm_maskz_fmadd_round_sch(U, A, B, C, R)                               \
   ((__m128h)__builtin_ia32_vfmaddcsh_maskz(                                    \
-      (__v4sf)(__m128h)(C), (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B),        \
+      (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C),        \
       (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmul_sch(__m128h __A,
@@ -3177,8 +3177,8 @@ _mm512_maskz_fmul_pch(__mmask16 __U, __m512h __A, __m512h __B) {
 static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fcmadd_pch(__m512h __A,
                                                                   __m512h __B,
                                                                   __m512h __C) {
-  return (__m512h)__builtin_ia32_vfcmaddcph512_mask((__v16sf)__C, (__v16sf)__A,
-                                                    (__v16sf)__B, (__mmask16)-1,
+  return (__m512h)__builtin_ia32_vfcmaddcph512_mask((__v16sf)__A, (__v16sf)__B,
+                                                    (__v16sf)__C, (__mmask16)-1,
                                                     _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -3186,8 +3186,8 @@ static __inline__ __m512h __DEFAULT_FN_ATTRS512
 _mm512_mask_fcmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
   return (__m512h)__builtin_ia32_selectps_512(
       __U,
-      __builtin_ia32_vfcmaddcph512_mask((__v16sf)__C, (__v16sf)__A,
-                                        (__v16sf)__B, (__mmask16)__U,
+      __builtin_ia32_vfcmaddcph512_mask((__v16sf)__A, (__v16sf)__B,
+                                        (__v16sf)__C, (__mmask16)__U,
                                         _MM_FROUND_CUR_DIRECTION),
       (__v16sf)__A);
 }
@@ -3195,45 +3195,45 @@ _mm512_mask_fcmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
 static __inline__ __m512h __DEFAULT_FN_ATTRS512
 _mm512_mask3_fcmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
   return (__m512h)__builtin_ia32_vfcmaddcph512_mask(
-      (__v16sf)__C, (__v16sf)__A, (__v16sf)__B, (__mmask16)__U,
+      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
       _MM_FROUND_CUR_DIRECTION);
 }
 
 static __inline__ __m512h __DEFAULT_FN_ATTRS512
 _mm512_maskz_fcmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
   return (__m512h)__builtin_ia32_vfcmaddcph512_maskz(
-      (__v16sf)__C, (__v16sf)__A, (__v16sf)__B, (__mmask16)__U,
+      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
       _MM_FROUND_CUR_DIRECTION);
 }
 
 #define _mm512_fcmadd_round_pch(A, B, C, R)                                    \
   ((__m512h)__builtin_ia32_vfcmaddcph512_mask(                                 \
-      (__v16sf)(__m512h)(C), (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),     \
+      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
       (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_fcmadd_round_pch(A, U, B, C, R)                            \
   ((__m512h)__builtin_ia32_selectps_512(                                       \
       (__mmask16)(U),                                                          \
       __builtin_ia32_vfcmaddcph512_mask(                                       \
-          (__v16sf)(__m512h)(C), (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \
+          (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
           (__mmask16)(U), (int)(R)),                                           \
       (__v16sf)(__m512h)(A)))
 
 #define _mm512_mask3_fcmadd_round_pch(A, B, C, U, R)                           \
   ((__m512h)__builtin_ia32_vfcmaddcph512_mask(                                 \
-      (__v16sf)(__m512h)(C), (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),     \
+      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
       (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_fcmadd_round_pch(U, A, B, C, R)                           \
   ((__m512h)__builtin_ia32_vfcmaddcph512_maskz(                                \
-      (__v16sf)(__m512h)(C), (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),     \
+      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
       (__mmask16)(U), (int)(R)))
 
 static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmadd_pch(__m512h __A,
                                                                  __m512h __B,
                                                                  __m512h __C) {
-  return (__m512h)__builtin_ia32_vfmaddcph512_mask((__v16sf)__C, (__v16sf)__A,
-                                                   (__v16sf)__B, (__mmask16)-1,
+  return (__m512h)__builtin_ia32_vfmaddcph512_mask((__v16sf)__A, (__v16sf)__B,
+                                                   (__v16sf)__C, (__mmask16)-1,
                                                    _MM_FROUND_CUR_DIRECTION);
 }
 
@@ -3241,7 +3241,7 @@ static __inline__ __m512h __DEFAULT_FN_ATTRS512
 _mm512_mask_fmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
   return (__m512h)__builtin_ia32_selectps_512(
       __U,
-      __builtin_ia32_vfmaddcph512_mask((__v16sf)__C, (__v16sf)__A, (__v16sf)__B,
+      __builtin_ia32_vfmaddcph512_mask((__v16sf)__A, (__v16sf)__B, (__v16sf)__C,
                                        (__mmask16)__U,
                                        _MM_FROUND_CUR_DIRECTION),
       (__v16sf)__A);
@@ -3249,39 +3249,39 @@ _mm512_mask_fmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
 
 static __inline__ __m512h __DEFAULT_FN_ATTRS512
 _mm512_mask3_fmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
-  return (__m512h)__builtin_ia32_vfmaddcph512_mask((__v16sf)__C, (__v16sf)__A,
-                                                   (__v16sf)__B, (__mmask16)__U,
+  return (__m512h)__builtin_ia32_vfmaddcph512_mask((__v16sf)__A, (__v16sf)__B,
+                                                   (__v16sf)__C, (__mmask16)__U,
                                                    _MM_FROUND_CUR_DIRECTION);
 }
 
 static __inline__ __m512h __DEFAULT_FN_ATTRS512
 _mm512_maskz_fmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
   return (__m512h)__builtin_ia32_vfmaddcph512_maskz(
-      (__v16sf)__C, (__v16sf)__A, (__v16sf)__B, (__mmask16)__U,
+      (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
       _MM_FROUND_CUR_DIRECTION);
 }
 
 #define _mm512_fmadd_round_pch(A, B, C, R)                                     \
   ((__m512h)__builtin_ia32_vfmaddcph512_mask(                                  \
-      (__v16sf)(__m512h)(C), (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),     \
+      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
       (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_fmadd_round_pch(A, U, B, C, R)                             \
   ((__m512h)__builtin_ia32_selectps_512(                                       \
       (__mmask16)(U),                                                          \
       __builtin_ia32_vfmaddcph512_mask(                                        \
-          (__v16sf)(__m512h)(C), (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \
+          (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
           (__mmask16)(U), (int)(R)),                                           \
       (__v16sf)(__m512h)(A)))
 
 #define _mm512_mask3_fmadd_round_pch(A, B, C, U, R)                            \
   ((__m512h)__builtin_ia32_vfmaddcph512_mask(                                  \
-      (__v16sf)(__m512h)(C), (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),     \
+      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
       (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_fmadd_round_pch(U, A, B, C, R)                            \
   ((__m512h)__builtin_ia32_vfmaddcph512_maskz(                                 \
-      (__v16sf)(__m512h)(C), (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B),     \
+      (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C),     \
       (__mmask16)(U), (int)(R)))
 
 static __inline__ _Float16 __DEFAULT_FN_ATTRS512

diff --git a/clang/lib/Headers/avx512vlfp16intrin.h b/clang/lib/Headers/avx512vlfp16intrin.h
index 69f81af9217f7..5f596879bb2e1 100644
--- a/clang/lib/Headers/avx512vlfp16intrin.h
+++ b/clang/lib/Headers/avx512vlfp16intrin.h
@@ -1825,57 +1825,57 @@ _mm256_maskz_fcmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmadd_pch(__m128h __A,
                                                                __m128h __B,
                                                                __m128h __C) {
-  return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__C, (__v4sf)__A,
-                                                    (__v4sf)__B, (__mmask8)-1);
+  return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
+                                                    (__v4sf)__C, (__mmask8)-1);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_fcmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectps_128(
       __U,
-      __builtin_ia32_vfcmaddcph128_mask((__v4sf)__C, (__v4sf)(__m128h)__A,
-                                        (__v4sf)__B, (__mmask8)__U),
+      __builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)(__m128h)__B,
+                                        (__v4sf)__C, (__mmask8)__U),
       (__v4sf)__A);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fcmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__C, (__v4sf)__A,
-                                                    (__v4sf)__B, (__mmask8)__U);
+  return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
+                                                    (__v4sf)__C, (__mmask8)__U);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fcmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_vfcmaddcph128_maskz(
-      (__v4sf)__C, (__v4sf)__A, (__v4sf)__B, (__mmask8)__U);
+      (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fcmadd_pch(__m256h __A,
                                                                   __m256h __B,
                                                                   __m256h __C) {
-  return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__C, (__v8sf)__A,
-                                                    (__v8sf)__B, (__mmask8)-1);
+  return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
+                                                    (__v8sf)__C, (__mmask8)-1);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fcmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectps_256(
       __U,
-      __builtin_ia32_vfcmaddcph256_mask((__v8sf)__C, (__v8sf)__A, (__v8sf)__B,
+      __builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C,
                                         (__mmask8)__U),
       (__v8sf)__A);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fcmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
-  return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__C, (__v8sf)__A,
-                                                    (__v8sf)__B, (__mmask8)__U);
+  return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
+                                                    (__v8sf)__C, (__mmask8)__U);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fcmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_vfcmaddcph256_maskz(
-      (__v8sf)__C, (__v8sf)__A, (__v8sf)__B, (__mmask8)__U);
+      (__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmul_pch(__m128h __A,
@@ -1919,57 +1919,57 @@ _mm256_maskz_fmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_pch(__m128h __A,
                                                               __m128h __B,
                                                               __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__C, (__v4sf)__A,
-                                                   (__v4sf)__B, (__mmask8)-1);
+  return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
+                                                   (__v4sf)__C, (__mmask8)-1);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_fmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectps_128(
       __U,
-      __builtin_ia32_vfmaddcph128_mask((__v4sf)__C, (__v4sf)__A, (__v4sf)__B,
+      __builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C,
                                        (__mmask8)__U),
       (__v4sf)__A);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
-  return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__C, (__v4sf)__A,
-                                                   (__v4sf)__B, (__mmask8)__U);
+  return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
+                                                   (__v4sf)__C, (__mmask8)__U);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddcph128_maskz((__v4sf)__C, (__v4sf)__A,
-                                                    (__v4sf)__B, (__mmask8)__U);
+  return (__m128h)__builtin_ia32_vfmaddcph128_maskz((__v4sf)__A, (__v4sf)__B,
+                                                    (__v4sf)__C, (__mmask8)__U);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_pch(__m256h __A,
                                                                  __m256h __B,
                                                                  __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__C, (__v8sf)__A,
-                                                   (__v8sf)__B, (__mmask8)-1);
+  return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
+                                                   (__v8sf)__C, (__mmask8)-1);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectps_256(
       __U,
-      __builtin_ia32_vfmaddcph256_mask((__v8sf)__C, (__v8sf)__A, (__v8sf)__B,
+      __builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C,
                                        (__mmask8)__U),
       (__v8sf)__A);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
-  return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__C, (__v8sf)__A,
-                                                   (__v8sf)__B, (__mmask8)__U);
+  return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
+                                                   (__v8sf)__C, (__mmask8)__U);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddcph256_maskz((__v8sf)__C, (__v8sf)__A,
-                                                    (__v8sf)__B, (__mmask8)__U);
+  return (__m256h)__builtin_ia32_vfmaddcph256_maskz((__v8sf)__A, (__v8sf)__B,
+                                                    (__v8sf)__C, (__mmask8)__U);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_blend_ph(__mmask8 __U,

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2e9a254590d26..5de3a4c9d8dbb 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26067,16 +26067,16 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
       // Swap Src1 and Src2 in the node creation
       return DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1);
     }
-    case FMA_OP_MASKZ:
-    case FMA_OP_MASK: {
+    case CFMA_OP_MASKZ:
+    case CFMA_OP_MASK: {
       SDValue Src1 = Op.getOperand(1);
       SDValue Src2 = Op.getOperand(2);
       SDValue Src3 = Op.getOperand(3);
       SDValue Mask = Op.getOperand(4);
       MVT VT = Op.getSimpleValueType();
 
-      SDValue PassThru = Src1;
-      if (IntrData->Type == FMA_OP_MASKZ)
+      SDValue PassThru = Src3;
+      if (IntrData->Type == CFMA_OP_MASKZ)
         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
 
       // We add rounding mode to the Node when

diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 9caa79e12e61a..8aee96e1c5044 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -13501,130 +13501,130 @@ let Predicates = [HasFP16, HasVLX] in {
 }
 
 let Constraints = "@earlyclobber $dst, $src1 = $dst" in {
-  multiclass avx512_cfmop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
+  multiclass avx512_cfmaop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86VectorVTInfo _, bit IsCommutable> {
     defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
             (ins _.RC:$src2, _.RC:$src3),
             OpcodeStr, "$src3, $src2", "$src2, $src3",
-            (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>, EVEX_4V;
+            (_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1)), IsCommutable>, EVEX_4V;
 
     defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
             (ins _.RC:$src2, _.MemOp:$src3),
             OpcodeStr, "$src3, $src2", "$src2, $src3",
-            (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>, EVEX_4V;
+            (_.VT (OpNode _.RC:$src2, (_.LdFrag addr:$src3), _.RC:$src1))>, EVEX_4V;
 
     defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
             (ins _.RC:$src2, _.ScalarMemOp:$src3),
             OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"), !strconcat("$src2, ${src3}", _.BroadcastStr),
-            (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.VT (_.BroadcastLdFrag addr:$src3))))>, EVEX_B, EVEX_4V;
+            (_.VT (OpNode _.RC:$src2, (_.VT (_.BroadcastLdFrag addr:$src3)), _.RC:$src1))>, EVEX_B, EVEX_4V;
   }
 } // Constraints = "@earlyclobber $dst, $src1 = $dst"
 
-multiclass avx512_cfmop_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_cfmaop_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  X86VectorVTInfo _> {
   let Constraints = "@earlyclobber $dst, $src1 = $dst" in
   defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
           (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
           OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
-          (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3, (i32 timm:$rc)))>,
+          (_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1, (i32 timm:$rc)))>,
           EVEX_4V, EVEX_B, EVEX_RC;
 }
 
 
-multiclass avx512_cfmaop_common<bits<8> opc, string OpcodeStr, SDNode OpNode, SDNode OpNodeRnd> {
+multiclass avx512_cfmaop_common<bits<8> opc, string OpcodeStr, SDNode OpNode, SDNode OpNodeRnd, bit IsCommutable> {
   let Predicates = [HasFP16] in {
-    defm Z    : avx512_cfmop_rm<opc, OpcodeStr, OpNode, v16f32_info>,
-                avx512_cfmop_round<opc, OpcodeStr, OpNodeRnd, v16f32_info>,
+    defm Z    : avx512_cfmaop_rm<opc, OpcodeStr, OpNode, v16f32_info, IsCommutable>,
+                avx512_cfmaop_round<opc, OpcodeStr, OpNodeRnd, v16f32_info>,
                       EVEX_V512, Sched<[WriteFMAZ]>;
   }
   let Predicates = [HasVLX, HasFP16] in {
-    defm Z256 : avx512_cfmop_rm<opc, OpcodeStr, OpNode, v8f32x_info>, EVEX_V256, Sched<[WriteFMAY]>;
-    defm Z128 : avx512_cfmop_rm<opc, OpcodeStr, OpNode, v4f32x_info>, EVEX_V128, Sched<[WriteFMAX]>;
+    defm Z256 : avx512_cfmaop_rm<opc, OpcodeStr, OpNode, v8f32x_info, IsCommutable>, EVEX_V256, Sched<[WriteFMAY]>;
+    defm Z128 : avx512_cfmaop_rm<opc, OpcodeStr, OpNode, v4f32x_info, IsCommutable>, EVEX_V128, Sched<[WriteFMAX]>;
   }
 }
 
 multiclass avx512_cfmulop_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
-                                 SDNode MaskOpNode, SDNode OpNodeRnd,
-                                 X86SchedWriteWidths sched = SchedWriteFMA> {
+                                 SDNode MaskOpNode, SDNode OpNodeRnd, bit IsCommutable> {
   let Predicates = [HasFP16] in {
     defm Z    : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v16f32_info,
-                                 sched.ZMM, 0, 0, "", "@earlyclobber $dst", 0>,
-                avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, sched.ZMM, v16f32_info,
+                                 WriteFMAZ, IsCommutable, IsCommutable, "", "@earlyclobber $dst", 0>,
+                avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, WriteFMAZ, v16f32_info,
                                        "", "@earlyclobber $dst">, EVEX_V512;
   }
   let Predicates = [HasVLX, HasFP16] in {
     defm Z256 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v8f32x_info,
-                                 sched.YMM, 0, 0, "", "@earlyclobber $dst", 0>, EVEX_V256;
+                                 WriteFMAY, IsCommutable, IsCommutable, "", "@earlyclobber $dst", 0>, EVEX_V256;
     defm Z128 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v4f32x_info,
-                                 sched.XMM, 0, 0, "", "@earlyclobber $dst", 0>, EVEX_V128;
+                                 WriteFMAX, IsCommutable, IsCommutable, "", "@earlyclobber $dst", 0>, EVEX_V128;
   }
 }
 
 
 let Uses = [MXCSR] in {
-  defm VFMADDCPH  : avx512_cfmaop_common<0x56, "vfmaddcph", x86vfmaddc, x86vfmaddcRnd>,
+  defm VFMADDCPH  : avx512_cfmaop_common<0x56, "vfmaddcph", x86vfmaddc, x86vfmaddcRnd, 1>,
                                     T_MAP6XS, EVEX_CD8<32, CD8VF>;
-  defm VFCMADDCPH : avx512_cfmaop_common<0x56, "vfcmaddcph", x86vfcmaddc, x86vfcmaddcRnd>,
+  defm VFCMADDCPH : avx512_cfmaop_common<0x56, "vfcmaddcph", x86vfcmaddc, x86vfcmaddcRnd, 0>,
                                     T_MAP6XD, EVEX_CD8<32, CD8VF>;
 
   defm VFMULCPH  : avx512_cfmulop_common<0xD6, "vfmulcph", x86vfmulc, x86vfmulc,
-                                         x86vfmulcRnd>, T_MAP6XS, EVEX_CD8<32, CD8VF>;
+                                         x86vfmulcRnd, 1>, T_MAP6XS, EVEX_CD8<32, CD8VF>;
   defm VFCMULCPH : avx512_cfmulop_common<0xD6, "vfcmulcph", x86vfcmulc,
-                                         x86vfcmulc, x86vfcmulcRnd>, T_MAP6XD, EVEX_CD8<32, CD8VF>;
+                                         x86vfcmulc, x86vfcmulcRnd, 0>, T_MAP6XD, EVEX_CD8<32, CD8VF>;
 }
 
 
-multiclass avx512_cfmop_sh_common<bits<8> opc, string OpcodeStr, SDNode OpNode, SDNode OpNodeRnd,
-                                  X86SchedWriteWidths sched = SchedWriteFMA> {
+multiclass avx512_cfmaop_sh_common<bits<8> opc, string OpcodeStr, SDNode OpNode, SDNode OpNodeRnd,
+                                   bit IsCommutable> {
   let Predicates = [HasFP16], Constraints = "@earlyclobber $dst, $src1 = $dst" in {
     defm r : AVX512_maskable_3src<opc, MRMSrcReg, v4f32x_info, (outs VR128X:$dst),
                         (ins VR128X:$src2, VR128X:$src3), OpcodeStr,
                         "$src3, $src2", "$src2, $src3",
-                        (v4f32 (OpNode VR128X:$src1, VR128X:$src2, VR128X:$src3))>,
-                        Sched<[sched.XMM]>;
+                        (v4f32 (OpNode VR128X:$src2, VR128X:$src3, VR128X:$src1)), IsCommutable>,
+                        Sched<[WriteFMAX]>;
     defm m : AVX512_maskable_3src<opc, MRMSrcMem, v4f32x_info, (outs VR128X:$dst),
                         (ins VR128X:$src2, ssmem:$src3), OpcodeStr,
                         "$src3, $src2", "$src2, $src3",
-                        (v4f32 (OpNode VR128X:$src1, VR128X:$src2, (sse_load_f32 addr:$src3)))>,
-                        Sched<[sched.XMM.Folded, sched.XMM.ReadAfterFold]>;
+                        (v4f32 (OpNode VR128X:$src2, (sse_load_f32 addr:$src3), VR128X:$src1))>,
+                        Sched<[WriteFMAX.Folded, WriteFMAX.ReadAfterFold]>;
     defm rb : AVX512_maskable_3src<opc,  MRMSrcReg, v4f32x_info, (outs VR128X:$dst),
                         (ins VR128X:$src2, VR128X:$src3, AVX512RC:$rc), OpcodeStr,
                         "$rc, $src3, $src2", "$src2, $src3, $rc",
-                        (v4f32 (OpNodeRnd VR128X:$src1, VR128X:$src2, VR128X:$src3, (i32 timm:$rc)))>,
-                        EVEX_B, EVEX_RC, Sched<[sched.XMM]>;
+                        (v4f32 (OpNodeRnd VR128X:$src2, VR128X:$src3, VR128X:$src1, (i32 timm:$rc)))>,
+                        EVEX_B, EVEX_RC, Sched<[WriteFMAX]>;
   }
 }
 
 multiclass avx512_cfmbinop_sh_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
-                                     SDNode OpNodeRnd, X86SchedWriteWidths sched = SchedWriteFMA> {
+                                     SDNode OpNodeRnd, bit IsCommutable> {
   let Predicates = [HasFP16] in {
     defm rr : AVX512_maskable<opc, MRMSrcReg, f32x_info, (outs VR128X:$dst),
                         (ins VR128X:$src1, VR128X:$src2), OpcodeStr,
                         "$src2, $src1", "$src1, $src2",
                         (v4f32 (OpNode VR128X:$src1, VR128X:$src2)),
-                        0, 0, 0, X86selects, "@earlyclobber $dst">, Sched<[sched.XMM]>;
+                        IsCommutable, IsCommutable, IsCommutable,
+                        X86selects, "@earlyclobber $dst">, Sched<[WriteFMAX]>;
     defm rm : AVX512_maskable<opc, MRMSrcMem, f32x_info, (outs VR128X:$dst),
                         (ins VR128X:$src1, ssmem:$src2), OpcodeStr,
                         "$src2, $src1", "$src1, $src2",
                         (v4f32 (OpNode VR128X:$src1, (sse_load_f32 addr:$src2))),
                         0, 0, 0, X86selects, "@earlyclobber $dst">,
-                        Sched<[sched.XMM.Folded, sched.XMM.ReadAfterFold]>;
+                        Sched<[WriteFMAX.Folded, WriteFMAX.ReadAfterFold]>;
     defm rrb : AVX512_maskable<opc, MRMSrcReg, f32x_info, (outs VR128X:$dst),
                         (ins VR128X:$src1, VR128X:$src2, AVX512RC:$rc), OpcodeStr,
                         "$rc, $src2, $src1", "$src1, $src2, $rc",
                         (OpNodeRnd (v4f32 VR128X:$src1), (v4f32 VR128X:$src2), (i32 timm:$rc)),
                         0, 0, 0, X86selects, "@earlyclobber $dst">,
-                        EVEX_B, EVEX_RC, Sched<[sched.XMM]>;
+                        EVEX_B, EVEX_RC, Sched<[WriteFMAX]>;
   }
 }
 
 let Uses = [MXCSR] in {
-  defm VFMADDCSHZ  : avx512_cfmop_sh_common<0x57, "vfmaddcsh", x86vfmaddcSh, x86vfmaddcShRnd>,
+  defm VFMADDCSHZ  : avx512_cfmaop_sh_common<0x57, "vfmaddcsh", x86vfmaddcSh, x86vfmaddcShRnd, 1>,
                                     T_MAP6XS, EVEX_CD8<32, CD8VT1>, EVEX_V128, EVEX_4V;
-  defm VFCMADDCSHZ : avx512_cfmop_sh_common<0x57, "vfcmaddcsh", x86vfcmaddcSh, x86vfcmaddcShRnd>,
+  defm VFCMADDCSHZ : avx512_cfmaop_sh_common<0x57, "vfcmaddcsh", x86vfcmaddcSh, x86vfcmaddcShRnd, 0>,
                                     T_MAP6XD, EVEX_CD8<32, CD8VT1>, EVEX_V128, EVEX_4V;
 
-  defm VFMULCSHZ  : avx512_cfmbinop_sh_common<0xD7, "vfmulcsh", x86vfmulcSh, x86vfmulcShRnd>,
+  defm VFMULCSHZ  : avx512_cfmbinop_sh_common<0xD7, "vfmulcsh", x86vfmulcSh, x86vfmulcShRnd, 1>,
                                     T_MAP6XS, EVEX_CD8<32, CD8VT1>, EVEX_V128, VEX_LIG, EVEX_4V;
-  defm VFCMULCSHZ : avx512_cfmbinop_sh_common<0xD7, "vfcmulcsh", x86vfcmulcSh, x86vfcmulcShRnd>,
+  defm VFCMULCSHZ : avx512_cfmbinop_sh_common<0xD7, "vfcmulcsh", x86vfcmulcSh, x86vfcmulcShRnd, 0>,
                                     T_MAP6XD, EVEX_CD8<32, CD8VT1>, EVEX_V128, VEX_LIG, EVEX_4V;
 }

diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index bb323bfabdf47..258b2a15d5e4a 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -572,22 +572,22 @@ def SDTIFma : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0,1>,
 def x86vpmadd52l     : SDNode<"X86ISD::VPMADD52L",     SDTIFma, [SDNPCommutative]>;
 def x86vpmadd52h     : SDNode<"X86ISD::VPMADD52H",     SDTIFma, [SDNPCommutative]>;
 
-def x86vfmaddc       : SDNode<"X86ISD::VFMADDC",       SDTFPTernaryOp>;
-def x86vfmaddcRnd    : SDNode<"X86ISD::VFMADDC_RND",   SDTFmaRound>;
+def x86vfmaddc       : SDNode<"X86ISD::VFMADDC",       SDTFPTernaryOp,  [SDNPCommutative]>;
+def x86vfmaddcRnd    : SDNode<"X86ISD::VFMADDC_RND",   SDTFmaRound,     [SDNPCommutative]>;
 def x86vfcmaddc      : SDNode<"X86ISD::VFCMADDC",      SDTFPTernaryOp>;
 def x86vfcmaddcRnd   : SDNode<"X86ISD::VFCMADDC_RND",  SDTFmaRound>;
-def x86vfmulc        : SDNode<"X86ISD::VFMULC",        SDTFPBinOp>;
-def x86vfmulcRnd     : SDNode<"X86ISD::VFMULC_RND",    SDTFPBinOpRound>;
+def x86vfmulc        : SDNode<"X86ISD::VFMULC",        SDTFPBinOp,      [SDNPCommutative]>;
+def x86vfmulcRnd     : SDNode<"X86ISD::VFMULC_RND",    SDTFPBinOpRound, [SDNPCommutative]>;
 def x86vfcmulc       : SDNode<"X86ISD::VFCMULC",       SDTFPBinOp>;
 def x86vfcmulcRnd    : SDNode<"X86ISD::VFCMULC_RND",   SDTFPBinOpRound>;
 
-def x86vfmaddcSh     : SDNode<"X86ISD::VFMADDCSH",     SDTFPTernaryOp>;
+def x86vfmaddcSh     : SDNode<"X86ISD::VFMADDCSH",     SDTFPTernaryOp,  [SDNPCommutative]>;
 def x86vfcmaddcSh    : SDNode<"X86ISD::VFCMADDCSH",    SDTFPTernaryOp>;
-def x86vfmulcSh      : SDNode<"X86ISD::VFMULCSH",      SDTFPBinOp>;
+def x86vfmulcSh      : SDNode<"X86ISD::VFMULCSH",      SDTFPBinOp,      [SDNPCommutative]>;
 def x86vfcmulcSh     : SDNode<"X86ISD::VFCMULCSH",     SDTFPBinOp>;
-def x86vfmaddcShRnd  : SDNode<"X86ISD::VFMADDCSH_RND", SDTFmaRound>;
+def x86vfmaddcShRnd  : SDNode<"X86ISD::VFMADDCSH_RND", SDTFmaRound,     [SDNPCommutative]>;
 def x86vfcmaddcShRnd : SDNode<"X86ISD::VFCMADDCSH_RND",SDTFmaRound>;
-def x86vfmulcShRnd   : SDNode<"X86ISD::VFMULCSH_RND",  SDTFPBinOpRound>;
+def x86vfmulcShRnd   : SDNode<"X86ISD::VFMULCSH_RND",  SDTFPBinOpRound, [SDNPCommutative]>;
 def x86vfcmulcShRnd  : SDNode<"X86ISD::VFCMULCSH_RND", SDTFPBinOpRound>;
 
 def X86rsqrt14   : SDNode<"X86ISD::RSQRT14",  SDTFPUnaryOp>;

diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index c51c17a8bdd9d..5fe468cfa5f25 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -2620,7 +2620,19 @@ bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
   case X86::VPMADD52LUQZ256rkz:
   case X86::VPMADD52LUQZr:
   case X86::VPMADD52LUQZrk:
-  case X86::VPMADD52LUQZrkz: {
+  case X86::VPMADD52LUQZrkz:
+  case X86::VFMADDCPHZr:
+  case X86::VFMADDCPHZrk:
+  case X86::VFMADDCPHZrkz:
+  case X86::VFMADDCPHZ128r:
+  case X86::VFMADDCPHZ128rk:
+  case X86::VFMADDCPHZ128rkz:
+  case X86::VFMADDCPHZ256r:
+  case X86::VFMADDCPHZ256rk:
+  case X86::VFMADDCPHZ256rkz:
+  case X86::VFMADDCSHZr:
+  case X86::VFMADDCSHZrk:
+  case X86::VFMADDCSHZrkz: {
     unsigned CommutableOpIdx1 = 2;
     unsigned CommutableOpIdx2 = 3;
     if (X86II::isKMasked(Desc.TSFlags)) {

diff --git a/llvm/lib/Target/X86/X86IntrinsicsInfo.h b/llvm/lib/Target/X86/X86IntrinsicsInfo.h
index 869753ec84deb..1edec96bbec31 100644
--- a/llvm/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/llvm/lib/Target/X86/X86IntrinsicsInfo.h
@@ -24,7 +24,7 @@ enum IntrinsicType : uint16_t {
   GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST, XGETBV, ADX, FPCLASSS,
   INTR_TYPE_1OP, INTR_TYPE_2OP, INTR_TYPE_3OP, INTR_TYPE_4OP_IMM8,
   INTR_TYPE_3OP_IMM8,
-  FMA_OP_MASK, FMA_OP_MASKZ,
+  CFMA_OP_MASK, CFMA_OP_MASKZ,
   CMP_MASK_CC,CMP_MASK_SCALAR_CC, VSHIFT, COMI, COMI_RM, BLENDV, BEXTRI,
   CVTPD2PS_MASK,
   INTR_TYPE_1OP_SAE, INTR_TYPE_2OP_SAE,
@@ -1161,30 +1161,30 @@ static const IntrinsicData  IntrinsicsWithoutChain[] = {
                      X86ISD::CVTUI2P, X86ISD::MCVTUI2P),
   X86_INTRINSIC_DATA(avx512fp16_mask_vcvtuqq2ph_256, TRUNCATE_TO_REG,
                      X86ISD::CVTUI2P, X86ISD::MCVTUI2P),
-  X86_INTRINSIC_DATA(avx512fp16_mask_vfcmadd_cph_128, FMA_OP_MASK,  X86ISD::VFCMADDC,  0),
-  X86_INTRINSIC_DATA(avx512fp16_mask_vfcmadd_cph_256, FMA_OP_MASK,  X86ISD::VFCMADDC,  0),
-  X86_INTRINSIC_DATA(avx512fp16_mask_vfcmadd_cph_512, FMA_OP_MASK,  X86ISD::VFCMADDC,  X86ISD::VFCMADDC_RND),
-  X86_INTRINSIC_DATA(avx512fp16_mask_vfcmadd_csh, FMA_OP_MASK,  X86ISD::VFCMADDCSH, X86ISD::VFCMADDCSH_RND),
+  X86_INTRINSIC_DATA(avx512fp16_mask_vfcmadd_cph_128, CFMA_OP_MASK,  X86ISD::VFCMADDC,  0),
+  X86_INTRINSIC_DATA(avx512fp16_mask_vfcmadd_cph_256, CFMA_OP_MASK,  X86ISD::VFCMADDC,  0),
+  X86_INTRINSIC_DATA(avx512fp16_mask_vfcmadd_cph_512, CFMA_OP_MASK,  X86ISD::VFCMADDC,  X86ISD::VFCMADDC_RND),
+  X86_INTRINSIC_DATA(avx512fp16_mask_vfcmadd_csh, CFMA_OP_MASK,  X86ISD::VFCMADDCSH, X86ISD::VFCMADDCSH_RND),
   X86_INTRINSIC_DATA(avx512fp16_mask_vfcmul_cph_128, INTR_TYPE_2OP_MASK,  X86ISD::VFCMULC,  0),
   X86_INTRINSIC_DATA(avx512fp16_mask_vfcmul_cph_256, INTR_TYPE_2OP_MASK,  X86ISD::VFCMULC,  0),
   X86_INTRINSIC_DATA(avx512fp16_mask_vfcmul_cph_512, INTR_TYPE_2OP_MASK,  X86ISD::VFCMULC,  X86ISD::VFCMULC_RND),
   X86_INTRINSIC_DATA(avx512fp16_mask_vfcmul_csh, INTR_TYPE_SCALAR_MASK,  X86ISD::VFCMULCSH, X86ISD::VFCMULCSH_RND),
-  X86_INTRINSIC_DATA(avx512fp16_mask_vfmadd_cph_128, FMA_OP_MASK,  X86ISD::VFMADDC,  0),
-  X86_INTRINSIC_DATA(avx512fp16_mask_vfmadd_cph_256, FMA_OP_MASK,  X86ISD::VFMADDC,  0),
-  X86_INTRINSIC_DATA(avx512fp16_mask_vfmadd_cph_512, FMA_OP_MASK,  X86ISD::VFMADDC,  X86ISD::VFMADDC_RND),
-  X86_INTRINSIC_DATA(avx512fp16_mask_vfmadd_csh, FMA_OP_MASK,  X86ISD::VFMADDCSH, X86ISD::VFMADDCSH_RND),
+  X86_INTRINSIC_DATA(avx512fp16_mask_vfmadd_cph_128, CFMA_OP_MASK,  X86ISD::VFMADDC,  0),
+  X86_INTRINSIC_DATA(avx512fp16_mask_vfmadd_cph_256, CFMA_OP_MASK,  X86ISD::VFMADDC,  0),
+  X86_INTRINSIC_DATA(avx512fp16_mask_vfmadd_cph_512, CFMA_OP_MASK,  X86ISD::VFMADDC,  X86ISD::VFMADDC_RND),
+  X86_INTRINSIC_DATA(avx512fp16_mask_vfmadd_csh, CFMA_OP_MASK,  X86ISD::VFMADDCSH, X86ISD::VFMADDCSH_RND),
   X86_INTRINSIC_DATA(avx512fp16_mask_vfmul_cph_128, INTR_TYPE_2OP_MASK,  X86ISD::VFMULC,  0),
   X86_INTRINSIC_DATA(avx512fp16_mask_vfmul_cph_256, INTR_TYPE_2OP_MASK,  X86ISD::VFMULC,  0),
   X86_INTRINSIC_DATA(avx512fp16_mask_vfmul_cph_512, INTR_TYPE_2OP_MASK,  X86ISD::VFMULC,  X86ISD::VFMULC_RND),
   X86_INTRINSIC_DATA(avx512fp16_mask_vfmul_csh, INTR_TYPE_SCALAR_MASK,  X86ISD::VFMULCSH, X86ISD::VFMULCSH_RND),
-  X86_INTRINSIC_DATA(avx512fp16_maskz_vfcmadd_cph_128, FMA_OP_MASKZ,  X86ISD::VFCMADDC,  0),
-  X86_INTRINSIC_DATA(avx512fp16_maskz_vfcmadd_cph_256, FMA_OP_MASKZ,  X86ISD::VFCMADDC,  0),
-  X86_INTRINSIC_DATA(avx512fp16_maskz_vfcmadd_cph_512, FMA_OP_MASKZ,  X86ISD::VFCMADDC,  X86ISD::VFCMADDC_RND),
-  X86_INTRINSIC_DATA(avx512fp16_maskz_vfcmadd_csh, FMA_OP_MASKZ,  X86ISD::VFCMADDCSH, X86ISD::VFCMADDCSH_RND),
-  X86_INTRINSIC_DATA(avx512fp16_maskz_vfmadd_cph_128, FMA_OP_MASKZ,  X86ISD::VFMADDC,  0),
-  X86_INTRINSIC_DATA(avx512fp16_maskz_vfmadd_cph_256, FMA_OP_MASKZ,  X86ISD::VFMADDC,  0),
-  X86_INTRINSIC_DATA(avx512fp16_maskz_vfmadd_cph_512, FMA_OP_MASKZ,  X86ISD::VFMADDC,  X86ISD::VFMADDC_RND),
-  X86_INTRINSIC_DATA(avx512fp16_maskz_vfmadd_csh, FMA_OP_MASKZ,  X86ISD::VFMADDCSH, X86ISD::VFMADDCSH_RND),
+  X86_INTRINSIC_DATA(avx512fp16_maskz_vfcmadd_cph_128, CFMA_OP_MASKZ,  X86ISD::VFCMADDC,  0),
+  X86_INTRINSIC_DATA(avx512fp16_maskz_vfcmadd_cph_256, CFMA_OP_MASKZ,  X86ISD::VFCMADDC,  0),
+  X86_INTRINSIC_DATA(avx512fp16_maskz_vfcmadd_cph_512, CFMA_OP_MASKZ,  X86ISD::VFCMADDC,  X86ISD::VFCMADDC_RND),
+  X86_INTRINSIC_DATA(avx512fp16_maskz_vfcmadd_csh, CFMA_OP_MASKZ,  X86ISD::VFCMADDCSH, X86ISD::VFCMADDCSH_RND),
+  X86_INTRINSIC_DATA(avx512fp16_maskz_vfmadd_cph_128, CFMA_OP_MASKZ,  X86ISD::VFMADDC,  0),
+  X86_INTRINSIC_DATA(avx512fp16_maskz_vfmadd_cph_256, CFMA_OP_MASKZ,  X86ISD::VFMADDC,  0),
+  X86_INTRINSIC_DATA(avx512fp16_maskz_vfmadd_cph_512, CFMA_OP_MASKZ,  X86ISD::VFMADDC,  X86ISD::VFMADDC_RND),
+  X86_INTRINSIC_DATA(avx512fp16_maskz_vfmadd_csh, CFMA_OP_MASKZ,  X86ISD::VFMADDCSH, X86ISD::VFMADDCSH_RND),
   X86_INTRINSIC_DATA(avx512fp16_max_ph_128, INTR_TYPE_2OP, X86ISD::FMAX, 0),
   X86_INTRINSIC_DATA(avx512fp16_max_ph_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
   X86_INTRINSIC_DATA(avx512fp16_max_ph_512, INTR_TYPE_2OP_SAE, X86ISD::FMAX, X86ISD::FMAX_SAE),

diff --git a/llvm/test/CodeGen/X86/avx512cfma-intrinsics.ll b/llvm/test/CodeGen/X86/avx512cfma-intrinsics.ll
index 4302b03b7dbd2..8d9a927818ce9 100644
--- a/llvm/test/CodeGen/X86/avx512cfma-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512cfma-intrinsics.ll
@@ -4,13 +4,25 @@
 declare <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)
 declare <4 x float> @llvm.x86.avx512fp16.maskz.vfmadd.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)
 
-define <4 x float> @test_int_x86_avx512fp8_mask_cfmadd_ph_bst(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3){
+define <4 x float> @test_int_x86_avx512fp8_mask_cfmadd_ph_bst(<4 x float> %x0, <4 x float> %x1, i8 %x3){
 ; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfmadd_ph_bst:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcph {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    vfmaddcph {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1 {%k1}
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
-  %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> %x0, <4 x float> %x1, <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, i8 %x3)
+  %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> %x0, <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %x1, i8 %x3)
+  ret <4 x float> %res
+}
+
+define <4 x float> @test_int_x86_avx512fp8_mask_cfmadd_ph_bst2(<4 x float> %x0, <4 x float> %x1, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfmadd_ph_bst2:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vfmaddcph {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1 {%k1}
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %x0, <4 x float> %x1, i8 %x3)
   ret <4 x float> %res
 }
 
@@ -18,7 +30,8 @@ define <4 x float> @test_int_x86_avx512fp8_mask_cfmadd_ph_128(<4 x float> %x0, <
 ; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfmadd_ph_128:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcph %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    vfmaddcph %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
   ret <4 x float> %res
@@ -28,7 +41,8 @@ define <4 x float> @test_int_x86_avx512fp8_maskz_cfmadd_ph_128(<4 x float> %x0,
 ; CHECK-LABEL: test_int_x86_avx512fp8_maskz_cfmadd_ph_128:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcph %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfmaddcph %xmm1, %xmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.maskz.vfmadd.cph.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
   ret <4 x float> %res
@@ -37,8 +51,7 @@ define <4 x float> @test_int_x86_avx512fp8_maskz_cfmadd_ph_128(<4 x float> %x0,
 define <4 x float> @test_int_x86_avx512fp8_cfmadd_ph_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2){
 ; CHECK-LABEL: test_int_x86_avx512fp8_cfmadd_ph_128:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfmaddcph %xmm0, %xmm1, %xmm2
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
+; CHECK-NEXT:    vfmaddcph %xmm1, %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> %x2, <4 x float> %x1, <4 x float> %x0, i8 -1)
   ret <4 x float> %res
@@ -52,7 +65,8 @@ define <8 x float> @test_int_x86_avx512fp16_mask_cfmadd_ph_256(<8 x float> %x0,
 ; CHECK-LABEL: test_int_x86_avx512fp16_mask_cfmadd_ph_256:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcph %ymm2, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    vfmaddcph %ymm1, %ymm0, %ymm2 {%k1}
+; CHECK-NEXT:    vmovaps %ymm2, %ymm0
 ; CHECK-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
   ret <8 x float> %res
@@ -62,7 +76,8 @@ define <8 x float> @test_int_x86_avx512fp16_maskz_cfmadd_ph_256(<8 x float> %x0,
 ; CHECK-LABEL: test_int_x86_avx512fp16_maskz_cfmadd_ph_256:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcph %ymm2, %ymm1, %ymm0 {%k1} {z}
+; CHECK-NEXT:    vfmaddcph %ymm1, %ymm0, %ymm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %ymm2, %ymm0
 ; CHECK-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx512fp16.maskz.vfmadd.cph.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
   ret <8 x float> %res
@@ -71,8 +86,7 @@ define <8 x float> @test_int_x86_avx512fp16_maskz_cfmadd_ph_256(<8 x float> %x0,
 define <8 x float> @test_int_x86_avx512fp16_cfmadd_ph_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2){
 ; CHECK-LABEL: test_int_x86_avx512fp16_cfmadd_ph_256:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfmaddcph %ymm0, %ymm1, %ymm2
-; CHECK-NEXT:    vmovaps %ymm2, %ymm0
+; CHECK-NEXT:    vfmaddcph %ymm1, %ymm2, %ymm0
 ; CHECK-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.256(<8 x float> %x2, <8 x float> %x1, <8 x float> %x0, i8 -1)
   ret <8 x float> %res
@@ -86,7 +100,8 @@ define <16 x float> @test_int_x86_avx512fp16_mask_cfmadd_ph_512(<16 x float> %x0
 ; CHECK-LABEL: test_int_x86_avx512fp16_mask_cfmadd_ph_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcph %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT:    vfmaddcph %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
   ret <16 x float> %res
@@ -96,7 +111,8 @@ define <16 x float> @test_int_x86_avx512fp16_maskz_cfmadd_ph_512(<16 x float> %x
 ; CHECK-LABEL: test_int_x86_avx512fp16_maskz_cfmadd_ph_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcph %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT:    vfmaddcph %zmm1, %zmm0, %zmm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512fp16.maskz.vfmadd.cph.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
   ret <16 x float> %res
@@ -105,8 +121,7 @@ define <16 x float> @test_int_x86_avx512fp16_maskz_cfmadd_ph_512(<16 x float> %x
 define <16 x float> @test_int_x86_avx512fp16_cfmadd_ph_512_rn(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
 ; CHECK-LABEL: test_int_x86_avx512fp16_cfmadd_ph_512_rn:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfmaddcph {rz-sae}, %zmm0, %zmm1, %zmm2
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    vfmaddcph {rz-sae}, %zmm1, %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> %x2, <16 x float> %x1, <16 x float> %x0, i16 -1, i32 11)
   ret <16 x float> %res
@@ -115,8 +130,7 @@ define <16 x float> @test_int_x86_avx512fp16_cfmadd_ph_512_rn(<16 x float> %x0,
 define <16 x float> @test_int_x86_avx512fp16_cfmadd_ph_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
 ; CHECK-LABEL: test_int_x86_avx512fp16_cfmadd_ph_512:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfmaddcph %zmm0, %zmm1, %zmm2
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    vfmaddcph %zmm1, %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> %x2, <16 x float> %x1, <16 x float> %x0, i16 -1, i32 4)
   ret <16 x float> %res
@@ -125,11 +139,36 @@ define <16 x float> @test_int_x86_avx512fp16_cfmadd_ph_512(<16 x float> %x0, <16
 declare <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)
 declare <4 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)
 
+define <4 x float> @test_int_x86_avx512fp8_mask_cfcmadd_ph_bst(<4 x float> %x0, <4 x float> %x1, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfcmadd_ph_bst:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vfcmaddcph {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1 {%k1}
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> %x0, <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %x1, i8 %x3)
+  ret <4 x float> %res
+}
+
+; Check conjugate complex FMA is not commutable.
+define <4 x float> @test_int_x86_avx512fp8_mask_cfcmadd_ph_bst2(<4 x float> %x0, <4 x float> %x1, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfcmadd_ph_bst2:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; CHECK-NEXT:    vfcmaddcph %xmm0, %xmm2, %xmm1 {%k1}
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %x0, <4 x float> %x1, i8 %x3)
+  ret <4 x float> %res
+}
+
 define <4 x float> @test_int_x86_avx512fp8_mask_cfcmadd_ph_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3){
 ; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfcmadd_ph_128:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcph %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    vfcmaddcph %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
   ret <4 x float> %res
@@ -139,7 +178,8 @@ define <4 x float> @test_int_x86_avx512fp8_maskz_cfcmadd_ph_128(<4 x float> %x0,
 ; CHECK-LABEL: test_int_x86_avx512fp8_maskz_cfcmadd_ph_128:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcph %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfcmaddcph %xmm1, %xmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.cph.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
   ret <4 x float> %res
@@ -148,8 +188,7 @@ define <4 x float> @test_int_x86_avx512fp8_maskz_cfcmadd_ph_128(<4 x float> %x0,
 define <4 x float> @test_int_x86_avx512fp8_cfcmadd_ph_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2){
 ; CHECK-LABEL: test_int_x86_avx512fp8_cfcmadd_ph_128:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfcmaddcph %xmm0, %xmm1, %xmm2
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
+; CHECK-NEXT:    vfcmaddcph %xmm1, %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> %x2, <4 x float> %x1, <4 x float> %x0, i8 -1)
   ret <4 x float> %res
@@ -163,7 +202,8 @@ define <8 x float> @test_int_x86_avx512fp16_mask_cfcmadd_ph_256(<8 x float> %x0,
 ; CHECK-LABEL: test_int_x86_avx512fp16_mask_cfcmadd_ph_256:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcph %ymm2, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    vfcmaddcph %ymm1, %ymm0, %ymm2 {%k1}
+; CHECK-NEXT:    vmovaps %ymm2, %ymm0
 ; CHECK-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
   ret <8 x float> %res
@@ -173,7 +213,8 @@ define <8 x float> @test_int_x86_avx512fp16_maskz_cfcmadd_ph_256(<8 x float> %x0
 ; CHECK-LABEL: test_int_x86_avx512fp16_maskz_cfcmadd_ph_256:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcph %ymm2, %ymm1, %ymm0 {%k1} {z}
+; CHECK-NEXT:    vfcmaddcph %ymm1, %ymm0, %ymm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %ymm2, %ymm0
 ; CHECK-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.cph.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
   ret <8 x float> %res
@@ -182,8 +223,7 @@ define <8 x float> @test_int_x86_avx512fp16_maskz_cfcmadd_ph_256(<8 x float> %x0
 define <8 x float> @test_int_x86_avx512fp16_cfcmadd_ph_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2){
 ; CHECK-LABEL: test_int_x86_avx512fp16_cfcmadd_ph_256:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfcmaddcph %ymm0, %ymm1, %ymm2
-; CHECK-NEXT:    vmovaps %ymm2, %ymm0
+; CHECK-NEXT:    vfcmaddcph %ymm1, %ymm2, %ymm0
 ; CHECK-NEXT:    retq
   %res = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.256(<8 x float> %x2, <8 x float> %x1, <8 x float> %x0, i8 -1)
   ret <8 x float> %res
@@ -197,7 +237,8 @@ define <16 x float> @test_int_x86_avx512fp16_mask_cfcmadd_ph_512(<16 x float> %x
 ; CHECK-LABEL: test_int_x86_avx512fp16_mask_cfcmadd_ph_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcph %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT:    vfcmaddcph %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
   ret <16 x float> %res
@@ -207,7 +248,8 @@ define <16 x float> @test_int_x86_avx512fp16_maskz_cfcmadd_ph_512(<16 x float> %
 ; CHECK-LABEL: test_int_x86_avx512fp16_maskz_cfcmadd_ph_512:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcph %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT:    vfcmaddcph %zmm1, %zmm0, %zmm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.cph.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
   ret <16 x float> %res
@@ -216,8 +258,7 @@ define <16 x float> @test_int_x86_avx512fp16_maskz_cfcmadd_ph_512(<16 x float> %
 define <16 x float> @test_int_x86_avx512fp16_cfcmadd_ph_512_rn(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
 ; CHECK-LABEL: test_int_x86_avx512fp16_cfcmadd_ph_512_rn:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfcmaddcph {rz-sae}, %zmm0, %zmm1, %zmm2
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    vfcmaddcph {rz-sae}, %zmm1, %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> %x2, <16 x float> %x1, <16 x float> %x0, i16 -1, i32 11)
   ret <16 x float> %res
@@ -226,8 +267,7 @@ define <16 x float> @test_int_x86_avx512fp16_cfcmadd_ph_512_rn(<16 x float> %x0,
 define <16 x float> @test_int_x86_avx512fp16_cfcmadd_ph_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
 ; CHECK-LABEL: test_int_x86_avx512fp16_cfcmadd_ph_512:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfcmaddcph %zmm0, %zmm1, %zmm2
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    vfcmaddcph %zmm1, %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %res = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> %x2, <16 x float> %x1, <16 x float> %x0, i16 -1, i32 4)
   ret <16 x float> %res

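A quick sketch of the standard complex arithmetic behind the _bst/_bst2 pair above: for complex values a and b,

    \[ a\,b \;=\; b\,a, \qquad \overline{a}\,b \;\ne\; \overline{b}\,a \ \text{in general}. \]

A plain complex multiply can therefore swap its two multiplicands, but the conjugate form cannot, so cfcmadd_ph_bst folds the broadcast constant only because it already lands in the operand slot that accepts a memory/broadcast form, while cfcmadd_ph_bst2 has to materialize the constant with vbroadcastss and use the register form of vfcmaddcph.
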
diff  --git a/llvm/test/CodeGen/X86/avx512cfmul-intrinsics.ll b/llvm/test/CodeGen/X86/avx512cfmul-intrinsics.ll
index 2b98e429ed3f5..e37c649a4786d 100644
--- a/llvm/test/CodeGen/X86/avx512cfmul-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512cfmul-intrinsics.ll
@@ -14,6 +14,17 @@ define <4 x float> @test_int_x86_avx512fp8_mask_cfmul_ph_bst(<4 x float> %x0, <4
   ret <4 x float> %res
 }
 
+define <4 x float> @test_int_x86_avx512fp8_mask_cfmul_ph_bst2(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfmul_ph_bst2:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vfmulcph {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.128(<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %x0, <4 x float> %x2, i8 %x3)
+  ret <4 x float> %res
+}
+
 define <4 x float> @test_int_x86_avx512fp8_mask_cfmul_ph_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3){
 ; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfmul_ph_128:
 ; CHECK:       ## %bb.0:
@@ -122,6 +133,30 @@ define <16 x float> @test_int_x86_avx512fp16_cfmul_ph_512(<16 x float> %x0, <16
 
 declare <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)
 
+define <4 x float> @test_int_x86_avx512fp8_mask_cfcmul_ph_bst(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfcmul_ph_bst:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vfcmulcph {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.128(<4 x float> %x0, <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %x2, i8 %x3)
+  ret <4 x float> %res
+}
+
+; Check conjugate complex FMUL is not commutable.
+define <4 x float> @test_int_x86_avx512fp8_mask_cfcmul_ph_bst2(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfcmul_ph_bst2:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovd %edi, %k1
+; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; CHECK-NEXT:    vfcmulcph %xmm0, %xmm1, %xmm2 {%k1}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.128(<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %x0, <4 x float> %x2, i8 %x3)
+  ret <4 x float> %res
+}
+
 define <4 x float> @test_int_x86_avx512fp8_mask_cfcmul_ph_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3){
 ; CHECK-LABEL: test_int_x86_avx512fp8_mask_cfcmul_ph_128:
 ; CHECK:       ## %bb.0:

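The same sketch covers the multiply-only forms: swapping the sources of a conjugate multiply yields the conjugate of the intended product rather than the product itself,

    \[ \overline{b}\,a \;=\; \overline{\overline{a}\,b}, \]

so vfmulcph is free to commute its multiplicands (cfmul_ph_bst2 above still folds the broadcast constant from memory) while vfcmulcph is not (cfcmul_ph_bst2 falls back to vbroadcastss plus the register form).
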
diff  --git a/llvm/test/CodeGen/X86/avx512cfmulsh-instrinsics.ll b/llvm/test/CodeGen/X86/avx512cfmulsh-instrinsics.ll
index 8b273b6d8006c..43e085f37ff67 100644
--- a/llvm/test/CodeGen/X86/avx512cfmulsh-instrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512cfmulsh-instrinsics.ll
@@ -35,7 +35,8 @@ define <4 x float> @test_nm_nr_int_x86_avx512fp16_mask_cfcmul_sh(<4 x float> %x0
 define <4 x float> @test_nm_nr_int_x86_avx512fp16_cfmadd_sh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2) {
 ; CHECK-LABEL: test_nm_nr_int_x86_avx512fp16_cfmadd_sh:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfmaddcsh %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vfmaddcsh %xmm1, %xmm0, %xmm2
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
   ret <4 x float> %res
@@ -44,7 +45,8 @@ define <4 x float> @test_nm_nr_int_x86_avx512fp16_cfmadd_sh(<4 x float> %x0, <4
 define <4 x float> @test_nm_nr_int_x86_avx512fp16_cfcmadd_sh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2) {
 ; CHECK-LABEL: test_nm_nr_int_x86_avx512fp16_cfcmadd_sh:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfcmaddcsh %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vfcmaddcsh %xmm1, %xmm0, %xmm2
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
   ret <4 x float> %res
@@ -75,7 +77,8 @@ define <4 x float> @test_nm_r_int_x86_avx512fp16_mask_cfcmul_sh(<4 x float> %x0,
 define <4 x float> @test_nm_r_int_x86_avx512fp16_mask_cfmadd_sh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2) {
 ; CHECK-LABEL: test_nm_r_int_x86_avx512fp16_mask_cfmadd_sh:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfmaddcsh {rd-sae}, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vfmaddcsh {rd-sae}, %xmm1, %xmm0, %xmm2
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 9)
   ret <4 x float> %res
@@ -84,7 +87,8 @@ define <4 x float> @test_nm_r_int_x86_avx512fp16_mask_cfmadd_sh(<4 x float> %x0,
 define <4 x float> @test_nm_r_int_x86_avx512fp16_mask_cfcmadd_sh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2) {
 ; CHECK-LABEL: test_nm_r_int_x86_avx512fp16_mask_cfcmadd_sh:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfcmaddcsh {rd-sae}, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vfcmaddcsh {rd-sae}, %xmm1, %xmm0, %xmm2
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 9)
   ret <4 x float> %res
@@ -118,7 +122,8 @@ define <4 x float> @test_m_nr_int_x86_avx512fp16_mask_cfmadd_sh(<4 x float> %x0,
 ; CHECK-LABEL: test_m_nr_int_x86_avx512fp16_mask_cfmadd_sh:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcsh %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    vfmaddcsh %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
   ret <4 x float> %res
@@ -128,7 +133,8 @@ define <4 x float> @test_m_nr_int_x86_avx512fp16_mask_cfcmadd_sh(<4 x float> %x0
 ; CHECK-LABEL: test_m_nr_int_x86_avx512fp16_mask_cfcmadd_sh:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcsh %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    vfcmaddcsh %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
   ret <4 x float> %res
@@ -162,7 +168,8 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cfmadd_sh(<4 x float> %x0, <4 x
 ; CHECK-LABEL: test_int_x86_avx512fp16_mask_cfmadd_sh:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcsh {rd-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    vfmaddcsh {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 9)
   ret <4 x float> %res
@@ -172,7 +179,8 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cfcmadd_sh(<4 x float> %x0, <4
 ; CHECK-LABEL: test_int_x86_avx512fp16_mask_cfcmadd_sh:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcsh {rd-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    vfcmaddcsh {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 9)
   ret <4 x float> %res
@@ -206,7 +214,8 @@ define <4 x float> @test_m_nr_int_x86_avx512fp16_maskz_cfmadd_sh(<4 x float> %x0
 ; CHECK-LABEL: test_m_nr_int_x86_avx512fp16_maskz_cfmadd_sh:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcsh %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfmaddcsh %xmm1, %xmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.maskz.vfmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
   ret <4 x float> %res
@@ -216,7 +225,8 @@ define <4 x float> @test_m_nr_int_x86_avx512fp16_maskz_cfcmadd_sh(<4 x float> %x
 ; CHECK-LABEL: test_m_nr_int_x86_avx512fp16_maskz_cfcmadd_sh:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcsh %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfcmaddcsh %xmm1, %xmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
   ret <4 x float> %res
@@ -250,7 +260,8 @@ define <4 x float> @test_int_x86_avx512fp16_maskz_cfmadd_sh(<4 x float> %x0, <4
 ; CHECK-LABEL: test_int_x86_avx512fp16_maskz_cfmadd_sh:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfmaddcsh {rd-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfmaddcsh {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.maskz.vfmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 9)
   ret <4 x float> %res
@@ -260,7 +271,8 @@ define <4 x float> @test_int_x86_avx512fp16_maskz_cfcmadd_sh(<4 x float> %x0, <4
 ; CHECK-LABEL: test_int_x86_avx512fp16_maskz_cfcmadd_sh:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
-; CHECK-NEXT:    vfcmaddcsh {rd-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfcmaddcsh {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1} {z}
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.csh(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 9)
   ret <4 x float> %res

diff  --git a/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
index 9afe46e9e7c63..83027ba535a1c 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
@@ -4,7 +4,8 @@
 define dso_local <32 x half> @test1(<32 x half> %acc.coerce, <32 x half> %lhs.coerce, <32 x half> %rhs.coerce) {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfmaddcph %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vfmaddcph %zmm1, %zmm0, %zmm2
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <32 x half> %lhs.coerce to <16 x float>
@@ -18,7 +19,8 @@ entry:
 define dso_local <16 x half> @test2(<16 x half> %acc.coerce, <16 x half> %lhs.coerce, <16 x half> %rhs.coerce) {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfmaddcph %ymm2, %ymm1, %ymm0
+; CHECK-NEXT:    vfmaddcph %ymm1, %ymm0, %ymm2
+; CHECK-NEXT:    vmovaps %ymm2, %ymm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <16 x half> %lhs.coerce to <8 x float>
@@ -32,7 +34,8 @@ entry:
 define dso_local <8 x half> @test3(<8 x half> %acc.coerce, <8 x half> %lhs.coerce, <8 x half> %rhs.coerce) {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfmaddcph %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vfmaddcph %xmm1, %xmm0, %xmm2
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <8 x half> %lhs.coerce to <4 x float>
@@ -47,7 +50,8 @@ entry:
 define dso_local <8 x half> @test4(<8 x half> %acc.coerce, <8 x half> %lhs.coerce, <8 x half> %rhs.coerce) {
 ; CHECK-LABEL: test4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfmaddcph %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vfmaddcph %xmm1, %xmm0, %xmm2
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <8 x half> %lhs.coerce to <4 x float>

diff  --git a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
index 1d413ad0c1065..e29174fb1cffe 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
@@ -4,7 +4,8 @@
 define dso_local <32 x half> @test1(<32 x half> %acc.coerce, <32 x half> %lhs.coerce.conj, <32 x half> %rhs.coerce) local_unnamed_addr #0 {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfcmaddcph %zmm1, %zmm2, %zmm0
+; CHECK-NEXT:    vfcmaddcph %zmm2, %zmm0, %zmm1
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <32 x half> %lhs.coerce.conj to <16 x i32>
@@ -20,7 +21,8 @@ entry:
 define dso_local <32 x half> @test2(<32 x half> %acc.coerce, <32 x half> %lhs.coerce.conj, <32 x half> %rhs.coerce) local_unnamed_addr #0 {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfcmaddcph %zmm1, %zmm2, %zmm0
+; CHECK-NEXT:    vfcmaddcph %zmm2, %zmm0, %zmm1
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <32 x half> %lhs.coerce.conj to <16 x i32>
@@ -36,7 +38,8 @@ entry:
 define dso_local <16 x half> @test3(<16 x half> %acc.coerce, <16 x half> %lhs.coerce.conj, <16 x half> %rhs.coerce) local_unnamed_addr #0 {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfcmaddcph %ymm1, %ymm2, %ymm0
+; CHECK-NEXT:    vfcmaddcph %ymm2, %ymm0, %ymm1
+; CHECK-NEXT:    vmovaps %ymm1, %ymm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <16 x half> %lhs.coerce.conj to <8 x i32>
@@ -52,7 +55,8 @@ entry:
 define dso_local <8 x half> @test4(<8 x half> %acc.coerce, <8 x half> %lhs.coerce.conj, <8 x half> %rhs.coerce) local_unnamed_addr #0 {
 ; CHECK-LABEL: test4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfcmaddcph %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    vfcmaddcph %xmm2, %xmm0, %xmm1
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <8 x half> %lhs.coerce.conj to <4 x i32>
@@ -68,7 +72,8 @@ entry:
 define dso_local <8 x half> @test5(<8 x half> %acc.coerce, <8 x half> %lhs.coerce.conj, <8 x half> %rhs.coerce) local_unnamed_addr #0 {
 ; CHECK-LABEL: test5:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfcmaddcph %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    vfcmaddcph %xmm2, %xmm0, %xmm1
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <8 x half> %lhs.coerce.conj to <4 x i32>
@@ -85,7 +90,8 @@ define dso_local <8 x half> @test6(<8 x half> %acc.coerce, <8 x half> %lhs.coerc
 ; CHECK-LABEL: test6:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
-; CHECK-NEXT:    vfmaddcph %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vfmaddcph %xmm1, %xmm0, %xmm2
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <8 x half> %lhs.coerce.conj to <4 x i32>
@@ -101,7 +107,8 @@ entry:
 define dso_local <8 x half> @test7(<8 x half> %acc.coerce, <8 x half> %lhs.coerce.conj, <8 x half> %rhs.coerce) local_unnamed_addr #0 {
 ; CHECK-LABEL: test7:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfcmaddcph %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    vfcmaddcph %xmm2, %xmm0, %xmm1
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <8 x half> %lhs.coerce.conj to <4 x i32>
@@ -117,7 +124,8 @@ entry:
 define dso_local <8 x half> @test8(<8 x half> %acc.coerce, <4 x float> %lhs.coerce.conj, <8 x half> %rhs.coerce) local_unnamed_addr #0 {
 ; CHECK-LABEL: test8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfcmaddcph %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    vfcmaddcph %xmm2, %xmm0, %xmm1
+; CHECK-NEXT:    vmovaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
 entry:
   %0 = bitcast <4 x float> %lhs.coerce.conj to <4 x i32>
@@ -133,7 +141,8 @@ entry:
 define dso_local <32 x half> @test9(<32 x half> %acc.coerce, <8 x i64> %lhs.coerce.conj, <32 x half> %rhs.coerce) local_unnamed_addr #0 {
 ; CHECK-LABEL: test9:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vfcmaddcph %zmm1, %zmm2, %zmm0
+; CHECK-NEXT:    vfcmaddcph %zmm2, %zmm0, %zmm1
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
   %xor1.i = xor <8 x i64> %lhs.coerce.conj, <i64 -9223372034707292160, i64 -9223372034707292160, i64 -9223372034707292160, i64 -9223372034707292160, i64 -9223372034707292160, i64 -9223372034707292160, i64 -9223372034707292160, i64 -9223372034707292160>

diff  --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll
index 495058c08b895..0be20ab6d5f41 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll
@@ -996,6 +996,14 @@ define <16 x float> @stack_fold_fmulcph(<16 x float> %a0, <16 x float> %a1) {
   %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> undef, i16 -1, i32 4)
   ret <16 x float> %2
 }
+
+define <16 x float> @stack_fold_fmulcph_commute(<16 x float> %a0, <16 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_fmulcph_commute:
+  ;CHECK:       vfmulcph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float> %a1, <16 x float> %a0, <16 x float> undef, i16 -1, i32 4)
+  ret <16 x float> %2
+}
 declare <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
 
 define <16 x float> @stack_fold_fmulcph_mask(<16 x float> %a0, <16 x float> %a1, <16 x float>* %passthru, i16 %mask) {
@@ -1023,6 +1031,15 @@ define <16 x float> @stack_fold_fcmulcph(<16 x float> %a0, <16 x float> %a1) {
   %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> undef, i16 -1, i32 4)
   ret <16 x float> %2
 }
+
+define <16 x float> @stack_fold_fcmulcph_commute(<16 x float> %a0, <16 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_fcmulcph_commute:
+  ;CHECK:       vmovups {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Reload
+  ;CHECK:       vfcmulcph {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}}
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.512(<16 x float> %a1, <16 x float> %a0, <16 x float> undef, i16 -1, i32 4)
+  ret <16 x float> %2
+}
 declare <16 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
 
 define <16 x float> @stack_fold_fcmulcph_mask(<16 x float> %a0, <16 x float> %a1, <16 x float>* %passthru, i16 %mask) {
@@ -1047,7 +1064,15 @@ define <16 x float> @stack_fold_fmaddcph(<16 x float> %a0, <16 x float> %a1, <16
   ;CHECK-LABEL: stack_fold_fmaddcph:
   ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4)
+  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> %a1, <16 x float> %a2, <16 x float> %a0, i16 -1, i32 4)
+  ret <16 x float> %2
+}
+
+define <16 x float> @stack_fold_fmaddcph_commute(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+  ;CHECK-LABEL: stack_fold_fmaddcph_commute:
+  ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> %a2, <16 x float> %a1, <16 x float> %a0, i16 -1, i32 4)
   ret <16 x float> %2
 }
 declare <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -1057,7 +1082,7 @@ define <16 x float> @stack_fold_fmaddcph_mask(<16 x float>* %p, <16 x float> %a1
   ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %a0 = load <16 x float>, <16 x float>* %p
-  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4)
+  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> %a1, <16 x float> %a2, <16 x float> %a0, i16 %mask, i32 4)
   ret <16 x float> %2
 }
 
@@ -1066,7 +1091,7 @@ define <16 x float> @stack_fold_fmaddcph_maskz(<16 x float> %a0, <16 x float> %a
   ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = load i16, i16* %mask
-  %3 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> zeroinitializer, <16 x float> %a1, <16 x float> %a2, i16 %2, i32 4)
+  %3 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.512(<16 x float> %a1, <16 x float> %a2, <16 x float> zeroinitializer, i16 %2, i32 4)
   ret <16 x float> %3
 }
 declare <16 x float> @llvm.x86.avx512fp16.maskz.vfmadd.cph.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -1075,7 +1100,16 @@ define <16 x float> @stack_fold_fcmaddcph(<16 x float> %a0, <16 x float> %a1, <1
   ;CHECK-LABEL: stack_fold_fcmaddcph:
   ;CHECK:       vfcmaddcph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4)
+  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> %a1, <16 x float> %a2, <16 x float> %a0, i16 -1, i32 4)
+  ret <16 x float> %2
+}
+
+define <16 x float> @stack_fold_fcmaddcph_commute(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+  ;CHECK-LABEL: stack_fold_fcmaddcph_commute:
+  ;CHECK:       vmovups {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Reload
+  ;CHECK:       vfcmaddcph {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}}
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> %a2, <16 x float> %a1, <16 x float> %a0, i16 -1, i32 4)
   ret <16 x float> %2
 }
 declare <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -1085,7 +1119,7 @@ define <16 x float> @stack_fold_fcmaddcph_mask(<16 x float>* %p, <16 x float> %a
   ;CHECK:       vfcmaddcph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %a0 = load <16 x float>, <16 x float>* %p
-  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4)
+  %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> %a1, <16 x float> %a2, <16 x float> %a0, i16 %mask, i32 4)
   ret <16 x float> %2
 }
 
@@ -1094,7 +1128,7 @@ define <16 x float> @stack_fold_fcmaddcph_maskz(<16 x float> %a0, <16 x float> %
   ;CHECK:       vfcmaddcph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = load i16, i16* %mask
-  %3 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> zeroinitializer, <16 x float> %a1, <16 x float> %a2, i16 %2, i32 4)
+  %3 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.512(<16 x float> %a1, <16 x float> %a2, <16 x float> zeroinitializer, i16 %2, i32 4)
   ret <16 x float> %3
 }
 declare <16 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.cph.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -1106,6 +1140,14 @@ define <4 x float> @stack_fold_fmulcsh(<4 x float> %a0, <4 x float> %a1) {
   %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4)
   ret <4 x float> %2
 }
+
+define <4 x float> @stack_fold_fmulcsh_commute(<4 x float> %a0, <4 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_fmulcsh_commute:
+  ;CHECK:       vfmulcsh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.csh(<4 x float> %a1, <4 x float> %a0, <4 x float> undef, i8 -1, i32 4)
+  ret <4 x float> %2
+}
 declare <4 x float> @llvm.x86.avx512fp16.mask.vfmul.csh(<4 x float>, <4 x float>, <4 x float>, i8, i32)
 
 define <4 x float> @stack_fold_fmulcsh_mask(<4 x float> %a0, <4 x float> %a1, <4 x float>* %passthru, i8 %mask) {
@@ -1133,6 +1175,15 @@ define <4 x float> @stack_fold_fcmulcsh(<4 x float> %a0, <4 x float> %a1) {
   %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4)
   ret <4 x float> %2
 }
+
+define <4 x float> @stack_fold_fcmulcsh_commute(<4 x float> %a0, <4 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_fcmulcsh_commute:
+  ;CHECK:       vmovaps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Reload
+  ;CHECK:       vfcmulcsh {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}}
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.csh(<4 x float> %a1, <4 x float> %a0, <4 x float> undef, i8 -1, i32 4)
+  ret <4 x float> %2
+}
 declare <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.csh(<4 x float>, <4 x float>, <4 x float>, i8, i32)
 
 define <4 x float> @stack_fold_fcmulcsh_mask(<4 x float> %a0, <4 x float> %a1, <4 x float>* %passthru, i8 %mask) {
@@ -1157,7 +1208,15 @@ define <4 x float> @stack_fold_fmaddcsh(<4 x float> %a0, <4 x float> %a1, <4 x f
   ;CHECK-LABEL: stack_fold_fmaddcsh:
   ;CHECK:       vfmaddcsh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1, i32 4)
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %a1, <4 x float> %a2, <4 x float> %a0, i8 -1, i32 4)
+  ret <4 x float> %2
+}
+
+define <4 x float> @stack_fold_fmaddcsh_commute(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+  ;CHECK-LABEL: stack_fold_fmaddcsh_commute:
+  ;CHECK:       vfmaddcsh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %a2, <4 x float> %a1, <4 x float> %a0, i8 -1, i32 4)
   ret <4 x float> %2
 }
 declare <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float>, <4 x float>, <4 x float>, i8, i32)
@@ -1167,7 +1226,7 @@ define <4 x float> @stack_fold_fmaddcsh_mask(<4 x float>* %p, <4 x float> %a1, <
   ;CHECK:       vfmaddcsh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %a0 = load <4 x float>, <4 x float>* %p
-  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %a1, <4 x float> %a2, <4 x float> %a0, i8 %mask, i32 4)
   ret <4 x float> %2
 }
 
@@ -1176,7 +1235,7 @@ define <4 x float> @stack_fold_fmaddcsh_maskz(<4 x float> %a0, <4 x float> %a1,
   ;CHECK:       vfmaddcsh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = load i8, i8* %mask
-  %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> zeroinitializer, <4 x float> %a1, <4 x float> %a2, i8 %2, i32 4)
+  %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.csh(<4 x float> %a1, <4 x float> %a2, <4 x float> zeroinitializer, i8 %2, i32 4)
   ret <4 x float> %3
 }
 declare <4 x float> @llvm.x86.avx512fp16.maskz.vfmadd.csh(<4 x float>, <4 x float>, <4 x float>, i8, i32)
@@ -1185,7 +1244,16 @@ define <4 x float> @stack_fold_fcmaddcsh(<4 x float> %a0, <4 x float> %a1, <4 x
   ;CHECK-LABEL: stack_fold_fcmaddcsh:
   ;CHECK:       vfcmaddcsh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1, i32 4)
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %a1, <4 x float> %a2, <4 x float> %a0, i8 -1, i32 4)
+  ret <4 x float> %2
+}
+
+define <4 x float> @stack_fold_fcmaddcsh_commute(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+  ;CHECK-LABEL: stack_fold_fcmaddcsh_commute:
+  ;CHECK:       vmovaps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Reload
+  ;CHECK:       vfcmaddcsh {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}}
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %a2, <4 x float> %a1, <4 x float> %a0, i8 -1, i32 4)
   ret <4 x float> %2
 }
 declare <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float>, <4 x float>, <4 x float>, i8, i32)
@@ -1195,7 +1263,7 @@ define <4 x float> @stack_fold_fcmaddcsh_mask(<4 x float>* %p, <4 x float> %a1,
   ;CHECK:       vfcmaddcsh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %a0 = load <4 x float>, <4 x float>* %p
-  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %a1, <4 x float> %a2, <4 x float> %a0, i8 %mask, i32 4)
   ret <4 x float> %2
 }
 
@@ -1204,7 +1272,7 @@ define <4 x float> @stack_fold_fcmaddcsh_maskz(<4 x float> %a0, <4 x float> %a1,
   ;CHECK:       vfcmaddcsh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = load i8, i8* %mask
-  %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> zeroinitializer, <4 x float> %a1, <4 x float> %a2, i8 %2, i32 4)
+  %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.csh(<4 x float> %a1, <4 x float> %a2, <4 x float> zeroinitializer, i8 %2, i32 4)
   ret <4 x float> %3
 }
 declare <4 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.csh(<4 x float>, <4 x float>, <4 x float>, i8, i32)

diff  --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16vl.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16vl.ll
index d988f04e34d4a..edba084e24e85 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16vl.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16vl.ll
@@ -646,7 +646,7 @@ define <4 x float> @stack_fold_fmaddc(<4 x float> %a0, <4 x float> %a1, <4 x flo
   ;CHECK-LABEL: stack_fold_fmaddc:
   ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1)
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> %a1, <4 x float> %a2, <4 x float> %a0, i8 -1)
   ret <4 x float> %2
 }
 declare <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)
@@ -656,7 +656,7 @@ define <4 x float> @stack_fold_fmaddc_mask(<4 x float>* %p, <4 x float> %a1, <4
   ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %a0 = load <4 x float>, <4 x float>* %p
-  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask)
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> %a1, <4 x float> %a2, <4 x float> %a0, i8 %mask)
   ret <4 x float> %2
 }
 
@@ -665,7 +665,7 @@ define <4 x float> @stack_fold_fmaddc_maskz(<4 x float> %a0, <4 x float> %a1, <4
   ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = load i8, i8* %mask
-  %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> zeroinitializer, <4 x float> %a1, <4 x float> %a2, i8 %2)
+  %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.128(<4 x float> %a1, <4 x float> %a2, <4 x float> zeroinitializer, i8 %2)
   ret <4 x float> %3
 }
 declare <4 x float> @llvm.x86.avx512fp16.maskz.vfmadd.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)
@@ -674,7 +674,7 @@ define <4 x float> @stack_fold_fcmaddc(<4 x float> %a0, <4 x float> %a1, <4 x fl
   ;CHECK-LABEL: stack_fold_fcmaddc:
   ;CHECK:       vfcmaddcph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1)
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> %a1, <4 x float> %a2, <4 x float> %a0, i8 -1)
   ret <4 x float> %2
 }
 declare <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)
@@ -684,7 +684,7 @@ define <4 x float> @stack_fold_fcmaddc_mask(<4 x float>* %p, <4 x float> %a1, <4
   ;CHECK:       vfcmaddcph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %a0 = load <4 x float>, <4 x float>* %p
-  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask)
+  %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> %a1, <4 x float> %a2, <4 x float> %a0, i8 %mask)
   ret <4 x float> %2
 }
 
@@ -693,7 +693,7 @@ define <4 x float> @stack_fold_fcmaddc_maskz(<4 x float> %a0, <4 x float> %a1, <
   ;CHECK:       vfcmaddcph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = load i8, i8* %mask
-  %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> zeroinitializer, <4 x float> %a1, <4 x float> %a2, i8 %2)
+  %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.128(<4 x float> %a1, <4 x float> %a2, <4 x float> zeroinitializer, i8 %2)
   ret <4 x float> %3
 }
 declare <4 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.cph.128(<4 x float>, <4 x float>, <4 x float>, i8)
@@ -756,7 +756,7 @@ define <8 x float> @stack_fold_fmaddc_ymm(<8 x float> %a0, <8 x float> %a1, <8 x
   ;CHECK-LABEL: stack_fold_fmaddc_ymm:
   ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 -1)
+  %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.256(<8 x float> %a1, <8 x float> %a2, <8 x float> %a0, i8 -1)
   ret <8 x float> %2
 }
 declare <8 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.256(<8 x float>, <8 x float>, <8 x float>, i8)
@@ -766,7 +766,7 @@ define <8 x float> @stack_fold_fmaddc_mask_ymm(<8 x float>* %p, <8 x float> %a1,
   ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %a0 = load <8 x float>, <8 x float>* %p
-  %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask)
+  %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.256(<8 x float> %a1, <8 x float> %a2, <8 x float> %a0, i8 %mask)
   ret <8 x float> %2
 }
 
@@ -775,7 +775,7 @@ define <8 x float> @stack_fold_fmaddc_maskz_ymm(<8 x float> %a0, <8 x float> %a1
   ;CHECK:       vfmaddcph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = load i8, i8* %mask
-  %3 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.256(<8 x float> zeroinitializer, <8 x float> %a1, <8 x float> %a2, i8 %2)
+  %3 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmadd.cph.256(<8 x float> %a1, <8 x float> %a2, <8 x float> zeroinitializer, i8 %2)
   ret <8 x float> %3
 }
 declare <8 x float> @llvm.x86.avx512fp16.maskz.vfmadd.cph.256(<8 x float>, <8 x float>, <8 x float>, i8)
@@ -784,7 +784,7 @@ define <8 x float> @stack_fold_fcmaddc_ymm(<8 x float> %a0, <8 x float> %a1, <8
   ;CHECK-LABEL: stack_fold_fcmaddc_ymm:
   ;CHECK:       vfcmaddcph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
-  %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 -1)
+  %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.256(<8 x float> %a1, <8 x float> %a2, <8 x float> %a0, i8 -1)
   ret <8 x float> %2
 }
 declare <8 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.256(<8 x float>, <8 x float>, <8 x float>, i8)
@@ -794,7 +794,7 @@ define <8 x float> @stack_fold_fcmaddc_mask_ymm(<8 x float>* %p, <8 x float> %a1
   ;CHECK:       vfcmaddcph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %a0 = load <8 x float>, <8 x float>* %p
-  %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask)
+  %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.256(<8 x float> %a1, <8 x float> %a2, <8 x float> %a0, i8 %mask)
   ret <8 x float> %2
 }
 
@@ -803,7 +803,7 @@ define <8 x float> @stack_fold_fcmaddc_maskz_ymm(<8 x float> %a0, <8 x float> %a
   ;CHECK:       vfcmaddcph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = load i8, i8* %mask
-  %3 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.256(<8 x float> zeroinitializer, <8 x float> %a1, <8 x float> %a2, i8 %2)
+  %3 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmadd.cph.256(<8 x float> %a1, <8 x float> %a2, <8 x float> zeroinitializer, i8 %2)
   ret <8 x float> %3
 }
 declare <8 x float> @llvm.x86.avx512fp16.maskz.vfcmadd.cph.256(<8 x float>, <8 x float>, <8 x float>, i8)


        

