[llvm] r188460 - [Mips][msa] Added the simple builtins (madd_q to xori)

Jack Carter jack.carter at imgtec.com
Thu Aug 15 07:22:08 PDT 2013


Author: jacksprat
Date: Thu Aug 15 09:22:07 2013
New Revision: 188460

URL: http://llvm.org/viewvc/llvm-project?rev=188460&view=rev
Log:
[Mips][msa] Added the simple builtins (madd_q to xori)

Includes:
madd_q, maddr_q, maddv, max_[asu], maxi_[su], min_[asu], mini_[su], mod_[su],
msub_q, msubr_q, msubv, mul_q, mulr_q, mulv, nloc, nlzc, nori, ori, pckev,
pckod, pcnt, sat_[su], shf, sld, sldi, sll, slli, splat, splati, sr[al],
sr[al]i, subs_[su], subsus_u, subsuu_s, subv, subvi, vshf, xori

Patch by Daniel Sanders

Added:
    llvm/trunk/test/CodeGen/Mips/msa/2r.ll
    llvm/trunk/test/CodeGen/Mips/msa/3r-m.ll
    llvm/trunk/test/CodeGen/Mips/msa/3r-p.ll
    llvm/trunk/test/CodeGen/Mips/msa/3r-s.ll
    llvm/trunk/test/CodeGen/Mips/msa/3r-v.ll
    llvm/trunk/test/CodeGen/Mips/msa/3r_4r.ll
    llvm/trunk/test/CodeGen/Mips/msa/3r_splat.ll
    llvm/trunk/test/CodeGen/Mips/msa/3rf_4rf_q.ll
    llvm/trunk/test/CodeGen/Mips/msa/3rf_q.ll
    llvm/trunk/test/CodeGen/Mips/msa/bit.ll
    llvm/trunk/test/CodeGen/Mips/msa/elm_shift_slide.ll
    llvm/trunk/test/CodeGen/Mips/msa/i5-m.ll
    llvm/trunk/test/CodeGen/Mips/msa/i5-s.ll
Modified:
    llvm/trunk/include/llvm/IR/IntrinsicsMips.td
    llvm/trunk/lib/Target/Mips/MipsMSAInstrInfo.td
    llvm/trunk/test/CodeGen/Mips/msa/i8.ll

Modified: llvm/trunk/include/llvm/IR/IntrinsicsMips.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsMips.td?rev=188460&r1=188459&r2=188460&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsMips.td (original)
+++ llvm/trunk/include/llvm/IR/IntrinsicsMips.td Thu Aug 15 09:22:07 2013
@@ -1036,4 +1036,400 @@ def int_mips_ldi_w : GCCBuiltin<"__built
 def int_mips_ldi_d : GCCBuiltin<"__builtin_msa_ldi_d">,
   Intrinsic<[llvm_v2i64_ty], [llvm_i32_ty], []>;
 
+def int_mips_madd_q_h : GCCBuiltin<"__builtin_msa_madd_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_madd_q_w : GCCBuiltin<"__builtin_msa_madd_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>;
+
+def int_mips_maddr_q_h : GCCBuiltin<"__builtin_msa_maddr_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_maddr_q_w : GCCBuiltin<"__builtin_msa_maddr_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>;
+
+def int_mips_maddv_b : GCCBuiltin<"__builtin_msa_maddv_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_maddv_h : GCCBuiltin<"__builtin_msa_maddv_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_maddv_w : GCCBuiltin<"__builtin_msa_maddv_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_maddv_d : GCCBuiltin<"__builtin_msa_maddv_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_max_a_b : GCCBuiltin<"__builtin_msa_max_a_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_max_a_h : GCCBuiltin<"__builtin_msa_max_a_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_max_a_w : GCCBuiltin<"__builtin_msa_max_a_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_max_a_d : GCCBuiltin<"__builtin_msa_max_a_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_max_s_b : GCCBuiltin<"__builtin_msa_max_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_max_s_h : GCCBuiltin<"__builtin_msa_max_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_max_s_w : GCCBuiltin<"__builtin_msa_max_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_max_s_d : GCCBuiltin<"__builtin_msa_max_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_max_u_b : GCCBuiltin<"__builtin_msa_max_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_max_u_h : GCCBuiltin<"__builtin_msa_max_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_max_u_w : GCCBuiltin<"__builtin_msa_max_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_max_u_d : GCCBuiltin<"__builtin_msa_max_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_maxi_s_b : GCCBuiltin<"__builtin_msa_maxi_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_maxi_s_h : GCCBuiltin<"__builtin_msa_maxi_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_maxi_s_w : GCCBuiltin<"__builtin_msa_maxi_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_maxi_s_d : GCCBuiltin<"__builtin_msa_maxi_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_maxi_u_b : GCCBuiltin<"__builtin_msa_maxi_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_maxi_u_h : GCCBuiltin<"__builtin_msa_maxi_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_maxi_u_w : GCCBuiltin<"__builtin_msa_maxi_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_maxi_u_d : GCCBuiltin<"__builtin_msa_maxi_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_min_a_b : GCCBuiltin<"__builtin_msa_min_a_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_min_a_h : GCCBuiltin<"__builtin_msa_min_a_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_min_a_w : GCCBuiltin<"__builtin_msa_min_a_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_min_a_d : GCCBuiltin<"__builtin_msa_min_a_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_min_s_b : GCCBuiltin<"__builtin_msa_min_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_min_s_h : GCCBuiltin<"__builtin_msa_min_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_min_s_w : GCCBuiltin<"__builtin_msa_min_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_min_s_d : GCCBuiltin<"__builtin_msa_min_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_min_u_b : GCCBuiltin<"__builtin_msa_min_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_min_u_h : GCCBuiltin<"__builtin_msa_min_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_min_u_w : GCCBuiltin<"__builtin_msa_min_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_min_u_d : GCCBuiltin<"__builtin_msa_min_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_mini_s_b : GCCBuiltin<"__builtin_msa_mini_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_mini_s_h : GCCBuiltin<"__builtin_msa_mini_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_mini_s_w : GCCBuiltin<"__builtin_msa_mini_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_mini_s_d : GCCBuiltin<"__builtin_msa_mini_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_mini_u_b : GCCBuiltin<"__builtin_msa_mini_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_mini_u_h : GCCBuiltin<"__builtin_msa_mini_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_mini_u_w : GCCBuiltin<"__builtin_msa_mini_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_mini_u_d : GCCBuiltin<"__builtin_msa_mini_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_mod_s_b : GCCBuiltin<"__builtin_msa_mod_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_mod_s_h : GCCBuiltin<"__builtin_msa_mod_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_mod_s_w : GCCBuiltin<"__builtin_msa_mod_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_mod_s_d : GCCBuiltin<"__builtin_msa_mod_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_mod_u_b : GCCBuiltin<"__builtin_msa_mod_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_mod_u_h : GCCBuiltin<"__builtin_msa_mod_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_mod_u_w : GCCBuiltin<"__builtin_msa_mod_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_mod_u_d : GCCBuiltin<"__builtin_msa_mod_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_msub_q_h : GCCBuiltin<"__builtin_msa_msub_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_msub_q_w : GCCBuiltin<"__builtin_msa_msub_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>;
+
+def int_mips_msubr_q_h : GCCBuiltin<"__builtin_msa_msubr_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_msubr_q_w : GCCBuiltin<"__builtin_msa_msubr_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>;
+
+def int_mips_msubv_b : GCCBuiltin<"__builtin_msa_msubv_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_msubv_h : GCCBuiltin<"__builtin_msa_msubv_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_msubv_w : GCCBuiltin<"__builtin_msa_msubv_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_msubv_d : GCCBuiltin<"__builtin_msa_msubv_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_mul_q_h : GCCBuiltin<"__builtin_msa_mul_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_mul_q_w : GCCBuiltin<"__builtin_msa_mul_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+
+def int_mips_mulr_q_h : GCCBuiltin<"__builtin_msa_mulr_q_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_mulr_q_w : GCCBuiltin<"__builtin_msa_mulr_q_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+
+def int_mips_mulv_b : GCCBuiltin<"__builtin_msa_mulv_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_mulv_h : GCCBuiltin<"__builtin_msa_mulv_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_mulv_w : GCCBuiltin<"__builtin_msa_mulv_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_mulv_d : GCCBuiltin<"__builtin_msa_mulv_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_nloc_b : GCCBuiltin<"__builtin_msa_nloc_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], []>;
+def int_mips_nloc_h : GCCBuiltin<"__builtin_msa_nloc_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], []>;
+def int_mips_nloc_w : GCCBuiltin<"__builtin_msa_nloc_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], []>;
+def int_mips_nloc_d : GCCBuiltin<"__builtin_msa_nloc_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], []>;
+
+def int_mips_nlzc_b : GCCBuiltin<"__builtin_msa_nlzc_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], []>;
+def int_mips_nlzc_h : GCCBuiltin<"__builtin_msa_nlzc_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], []>;
+def int_mips_nlzc_w : GCCBuiltin<"__builtin_msa_nlzc_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], []>;
+def int_mips_nlzc_d : GCCBuiltin<"__builtin_msa_nlzc_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], []>;
+
+def int_mips_nori_b : GCCBuiltin<"__builtin_msa_nori_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+
+def int_mips_ori_b : GCCBuiltin<"__builtin_msa_ori_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+
+def int_mips_pckev_b : GCCBuiltin<"__builtin_msa_pckev_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_pckev_h : GCCBuiltin<"__builtin_msa_pckev_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_pckev_w : GCCBuiltin<"__builtin_msa_pckev_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_pckev_d : GCCBuiltin<"__builtin_msa_pckev_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_pckod_b : GCCBuiltin<"__builtin_msa_pckod_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_pckod_h : GCCBuiltin<"__builtin_msa_pckod_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_pckod_w : GCCBuiltin<"__builtin_msa_pckod_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_pckod_d : GCCBuiltin<"__builtin_msa_pckod_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_pcnt_b : GCCBuiltin<"__builtin_msa_pcnt_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], []>;
+def int_mips_pcnt_h : GCCBuiltin<"__builtin_msa_pcnt_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], []>;
+def int_mips_pcnt_w : GCCBuiltin<"__builtin_msa_pcnt_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], []>;
+def int_mips_pcnt_d : GCCBuiltin<"__builtin_msa_pcnt_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], []>;
+
+def int_mips_sat_s_b : GCCBuiltin<"__builtin_msa_sat_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_sat_s_h : GCCBuiltin<"__builtin_msa_sat_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_sat_s_w : GCCBuiltin<"__builtin_msa_sat_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_sat_s_d : GCCBuiltin<"__builtin_msa_sat_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_sat_u_b : GCCBuiltin<"__builtin_msa_sat_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_sat_u_h : GCCBuiltin<"__builtin_msa_sat_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_sat_u_w : GCCBuiltin<"__builtin_msa_sat_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_sat_u_d : GCCBuiltin<"__builtin_msa_sat_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_shf_b : GCCBuiltin<"__builtin_msa_shf_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_shf_h : GCCBuiltin<"__builtin_msa_shf_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_shf_w : GCCBuiltin<"__builtin_msa_shf_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+
+def int_mips_sld_b : GCCBuiltin<"__builtin_msa_sld_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_sld_h : GCCBuiltin<"__builtin_msa_sld_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_sld_w : GCCBuiltin<"__builtin_msa_sld_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_sld_d : GCCBuiltin<"__builtin_msa_sld_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_sldi_b : GCCBuiltin<"__builtin_msa_sldi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_sldi_h : GCCBuiltin<"__builtin_msa_sldi_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_sldi_w : GCCBuiltin<"__builtin_msa_sldi_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_sldi_d : GCCBuiltin<"__builtin_msa_sldi_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_sll_b : GCCBuiltin<"__builtin_msa_sll_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_sll_h : GCCBuiltin<"__builtin_msa_sll_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_sll_w : GCCBuiltin<"__builtin_msa_sll_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_sll_d : GCCBuiltin<"__builtin_msa_sll_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_slli_b : GCCBuiltin<"__builtin_msa_slli_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_slli_h : GCCBuiltin<"__builtin_msa_slli_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_slli_w : GCCBuiltin<"__builtin_msa_slli_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_slli_d : GCCBuiltin<"__builtin_msa_slli_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_splat_b : GCCBuiltin<"__builtin_msa_splat_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_splat_h : GCCBuiltin<"__builtin_msa_splat_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_splat_w : GCCBuiltin<"__builtin_msa_splat_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_splat_d : GCCBuiltin<"__builtin_msa_splat_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_splati_b : GCCBuiltin<"__builtin_msa_splati_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_splati_h : GCCBuiltin<"__builtin_msa_splati_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_splati_w : GCCBuiltin<"__builtin_msa_splati_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_splati_d : GCCBuiltin<"__builtin_msa_splati_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_sra_b : GCCBuiltin<"__builtin_msa_sra_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_sra_h : GCCBuiltin<"__builtin_msa_sra_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_sra_w : GCCBuiltin<"__builtin_msa_sra_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_sra_d : GCCBuiltin<"__builtin_msa_sra_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_srai_b : GCCBuiltin<"__builtin_msa_srai_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_srai_h : GCCBuiltin<"__builtin_msa_srai_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_srai_w : GCCBuiltin<"__builtin_msa_srai_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_srai_d : GCCBuiltin<"__builtin_msa_srai_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_srl_b : GCCBuiltin<"__builtin_msa_srl_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], []>;
+def int_mips_srl_h : GCCBuiltin<"__builtin_msa_srl_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], []>;
+def int_mips_srl_w : GCCBuiltin<"__builtin_msa_srl_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], []>;
+def int_mips_srl_d : GCCBuiltin<"__builtin_msa_srl_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], []>;
+
+def int_mips_srli_b : GCCBuiltin<"__builtin_msa_srli_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
+def int_mips_srli_h : GCCBuiltin<"__builtin_msa_srli_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], []>;
+def int_mips_srli_w : GCCBuiltin<"__builtin_msa_srli_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], []>;
+def int_mips_srli_d : GCCBuiltin<"__builtin_msa_srli_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], []>;
+
+def int_mips_subs_s_b : GCCBuiltin<"__builtin_msa_subs_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [Commutative]>;
+def int_mips_subs_s_h : GCCBuiltin<"__builtin_msa_subs_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [Commutative]>;
+def int_mips_subs_s_w : GCCBuiltin<"__builtin_msa_subs_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [Commutative]>;
+def int_mips_subs_s_d : GCCBuiltin<"__builtin_msa_subs_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [Commutative]>;
+
+def int_mips_subs_u_b : GCCBuiltin<"__builtin_msa_subs_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [Commutative]>;
+def int_mips_subs_u_h : GCCBuiltin<"__builtin_msa_subs_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [Commutative]>;
+def int_mips_subs_u_w : GCCBuiltin<"__builtin_msa_subs_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [Commutative]>;
+def int_mips_subs_u_d : GCCBuiltin<"__builtin_msa_subs_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [Commutative]>;
+
+def int_mips_subsus_u_b : GCCBuiltin<"__builtin_msa_subsus_u_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [Commutative]>;
+def int_mips_subsus_u_h : GCCBuiltin<"__builtin_msa_subsus_u_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [Commutative]>;
+def int_mips_subsus_u_w : GCCBuiltin<"__builtin_msa_subsus_u_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [Commutative]>;
+def int_mips_subsus_u_d : GCCBuiltin<"__builtin_msa_subsus_u_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [Commutative]>;
+
+def int_mips_subsuu_s_b : GCCBuiltin<"__builtin_msa_subsuu_s_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [Commutative]>;
+def int_mips_subsuu_s_h : GCCBuiltin<"__builtin_msa_subsuu_s_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [Commutative]>;
+def int_mips_subsuu_s_w : GCCBuiltin<"__builtin_msa_subsuu_s_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [Commutative]>;
+def int_mips_subsuu_s_d : GCCBuiltin<"__builtin_msa_subsuu_s_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [Commutative]>;
+
+def int_mips_subv_b : GCCBuiltin<"__builtin_msa_subv_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [Commutative]>;
+def int_mips_subv_h : GCCBuiltin<"__builtin_msa_subv_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [Commutative]>;
+def int_mips_subv_w : GCCBuiltin<"__builtin_msa_subv_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [Commutative]>;
+def int_mips_subv_d : GCCBuiltin<"__builtin_msa_subv_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [Commutative]>;
+
+def int_mips_subvi_b : GCCBuiltin<"__builtin_msa_subvi_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [Commutative]>;
+def int_mips_subvi_h : GCCBuiltin<"__builtin_msa_subvi_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [Commutative]>;
+def int_mips_subvi_w : GCCBuiltin<"__builtin_msa_subvi_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [Commutative]>;
+def int_mips_subvi_d : GCCBuiltin<"__builtin_msa_subvi_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [Commutative]>;
+
+def int_mips_vshf_b : GCCBuiltin<"__builtin_msa_vshf_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [Commutative]>;
+def int_mips_vshf_h : GCCBuiltin<"__builtin_msa_vshf_h">,
+  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [Commutative]>;
+def int_mips_vshf_w : GCCBuiltin<"__builtin_msa_vshf_w">,
+  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [Commutative]>;
+def int_mips_vshf_d : GCCBuiltin<"__builtin_msa_vshf_d">,
+  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [Commutative]>;
+
+def int_mips_xori_b : GCCBuiltin<"__builtin_msa_xori_b">,
+  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], []>;
 }

Modified: llvm/trunk/lib/Target/Mips/MipsMSAInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsMSAInstrInfo.td?rev=188460&r1=188459&r2=188460&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsMSAInstrInfo.td (original)
+++ llvm/trunk/lib/Target/Mips/MipsMSAInstrInfo.td Thu Aug 15 09:22:07 2013
@@ -405,11 +405,234 @@ class LDI_H_ENC   : MSA_I10_FMT<0b010, 0
 class LDI_W_ENC   : MSA_I10_FMT<0b010, 0b10, 0b001100>;
 class LDI_D_ENC   : MSA_I10_FMT<0b010, 0b11, 0b001100>;
 
+class MADD_Q_H_ENC : MSA_3RF_FMT<0b0010, 0b0, 0b011100>;
+class MADD_Q_W_ENC : MSA_3RF_FMT<0b0010, 0b1, 0b011100>;
+
+class MADDR_Q_H_ENC : MSA_3RF_FMT<0b0011, 0b0, 0b011100>;
+class MADDR_Q_W_ENC : MSA_3RF_FMT<0b0011, 0b1, 0b011100>;
+
+class MADDV_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010010>;
+class MADDV_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010010>;
+class MADDV_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010010>;
+class MADDV_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010010>;
+
+class MAX_A_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b001110>;
+class MAX_A_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b001110>;
+class MAX_A_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b001110>;
+class MAX_A_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b001110>;
+
+class MAX_S_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b001110>;
+class MAX_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b001110>;
+class MAX_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b001110>;
+class MAX_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b001110>;
+
+class MAX_U_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b001110>;
+class MAX_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b001110>;
+class MAX_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b001110>;
+class MAX_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b001110>;
+
+class MAXI_S_B_ENC : MSA_I5_FMT<0b010, 0b00, 0b000110>;
+class MAXI_S_H_ENC : MSA_I5_FMT<0b010, 0b01, 0b000110>;
+class MAXI_S_W_ENC : MSA_I5_FMT<0b010, 0b10, 0b000110>;
+class MAXI_S_D_ENC : MSA_I5_FMT<0b010, 0b11, 0b000110>;
+
+class MAXI_U_B_ENC : MSA_I5_FMT<0b011, 0b00, 0b000110>;
+class MAXI_U_H_ENC : MSA_I5_FMT<0b011, 0b01, 0b000110>;
+class MAXI_U_W_ENC : MSA_I5_FMT<0b011, 0b10, 0b000110>;
+class MAXI_U_D_ENC : MSA_I5_FMT<0b011, 0b11, 0b000110>;
+
+class MIN_A_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b001110>;
+class MIN_A_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b001110>;
+class MIN_A_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b001110>;
+class MIN_A_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b001110>;
+
+class MIN_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b001110>;
+class MIN_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b001110>;
+class MIN_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b001110>;
+class MIN_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b001110>;
+
+class MIN_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b001110>;
+class MIN_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b001110>;
+class MIN_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b001110>;
+class MIN_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b001110>;
+
+class MINI_S_B_ENC : MSA_I5_FMT<0b100, 0b00, 0b000110>;
+class MINI_S_H_ENC : MSA_I5_FMT<0b100, 0b01, 0b000110>;
+class MINI_S_W_ENC : MSA_I5_FMT<0b100, 0b10, 0b000110>;
+class MINI_S_D_ENC : MSA_I5_FMT<0b100, 0b11, 0b000110>;
+
+class MINI_U_B_ENC : MSA_I5_FMT<0b101, 0b00, 0b000110>;
+class MINI_U_H_ENC : MSA_I5_FMT<0b101, 0b01, 0b000110>;
+class MINI_U_W_ENC : MSA_I5_FMT<0b101, 0b10, 0b000110>;
+class MINI_U_D_ENC : MSA_I5_FMT<0b101, 0b11, 0b000110>;
+
+class MOD_S_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b010010>;
+class MOD_S_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b010010>;
+class MOD_S_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b010010>;
+class MOD_S_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b010010>;
+
+class MOD_U_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b010010>;
+class MOD_U_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b010010>;
+class MOD_U_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b010010>;
+class MOD_U_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b010010>;
+
+class MSUB_Q_H_ENC : MSA_3RF_FMT<0b0100, 0b0, 0b011100>;
+class MSUB_Q_W_ENC : MSA_3RF_FMT<0b0100, 0b1, 0b011100>;
+
+class MSUBR_Q_H_ENC : MSA_3RF_FMT<0b0101, 0b0, 0b011100>;
+class MSUBR_Q_W_ENC : MSA_3RF_FMT<0b0101, 0b1, 0b011100>;
+
+class MSUBV_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010010>;
+class MSUBV_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010010>;
+class MSUBV_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010010>;
+class MSUBV_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010010>;
+
+class MUL_Q_H_ENC : MSA_3RF_FMT<0b0000, 0b0, 0b011100>;
+class MUL_Q_W_ENC : MSA_3RF_FMT<0b0000, 0b1, 0b011100>;
+
+class MULR_Q_H_ENC : MSA_3RF_FMT<0b0001, 0b0, 0b011100>;
+class MULR_Q_W_ENC : MSA_3RF_FMT<0b0001, 0b1, 0b011100>;
+
+class MULV_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010010>;
+class MULV_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010010>;
+class MULV_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010010>;
+class MULV_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010010>;
+
+class NLOC_B_ENC : MSA_2R_FMT<0b11000010, 0b00, 0b011110>;
+class NLOC_H_ENC : MSA_2R_FMT<0b11000010, 0b01, 0b011110>;
+class NLOC_W_ENC : MSA_2R_FMT<0b11000010, 0b10, 0b011110>;
+class NLOC_D_ENC : MSA_2R_FMT<0b11000010, 0b11, 0b011110>;
+
+class NLZC_B_ENC : MSA_2R_FMT<0b11000011, 0b00, 0b011110>;
+class NLZC_H_ENC : MSA_2R_FMT<0b11000011, 0b01, 0b011110>;
+class NLZC_W_ENC : MSA_2R_FMT<0b11000011, 0b10, 0b011110>;
+class NLZC_D_ENC : MSA_2R_FMT<0b11000011, 0b11, 0b011110>;
+
+class NORI_B_ENC : MSA_I8_FMT<0b10, 0b000000>;
+
+class ORI_B_ENC  : MSA_I8_FMT<0b01, 0b000000>;
+
+class PCKEV_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010100>;
+class PCKEV_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010100>;
+class PCKEV_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010100>;
+class PCKEV_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010100>;
+
+class PCKOD_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b010100>;
+class PCKOD_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010100>;
+class PCKOD_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010100>;
+class PCKOD_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010100>;
+
+class PCNT_B_ENC : MSA_2R_FMT<0b11000001, 0b00, 0b011110>;
+class PCNT_H_ENC : MSA_2R_FMT<0b11000001, 0b01, 0b011110>;
+class PCNT_W_ENC : MSA_2R_FMT<0b11000001, 0b10, 0b011110>;
+class PCNT_D_ENC : MSA_2R_FMT<0b11000001, 0b11, 0b011110>;
+
+class SAT_S_B_ENC : MSA_BIT_B_FMT<0b000, 0b001010>;
+class SAT_S_H_ENC : MSA_BIT_H_FMT<0b000, 0b001010>;
+class SAT_S_W_ENC : MSA_BIT_W_FMT<0b000, 0b001010>;
+class SAT_S_D_ENC : MSA_BIT_D_FMT<0b000, 0b001010>;
+
+class SAT_U_B_ENC : MSA_BIT_B_FMT<0b001, 0b001010>;
+class SAT_U_H_ENC : MSA_BIT_H_FMT<0b001, 0b001010>;
+class SAT_U_W_ENC : MSA_BIT_W_FMT<0b001, 0b001010>;
+class SAT_U_D_ENC : MSA_BIT_D_FMT<0b001, 0b001010>;
+
+class SHF_B_ENC  : MSA_I8_FMT<0b00, 0b000010>;
+class SHF_H_ENC  : MSA_I8_FMT<0b01, 0b000010>;
+class SHF_W_ENC  : MSA_I8_FMT<0b10, 0b000010>;
+
+class SLD_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010100>;
+class SLD_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010100>;
+class SLD_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010100>;
+class SLD_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010100>;
+
+class SLDI_B_ENC : MSA_ELM_B_FMT<0b0000, 0b011001>;
+class SLDI_H_ENC : MSA_ELM_H_FMT<0b0000, 0b011001>;
+class SLDI_W_ENC : MSA_ELM_W_FMT<0b0000, 0b011001>;
+class SLDI_D_ENC : MSA_ELM_D_FMT<0b0000, 0b011001>;
+
+class SLL_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b001101>;
+class SLL_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b001101>;
+class SLL_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b001101>;
+class SLL_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b001101>;
+
+class SLLI_B_ENC : MSA_BIT_B_FMT<0b000, 0b001001>;
+class SLLI_H_ENC : MSA_BIT_H_FMT<0b000, 0b001001>;
+class SLLI_W_ENC : MSA_BIT_W_FMT<0b000, 0b001001>;
+class SLLI_D_ENC : MSA_BIT_D_FMT<0b000, 0b001001>;
+
+class SPLAT_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010100>;
+class SPLAT_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010100>;
+class SPLAT_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010100>;
+class SPLAT_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010100>;
+
+class SPLATI_B_ENC : MSA_ELM_B_FMT<0b0001, 0b011001>;
+class SPLATI_H_ENC : MSA_ELM_H_FMT<0b0001, 0b011001>;
+class SPLATI_W_ENC : MSA_ELM_W_FMT<0b0001, 0b011001>;
+class SPLATI_D_ENC : MSA_ELM_D_FMT<0b0001, 0b011001>;
+
+class SRA_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b001101>;
+class SRA_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b001101>;
+class SRA_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b001101>;
+class SRA_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b001101>;
+
+class SRAI_B_ENC : MSA_BIT_B_FMT<0b001, 0b001001>;
+class SRAI_H_ENC : MSA_BIT_H_FMT<0b001, 0b001001>;
+class SRAI_W_ENC : MSA_BIT_W_FMT<0b001, 0b001001>;
+class SRAI_D_ENC : MSA_BIT_D_FMT<0b001, 0b001001>;
+
+class SRL_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b001101>;
+class SRL_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b001101>;
+class SRL_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b001101>;
+class SRL_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b001101>;
+
+class SRLI_B_ENC : MSA_BIT_B_FMT<0b010, 0b001001>;
+class SRLI_H_ENC : MSA_BIT_H_FMT<0b010, 0b001001>;
+class SRLI_W_ENC : MSA_BIT_W_FMT<0b010, 0b001001>;
+class SRLI_D_ENC : MSA_BIT_D_FMT<0b010, 0b001001>;
+
 class ST_B_ENC   : MSA_I5_FMT<0b111, 0b00, 0b000111>;
 class ST_H_ENC   : MSA_I5_FMT<0b111, 0b01, 0b000111>;
 class ST_W_ENC   : MSA_I5_FMT<0b111, 0b10, 0b000111>;
 class ST_D_ENC   : MSA_I5_FMT<0b111, 0b11, 0b000111>;
 
+class SUBS_S_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010001>;
+class SUBS_S_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010001>;
+class SUBS_S_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010001>;
+class SUBS_S_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010001>;
+
+class SUBS_U_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010001>;
+class SUBS_U_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010001>;
+class SUBS_U_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010001>;
+class SUBS_U_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010001>;
+
+class SUBSUS_U_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b010001>;
+class SUBSUS_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010001>;
+class SUBSUS_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010001>;
+class SUBSUS_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010001>;
+
+class SUBSUU_S_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010001>;
+class SUBSUU_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010001>;
+class SUBSUU_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010001>;
+class SUBSUU_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010001>;
+
+class SUBV_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b001110>;
+class SUBV_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b001110>;
+class SUBV_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b001110>;
+class SUBV_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b001110>;
+
+class SUBVI_B_ENC : MSA_I5_FMT<0b001, 0b00, 0b000110>;
+class SUBVI_H_ENC : MSA_I5_FMT<0b001, 0b01, 0b000110>;
+class SUBVI_W_ENC : MSA_I5_FMT<0b001, 0b10, 0b000110>;
+class SUBVI_D_ENC : MSA_I5_FMT<0b001, 0b11, 0b000110>;
+
+class VSHF_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010101>;
+class VSHF_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010101>;
+class VSHF_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010101>;
+class VSHF_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010101>;
+
+class XORI_B_ENC : MSA_I8_FMT<0b11, 0b000000>;
+
 // Instruction desc.
 class MSA_BIT_D_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
                           InstrItinClass itin, RegisterClass RCWD,
@@ -1255,6 +1478,337 @@ class LDI_W_DESC : MSA_I10_DESC_BASE<"ld
 class LDI_D_DESC : MSA_I10_DESC_BASE<"ldi.d", int_mips_ldi_d,
                                      NoItinerary, MSA128>;
 
+class MADD_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"madd_q.h", int_mips_madd_q_h,
+                                            NoItinerary, MSA128, MSA128>;
+class MADD_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"madd_q.w", int_mips_madd_q_w,
+                                            NoItinerary, MSA128, MSA128>;
+
+class MADDR_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"maddr_q.h", int_mips_maddr_q_h,
+                                             NoItinerary, MSA128, MSA128>;
+class MADDR_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"maddr_q.w", int_mips_maddr_q_w,
+                                             NoItinerary, MSA128, MSA128>;
+
+class MADDV_B_DESC : MSA_3R_4R_DESC_BASE<"maddv.b", int_mips_maddv_b,
+                                         NoItinerary, MSA128, MSA128>;
+class MADDV_H_DESC : MSA_3R_4R_DESC_BASE<"maddv.h", int_mips_maddv_h,
+                                         NoItinerary, MSA128, MSA128>;
+class MADDV_W_DESC : MSA_3R_4R_DESC_BASE<"maddv.w", int_mips_maddv_w,
+                                         NoItinerary, MSA128, MSA128>;
+class MADDV_D_DESC : MSA_3R_4R_DESC_BASE<"maddv.d", int_mips_maddv_d,
+                                         NoItinerary, MSA128, MSA128>;
+
+class MAX_A_B_DESC : MSA_3R_DESC_BASE<"max_a.b", int_mips_max_a_b, NoItinerary,
+                                      MSA128, MSA128>;
+class MAX_A_H_DESC : MSA_3R_DESC_BASE<"max_a.h", int_mips_max_a_h, NoItinerary,
+                                      MSA128, MSA128>;
+class MAX_A_W_DESC : MSA_3R_DESC_BASE<"max_a.w", int_mips_max_a_w, NoItinerary,
+                                      MSA128, MSA128>;
+class MAX_A_D_DESC : MSA_3R_DESC_BASE<"max_a.d", int_mips_max_a_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class MAX_S_B_DESC : MSA_3R_DESC_BASE<"max_s.b", int_mips_max_s_b, NoItinerary,
+                                      MSA128, MSA128>;
+class MAX_S_H_DESC : MSA_3R_DESC_BASE<"max_s.h", int_mips_max_s_h, NoItinerary,
+                                      MSA128, MSA128>;
+class MAX_S_W_DESC : MSA_3R_DESC_BASE<"max_s.w", int_mips_max_s_w, NoItinerary,
+                                      MSA128, MSA128>;
+class MAX_S_D_DESC : MSA_3R_DESC_BASE<"max_s.d", int_mips_max_s_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class MAX_U_B_DESC : MSA_3R_DESC_BASE<"max_u.b", int_mips_max_u_b, NoItinerary,
+                                      MSA128, MSA128>;
+class MAX_U_H_DESC : MSA_3R_DESC_BASE<"max_u.h", int_mips_max_u_h, NoItinerary,
+                                      MSA128, MSA128>;
+class MAX_U_W_DESC : MSA_3R_DESC_BASE<"max_u.w", int_mips_max_u_w, NoItinerary,
+                                      MSA128, MSA128>;
+class MAX_U_D_DESC : MSA_3R_DESC_BASE<"max_u.d", int_mips_max_u_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class MAXI_S_B_DESC : MSA_I5_DESC_BASE<"maxi_s.b", int_mips_maxi_s_b,
+                                       NoItinerary, MSA128, MSA128>;
+class MAXI_S_H_DESC : MSA_I5_DESC_BASE<"maxi_s.h", int_mips_maxi_s_h,
+                                       NoItinerary, MSA128, MSA128>;
+class MAXI_S_W_DESC : MSA_I5_DESC_BASE<"maxi_s.w", int_mips_maxi_s_w,
+                                       NoItinerary, MSA128, MSA128>;
+class MAXI_S_D_DESC : MSA_I5_DESC_BASE<"maxi_s.d", int_mips_maxi_s_d,
+                                       NoItinerary, MSA128, MSA128>;
+
+class MAXI_U_B_DESC : MSA_I5_DESC_BASE<"maxi_u.b", int_mips_maxi_u_b,
+                                       NoItinerary, MSA128, MSA128>;
+class MAXI_U_H_DESC : MSA_I5_DESC_BASE<"maxi_u.h", int_mips_maxi_u_h,
+                                       NoItinerary, MSA128, MSA128>;
+class MAXI_U_W_DESC : MSA_I5_DESC_BASE<"maxi_u.w", int_mips_maxi_u_w,
+                                       NoItinerary, MSA128, MSA128>;
+class MAXI_U_D_DESC : MSA_I5_DESC_BASE<"maxi_u.d", int_mips_maxi_u_d,
+                                       NoItinerary, MSA128, MSA128>;
+
+class MIN_A_B_DESC : MSA_3R_DESC_BASE<"min_a.b", int_mips_min_a_b, NoItinerary,
+                                      MSA128, MSA128>;
+class MIN_A_H_DESC : MSA_3R_DESC_BASE<"min_a.h", int_mips_min_a_h, NoItinerary,
+                                      MSA128, MSA128>;
+class MIN_A_W_DESC : MSA_3R_DESC_BASE<"min_a.w", int_mips_min_a_w, NoItinerary,
+                                      MSA128, MSA128>;
+class MIN_A_D_DESC : MSA_3R_DESC_BASE<"min_a.d", int_mips_min_a_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class MIN_S_B_DESC : MSA_3R_DESC_BASE<"min_s.b", int_mips_min_s_b, NoItinerary,
+                                      MSA128, MSA128>;
+class MIN_S_H_DESC : MSA_3R_DESC_BASE<"min_s.h", int_mips_min_s_h, NoItinerary,
+                                      MSA128, MSA128>;
+class MIN_S_W_DESC : MSA_3R_DESC_BASE<"min_s.w", int_mips_min_s_w, NoItinerary,
+                                      MSA128, MSA128>;
+class MIN_S_D_DESC : MSA_3R_DESC_BASE<"min_s.d", int_mips_min_s_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class MIN_U_B_DESC : MSA_3R_DESC_BASE<"min_u.b", int_mips_min_u_b, NoItinerary,
+                                      MSA128, MSA128>;
+class MIN_U_H_DESC : MSA_3R_DESC_BASE<"min_u.h", int_mips_min_u_h, NoItinerary,
+                                      MSA128, MSA128>;
+class MIN_U_W_DESC : MSA_3R_DESC_BASE<"min_u.w", int_mips_min_u_w, NoItinerary,
+                                      MSA128, MSA128>;
+class MIN_U_D_DESC : MSA_3R_DESC_BASE<"min_u.d", int_mips_min_u_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class MINI_S_B_DESC : MSA_I5_DESC_BASE<"mini_s.b", int_mips_mini_s_b,
+                                       NoItinerary, MSA128, MSA128>;
+class MINI_S_H_DESC : MSA_I5_DESC_BASE<"mini_s.h", int_mips_mini_s_h,
+                                       NoItinerary, MSA128, MSA128>;
+class MINI_S_W_DESC : MSA_I5_DESC_BASE<"mini_s.w", int_mips_mini_s_w,
+                                       NoItinerary, MSA128, MSA128>;
+class MINI_S_D_DESC : MSA_I5_DESC_BASE<"mini_s.d", int_mips_mini_s_d,
+                                       NoItinerary, MSA128, MSA128>;
+
+class MINI_U_B_DESC : MSA_I5_DESC_BASE<"mini_u.b", int_mips_mini_u_b,
+                                       NoItinerary, MSA128, MSA128>;
+class MINI_U_H_DESC : MSA_I5_DESC_BASE<"mini_u.h", int_mips_mini_u_h,
+                                       NoItinerary, MSA128, MSA128>;
+class MINI_U_W_DESC : MSA_I5_DESC_BASE<"mini_u.w", int_mips_mini_u_w,
+                                       NoItinerary, MSA128, MSA128>;
+class MINI_U_D_DESC : MSA_I5_DESC_BASE<"mini_u.d", int_mips_mini_u_d,
+                                       NoItinerary, MSA128, MSA128>;
+
+class MOD_S_B_DESC : MSA_3R_DESC_BASE<"mod_s.b", int_mips_mod_s_b, NoItinerary,
+                                      MSA128, MSA128>;
+class MOD_S_H_DESC : MSA_3R_DESC_BASE<"mod_s.h", int_mips_mod_s_h, NoItinerary,
+                                      MSA128, MSA128>;
+class MOD_S_W_DESC : MSA_3R_DESC_BASE<"mod_s.w", int_mips_mod_s_w, NoItinerary,
+                                      MSA128, MSA128>;
+class MOD_S_D_DESC : MSA_3R_DESC_BASE<"mod_s.d", int_mips_mod_s_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class MOD_U_B_DESC : MSA_3R_DESC_BASE<"mod_u.b", int_mips_mod_u_b, NoItinerary,
+                                      MSA128, MSA128>;
+class MOD_U_H_DESC : MSA_3R_DESC_BASE<"mod_u.h", int_mips_mod_u_h, NoItinerary,
+                                      MSA128, MSA128>;
+class MOD_U_W_DESC : MSA_3R_DESC_BASE<"mod_u.w", int_mips_mod_u_w, NoItinerary,
+                                      MSA128, MSA128>;
+class MOD_U_D_DESC : MSA_3R_DESC_BASE<"mod_u.d", int_mips_mod_u_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class MSUB_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"msub_q.h", int_mips_msub_q_h,
+                                            NoItinerary, MSA128, MSA128>;
+class MSUB_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"msub_q.w", int_mips_msub_q_w,
+                                            NoItinerary, MSA128, MSA128>;
+
+class MSUBR_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"msubr_q.h", int_mips_msubr_q_h,
+                                             NoItinerary, MSA128, MSA128>;
+class MSUBR_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"msubr_q.w", int_mips_msubr_q_w,
+                                             NoItinerary, MSA128, MSA128>;
+
+class MSUBV_B_DESC : MSA_3R_4R_DESC_BASE<"msubv.b", int_mips_msubv_b,
+                                         NoItinerary, MSA128, MSA128>;
+class MSUBV_H_DESC : MSA_3R_4R_DESC_BASE<"msubv.h", int_mips_msubv_h,
+                                         NoItinerary, MSA128, MSA128>;
+class MSUBV_W_DESC : MSA_3R_4R_DESC_BASE<"msubv.w", int_mips_msubv_w,
+                                         NoItinerary, MSA128, MSA128>;
+class MSUBV_D_DESC : MSA_3R_4R_DESC_BASE<"msubv.d", int_mips_msubv_d,
+                                         NoItinerary, MSA128, MSA128>;
+
+class MUL_Q_H_DESC : MSA_3RF_DESC_BASE<"mul_q.h", int_mips_mul_q_h,
+                                       NoItinerary, MSA128, MSA128>;
+class MUL_Q_W_DESC : MSA_3RF_DESC_BASE<"mul_q.w", int_mips_mul_q_w,
+                                       NoItinerary, MSA128, MSA128>;
+
+class MULR_Q_H_DESC : MSA_3RF_DESC_BASE<"mulr_q.h", int_mips_mulr_q_h,
+                                        NoItinerary, MSA128, MSA128>;
+class MULR_Q_W_DESC : MSA_3RF_DESC_BASE<"mulr_q.w", int_mips_mulr_q_w,
+                                        NoItinerary, MSA128, MSA128>;
+
+class MULV_B_DESC : MSA_3R_DESC_BASE<"mulv.b", int_mips_mulv_b,
+                                     NoItinerary, MSA128, MSA128>;
+class MULV_H_DESC : MSA_3R_DESC_BASE<"mulv.h", int_mips_mulv_h,
+                                     NoItinerary, MSA128, MSA128>;
+class MULV_W_DESC : MSA_3R_DESC_BASE<"mulv.w", int_mips_mulv_w,
+                                     NoItinerary, MSA128, MSA128>;
+class MULV_D_DESC : MSA_3R_DESC_BASE<"mulv.d", int_mips_mulv_d,
+                                     NoItinerary, MSA128, MSA128>;
+
+class NLOC_B_DESC : MSA_2R_DESC_BASE<"nloc.b", int_mips_nloc_b,
+                                     NoItinerary, MSA128, MSA128>;
+class NLOC_H_DESC : MSA_2R_DESC_BASE<"nloc.h", int_mips_nloc_h,
+                                     NoItinerary, MSA128, MSA128>;
+class NLOC_W_DESC : MSA_2R_DESC_BASE<"nloc.w", int_mips_nloc_w,
+                                     NoItinerary, MSA128, MSA128>;
+class NLOC_D_DESC : MSA_2R_DESC_BASE<"nloc.d", int_mips_nloc_d,
+                                     NoItinerary, MSA128, MSA128>;
+
+class NLZC_B_DESC : MSA_2R_DESC_BASE<"nlzc.b", int_mips_nlzc_b,
+                                     NoItinerary, MSA128, MSA128>;
+class NLZC_H_DESC : MSA_2R_DESC_BASE<"nlzc.h", int_mips_nlzc_h,
+                                     NoItinerary, MSA128, MSA128>;
+class NLZC_W_DESC : MSA_2R_DESC_BASE<"nlzc.w", int_mips_nlzc_w,
+                                     NoItinerary, MSA128, MSA128>;
+class NLZC_D_DESC : MSA_2R_DESC_BASE<"nlzc.d", int_mips_nlzc_d,
+                                     NoItinerary, MSA128, MSA128>;
+
+class NORI_B_DESC : MSA_I8_DESC_BASE<"nori.b", int_mips_nori_b, NoItinerary,
+                                     MSA128, MSA128>;
+
+class ORI_B_DESC : MSA_I8_DESC_BASE<"ori.b", int_mips_ori_b, NoItinerary,
+                                    MSA128, MSA128>;
+
+class PCKEV_B_DESC : MSA_3R_DESC_BASE<"pckev.b", int_mips_pckev_b, NoItinerary,
+                                      MSA128, MSA128>;
+class PCKEV_H_DESC : MSA_3R_DESC_BASE<"pckev.h", int_mips_pckev_h, NoItinerary,
+                                      MSA128, MSA128>;
+class PCKEV_W_DESC : MSA_3R_DESC_BASE<"pckev.w", int_mips_pckev_w, NoItinerary,
+                                      MSA128, MSA128>;
+class PCKEV_D_DESC : MSA_3R_DESC_BASE<"pckev.d", int_mips_pckev_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class PCKOD_B_DESC : MSA_3R_DESC_BASE<"pckod.b", int_mips_pckod_b, NoItinerary,
+                                      MSA128, MSA128>;
+class PCKOD_H_DESC : MSA_3R_DESC_BASE<"pckod.h", int_mips_pckod_h, NoItinerary,
+                                      MSA128, MSA128>;
+class PCKOD_W_DESC : MSA_3R_DESC_BASE<"pckod.w", int_mips_pckod_w, NoItinerary,
+                                      MSA128, MSA128>;
+class PCKOD_D_DESC : MSA_3R_DESC_BASE<"pckod.d", int_mips_pckod_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class PCNT_B_DESC : MSA_2R_DESC_BASE<"pcnt.b", int_mips_pcnt_b,
+                                     NoItinerary, MSA128, MSA128>;
+class PCNT_H_DESC : MSA_2R_DESC_BASE<"pcnt.h", int_mips_pcnt_h,
+                                     NoItinerary, MSA128, MSA128>;
+class PCNT_W_DESC : MSA_2R_DESC_BASE<"pcnt.w", int_mips_pcnt_w,
+                                     NoItinerary, MSA128, MSA128>;
+class PCNT_D_DESC : MSA_2R_DESC_BASE<"pcnt.d", int_mips_pcnt_d,
+                                     NoItinerary, MSA128, MSA128>;
+
+class SAT_S_B_DESC : MSA_BIT_B_DESC_BASE<"sat_s.b", int_mips_sat_s_b,
+                                         NoItinerary, MSA128, MSA128>;
+class SAT_S_H_DESC : MSA_BIT_H_DESC_BASE<"sat_s.h", int_mips_sat_s_h,
+                                         NoItinerary, MSA128, MSA128>;
+class SAT_S_W_DESC : MSA_BIT_W_DESC_BASE<"sat_s.w", int_mips_sat_s_w,
+                                         NoItinerary, MSA128, MSA128>;
+class SAT_S_D_DESC : MSA_BIT_D_DESC_BASE<"sat_s.d", int_mips_sat_s_d,
+                                         NoItinerary, MSA128, MSA128>;
+
+class SAT_U_B_DESC : MSA_BIT_B_DESC_BASE<"sat_u.b", int_mips_sat_u_b,
+                                         NoItinerary, MSA128, MSA128>;
+class SAT_U_H_DESC : MSA_BIT_H_DESC_BASE<"sat_u.h", int_mips_sat_u_h,
+                                         NoItinerary, MSA128, MSA128>;
+class SAT_U_W_DESC : MSA_BIT_W_DESC_BASE<"sat_u.w", int_mips_sat_u_w,
+                                         NoItinerary, MSA128, MSA128>;
+class SAT_U_D_DESC : MSA_BIT_D_DESC_BASE<"sat_u.d", int_mips_sat_u_d,
+                                         NoItinerary, MSA128, MSA128>;
+
+class SHF_B_DESC : MSA_I8_DESC_BASE<"shf.b", int_mips_shf_b, NoItinerary,
+                                    MSA128, MSA128>;
+class SHF_H_DESC : MSA_I8_DESC_BASE<"shf.h", int_mips_shf_h, NoItinerary,
+                                    MSA128, MSA128>;
+class SHF_W_DESC : MSA_I8_DESC_BASE<"shf.w", int_mips_shf_w, NoItinerary,
+                                    MSA128, MSA128>;
+
+class SLD_B_DESC : MSA_3R_DESC_BASE<"sld.b", int_mips_sld_b, NoItinerary,
+                                    MSA128, MSA128>;
+class SLD_H_DESC : MSA_3R_DESC_BASE<"sld.h", int_mips_sld_h, NoItinerary,
+                                    MSA128, MSA128>;
+class SLD_W_DESC : MSA_3R_DESC_BASE<"sld.w", int_mips_sld_w, NoItinerary,
+                                    MSA128, MSA128>;
+class SLD_D_DESC : MSA_3R_DESC_BASE<"sld.d", int_mips_sld_d, NoItinerary,
+                                    MSA128, MSA128>;
+
+class SLDI_B_DESC : MSA_BIT_B_DESC_BASE<"sldi.b", int_mips_sldi_b,
+                                        NoItinerary, MSA128, MSA128>;
+class SLDI_H_DESC : MSA_BIT_H_DESC_BASE<"sldi.h", int_mips_sldi_h,
+                                        NoItinerary, MSA128, MSA128>;
+class SLDI_W_DESC : MSA_BIT_W_DESC_BASE<"sldi.w", int_mips_sldi_w,
+                                        NoItinerary, MSA128, MSA128>;
+class SLDI_D_DESC : MSA_BIT_D_DESC_BASE<"sldi.d", int_mips_sldi_d,
+                                        NoItinerary, MSA128, MSA128>;
+
+class SLL_B_DESC : MSA_3R_DESC_BASE<"sll.b", int_mips_sll_b, NoItinerary,
+                                    MSA128, MSA128>;
+class SLL_H_DESC : MSA_3R_DESC_BASE<"sll.h", int_mips_sll_h, NoItinerary,
+                                    MSA128, MSA128>;
+class SLL_W_DESC : MSA_3R_DESC_BASE<"sll.w", int_mips_sll_w, NoItinerary,
+                                    MSA128, MSA128>;
+class SLL_D_DESC : MSA_3R_DESC_BASE<"sll.d", int_mips_sll_d, NoItinerary,
+                                    MSA128, MSA128>;
+
+class SLLI_B_DESC : MSA_BIT_B_DESC_BASE<"slli.b", int_mips_slli_b,
+                                        NoItinerary, MSA128, MSA128>;
+class SLLI_H_DESC : MSA_BIT_H_DESC_BASE<"slli.h", int_mips_slli_h,
+                                        NoItinerary, MSA128, MSA128>;
+class SLLI_W_DESC : MSA_BIT_W_DESC_BASE<"slli.w", int_mips_slli_w,
+                                        NoItinerary, MSA128, MSA128>;
+class SLLI_D_DESC : MSA_BIT_D_DESC_BASE<"slli.d", int_mips_slli_d,
+                                        NoItinerary, MSA128, MSA128>;
+
+class SPLAT_B_DESC : MSA_3R_DESC_BASE<"splat.b", int_mips_splat_b, NoItinerary,
+                                      MSA128, MSA128, GPR32>;
+class SPLAT_H_DESC : MSA_3R_DESC_BASE<"splat.h", int_mips_splat_h, NoItinerary,
+                                      MSA128, MSA128, GPR32>;
+class SPLAT_W_DESC : MSA_3R_DESC_BASE<"splat.w", int_mips_splat_w, NoItinerary,
+                                      MSA128, MSA128, GPR32>;
+class SPLAT_D_DESC : MSA_3R_DESC_BASE<"splat.d", int_mips_splat_d, NoItinerary,
+                                      MSA128, MSA128, GPR32>;
+
+class SPLATI_B_DESC : MSA_BIT_B_DESC_BASE<"splati.b", int_mips_splati_b,
+                                          NoItinerary, MSA128, MSA128>;
+class SPLATI_H_DESC : MSA_BIT_H_DESC_BASE<"splati.h", int_mips_splati_h,
+                                          NoItinerary, MSA128, MSA128>;
+class SPLATI_W_DESC : MSA_BIT_W_DESC_BASE<"splati.w", int_mips_splati_w,
+                                          NoItinerary, MSA128, MSA128>;
+class SPLATI_D_DESC : MSA_BIT_D_DESC_BASE<"splati.d", int_mips_splati_d,
+                                          NoItinerary, MSA128, MSA128>;
+
+class SRA_B_DESC : MSA_3R_DESC_BASE<"sra.b", int_mips_sra_b, NoItinerary,
+                                    MSA128, MSA128>;
+class SRA_H_DESC : MSA_3R_DESC_BASE<"sra.h", int_mips_sra_h, NoItinerary,
+                                    MSA128, MSA128>;
+class SRA_W_DESC : MSA_3R_DESC_BASE<"sra.w", int_mips_sra_w, NoItinerary,
+                                    MSA128, MSA128>;
+class SRA_D_DESC : MSA_3R_DESC_BASE<"sra.d", int_mips_sra_d, NoItinerary,
+                                    MSA128, MSA128>;
+
+class SRAI_B_DESC : MSA_BIT_B_DESC_BASE<"srai.b", int_mips_srai_b,
+                                        NoItinerary, MSA128, MSA128>;
+class SRAI_H_DESC : MSA_BIT_H_DESC_BASE<"srai.h", int_mips_srai_h,
+                                        NoItinerary, MSA128, MSA128>;
+class SRAI_W_DESC : MSA_BIT_W_DESC_BASE<"srai.w", int_mips_srai_w,
+                                        NoItinerary, MSA128, MSA128>;
+class SRAI_D_DESC : MSA_BIT_D_DESC_BASE<"srai.d", int_mips_srai_d,
+                                        NoItinerary, MSA128, MSA128>;
+
+class SRL_B_DESC : MSA_3R_DESC_BASE<"srl.b", int_mips_srl_b, NoItinerary,
+                                    MSA128, MSA128>;
+class SRL_H_DESC : MSA_3R_DESC_BASE<"srl.h", int_mips_srl_h, NoItinerary,
+                                    MSA128, MSA128>;
+class SRL_W_DESC : MSA_3R_DESC_BASE<"srl.w", int_mips_srl_w, NoItinerary,
+                                    MSA128, MSA128>;
+class SRL_D_DESC : MSA_3R_DESC_BASE<"srl.d", int_mips_srl_d, NoItinerary,
+                                    MSA128, MSA128>;
+
+class SRLI_B_DESC : MSA_BIT_B_DESC_BASE<"srli.b", int_mips_srli_b,
+                                        NoItinerary, MSA128, MSA128>;
+class SRLI_H_DESC : MSA_BIT_H_DESC_BASE<"srli.h", int_mips_srli_h,
+                                        NoItinerary, MSA128, MSA128>;
+class SRLI_W_DESC : MSA_BIT_W_DESC_BASE<"srli.w", int_mips_srli_w,
+                                        NoItinerary, MSA128, MSA128>;
+class SRLI_D_DESC : MSA_BIT_D_DESC_BASE<"srli.d", int_mips_srli_d,
+                                        NoItinerary, MSA128, MSA128>;
+
 class ST_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
                    ValueType TyNode, InstrItinClass itin, RegisterClass RCWD,
                    Operand MemOpnd = mem, ComplexPattern Addr = addr> {
@@ -1271,6 +1825,71 @@ class ST_H_DESC : ST_DESC_BASE<"st.h", s
 class ST_W_DESC : ST_DESC_BASE<"st.w", store, v4i32, NoItinerary, MSA128>;
 class ST_D_DESC : ST_DESC_BASE<"st.d", store, v2i64, NoItinerary, MSA128>;
 
+class SUBS_S_B_DESC : MSA_3R_DESC_BASE<"subs_s.b", int_mips_subs_s_b,
+                                       NoItinerary, MSA128, MSA128>;
+class SUBS_S_H_DESC : MSA_3R_DESC_BASE<"subs_s.h", int_mips_subs_s_h,
+                                       NoItinerary, MSA128, MSA128>;
+class SUBS_S_W_DESC : MSA_3R_DESC_BASE<"subs_s.w", int_mips_subs_s_w,
+                                       NoItinerary, MSA128, MSA128>;
+class SUBS_S_D_DESC : MSA_3R_DESC_BASE<"subs_s.d", int_mips_subs_s_d,
+                                       NoItinerary, MSA128, MSA128>;
+
+class SUBS_U_B_DESC : MSA_3R_DESC_BASE<"subs_u.b", int_mips_subs_u_b,
+                                       NoItinerary, MSA128, MSA128>;
+class SUBS_U_H_DESC : MSA_3R_DESC_BASE<"subs_u.h", int_mips_subs_u_h,
+                                       NoItinerary, MSA128, MSA128>;
+class SUBS_U_W_DESC : MSA_3R_DESC_BASE<"subs_u.w", int_mips_subs_u_w,
+                                       NoItinerary, MSA128, MSA128>;
+class SUBS_U_D_DESC : MSA_3R_DESC_BASE<"subs_u.d", int_mips_subs_u_d,
+                                       NoItinerary, MSA128, MSA128>;
+
+class SUBSUS_U_B_DESC : MSA_3R_DESC_BASE<"subsus_u.b", int_mips_subsus_u_b,
+                                         NoItinerary, MSA128, MSA128>;
+class SUBSUS_U_H_DESC : MSA_3R_DESC_BASE<"subsus_u.h", int_mips_subsus_u_h,
+                                         NoItinerary, MSA128, MSA128>;
+class SUBSUS_U_W_DESC : MSA_3R_DESC_BASE<"subsus_u.w", int_mips_subsus_u_w,
+                                         NoItinerary, MSA128, MSA128>;
+class SUBSUS_U_D_DESC : MSA_3R_DESC_BASE<"subsus_u.d", int_mips_subsus_u_d,
+                                         NoItinerary, MSA128, MSA128>;
+
+class SUBSUU_S_B_DESC : MSA_3R_DESC_BASE<"subsuu_s.b", int_mips_subsuu_s_b,
+                                         NoItinerary, MSA128, MSA128>;
+class SUBSUU_S_H_DESC : MSA_3R_DESC_BASE<"subsuu_s.h", int_mips_subsuu_s_h,
+                                         NoItinerary, MSA128, MSA128>;
+class SUBSUU_S_W_DESC : MSA_3R_DESC_BASE<"subsuu_s.w", int_mips_subsuu_s_w,
+                                         NoItinerary, MSA128, MSA128>;
+class SUBSUU_S_D_DESC : MSA_3R_DESC_BASE<"subsuu_s.d", int_mips_subsuu_s_d,
+                                         NoItinerary, MSA128, MSA128>;
+
+class SUBV_B_DESC : MSA_3R_DESC_BASE<"subv.b", int_mips_subv_b,
+                                     NoItinerary, MSA128, MSA128>;
+class SUBV_H_DESC : MSA_3R_DESC_BASE<"subv.h", int_mips_subv_h,
+                                     NoItinerary, MSA128, MSA128>;
+class SUBV_W_DESC : MSA_3R_DESC_BASE<"subv.w", int_mips_subv_w,
+                                     NoItinerary, MSA128, MSA128>;
+class SUBV_D_DESC : MSA_3R_DESC_BASE<"subv.d", int_mips_subv_d,
+                                     NoItinerary, MSA128, MSA128>;
+
+class SUBVI_B_DESC : MSA_I5_DESC_BASE<"subvi.b", int_mips_subvi_b, NoItinerary,
+                                      MSA128, MSA128>;
+class SUBVI_H_DESC : MSA_I5_DESC_BASE<"subvi.h", int_mips_subvi_h, NoItinerary,
+                                      MSA128, MSA128>;
+class SUBVI_W_DESC : MSA_I5_DESC_BASE<"subvi.w", int_mips_subvi_w, NoItinerary,
+                                      MSA128, MSA128>;
+class SUBVI_D_DESC : MSA_I5_DESC_BASE<"subvi.d", int_mips_subvi_d, NoItinerary,
+                                      MSA128, MSA128>;
+
+class VSHF_B_DESC : MSA_3R_DESC_BASE<"vshf.b", int_mips_vshf_b,
+                                     NoItinerary, MSA128, MSA128>;
+class VSHF_H_DESC : MSA_3R_DESC_BASE<"vshf.h", int_mips_vshf_h,
+                                     NoItinerary, MSA128, MSA128>;
+class VSHF_W_DESC : MSA_3R_DESC_BASE<"vshf.w", int_mips_vshf_w,
+                                     NoItinerary, MSA128, MSA128>;
+class VSHF_D_DESC : MSA_3R_DESC_BASE<"vshf.d", int_mips_vshf_d,
+                                     NoItinerary, MSA128, MSA128>;
+
+class XORI_B_DESC : MSA_I8_DESC_BASE<"xori.b", int_mips_xori_b, NoItinerary,
+                                     MSA128, MSA128>;
 // Instruction defs.
 def ADD_A_B : ADD_A_B_ENC, ADD_A_B_DESC, Requires<[HasMSA]>;
 def ADD_A_H : ADD_A_H_ENC, ADD_A_H_DESC, Requires<[HasMSA]>;
@@ -1641,11 +2260,234 @@ def LDI_B : LDI_B_ENC, LDI_B_DESC, Requi
 def LDI_H : LDI_H_ENC, LDI_H_DESC, Requires<[HasMSA]>;
 def LDI_W : LDI_W_ENC, LDI_W_DESC, Requires<[HasMSA]>;
 
+def MADD_Q_H : MADD_Q_H_ENC, MADD_Q_H_DESC, Requires<[HasMSA]>;
+def MADD_Q_W : MADD_Q_W_ENC, MADD_Q_W_DESC, Requires<[HasMSA]>;
+
+def MADDR_Q_H : MADDR_Q_H_ENC, MADDR_Q_H_DESC, Requires<[HasMSA]>;
+def MADDR_Q_W : MADDR_Q_W_ENC, MADDR_Q_W_DESC, Requires<[HasMSA]>;
+
+def MADDV_B : MADDV_B_ENC, MADDV_B_DESC, Requires<[HasMSA]>;
+def MADDV_H : MADDV_H_ENC, MADDV_H_DESC, Requires<[HasMSA]>;
+def MADDV_W : MADDV_W_ENC, MADDV_W_DESC, Requires<[HasMSA]>;
+def MADDV_D : MADDV_D_ENC, MADDV_D_DESC, Requires<[HasMSA]>;
+
+def MAX_A_B : MAX_A_B_ENC, MAX_A_B_DESC, Requires<[HasMSA]>;
+def MAX_A_H : MAX_A_H_ENC, MAX_A_H_DESC, Requires<[HasMSA]>;
+def MAX_A_W : MAX_A_W_ENC, MAX_A_W_DESC, Requires<[HasMSA]>;
+def MAX_A_D : MAX_A_D_ENC, MAX_A_D_DESC, Requires<[HasMSA]>;
+
+def MAX_S_B : MAX_S_B_ENC, MAX_S_B_DESC, Requires<[HasMSA]>;
+def MAX_S_H : MAX_S_H_ENC, MAX_S_H_DESC, Requires<[HasMSA]>;
+def MAX_S_W : MAX_S_W_ENC, MAX_S_W_DESC, Requires<[HasMSA]>;
+def MAX_S_D : MAX_S_D_ENC, MAX_S_D_DESC, Requires<[HasMSA]>;
+
+def MAX_U_B : MAX_U_B_ENC, MAX_U_B_DESC, Requires<[HasMSA]>;
+def MAX_U_H : MAX_U_H_ENC, MAX_U_H_DESC, Requires<[HasMSA]>;
+def MAX_U_W : MAX_U_W_ENC, MAX_U_W_DESC, Requires<[HasMSA]>;
+def MAX_U_D : MAX_U_D_ENC, MAX_U_D_DESC, Requires<[HasMSA]>;
+
+def MAXI_S_B : MAXI_S_B_ENC, MAXI_S_B_DESC, Requires<[HasMSA]>;
+def MAXI_S_H : MAXI_S_H_ENC, MAXI_S_H_DESC, Requires<[HasMSA]>;
+def MAXI_S_W : MAXI_S_W_ENC, MAXI_S_W_DESC, Requires<[HasMSA]>;
+def MAXI_S_D : MAXI_S_D_ENC, MAXI_S_D_DESC, Requires<[HasMSA]>;
+
+def MAXI_U_B : MAXI_U_B_ENC, MAXI_U_B_DESC, Requires<[HasMSA]>;
+def MAXI_U_H : MAXI_U_H_ENC, MAXI_U_H_DESC, Requires<[HasMSA]>;
+def MAXI_U_W : MAXI_U_W_ENC, MAXI_U_W_DESC, Requires<[HasMSA]>;
+def MAXI_U_D : MAXI_U_D_ENC, MAXI_U_D_DESC, Requires<[HasMSA]>;
+
+def MIN_A_B : MIN_A_B_ENC, MIN_A_B_DESC, Requires<[HasMSA]>;
+def MIN_A_H : MIN_A_H_ENC, MIN_A_H_DESC, Requires<[HasMSA]>;
+def MIN_A_W : MIN_A_W_ENC, MIN_A_W_DESC, Requires<[HasMSA]>;
+def MIN_A_D : MIN_A_D_ENC, MIN_A_D_DESC, Requires<[HasMSA]>;
+
+def MIN_S_B : MIN_S_B_ENC, MIN_S_B_DESC, Requires<[HasMSA]>;
+def MIN_S_H : MIN_S_H_ENC, MIN_S_H_DESC, Requires<[HasMSA]>;
+def MIN_S_W : MIN_S_W_ENC, MIN_S_W_DESC, Requires<[HasMSA]>;
+def MIN_S_D : MIN_S_D_ENC, MIN_S_D_DESC, Requires<[HasMSA]>;
+
+def MIN_U_B : MIN_U_B_ENC, MIN_U_B_DESC, Requires<[HasMSA]>;
+def MIN_U_H : MIN_U_H_ENC, MIN_U_H_DESC, Requires<[HasMSA]>;
+def MIN_U_W : MIN_U_W_ENC, MIN_U_W_DESC, Requires<[HasMSA]>;
+def MIN_U_D : MIN_U_D_ENC, MIN_U_D_DESC, Requires<[HasMSA]>;
+
+def MINI_S_B : MINI_S_B_ENC, MINI_S_B_DESC, Requires<[HasMSA]>;
+def MINI_S_H : MINI_S_H_ENC, MINI_S_H_DESC, Requires<[HasMSA]>;
+def MINI_S_W : MINI_S_W_ENC, MINI_S_W_DESC, Requires<[HasMSA]>;
+def MINI_S_D : MINI_S_D_ENC, MINI_S_D_DESC, Requires<[HasMSA]>;
+
+def MINI_U_B : MINI_U_B_ENC, MINI_U_B_DESC, Requires<[HasMSA]>;
+def MINI_U_H : MINI_U_H_ENC, MINI_U_H_DESC, Requires<[HasMSA]>;
+def MINI_U_W : MINI_U_W_ENC, MINI_U_W_DESC, Requires<[HasMSA]>;
+def MINI_U_D : MINI_U_D_ENC, MINI_U_D_DESC, Requires<[HasMSA]>;
+
+def MOD_S_B : MOD_S_B_ENC, MOD_S_B_DESC, Requires<[HasMSA]>;
+def MOD_S_H : MOD_S_H_ENC, MOD_S_H_DESC, Requires<[HasMSA]>;
+def MOD_S_W : MOD_S_W_ENC, MOD_S_W_DESC, Requires<[HasMSA]>;
+def MOD_S_D : MOD_S_D_ENC, MOD_S_D_DESC, Requires<[HasMSA]>;
+
+def MOD_U_B : MOD_U_B_ENC, MOD_U_B_DESC, Requires<[HasMSA]>;
+def MOD_U_H : MOD_U_H_ENC, MOD_U_H_DESC, Requires<[HasMSA]>;
+def MOD_U_W : MOD_U_W_ENC, MOD_U_W_DESC, Requires<[HasMSA]>;
+def MOD_U_D : MOD_U_D_ENC, MOD_U_D_DESC, Requires<[HasMSA]>;
+
+def MSUB_Q_H : MSUB_Q_H_ENC, MSUB_Q_H_DESC, Requires<[HasMSA]>;
+def MSUB_Q_W : MSUB_Q_W_ENC, MSUB_Q_W_DESC, Requires<[HasMSA]>;
+
+def MSUBR_Q_H : MSUBR_Q_H_ENC, MSUBR_Q_H_DESC, Requires<[HasMSA]>;
+def MSUBR_Q_W : MSUBR_Q_W_ENC, MSUBR_Q_W_DESC, Requires<[HasMSA]>;
+
+def MSUBV_B : MSUBV_B_ENC, MSUBV_B_DESC, Requires<[HasMSA]>;
+def MSUBV_H : MSUBV_H_ENC, MSUBV_H_DESC, Requires<[HasMSA]>;
+def MSUBV_W : MSUBV_W_ENC, MSUBV_W_DESC, Requires<[HasMSA]>;
+def MSUBV_D : MSUBV_D_ENC, MSUBV_D_DESC, Requires<[HasMSA]>;
+
+def MUL_Q_H : MUL_Q_H_ENC, MUL_Q_H_DESC, Requires<[HasMSA]>;
+def MUL_Q_W : MUL_Q_W_ENC, MUL_Q_W_DESC, Requires<[HasMSA]>;
+
+def MULR_Q_H : MULR_Q_H_ENC, MULR_Q_H_DESC, Requires<[HasMSA]>;
+def MULR_Q_W : MULR_Q_W_ENC, MULR_Q_W_DESC, Requires<[HasMSA]>;
+
+def MULV_B : MULV_B_ENC, MULV_B_DESC, Requires<[HasMSA]>;
+def MULV_H : MULV_H_ENC, MULV_H_DESC, Requires<[HasMSA]>;
+def MULV_W : MULV_W_ENC, MULV_W_DESC, Requires<[HasMSA]>;
+def MULV_D : MULV_D_ENC, MULV_D_DESC, Requires<[HasMSA]>;
+
+def NLOC_B : NLOC_B_ENC, NLOC_B_DESC, Requires<[HasMSA]>;
+def NLOC_H : NLOC_H_ENC, NLOC_H_DESC, Requires<[HasMSA]>;
+def NLOC_W : NLOC_W_ENC, NLOC_W_DESC, Requires<[HasMSA]>;
+def NLOC_D : NLOC_D_ENC, NLOC_D_DESC, Requires<[HasMSA]>;
+
+def NLZC_B : NLZC_B_ENC, NLZC_B_DESC, Requires<[HasMSA]>;
+def NLZC_H : NLZC_H_ENC, NLZC_H_DESC, Requires<[HasMSA]>;
+def NLZC_W : NLZC_W_ENC, NLZC_W_DESC, Requires<[HasMSA]>;
+def NLZC_D : NLZC_D_ENC, NLZC_D_DESC, Requires<[HasMSA]>;
+
+def NORI_B : NORI_B_ENC, NORI_B_DESC, Requires<[HasMSA]>;
+
+def ORI_B : ORI_B_ENC, ORI_B_DESC, Requires<[HasMSA]>;
+
+def PCKEV_B : PCKEV_B_ENC, PCKEV_B_DESC, Requires<[HasMSA]>;
+def PCKEV_H : PCKEV_H_ENC, PCKEV_H_DESC, Requires<[HasMSA]>;
+def PCKEV_W : PCKEV_W_ENC, PCKEV_W_DESC, Requires<[HasMSA]>;
+def PCKEV_D : PCKEV_D_ENC, PCKEV_D_DESC, Requires<[HasMSA]>;
+
+def PCKOD_B : PCKOD_B_ENC, PCKOD_B_DESC, Requires<[HasMSA]>;
+def PCKOD_H : PCKOD_H_ENC, PCKOD_H_DESC, Requires<[HasMSA]>;
+def PCKOD_W : PCKOD_W_ENC, PCKOD_W_DESC, Requires<[HasMSA]>;
+def PCKOD_D : PCKOD_D_ENC, PCKOD_D_DESC, Requires<[HasMSA]>;
+
+def PCNT_B : PCNT_B_ENC, PCNT_B_DESC, Requires<[HasMSA]>;
+def PCNT_H : PCNT_H_ENC, PCNT_H_DESC, Requires<[HasMSA]>;
+def PCNT_W : PCNT_W_ENC, PCNT_W_DESC, Requires<[HasMSA]>;
+def PCNT_D : PCNT_D_ENC, PCNT_D_DESC, Requires<[HasMSA]>;
+
+def SAT_S_B : SAT_S_B_ENC, SAT_S_B_DESC, Requires<[HasMSA]>;
+def SAT_S_H : SAT_S_H_ENC, SAT_S_H_DESC, Requires<[HasMSA]>;
+def SAT_S_W : SAT_S_W_ENC, SAT_S_W_DESC, Requires<[HasMSA]>;
+def SAT_S_D : SAT_S_D_ENC, SAT_S_D_DESC, Requires<[HasMSA]>;
+
+def SAT_U_B : SAT_U_B_ENC, SAT_U_B_DESC, Requires<[HasMSA]>;
+def SAT_U_H : SAT_U_H_ENC, SAT_U_H_DESC, Requires<[HasMSA]>;
+def SAT_U_W : SAT_U_W_ENC, SAT_U_W_DESC, Requires<[HasMSA]>;
+def SAT_U_D : SAT_U_D_ENC, SAT_U_D_DESC, Requires<[HasMSA]>;
+
+def SHF_B : SHF_B_ENC, SHF_B_DESC, Requires<[HasMSA]>;
+def SHF_H : SHF_H_ENC, SHF_H_DESC, Requires<[HasMSA]>;
+def SHF_W : SHF_W_ENC, SHF_W_DESC, Requires<[HasMSA]>;
+
+def SLD_B : SLD_B_ENC, SLD_B_DESC, Requires<[HasMSA]>;
+def SLD_H : SLD_H_ENC, SLD_H_DESC, Requires<[HasMSA]>;
+def SLD_W : SLD_W_ENC, SLD_W_DESC, Requires<[HasMSA]>;
+def SLD_D : SLD_D_ENC, SLD_D_DESC, Requires<[HasMSA]>;
+
+def SLDI_B : SLDI_B_ENC, SLDI_B_DESC, Requires<[HasMSA]>;
+def SLDI_H : SLDI_H_ENC, SLDI_H_DESC, Requires<[HasMSA]>;
+def SLDI_W : SLDI_W_ENC, SLDI_W_DESC, Requires<[HasMSA]>;
+def SLDI_D : SLDI_D_ENC, SLDI_D_DESC, Requires<[HasMSA]>;
+
+def SLL_B : SLL_B_ENC, SLL_B_DESC, Requires<[HasMSA]>;
+def SLL_H : SLL_H_ENC, SLL_H_DESC, Requires<[HasMSA]>;
+def SLL_W : SLL_W_ENC, SLL_W_DESC, Requires<[HasMSA]>;
+def SLL_D : SLL_D_ENC, SLL_D_DESC, Requires<[HasMSA]>;
+
+def SLLI_B : SLLI_B_ENC, SLLI_B_DESC, Requires<[HasMSA]>;
+def SLLI_H : SLLI_H_ENC, SLLI_H_DESC, Requires<[HasMSA]>;
+def SLLI_W : SLLI_W_ENC, SLLI_W_DESC, Requires<[HasMSA]>;
+def SLLI_D : SLLI_D_ENC, SLLI_D_DESC, Requires<[HasMSA]>;
+
+def SPLAT_B : SPLAT_B_ENC, SPLAT_B_DESC, Requires<[HasMSA]>;
+def SPLAT_H : SPLAT_H_ENC, SPLAT_H_DESC, Requires<[HasMSA]>;
+def SPLAT_W : SPLAT_W_ENC, SPLAT_W_DESC, Requires<[HasMSA]>;
+def SPLAT_D : SPLAT_D_ENC, SPLAT_D_DESC, Requires<[HasMSA]>;
+
+def SPLATI_B : SPLATI_B_ENC, SPLATI_B_DESC, Requires<[HasMSA]>;
+def SPLATI_H : SPLATI_H_ENC, SPLATI_H_DESC, Requires<[HasMSA]>;
+def SPLATI_W : SPLATI_W_ENC, SPLATI_W_DESC, Requires<[HasMSA]>;
+def SPLATI_D : SPLATI_D_ENC, SPLATI_D_DESC, Requires<[HasMSA]>;
+
+def SRA_B : SRA_B_ENC, SRA_B_DESC, Requires<[HasMSA]>;
+def SRA_H : SRA_H_ENC, SRA_H_DESC, Requires<[HasMSA]>;
+def SRA_W : SRA_W_ENC, SRA_W_DESC, Requires<[HasMSA]>;
+def SRA_D : SRA_D_ENC, SRA_D_DESC, Requires<[HasMSA]>;
+
+def SRAI_B : SRAI_B_ENC, SRAI_B_DESC, Requires<[HasMSA]>;
+def SRAI_H : SRAI_H_ENC, SRAI_H_DESC, Requires<[HasMSA]>;
+def SRAI_W : SRAI_W_ENC, SRAI_W_DESC, Requires<[HasMSA]>;
+def SRAI_D : SRAI_D_ENC, SRAI_D_DESC, Requires<[HasMSA]>;
+
+def SRL_B : SRL_B_ENC, SRL_B_DESC, Requires<[HasMSA]>;
+def SRL_H : SRL_H_ENC, SRL_H_DESC, Requires<[HasMSA]>;
+def SRL_W : SRL_W_ENC, SRL_W_DESC, Requires<[HasMSA]>;
+def SRL_D : SRL_D_ENC, SRL_D_DESC, Requires<[HasMSA]>;
+
+def SRLI_B : SRLI_B_ENC, SRLI_B_DESC, Requires<[HasMSA]>;
+def SRLI_H : SRLI_H_ENC, SRLI_H_DESC, Requires<[HasMSA]>;
+def SRLI_W : SRLI_W_ENC, SRLI_W_DESC, Requires<[HasMSA]>;
+def SRLI_D : SRLI_D_ENC, SRLI_D_DESC, Requires<[HasMSA]>;
+
 def ST_B: ST_B_ENC, ST_B_DESC, Requires<[HasMSA]>;
 def ST_H: ST_H_ENC, ST_H_DESC, Requires<[HasMSA]>;
 def ST_W: ST_W_ENC, ST_W_DESC, Requires<[HasMSA]>;
 def ST_D: ST_D_ENC, ST_D_DESC, Requires<[HasMSA]>;
 
+def SUBS_S_B : SUBS_S_B_ENC, SUBS_S_B_DESC, Requires<[HasMSA]>;
+def SUBS_S_H : SUBS_S_H_ENC, SUBS_S_H_DESC, Requires<[HasMSA]>;
+def SUBS_S_W : SUBS_S_W_ENC, SUBS_S_W_DESC, Requires<[HasMSA]>;
+def SUBS_S_D : SUBS_S_D_ENC, SUBS_S_D_DESC, Requires<[HasMSA]>;
+
+def SUBS_U_B : SUBS_U_B_ENC, SUBS_U_B_DESC, Requires<[HasMSA]>;
+def SUBS_U_H : SUBS_U_H_ENC, SUBS_U_H_DESC, Requires<[HasMSA]>;
+def SUBS_U_W : SUBS_U_W_ENC, SUBS_U_W_DESC, Requires<[HasMSA]>;
+def SUBS_U_D : SUBS_U_D_ENC, SUBS_U_D_DESC, Requires<[HasMSA]>;
+
+def SUBSUS_U_B : SUBSUS_U_B_ENC, SUBSUS_U_B_DESC, Requires<[HasMSA]>;
+def SUBSUS_U_H : SUBSUS_U_H_ENC, SUBSUS_U_H_DESC, Requires<[HasMSA]>;
+def SUBSUS_U_W : SUBSUS_U_W_ENC, SUBSUS_U_W_DESC, Requires<[HasMSA]>;
+def SUBSUS_U_D : SUBSUS_U_D_ENC, SUBSUS_U_D_DESC, Requires<[HasMSA]>;
+
+def SUBSUU_S_B : SUBSUU_S_B_ENC, SUBSUU_S_B_DESC, Requires<[HasMSA]>;
+def SUBSUU_S_H : SUBSUU_S_H_ENC, SUBSUU_S_H_DESC, Requires<[HasMSA]>;
+def SUBSUU_S_W : SUBSUU_S_W_ENC, SUBSUU_S_W_DESC, Requires<[HasMSA]>;
+def SUBSUU_S_D : SUBSUU_S_D_ENC, SUBSUU_S_D_DESC, Requires<[HasMSA]>;
+
+def SUBV_B : SUBV_B_ENC, SUBV_B_DESC, Requires<[HasMSA]>;
+def SUBV_H : SUBV_H_ENC, SUBV_H_DESC, Requires<[HasMSA]>;
+def SUBV_W : SUBV_W_ENC, SUBV_W_DESC, Requires<[HasMSA]>;
+def SUBV_D : SUBV_D_ENC, SUBV_D_DESC, Requires<[HasMSA]>;
+
+def SUBVI_B : SUBVI_B_ENC, SUBVI_B_DESC, Requires<[HasMSA]>;
+def SUBVI_H : SUBVI_H_ENC, SUBVI_H_DESC, Requires<[HasMSA]>;
+def SUBVI_W : SUBVI_W_ENC, SUBVI_W_DESC, Requires<[HasMSA]>;
+def SUBVI_D : SUBVI_D_ENC, SUBVI_D_DESC, Requires<[HasMSA]>;
+
+def VSHF_B : VSHF_B_ENC, VSHF_B_DESC, Requires<[HasMSA]>;
+def VSHF_H : VSHF_H_ENC, VSHF_H_DESC, Requires<[HasMSA]>;
+def VSHF_W : VSHF_W_ENC, VSHF_W_DESC, Requires<[HasMSA]>;
+def VSHF_D : VSHF_D_ENC, VSHF_D_DESC, Requires<[HasMSA]>;
+
+def XORI_B : XORI_B_ENC, XORI_B_DESC, Requires<[HasMSA]>;
+
 // Patterns.
 class MSAPat<dag pattern, dag result, Predicate pred = HasMSA> :
   Pat<pattern, result>, Requires<[pred]>;

Added: llvm/trunk/test/CodeGen/Mips/msa/2r.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/2r.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/2r.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/2r.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,230 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_nloc_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nloc_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nloc_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_nloc_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.nloc.b(<16 x i8> %0)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_nloc_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.nloc.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_nloc_b_test:
+; CHECK: ld.b
+; CHECK: nloc.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_nloc_b_test
+;
+@llvm_mips_nloc_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_nloc_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_nloc_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_nloc_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.nloc.h(<8 x i16> %0)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_nloc_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.nloc.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_nloc_h_test:
+; CHECK: ld.h
+; CHECK: nloc.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_nloc_h_test
+;
+@llvm_mips_nloc_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_nloc_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_nloc_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_nloc_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.nloc.w(<4 x i32> %0)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_nloc_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.nloc.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_nloc_w_test:
+; CHECK: ld.w
+; CHECK: nloc.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_nloc_w_test
+;
+@llvm_mips_nloc_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_nloc_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_nloc_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_nloc_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.nloc.d(<2 x i64> %0)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_nloc_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.nloc.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_nloc_d_test:
+; CHECK: ld.d
+; CHECK: nloc.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_nloc_d_test
+;
+@llvm_mips_nlzc_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nlzc_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nlzc_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_nlzc_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.nlzc.b(<16 x i8> %0)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_nlzc_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.nlzc.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_nlzc_b_test:
+; CHECK: ld.b
+; CHECK: nlzc.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_nlzc_b_test
+;
+@llvm_mips_nlzc_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_nlzc_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_nlzc_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_nlzc_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.nlzc.h(<8 x i16> %0)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_nlzc_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.nlzc.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_nlzc_h_test:
+; CHECK: ld.h
+; CHECK: nlzc.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_nlzc_h_test
+;
+@llvm_mips_nlzc_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_nlzc_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_nlzc_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_nlzc_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.nlzc.w(<4 x i32> %0)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_nlzc_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.nlzc.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_nlzc_w_test:
+; CHECK: ld.w
+; CHECK: nlzc.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_nlzc_w_test
+;
+@llvm_mips_nlzc_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_nlzc_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_nlzc_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_nlzc_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.nlzc.d(<2 x i64> %0)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_nlzc_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.nlzc.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_nlzc_d_test:
+; CHECK: ld.d
+; CHECK: nlzc.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_nlzc_d_test
+;
+@llvm_mips_pcnt_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_pcnt_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_pcnt_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_pcnt_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.pcnt.b(<16 x i8> %0)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_pcnt_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.pcnt.b(<16 x i8>) nounwind
+
+; CHECK: llvm_mips_pcnt_b_test:
+; CHECK: ld.b
+; CHECK: pcnt.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_pcnt_b_test
+;
+@llvm_mips_pcnt_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_pcnt_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_pcnt_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_pcnt_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.pcnt.h(<8 x i16> %0)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_pcnt_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.pcnt.h(<8 x i16>) nounwind
+
+; CHECK: llvm_mips_pcnt_h_test:
+; CHECK: ld.h
+; CHECK: pcnt.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_pcnt_h_test
+;
+@llvm_mips_pcnt_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_pcnt_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_pcnt_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_pcnt_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.pcnt.w(<4 x i32> %0)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_pcnt_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.pcnt.w(<4 x i32>) nounwind
+
+; CHECK: llvm_mips_pcnt_w_test:
+; CHECK: ld.w
+; CHECK: pcnt.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_pcnt_w_test
+;
+@llvm_mips_pcnt_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_pcnt_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_pcnt_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_pcnt_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.pcnt.d(<2 x i64> %0)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_pcnt_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.pcnt.d(<2 x i64>) nounwind
+
+; CHECK: llvm_mips_pcnt_d_test:
+; CHECK: ld.d
+; CHECK: pcnt.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_pcnt_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/3r-m.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/3r-m.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/3r-m.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/3r-m.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,794 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_max_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_max_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_max_a_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_max_a_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_max_a_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_max_a_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.max.a.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_max_a_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.max.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_max_a_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: max_a.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_max_a_b_test
+;
+@llvm_mips_max_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_max_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_max_a_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_max_a_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_max_a_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_max_a_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.max.a.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_max_a_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.max.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_max_a_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: max_a.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_max_a_h_test
+;
+@llvm_mips_max_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_max_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_max_a_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_max_a_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_max_a_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_max_a_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.max.a.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_max_a_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.max.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_max_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: max_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_max_a_w_test
+;
+@llvm_mips_max_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_max_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_max_a_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_max_a_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_max_a_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_max_a_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.max.a.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_max_a_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.max.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_max_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: max_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_max_a_d_test
+;
+@llvm_mips_max_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_max_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_max_s_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_max_s_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_max_s_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_max_s_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.max.s.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_max_s_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.max.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_max_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: max_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_max_s_b_test
+;
+@llvm_mips_max_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_max_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_max_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_max_s_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_max_s_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_max_s_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.max.s.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_max_s_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.max.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_max_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: max_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_max_s_h_test
+;
+@llvm_mips_max_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_max_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_max_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_max_s_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_max_s_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_max_s_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.max.s.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_max_s_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.max.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_max_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: max_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_max_s_w_test
+;
+@llvm_mips_max_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_max_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_max_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_max_s_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_max_s_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_max_s_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.max.s.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_max_s_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.max.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_max_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: max_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_max_s_d_test
+;
+@llvm_mips_max_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_max_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_max_u_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_max_u_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_max_u_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_max_u_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.max.u.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_max_u_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.max.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_max_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: max_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_max_u_b_test
+;
+@llvm_mips_max_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_max_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_max_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_max_u_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_max_u_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_max_u_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.max.u.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_max_u_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.max.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_max_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: max_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_max_u_h_test
+;
+@llvm_mips_max_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_max_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_max_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_max_u_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_max_u_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_max_u_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.max.u.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_max_u_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.max.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_max_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: max_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_max_u_w_test
+;
+@llvm_mips_max_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_max_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_max_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_max_u_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_max_u_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_max_u_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.max.u.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_max_u_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.max.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_max_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: max_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_max_u_d_test
+;
+@llvm_mips_min_a_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_min_a_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_min_a_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_min_a_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_min_a_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_min_a_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.min.a.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_min_a_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.min.a.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_min_a_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: min_a.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_min_a_b_test
+;
+@llvm_mips_min_a_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_min_a_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_min_a_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_min_a_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_min_a_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_min_a_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.min.a.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_min_a_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.min.a.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_min_a_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: min_a.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_min_a_h_test
+;
+@llvm_mips_min_a_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_min_a_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_min_a_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_min_a_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_min_a_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_min_a_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.min.a.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_min_a_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.min.a.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_min_a_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: min_a.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_min_a_w_test
+;
+@llvm_mips_min_a_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_min_a_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_min_a_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_min_a_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_min_a_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_min_a_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.min.a.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_min_a_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.min.a.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_min_a_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: min_a.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_min_a_d_test
+;
+@llvm_mips_min_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_min_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_min_s_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_min_s_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_min_s_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_min_s_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.min.s.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_min_s_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.min.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_min_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: min_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_min_s_b_test
+;
+@llvm_mips_min_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_min_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_min_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_min_s_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_min_s_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_min_s_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.min.s.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_min_s_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.min.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_min_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: min_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_min_s_h_test
+;
+@llvm_mips_min_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_min_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_min_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_min_s_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_min_s_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_min_s_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.min.s.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_min_s_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.min.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_min_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: min_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_min_s_w_test
+;
+@llvm_mips_min_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_min_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_min_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_min_s_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_min_s_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_min_s_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.min.s.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_min_s_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.min.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_min_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: min_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_min_s_d_test
+;
+@llvm_mips_min_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_min_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_min_u_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_min_u_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_min_u_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_min_u_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.min.u.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_min_u_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.min.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_min_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: min_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_min_u_b_test
+;
+@llvm_mips_min_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_min_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_min_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_min_u_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_min_u_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_min_u_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.min.u.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_min_u_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.min.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_min_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: min_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_min_u_h_test
+;
+@llvm_mips_min_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_min_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_min_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_min_u_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_min_u_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_min_u_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.min.u.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_min_u_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.min.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_min_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: min_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_min_u_w_test
+;
+@llvm_mips_min_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_min_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_min_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_min_u_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_min_u_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_min_u_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.min.u.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_min_u_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.min.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_min_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: min_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_min_u_d_test
+;
+@llvm_mips_mod_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mod_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_mod_s_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mod_s_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_mod_s_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_mod_s_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_s_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_mod_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mod_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mod_s_b_test
+;
+@llvm_mips_mod_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mod_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mod_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mod_s_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_mod_s_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_mod_s_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_s_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mod_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mod_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mod_s_h_test
+;
+@llvm_mips_mod_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mod_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mod_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mod_s_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_mod_s_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_mod_s_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_s_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mod_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mod_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mod_s_w_test
+;
+@llvm_mips_mod_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mod_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_mod_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mod_s_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_mod_s_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_mod_s_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_s_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_mod_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mod_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mod_s_d_test
+;
+@llvm_mips_mod_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mod_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_mod_u_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mod_u_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_mod_u_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_mod_u_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_u_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_mod_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mod_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mod_u_b_test
+;
+@llvm_mips_mod_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mod_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mod_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mod_u_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_mod_u_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_mod_u_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_u_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mod_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mod_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mod_u_h_test
+;
+@llvm_mips_mod_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mod_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mod_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mod_u_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_mod_u_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_mod_u_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_u_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mod_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mod_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mod_u_w_test
+;
+@llvm_mips_mod_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mod_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_mod_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mod_u_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_mod_u_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_mod_u_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_u_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_mod_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mod_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mod_u_d_test
+;
+@llvm_mips_mulv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mulv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_mulv_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mulv_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_mulv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: mulv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mulv_b_test
+;
+@llvm_mips_mulv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mulv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mulv_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mulv_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mulv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mulv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mulv_h_test
+;
+@llvm_mips_mulv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mulv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mulv_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mulv_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mulv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mulv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mulv_w_test
+;
+@llvm_mips_mulv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mulv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_mulv_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mulv_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_mulv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: mulv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mulv_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/3r-p.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/3r-p.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/3r-p.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/3r-p.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,178 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_pckev_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_pckev_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_pckev_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_pckev_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_pckev_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_pckev_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.pckev.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_pckev_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.pckev.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_pckev_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: pckev.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_pckev_b_test
+;
+@llvm_mips_pckev_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_pckev_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_pckev_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_pckev_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_pckev_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_pckev_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.pckev.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_pckev_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.pckev.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_pckev_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: pckev.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_pckev_h_test
+;
+@llvm_mips_pckev_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_pckev_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_pckev_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_pckev_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_pckev_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_pckev_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.pckev.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_pckev_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.pckev.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_pckev_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: pckev.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_pckev_w_test
+;
+@llvm_mips_pckev_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_pckev_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_pckev_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_pckev_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_pckev_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_pckev_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.pckev.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_pckev_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.pckev.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_pckev_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: pckev.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_pckev_d_test
+;
+@llvm_mips_pckod_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_pckod_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_pckod_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_pckod_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_pckod_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_pckod_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.pckod.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_pckod_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.pckod.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_pckod_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: pckod.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_pckod_b_test
+;
+@llvm_mips_pckod_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_pckod_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_pckod_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_pckod_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_pckod_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_pckod_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.pckod.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_pckod_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.pckod.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_pckod_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: pckod.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_pckod_h_test
+;
+@llvm_mips_pckod_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_pckod_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_pckod_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_pckod_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_pckod_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_pckod_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.pckod.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_pckod_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.pckod.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_pckod_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: pckod.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_pckod_w_test
+;
+@llvm_mips_pckod_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_pckod_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_pckod_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_pckod_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_pckod_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_pckod_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.pckod.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_pckod_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.pckod.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_pckod_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: pckod.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_pckod_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/3r-s.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/3r-s.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/3r-s.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/3r-s.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,794 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_sld_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sld_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_sld_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sld_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_sld_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_sld_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_sld_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_sld_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: sld.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sld_b_test
+;
+@llvm_mips_sld_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sld_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_sld_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sld_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_sld_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_sld_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_sld_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_sld_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: sld.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sld_h_test
+;
+@llvm_mips_sld_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sld_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_sld_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sld_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_sld_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_sld_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_sld_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_sld_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: sld.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sld_w_test
+;
+@llvm_mips_sld_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sld_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_sld_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sld_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_sld_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_sld_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_sld_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_sld_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: sld.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sld_d_test
+;
+@llvm_mips_sll_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sll_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_sll_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sll_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.sll.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_sll_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: sll.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sll_b_test
+;
+@llvm_mips_sll_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sll_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_sll_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sll_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.sll.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_sll_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: sll.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sll_h_test
+;
+@llvm_mips_sll_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sll_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_sll_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sll_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_sll_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: sll.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sll_w_test
+;
+@llvm_mips_sll_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sll_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_sll_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sll_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.sll.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_sll_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: sll.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sll_d_test
+;
+@llvm_mips_sra_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sra_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_sra_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sra_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.sra.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_sra_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: sra.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sra_b_test
+;
+@llvm_mips_sra_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sra_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_sra_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sra_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.sra.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_sra_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: sra.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sra_h_test
+;
+@llvm_mips_sra_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sra_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_sra_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sra_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_sra_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: sra.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sra_w_test
+;
+@llvm_mips_sra_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sra_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_sra_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sra_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.sra.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_sra_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: sra.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sra_d_test
+;
+@llvm_mips_srl_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srl_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_srl_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srl_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.srl.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_srl_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: srl.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srl_b_test
+;
+@llvm_mips_srl_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srl_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_srl_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srl_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.srl.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_srl_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: srl.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srl_h_test
+;
+@llvm_mips_srl_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srl_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_srl_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srl_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_srl_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: srl.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srl_w_test
+;
+@llvm_mips_srl_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srl_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_srl_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srl_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.srl.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_srl_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: srl.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srl_d_test
+;
+@llvm_mips_subs_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subs_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subs_s_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subs_s_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_subs_s_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_subs_s_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.subs.s.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_s_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.subs.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subs_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: subs_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_subs_s_b_test
+;
+@llvm_mips_subs_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subs_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subs_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subs_s_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_subs_s_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_subs_s_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.subs.s.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_s_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.subs.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subs_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: subs_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_subs_s_h_test
+;
+@llvm_mips_subs_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subs_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subs_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subs_s_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_subs_s_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_subs_s_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.subs.s.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_s_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.subs.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subs_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: subs_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_subs_s_w_test
+;
+@llvm_mips_subs_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subs_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subs_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subs_s_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_subs_s_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_subs_s_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.subs.s.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_s_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.subs.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subs_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: subs_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_subs_s_d_test
+;
+@llvm_mips_subs_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subs_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subs_u_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subs_u_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_subs_u_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_subs_u_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.subs.u.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_u_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.subs.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subs_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: subs_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_subs_u_b_test
+;
+@llvm_mips_subs_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subs_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subs_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subs_u_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_subs_u_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_subs_u_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.subs.u.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_u_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.subs.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subs_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: subs_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_subs_u_h_test
+;
+@llvm_mips_subs_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subs_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subs_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subs_u_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_subs_u_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_subs_u_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.subs.u.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_u_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.subs.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subs_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: subs_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_subs_u_w_test
+;
+@llvm_mips_subs_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subs_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subs_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subs_u_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_subs_u_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_subs_u_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.subs.u.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_u_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.subs.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subs_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: subs_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_subs_u_d_test
+;
+@llvm_mips_subsus_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subsus_u_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subsus_u_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subsus_u_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.subsus.u.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_subsus_u_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.subsus.u.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subsus_u_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: subsus_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_subsus_u_b_test
+;
+@llvm_mips_subsus_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subsus_u_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subsus_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subsus_u_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.subsus.u.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_subsus_u_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.subsus.u.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subsus_u_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: subsus_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_subsus_u_h_test
+;
+@llvm_mips_subsus_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subsus_u_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subsus_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subsus_u_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.subsus.u.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_subsus_u_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.subsus.u.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subsus_u_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: subsus_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_subsus_u_w_test
+;
+@llvm_mips_subsus_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subsus_u_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subsus_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subsus_u_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.subsus.u.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_subsus_u_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.subsus.u.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subsus_u_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: subsus_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_subsus_u_d_test
+;
+@llvm_mips_subsuu_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subsuu_s_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subsuu_s_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subsuu_s_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_subsuu_s_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: subsuu_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_subsuu_s_b_test
+;
+@llvm_mips_subsuu_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subsuu_s_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subsuu_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subsuu_s_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_subsuu_s_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: subsuu_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_subsuu_s_h_test
+;
+@llvm_mips_subsuu_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subsuu_s_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subsuu_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subsuu_s_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_subsuu_s_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: subsuu_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_subsuu_s_w_test
+;
+@llvm_mips_subsuu_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subsuu_s_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subsuu_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subsuu_s_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_subsuu_s_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subsuu_s_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: subsuu_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_subsuu_s_d_test
+;
+@llvm_mips_subv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_subv_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subv_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_subv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: subv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_subv_b_test
+;
+@llvm_mips_subv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_subv_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subv_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_subv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: subv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_subv_h_test
+;
+@llvm_mips_subv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_subv_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subv_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_subv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: subv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_subv_w_test
+;
+@llvm_mips_subv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_subv_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subv_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_subv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: subv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_subv_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/3r-v.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/3r-v.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/3r-v.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/3r-v.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,90 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_vshf_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_vshf_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_vshf_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_vshf_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_vshf_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_vshf_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_vshf_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_vshf_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: vshf.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_vshf_b_test
+;
+@llvm_mips_vshf_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_vshf_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_vshf_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_vshf_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_vshf_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_vshf_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_vshf_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_vshf_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: vshf.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_vshf_h_test
+;
+@llvm_mips_vshf_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_vshf_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_vshf_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_vshf_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_vshf_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_vshf_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_vshf_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_vshf_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: vshf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_vshf_w_test
+;
+@llvm_mips_vshf_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_vshf_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_vshf_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_vshf_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_vshf_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_vshf_d_ARG2
+  %2 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* @llvm_mips_vshf_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.vshf.d(<2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_vshf_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: vshf.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_vshf_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/3r_4r.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/3r_4r.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/3r_4r.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/3r_4r.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,202 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_maddv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_maddv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_maddv_b_ARG3 = global <16 x i8> <i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39, i8 40, i8 41, i8 42, i8 43, i8 44, i8 45, i8 46, i8 47>, align 16
+@llvm_mips_maddv_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_maddv_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_maddv_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_maddv_b_ARG2
+  %2 = load <16 x i8>* @llvm_mips_maddv_b_ARG3
+  %3 = tail call <16 x i8> @llvm.mips.maddv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+  store <16 x i8> %3, <16 x i8>* @llvm_mips_maddv_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.maddv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_maddv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: maddv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_maddv_b_test
+;
+@llvm_mips_maddv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maddv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_maddv_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_maddv_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maddv_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_maddv_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_maddv_h_ARG2
+  %2 = load <8 x i16>* @llvm_mips_maddv_h_ARG3
+  %3 = tail call <8 x i16> @llvm.mips.maddv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  store <8 x i16> %3, <8 x i16>* @llvm_mips_maddv_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.maddv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_maddv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: maddv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maddv_h_test
+;
+@llvm_mips_maddv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maddv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_maddv_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_maddv_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maddv_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_maddv_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_maddv_w_ARG2
+  %2 = load <4 x i32>* @llvm_mips_maddv_w_ARG3
+  %3 = tail call <4 x i32> @llvm.mips.maddv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  store <4 x i32> %3, <4 x i32>* @llvm_mips_maddv_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.maddv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_maddv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: maddv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maddv_w_test
+;
+@llvm_mips_maddv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_maddv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_maddv_d_ARG3 = global <2 x i64> <i64 4, i64 5>, align 16
+@llvm_mips_maddv_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_maddv_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_maddv_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_maddv_d_ARG2
+  %2 = load <2 x i64>* @llvm_mips_maddv_d_ARG3
+  %3 = tail call <2 x i64> @llvm.mips.maddv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+  store <2 x i64> %3, <2 x i64>* @llvm_mips_maddv_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.maddv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_maddv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: maddv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_maddv_d_test
+;
+@llvm_mips_msubv_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_msubv_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
+@llvm_mips_msubv_b_ARG3 = global <16 x i8> <i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38, i8 39, i8 40, i8 41, i8 42, i8 43, i8 44, i8 45, i8 46, i8 47>, align 16
+@llvm_mips_msubv_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_msubv_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_msubv_b_ARG1
+  %1 = load <16 x i8>* @llvm_mips_msubv_b_ARG2
+  %2 = load <16 x i8>* @llvm_mips_msubv_b_ARG3
+  %3 = tail call <16 x i8> @llvm.mips.msubv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+  store <16 x i8> %3, <16 x i8>* @llvm_mips_msubv_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.msubv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
+
+; CHECK: llvm_mips_msubv_b_test:
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: ld.b
+; CHECK: msubv.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_msubv_b_test
+;
+@llvm_mips_msubv_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_msubv_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_msubv_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_msubv_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_msubv_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_msubv_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_msubv_h_ARG2
+  %2 = load <8 x i16>* @llvm_mips_msubv_h_ARG3
+  %3 = tail call <8 x i16> @llvm.mips.msubv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  store <8 x i16> %3, <8 x i16>* @llvm_mips_msubv_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.msubv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_msubv_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: msubv.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_msubv_h_test
+;
+@llvm_mips_msubv_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_msubv_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_msubv_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_msubv_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_msubv_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_msubv_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_msubv_w_ARG2
+  %2 = load <4 x i32>* @llvm_mips_msubv_w_ARG3
+  %3 = tail call <4 x i32> @llvm.mips.msubv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  store <4 x i32> %3, <4 x i32>* @llvm_mips_msubv_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.msubv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_msubv_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: msubv.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_msubv_w_test
+;
+@llvm_mips_msubv_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_msubv_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
+@llvm_mips_msubv_d_ARG3 = global <2 x i64> <i64 4, i64 5>, align 16
+@llvm_mips_msubv_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_msubv_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_msubv_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_msubv_d_ARG2
+  %2 = load <2 x i64>* @llvm_mips_msubv_d_ARG3
+  %3 = tail call <2 x i64> @llvm.mips.msubv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
+  store <2 x i64> %3, <2 x i64>* @llvm_mips_msubv_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.msubv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
+
+; CHECK: llvm_mips_msubv_d_test:
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: ld.d
+; CHECK: msubv.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_msubv_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/3r_splat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/3r_splat.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/3r_splat.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/3r_splat.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,78 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_splat_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_splat_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_splat_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_splat_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.splat.b(<16 x i8> %0, i32 3)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_splat_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.splat.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_splat_b_test:
+; CHECK: ld.b
+; CHECK: splat.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_splat_b_test
+;
+@llvm_mips_splat_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_splat_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_splat_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_splat_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.splat.h(<8 x i16> %0, i32 3)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_splat_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.splat.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_splat_h_test:
+; CHECK: ld.h
+; CHECK: splat.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_splat_h_test
+;
+@llvm_mips_splat_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_splat_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_splat_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_splat_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.splat.w(<4 x i32> %0, i32 3)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_splat_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.splat.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_splat_w_test:
+; CHECK: ld.w
+; CHECK: splat.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_splat_w_test
+;
+@llvm_mips_splat_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_splat_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_splat_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_splat_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> %0, i32 3)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_splat_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.splat.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_splat_d_test:
+; CHECK: ld.d
+; CHECK: splat.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_splat_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/3rf_4rf_q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/3rf_4rf_q.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/3rf_4rf_q.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/3rf_4rf_q.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,202 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_madd_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_madd_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_madd_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_madd_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_madd_q_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_madd_q_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_madd_q_h_ARG2
+  %2 = load <8 x i16>* @llvm_mips_madd_q_h_ARG3
+  %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  store <8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_madd_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: madd_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_madd_q_h_test
+;
+@llvm_mips_madd_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_madd_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_madd_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_madd_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_madd_q_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_madd_q_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_madd_q_w_ARG2
+  %2 = load <4 x i32>* @llvm_mips_madd_q_w_ARG3
+  %3 = tail call <4 x i32> @llvm.mips.madd.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  store <4 x i32> %3, <4 x i32>* @llvm_mips_madd_q_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.madd.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_madd_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: madd_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_madd_q_w_test
+;
+@llvm_mips_maddr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maddr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_maddr_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_maddr_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maddr_q_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG2
+  %2 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG3
+  %3 = tail call <8 x i16> @llvm.mips.maddr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  store <8 x i16> %3, <8 x i16>* @llvm_mips_maddr_q_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.maddr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_maddr_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: maddr_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maddr_q_h_test
+;
+@llvm_mips_maddr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maddr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_maddr_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_maddr_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maddr_q_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG2
+  %2 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG3
+  %3 = tail call <4 x i32> @llvm.mips.maddr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  store <4 x i32> %3, <4 x i32>* @llvm_mips_maddr_q_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.maddr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_maddr_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: maddr_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maddr_q_w_test
+;
+@llvm_mips_msub_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_msub_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_msub_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_msub_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_msub_q_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_msub_q_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_msub_q_h_ARG2
+  %2 = load <8 x i16>* @llvm_mips_msub_q_h_ARG3
+  %3 = tail call <8 x i16> @llvm.mips.msub.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  store <8 x i16> %3, <8 x i16>* @llvm_mips_msub_q_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.msub.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_msub_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: msub_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_msub_q_h_test
+;
+@llvm_mips_msub_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_msub_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_msub_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_msub_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_msub_q_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_msub_q_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_msub_q_w_ARG2
+  %2 = load <4 x i32>* @llvm_mips_msub_q_w_ARG3
+  %3 = tail call <4 x i32> @llvm.mips.msub.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  store <4 x i32> %3, <4 x i32>* @llvm_mips_msub_q_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.msub.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_msub_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: msub_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_msub_q_w_test
+;
+@llvm_mips_msubr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_msubr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_msubr_q_h_ARG3 = global <8 x i16> <i16 16, i16 17, i16 18, i16 19, i16 20, i16 21, i16 22, i16 23>, align 16
+@llvm_mips_msubr_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_msubr_q_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG2
+  %2 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG3
+  %3 = tail call <8 x i16> @llvm.mips.msubr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
+  store <8 x i16> %3, <8 x i16>* @llvm_mips_msubr_q_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.msubr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_msubr_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: msubr_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_msubr_q_h_test
+;
+@llvm_mips_msubr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_msubr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_msubr_q_w_ARG3 = global <4 x i32> <i32 8, i32 9, i32 10, i32 11>, align 16
+@llvm_mips_msubr_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_msubr_q_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG2
+  %2 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG3
+  %3 = tail call <4 x i32> @llvm.mips.msubr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
+  store <4 x i32> %3, <4 x i32>* @llvm_mips_msubr_q_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.msubr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_msubr_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: msubr_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_msubr_q_w_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/3rf_q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/3rf_q.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/3rf_q.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/3rf_q.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,90 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_mul_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mul_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mul_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mul_q_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_mul_q_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_mul_q_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.mul.q.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_mul_q_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.mul.q.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mul_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mul_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mul_q_h_test
+;
+@llvm_mips_mul_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mul_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mul_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mul_q_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_mul_q_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_mul_q_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.mul.q.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_mul_q_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.mul.q.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mul_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mul_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mul_q_w_test
+;
+@llvm_mips_mulr_q_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mulr_q_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
+@llvm_mips_mulr_q_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mulr_q_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG2
+  %2 = tail call <8 x i16> @llvm.mips.mulr.q.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* @llvm_mips_mulr_q_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.mulr.q.h(<8 x i16>, <8 x i16>) nounwind
+
+; CHECK: llvm_mips_mulr_q_h_test:
+; CHECK: ld.h
+; CHECK: ld.h
+; CHECK: mulr_q.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mulr_q_h_test
+;
+@llvm_mips_mulr_q_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mulr_q_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
+@llvm_mips_mulr_q_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mulr_q_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG2
+  %2 = tail call <4 x i32> @llvm.mips.mulr.q.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* @llvm_mips_mulr_q_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.mulr.q.w(<4 x i32>, <4 x i32>) nounwind
+
+; CHECK: llvm_mips_mulr_q_w_test:
+; CHECK: ld.w
+; CHECK: ld.w
+; CHECK: mulr_q.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mulr_q_w_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/bit.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/bit.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/bit.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/bit.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,382 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_sat_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sat_s_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sat_s_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_sat_s_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.sat.s.b(<16 x i8> %0, i32 7)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_s_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.sat.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_b_test:
+; CHECK: ld.b
+; CHECK: sat_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sat_s_b_test
+;
+@llvm_mips_sat_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sat_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sat_s_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_sat_s_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.sat.s.h(<8 x i16> %0, i32 7)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_s_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.sat.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_h_test:
+; CHECK: ld.h
+; CHECK: sat_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sat_s_h_test
+;
+@llvm_mips_sat_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sat_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sat_s_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_sat_s_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.sat.s.w(<4 x i32> %0, i32 7)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_s_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.sat.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_w_test:
+; CHECK: ld.w
+; CHECK: sat_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sat_s_w_test
+;
+@llvm_mips_sat_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sat_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sat_s_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_sat_s_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.sat.s.d(<2 x i64> %0, i32 7)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_s_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.sat.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sat_s_d_test:
+; CHECK: ld.d
+; CHECK: sat_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sat_s_d_test
+;
+@llvm_mips_sat_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sat_u_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sat_u_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_sat_u_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.sat.u.b(<16 x i8> %0, i32 7)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_u_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.sat.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_b_test:
+; CHECK: ld.b
+; CHECK: sat_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sat_u_b_test
+;
+@llvm_mips_sat_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sat_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sat_u_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_sat_u_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.sat.u.h(<8 x i16> %0, i32 7)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_u_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.sat.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_h_test:
+; CHECK: ld.h
+; CHECK: sat_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sat_u_h_test
+;
+@llvm_mips_sat_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sat_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sat_u_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_sat_u_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.sat.u.w(<4 x i32> %0, i32 7)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_u_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.sat.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_w_test:
+; CHECK: ld.w
+; CHECK: sat_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sat_u_w_test
+;
+@llvm_mips_sat_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sat_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sat_u_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_sat_u_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.sat.u.d(<2 x i64> %0, i32 7)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_u_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.sat.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sat_u_d_test:
+; CHECK: ld.d
+; CHECK: sat_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sat_u_d_test
+;
+@llvm_mips_slli_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_slli_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_slli_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_slli_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.slli.b(<16 x i8> %0, i32 7)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_slli_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_slli_b_test:
+; CHECK: ld.b
+; CHECK: slli.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_slli_b_test
+;
+@llvm_mips_slli_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_slli_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_slli_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_slli_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.slli.h(<8 x i16> %0, i32 7)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_slli_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_slli_h_test:
+; CHECK: ld.h
+; CHECK: slli.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_slli_h_test
+;
+@llvm_mips_slli_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_slli_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_slli_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_slli_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %0, i32 7)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_slli_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_slli_w_test:
+; CHECK: ld.w
+; CHECK: slli.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_slli_w_test
+;
+@llvm_mips_slli_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_slli_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_slli_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_slli_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %0, i32 7)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_slli_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_slli_d_test:
+; CHECK: ld.d
+; CHECK: slli.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_slli_d_test
+;
+@llvm_mips_srai_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srai_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srai_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_srai_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.srai.b(<16 x i8> %0, i32 7)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_srai_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srai_b_test:
+; CHECK: ld.b
+; CHECK: srai.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srai_b_test
+;
+@llvm_mips_srai_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srai_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srai_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_srai_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.srai.h(<8 x i16> %0, i32 7)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_srai_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srai_h_test:
+; CHECK: ld.h
+; CHECK: srai.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srai_h_test
+;
+@llvm_mips_srai_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srai_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srai_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_srai_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.srai.w(<4 x i32> %0, i32 7)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_srai_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srai_w_test:
+; CHECK: ld.w
+; CHECK: srai.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srai_w_test
+;
+@llvm_mips_srai_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srai_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srai_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_srai_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.srai.d(<2 x i64> %0, i32 7)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_srai_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srai_d_test:
+; CHECK: ld.d
+; CHECK: srai.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srai_d_test
+;
+@llvm_mips_srli_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_srli_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_srli_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_srli_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.srli.b(<16 x i8> %0, i32 7)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_srli_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_srli_b_test:
+; CHECK: ld.b
+; CHECK: srli.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_srli_b_test
+;
+@llvm_mips_srli_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_srli_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_srli_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_srli_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.srli.h(<8 x i16> %0, i32 7)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_srli_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_srli_h_test:
+; CHECK: ld.h
+; CHECK: srli.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_srli_h_test
+;
+@llvm_mips_srli_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_srli_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_srli_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_srli_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_srli_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_srli_w_test:
+; CHECK: ld.w
+; CHECK: srli.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_srli_w_test
+;
+@llvm_mips_srli_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_srli_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_srli_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_srli_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 7)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_srli_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_srli_d_test:
+; CHECK: ld.d
+; CHECK: srli.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_srli_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/elm_shift_slide.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/elm_shift_slide.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/elm_shift_slide.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/elm_shift_slide.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,154 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_sldi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_sldi_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_sldi_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_sldi_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, i32 1)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_sldi_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_b_test:
+; CHECK: ld.b
+; CHECK: sldi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_sldi_b_test
+;
+@llvm_mips_sldi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_sldi_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_sldi_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_sldi_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, i32 1)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_sldi_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_h_test:
+; CHECK: ld.h
+; CHECK: sldi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_sldi_h_test
+;
+@llvm_mips_sldi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_sldi_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_sldi_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_sldi_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, i32 1)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_sldi_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_w_test:
+; CHECK: ld.w
+; CHECK: sldi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_sldi_w_test
+;
+@llvm_mips_sldi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_sldi_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_sldi_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_sldi_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, i32 1)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_sldi_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_sldi_d_test:
+; CHECK: ld.d
+; CHECK: sldi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_sldi_d_test
+;
+@llvm_mips_splati_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_splati_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_splati_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_splati_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.splati.b(<16 x i8> %0, i32 1)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_splati_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_splati_b_test:
+; CHECK: ld.b
+; CHECK: splati.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_splati_b_test
+;
+@llvm_mips_splati_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_splati_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_splati_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_splati_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.splati.h(<8 x i16> %0, i32 1)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_splati_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_splati_h_test:
+; CHECK: ld.h
+; CHECK: splati.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_splati_h_test
+;
+@llvm_mips_splati_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_splati_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_splati_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_splati_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.splati.w(<4 x i32> %0, i32 1)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_splati_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_splati_w_test:
+; CHECK: ld.w
+; CHECK: splati.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_splati_w_test
+;
+@llvm_mips_splati_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_splati_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_splati_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_splati_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.splati.d(<2 x i64> %0, i32 1)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_splati_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.splati.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_splati_d_test:
+; CHECK: ld.d
+; CHECK: splati.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_splati_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/i5-m.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/i5-m.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/i5-m.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/i5-m.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,306 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_maxi_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_maxi_s_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_maxi_s_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_maxi_s_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 14)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_s_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_b_test:
+; CHECK: ld.b
+; CHECK: maxi_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_maxi_s_b_test
+;
+@llvm_mips_maxi_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maxi_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maxi_s_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_maxi_s_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 14)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_s_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_h_test:
+; CHECK: ld.h
+; CHECK: maxi_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maxi_s_h_test
+;
+@llvm_mips_maxi_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maxi_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maxi_s_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_maxi_s_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 14)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_s_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_w_test:
+; CHECK: ld.w
+; CHECK: maxi_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maxi_s_w_test
+;
+@llvm_mips_maxi_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_maxi_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_maxi_s_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_maxi_s_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 14)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_s_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_s_d_test:
+; CHECK: ld.d
+; CHECK: maxi_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_maxi_s_d_test
+;
+@llvm_mips_maxi_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_maxi_u_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_maxi_u_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_maxi_u_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %0, i32 14)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_u_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_b_test:
+; CHECK: ld.b
+; CHECK: maxi_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_maxi_u_b_test
+;
+@llvm_mips_maxi_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_maxi_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_maxi_u_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_maxi_u_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %0, i32 14)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_u_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_h_test:
+; CHECK: ld.h
+; CHECK: maxi_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_maxi_u_h_test
+;
+@llvm_mips_maxi_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_maxi_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_maxi_u_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_maxi_u_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %0, i32 14)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_u_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_w_test:
+; CHECK: ld.w
+; CHECK: maxi_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_maxi_u_w_test
+;
+@llvm_mips_maxi_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_maxi_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_maxi_u_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_maxi_u_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %0, i32 14)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_u_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_maxi_u_d_test:
+; CHECK: ld.d
+; CHECK: maxi_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_maxi_u_d_test
+;
+@llvm_mips_mini_s_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mini_s_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mini_s_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_mini_s_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 14)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_s_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_b_test:
+; CHECK: ld.b
+; CHECK: mini_s.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mini_s_b_test
+;
+@llvm_mips_mini_s_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mini_s_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mini_s_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_mini_s_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 14)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_s_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_h_test:
+; CHECK: ld.h
+; CHECK: mini_s.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mini_s_h_test
+;
+@llvm_mips_mini_s_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mini_s_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mini_s_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_mini_s_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 14)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_s_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_w_test:
+; CHECK: ld.w
+; CHECK: mini_s.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mini_s_w_test
+;
+@llvm_mips_mini_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mini_s_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mini_s_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_mini_s_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 14)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_s_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_mini_s_d_test:
+; CHECK: ld.d
+; CHECK: mini_s.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mini_s_d_test
+;
+@llvm_mips_mini_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_mini_u_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_mini_u_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_mini_u_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %0, i32 14)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_u_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_b_test:
+; CHECK: ld.b
+; CHECK: mini_u.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_mini_u_b_test
+;
+@llvm_mips_mini_u_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_mini_u_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_mini_u_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_mini_u_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %0, i32 14)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_u_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_h_test:
+; CHECK: ld.h
+; CHECK: mini_u.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_mini_u_h_test
+;
+@llvm_mips_mini_u_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_mini_u_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_mini_u_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_mini_u_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %0, i32 14)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_u_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_w_test:
+; CHECK: ld.w
+; CHECK: mini_u.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_mini_u_w_test
+;
+@llvm_mips_mini_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_mini_u_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_mini_u_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_mini_u_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %0, i32 14)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_u_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.mini.u.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_mini_u_d_test:
+; CHECK: ld.d
+; CHECK: mini_u.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_mini_u_d_test
+;

Added: llvm/trunk/test/CodeGen/Mips/msa/i5-s.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/i5-s.ll?rev=188460&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/i5-s.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/msa/i5-s.ll Thu Aug 15 09:22:07 2013
@@ -0,0 +1,78 @@
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+
+@llvm_mips_subvi_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_subvi_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_subvi_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_subvi_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 14)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_subvi_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_b_test:
+; CHECK: ld.b
+; CHECK: subvi.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_subvi_b_test
+;
+@llvm_mips_subvi_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_subvi_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_subvi_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_subvi_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 14)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_subvi_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_h_test:
+; CHECK: ld.h
+; CHECK: subvi.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_subvi_h_test
+;
+@llvm_mips_subvi_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_subvi_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_subvi_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_subvi_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 14)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_subvi_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_w_test:
+; CHECK: ld.w
+; CHECK: subvi.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_subvi_w_test
+;
+@llvm_mips_subvi_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
+@llvm_mips_subvi_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16
+
+define void @llvm_mips_subvi_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_subvi_d_ARG1
+  %1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 14)
+  store <2 x i64> %1, <2 x i64>* @llvm_mips_subvi_d_RES
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.subvi.d(<2 x i64>, i32) nounwind
+
+; CHECK: llvm_mips_subvi_d_test:
+; CHECK: ld.d
+; CHECK: subvi.d
+; CHECK: st.d
+; CHECK: .size llvm_mips_subvi_d_test
+;

Modified: llvm/trunk/test/CodeGen/Mips/msa/i8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/i8.ll?rev=188460&r1=188459&r2=188460&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/i8.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/msa/i8.ll Thu Aug 15 09:22:07 2013
@@ -76,3 +76,117 @@ declare <16 x i8> @llvm.mips.bseli.b(<16
 ; CHECK: st.b
 ; CHECK: .size llvm_mips_bseli_b_test
 ;
+@llvm_mips_nori_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_nori_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_nori_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_nori_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.nori.b(<16 x i8> %0, i32 25)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_nori_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_nori_b_test:
+; CHECK: ld.b
+; CHECK: nori.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_nori_b_test
+;
+@llvm_mips_ori_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_ori_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_ori_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_ori_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.ori.b(<16 x i8> %0, i32 25)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_ori_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_ori_b_test:
+; CHECK: ld.b
+; CHECK: ori.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_ori_b_test
+;
+@llvm_mips_shf_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_shf_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_shf_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_shf_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.shf.b(<16 x i8> %0, i32 25)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_shf_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.shf.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_shf_b_test:
+; CHECK: ld.b
+; CHECK: shf.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_shf_b_test
+;
+@llvm_mips_shf_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
+@llvm_mips_shf_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
+
+define void @llvm_mips_shf_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_shf_h_ARG1
+  %1 = tail call <8 x i16> @llvm.mips.shf.h(<8 x i16> %0, i32 25)
+  store <8 x i16> %1, <8 x i16>* @llvm_mips_shf_h_RES
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.shf.h(<8 x i16>, i32) nounwind
+
+; CHECK: llvm_mips_shf_h_test:
+; CHECK: ld.h
+; CHECK: shf.h
+; CHECK: st.h
+; CHECK: .size llvm_mips_shf_h_test
+;
+@llvm_mips_shf_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@llvm_mips_shf_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
+
+define void @llvm_mips_shf_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_shf_w_ARG1
+  %1 = tail call <4 x i32> @llvm.mips.shf.w(<4 x i32> %0, i32 25)
+  store <4 x i32> %1, <4 x i32>* @llvm_mips_shf_w_RES
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.shf.w(<4 x i32>, i32) nounwind
+
+; CHECK: llvm_mips_shf_w_test:
+; CHECK: ld.w
+; CHECK: shf.w
+; CHECK: st.w
+; CHECK: .size llvm_mips_shf_w_test
+;
+@llvm_mips_xori_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
+@llvm_mips_xori_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
+
+define void @llvm_mips_xori_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>* @llvm_mips_xori_b_ARG1
+  %1 = tail call <16 x i8> @llvm.mips.xori.b(<16 x i8> %0, i32 25)
+  store <16 x i8> %1, <16 x i8>* @llvm_mips_xori_b_RES
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.xori.b(<16 x i8>, i32) nounwind
+
+; CHECK: llvm_mips_xori_b_test:
+; CHECK: ld.b
+; CHECK: xori.b
+; CHECK: st.b
+; CHECK: .size llvm_mips_xori_b_test
+;





More information about the llvm-commits mailing list