[llvm] r198675 - [AArch64 NEON] Fixed incorrect immediate used in BIC instruction.

Kevin Qin Kevin.Qin at arm.com
Mon Jan 6 21:10:47 PST 2014


Author: kevinqin
Date: Mon Jan  6 23:10:47 2014
New Revision: 198675

URL: http://llvm.org/viewvc/llvm-project?rev=198675&view=rev
Log:
[AArch64 NEON] Fixed incorrect immediate used in BIC instruction.
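
For reference, the effect of the fix can be exercised with a small IR snippet like the
one below, a minimal sketch mirroring the updated neon-bitwise-instructions.ll tests
(the function name is illustrative). An AND with the per-lane mask 0x00ff must clear
the high byte of each 16-bit element, so it should now select BIC with the
complementary immediate #0xff, LSL #8 instead of the previously emitted #0x0:

    define <4 x i16> @bic_low_byte_mask(<4 x i16> %a) {
    ; expected after this change: bic v0.4h, #0xff, lsl #8  (was: bic v0.4h, #0x0, lsl #8)
      %masked = and <4 x i16> %a, <i16 255, i16 255, i16 255, i16 255>
      ret <4 x i16> %masked
    }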

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td
    llvm/trunk/test/CodeGen/AArch64/neon-bitwise-instructions.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td?rev=198675&r1=198674&r2=198675&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td Mon Jan  6 23:10:47 2014
@@ -1220,8 +1220,8 @@ multiclass NeonI_mov_imm_with_constraint
                  !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
                  [(set (v2i32 VPR64:$Rd),
                     (v2i32 (opnode (v2i32 VPR64:$src),
-                      (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm,
-                        neon_mov_imm_LSL_operand:$Simm)))))))],
+                      (v2i32 (neonopnode timm:$Imm,
+                        neon_mov_imm_LSL_operand:$Simm)))))],
                  NoItinerary> {
       bits<2> Simm;
       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
@@ -1234,8 +1234,8 @@ multiclass NeonI_mov_imm_with_constraint
                  !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
                  [(set (v4i32 VPR128:$Rd),
                     (v4i32 (opnode (v4i32 VPR128:$src),
-                      (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm,
-                        neon_mov_imm_LSL_operand:$Simm)))))))],
+                      (v4i32 (neonopnode timm:$Imm,
+                        neon_mov_imm_LSL_operand:$Simm)))))],
                  NoItinerary> {
       bits<2> Simm;
       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
@@ -1249,8 +1249,8 @@ multiclass NeonI_mov_imm_with_constraint
                  !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
                  [(set (v4i16 VPR64:$Rd),
                     (v4i16 (opnode (v4i16 VPR64:$src),
-                       (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm,
-                          neon_mov_imm_LSL_operand:$Simm)))))))],
+                       (v4i16 (neonopnode timm:$Imm,
+                          neon_mov_imm_LSL_operand:$Simm)))))],
                  NoItinerary> {
       bit  Simm;
       let cmode = {0b1, 0b0, Simm, 0b1};
@@ -1263,8 +1263,8 @@ multiclass NeonI_mov_imm_with_constraint
                  !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
                  [(set (v8i16 VPR128:$Rd),
                     (v8i16 (opnode (v8i16 VPR128:$src),
-                      (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm,
-                        neon_mov_imm_LSL_operand:$Simm)))))))],
+                      (v8i16 (neonopnode timm:$Imm,
+                        neon_mov_imm_LSL_operand:$Simm)))))],
                  NoItinerary> {
       bit Simm;
       let cmode = {0b1, 0b0, Simm, 0b1};
@@ -1350,30 +1350,70 @@ def neon_mov_imm_LSLH_transform_operand
     return (HasShift && !ShiftOnesIn); }],
   neon_mov_imm_LSLH_transform_XFORM>;
 
-// Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0x00, LSL 8)
-// Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0x00)
+// Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0xff, LSL 8)
+// Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0xff)
 def : Pat<(v4i16 (and VPR64:$src,
-            (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
-          (BICvi_lsl_4H VPR64:$src, 0,
+            (v4i16 (Neon_movi 255,
+              neon_mov_imm_LSLH_transform_operand:$Simm)))),
+          (BICvi_lsl_4H VPR64:$src, 255,
             neon_mov_imm_LSLH_transform_operand:$Simm)>;
 
-// Transform (and A, (8h Neon_movi 8h 0xff)) -> BIC 8h (A, 0x00, LSL 8)
-// Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0x00)
+// Transform (and A, (8h Neon_movi 8h 0xff)) -> BIC 8h (A, 0xff, LSL 8)
+// Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0xff)
 def : Pat<(v8i16 (and VPR128:$src,
-            (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
-          (BICvi_lsl_8H VPR128:$src, 0,
+            (v8i16 (Neon_movi 255,
+              neon_mov_imm_LSLH_transform_operand:$Simm)))),
+          (BICvi_lsl_8H VPR128:$src, 255,
             neon_mov_imm_LSLH_transform_operand:$Simm)>;
 
+def : Pat<(v8i8 (and VPR64:$src,
+                  (bitconvert(v4i16 (Neon_movi 255,
+                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
+          (BICvi_lsl_4H VPR64:$src, 255,
+            neon_mov_imm_LSLH_transform_operand:$Simm)>;
+def : Pat<(v2i32 (and VPR64:$src,
+                 (bitconvert(v4i16 (Neon_movi 255,
+                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
+          (BICvi_lsl_4H VPR64:$src, 255,
+            neon_mov_imm_LSLH_transform_operand:$Simm)>;
+def : Pat<(v1i64 (and VPR64:$src,
+                (bitconvert(v4i16 (Neon_movi 255,
+                  neon_mov_imm_LSLH_transform_operand:$Simm))))),
+        (BICvi_lsl_4H VPR64:$src, 255,
+          neon_mov_imm_LSLH_transform_operand:$Simm)>;
+
+def : Pat<(v16i8 (and VPR128:$src,
+                 (bitconvert(v8i16 (Neon_movi 255,
+                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
+        (BICvi_lsl_8H VPR128:$src, 255,
+          neon_mov_imm_LSLH_transform_operand:$Simm)>;
+def : Pat<(v4i32 (and VPR128:$src,
+                 (bitconvert(v8i16 (Neon_movi 255,
+                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
+        (BICvi_lsl_8H VPR128:$src, 255,
+          neon_mov_imm_LSLH_transform_operand:$Simm)>;
+def : Pat<(v2i64 (and VPR128:$src,
+                 (bitconvert(v8i16 (Neon_movi 255,
+                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
+        (BICvi_lsl_8H VPR128:$src, 255,
+          neon_mov_imm_LSLH_transform_operand:$Simm)>;
 
 multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
                                    SDPatternOperator neonopnode,
                                    Instruction INST4H,
-                                   Instruction INST8H> {
+                                   Instruction INST8H,
+                                   Instruction INST2S,
+                                   Instruction INST4S> {
   def : Pat<(v8i8 (opnode VPR64:$src,
                     (bitconvert(v4i16 (neonopnode timm:$Imm,
                       neon_mov_imm_LSLH_operand:$Simm))))),
             (INST4H VPR64:$src, neon_uimm8:$Imm,
               neon_mov_imm_LSLH_operand:$Simm)>;
+  def : Pat<(v2i32 (opnode VPR64:$src,
+                   (bitconvert(v4i16 (neonopnode timm:$Imm,
+                     neon_mov_imm_LSLH_operand:$Simm))))),
+            (INST4H VPR64:$src, neon_uimm8:$Imm,
+              neon_mov_imm_LSLH_operand:$Simm)>;
   def : Pat<(v1i64 (opnode VPR64:$src,
                   (bitconvert(v4i16 (neonopnode timm:$Imm,
                     neon_mov_imm_LSLH_operand:$Simm))))),
@@ -1395,13 +1435,47 @@ multiclass Neon_bitwiseVi_patterns<SDPat
                      neon_mov_imm_LSLH_operand:$Simm))))),
           (INST8H VPR128:$src, neon_uimm8:$Imm,
             neon_mov_imm_LSLH_operand:$Simm)>;
+
+  def : Pat<(v8i8 (opnode VPR64:$src,
+                    (bitconvert(v2i32 (neonopnode timm:$Imm,
+                      neon_mov_imm_LSLH_operand:$Simm))))),
+            (INST2S VPR64:$src, neon_uimm8:$Imm,
+              neon_mov_imm_LSLH_operand:$Simm)>;
+  def : Pat<(v4i16 (opnode VPR64:$src,
+                   (bitconvert(v2i32 (neonopnode timm:$Imm,
+                     neon_mov_imm_LSLH_operand:$Simm))))),
+            (INST2S VPR64:$src, neon_uimm8:$Imm,
+              neon_mov_imm_LSLH_operand:$Simm)>;
+  def : Pat<(v1i64 (opnode VPR64:$src,
+                  (bitconvert(v2i32 (neonopnode timm:$Imm,
+                    neon_mov_imm_LSLH_operand:$Simm))))),
+          (INST2S VPR64:$src, neon_uimm8:$Imm,
+            neon_mov_imm_LSLH_operand:$Simm)>;
+
+  def : Pat<(v16i8 (opnode VPR128:$src,
+                   (bitconvert(v4i32 (neonopnode timm:$Imm,
+                     neon_mov_imm_LSLH_operand:$Simm))))),
+          (INST4S VPR128:$src, neon_uimm8:$Imm,
+            neon_mov_imm_LSLH_operand:$Simm)>;
+  def : Pat<(v8i16 (opnode VPR128:$src,
+                   (bitconvert(v4i32 (neonopnode timm:$Imm,
+                     neon_mov_imm_LSLH_operand:$Simm))))),
+          (INST4S VPR128:$src, neon_uimm8:$Imm,
+            neon_mov_imm_LSLH_operand:$Simm)>;
+  def : Pat<(v2i64 (opnode VPR128:$src,
+                   (bitconvert(v4i32 (neonopnode timm:$Imm,
+                     neon_mov_imm_LSLH_operand:$Simm))))),
+          (INST4S VPR128:$src, neon_uimm8:$Imm,
+            neon_mov_imm_LSLH_operand:$Simm)>;
 }
 
 // Additional patterns for Vector Vector Bitwise Bit Clear (AND NOT) - immediate
-defm : Neon_bitwiseVi_patterns<or, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;
+defm : Neon_bitwiseVi_patterns<and, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H,
+                               BICvi_lsl_2S, BICvi_lsl_4S>;
 
 // Additional patterns for Vector Bitwise OR - immedidate
-defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
+defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H,
+                               ORRvi_lsl_2S, ORRvi_lsl_4S>;
 
 
 // Vector Move Immediate Masked

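The new bitconvert patterns above can be checked the same way; a minimal sketch under
the same assumptions (illustrative function name), mirroring the added and8imm2s_lsl0
test below, where a v8i8 AND whose mask only forms a MOVI constant at the 2S
granularity is now selected as a 2S BIC:

    define <8 x i8> @bic_via_2s_imm(<8 x i8> %a) {
    ; expected: bic v0.2s, #0xff
      %masked = and <8 x i8> %a, <i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255>
      ret <8 x i8> %masked
    }
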
Modified: llvm/trunk/test/CodeGen/AArch64/neon-bitwise-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-bitwise-instructions.ll?rev=198675&r1=198674&r2=198675&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/neon-bitwise-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/neon-bitwise-instructions.ll Mon Jan  6 23:10:47 2014
@@ -1,6 +1,5 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
 
-
 define <8 x i8> @and8xi8(<8 x i8> %a, <8 x i8> %b) {
 ;CHECK: and {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
 	%tmp1 = and <8 x i8> %a, %b;
@@ -163,19 +162,19 @@ define <2 x i32> @bicimm2s_lsl0(<2 x i32
 
 define <2 x i32> @bicimm2s_lsl8(<2 x i32> %a) {
 ;CHECK:  bic {{v[0-9]+}}.2s, #0x10, lsl #8
-	%tmp1 = and <2 x i32> %a, < i32 18446744073709547519, i32  18446744073709547519 >
+	%tmp1 = and <2 x i32> %a, < i32 4294963199, i32  4294963199 >
 	ret <2 x i32> %tmp1
 }
 
 define <2 x i32> @bicimm2s_lsl16(<2 x i32> %a) {
 ;CHECK:  bic {{v[0-9]+}}.2s, #0x10, lsl #16
-	%tmp1 = and <2 x i32> %a, < i32 18446744073708503039, i32 18446744073708503039 >
+	%tmp1 = and <2 x i32> %a, < i32 4293918719, i32 4293918719 >
 	ret <2 x i32> %tmp1
 }
 
 define <2 x i32> @bicimm2s_lsl124(<2 x i32> %a) {
 ;CHECK:  bic {{v[0-9]+}}.2s, #0x10, lsl #24
-	%tmp1 = and <2 x i32> %a, < i32 18446744073441116159, i32  18446744073441116159>
+	%tmp1 = and <2 x i32> %a, < i32 4026531839, i32  4026531839>
 	ret <2 x i32> %tmp1
 }
 
@@ -187,68 +186,68 @@ define <4 x i32> @bicimm4s_lsl0(<4 x i32
 
 define <4 x i32> @bicimm4s_lsl8(<4 x i32> %a) {
 ;CHECK:  bic {{v[0-9]+}}.4s, #0x10, lsl #8
-	%tmp1 = and <4 x i32> %a, < i32 18446744073709547519, i32  18446744073709547519, i32  18446744073709547519, i32  18446744073709547519 >
+	%tmp1 = and <4 x i32> %a, < i32 4294963199, i32  4294963199, i32  4294963199, i32  4294963199 >
 	ret <4 x i32> %tmp1
 }
 
 define <4 x i32> @bicimm4s_lsl16(<4 x i32> %a) {
 ;CHECK:  bic {{v[0-9]+}}.4s, #0x10, lsl #16
-	%tmp1 = and <4 x i32> %a, < i32 18446744073708503039, i32 18446744073708503039, i32 18446744073708503039, i32 18446744073708503039 >
+	%tmp1 = and <4 x i32> %a, < i32 4293918719, i32 4293918719, i32 4293918719, i32 4293918719 >
 	ret <4 x i32> %tmp1
 }
 
 define <4 x i32> @bicimm4s_lsl124(<4 x i32> %a) {
 ;CHECK:  bic {{v[0-9]+}}.4s, #0x10, lsl #24
-	%tmp1 = and <4 x i32> %a, < i32 18446744073441116159, i32  18446744073441116159, i32  18446744073441116159, i32  18446744073441116159>
+	%tmp1 = and <4 x i32> %a, < i32 4026531839, i32  4026531839, i32  4026531839, i32  4026531839>
 	ret <4 x i32> %tmp1
 }
 
 define <4 x i16> @bicimm4h_lsl0_a(<4 x i16> %a) {
 ;CHECK:  bic {{v[0-9]+}}.4h, #0x10
-	%tmp1 = and <4 x i16> %a, < i16 18446744073709551599, i16  18446744073709551599, i16  18446744073709551599, i16  18446744073709551599 >
+	%tmp1 = and <4 x i16> %a, < i16 4294967279, i16  4294967279, i16  4294967279, i16  4294967279 >
 	ret <4 x i16> %tmp1
 }
 
 define <4 x i16> @bicimm4h_lsl0_b(<4 x i16> %a) {
-;CHECK:  bic {{v[0-9]+}}.4h, #0x0
+;CHECK:  bic {{v[0-9]+}}.4h, #0xff
 	%tmp1 = and <4 x i16> %a, < i16 65280, i16  65280, i16  65280, i16 65280 >
 	ret <4 x i16> %tmp1
 }
 
 define <4 x i16> @bicimm4h_lsl8_a(<4 x i16> %a) {
 ;CHECK:  bic {{v[0-9]+}}.4h, #0x10, lsl #8
-	%tmp1 = and <4 x i16> %a, < i16 18446744073709547519, i16  18446744073709547519, i16  18446744073709547519, i16  18446744073709547519>
+	%tmp1 = and <4 x i16> %a, < i16 4294963199, i16  4294963199, i16  4294963199, i16  4294963199>
 	ret <4 x i16> %tmp1
 }
 
 define <4 x i16> @bicimm4h_lsl8_b(<4 x i16> %a) {
-;CHECK:  bic {{v[0-9]+}}.4h, #0x0, lsl #8
+;CHECK:  bic {{v[0-9]+}}.4h, #0xff, lsl #8
 	%tmp1 = and <4 x i16> %a, < i16 255, i16 255, i16 255, i16 255>
 	ret <4 x i16> %tmp1
 }
 
 define <8 x i16> @bicimm8h_lsl0_a(<8 x i16> %a) {
 ;CHECK:  bic {{v[0-9]+}}.8h, #0x10
-	%tmp1 = and <8 x i16> %a, < i16 18446744073709551599, i16  18446744073709551599, i16  18446744073709551599, i16  18446744073709551599,
-   i16  18446744073709551599, i16  18446744073709551599, i16  18446744073709551599, i16  18446744073709551599 >
+	%tmp1 = and <8 x i16> %a, < i16 4294967279, i16  4294967279, i16  4294967279, i16  4294967279,
+   i16  4294967279, i16  4294967279, i16  4294967279, i16  4294967279 >
 	ret <8 x i16> %tmp1
 }
 
 define <8 x i16> @bicimm8h_lsl0_b(<8 x i16> %a) {
-;CHECK:  bic {{v[0-9]+}}.8h, #0x0
+;CHECK:  bic {{v[0-9]+}}.8h, #0xff
 	%tmp1 = and <8 x i16> %a, < i16 65280, i16  65280, i16  65280, i16 65280, i16 65280, i16  65280, i16  65280, i16 65280 >
 	ret <8 x i16> %tmp1
 }
 
 define <8 x i16> @bicimm8h_lsl8_a(<8 x i16> %a) {
 ;CHECK:  bic {{v[0-9]+}}.8h, #0x10, lsl #8
-	%tmp1 = and <8 x i16> %a, < i16 18446744073709547519, i16  18446744073709547519, i16  18446744073709547519, i16  18446744073709547519,
-   i16  18446744073709547519, i16  18446744073709547519, i16  18446744073709547519, i16  18446744073709547519>
+	%tmp1 = and <8 x i16> %a, < i16 4294963199, i16  4294963199, i16  4294963199, i16  4294963199,
+   i16  4294963199, i16  4294963199, i16  4294963199, i16  4294963199>
 	ret <8 x i16> %tmp1
 }
 
 define <8 x i16> @bicimm8h_lsl8_b(<8 x i16> %a) {
-;CHECK:  bic {{v[0-9]+}}.8h, #0x0, lsl #8
+;CHECK:  bic {{v[0-9]+}}.8h, #0xff, lsl #8
 	%tmp1 = and <8 x i16> %a, < i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
 	ret <8 x i16> %tmp1
 }
@@ -648,4 +647,436 @@ define <16 x i8> @orimm16b_as_orrimm8h_l
   ret <16 x i8> %val
 }
 
+define <8 x i8> @and8imm2s_lsl0(<8 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xff
+	%tmp1 = and <8 x i8> %a, < i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255>
+	ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @and8imm2s_lsl8(<8 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xff, lsl #8
+	%tmp1 = and <8 x i8> %a, < i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255>
+	ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @and8imm2s_lsl16(<8 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xff, lsl #16
+	%tmp1 = and <8 x i8> %a, < i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255>
+	ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @and8imm2s_lsl24(<8 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xfe, lsl #24
+	%tmp1 = and <8 x i8> %a, < i8 255, i8 255, i8 255, i8 1, i8 255, i8 255, i8 255, i8 1>
+	ret <8 x i8> %tmp1
+}
+
+define <4 x i16> @and16imm2s_lsl0(<4 x i16> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xff
+	%tmp1 = and <4 x i16> %a, < i16 65280, i16 65535, i16 65280, i16 65535>
+	ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @and16imm2s_lsl8(<4 x i16> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xff, lsl #8
+	%tmp1 = and <4 x i16> %a, < i16 255, i16 65535, i16 255, i16 65535>
+	ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @and16imm2s_lsl16(<4 x i16> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xff, lsl #16
+	%tmp1 = and <4 x i16> %a, < i16 65535, i16 65280, i16 65535, i16 65280>
+	ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @and16imm2s_lsl24(<4 x i16> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xfe, lsl #24
+	%tmp1 = and <4 x i16> %a, < i16 65535, i16 511, i16 65535, i16 511>
+	ret <4 x i16> %tmp1
+}
+
+
+define <1 x i64> @and64imm2s_lsl0(<1 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xff
+	%tmp1 = and <1 x i64> %a, < i64 -1095216660736>
+	ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @and64imm2s_lsl8(<1 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xff, lsl #8
+	%tmp1 = and <1 x i64> %a, < i64 -280375465148161>
+	ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @and64imm2s_lsl16(<1 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xff, lsl #16
+	%tmp1 = and <1 x i64> %a, < i64 -71776119077928961>
+	ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @and64imm2s_lsl24(<1 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.2s, #0xfe, lsl #24
+	%tmp1 = and <1 x i64> %a, < i64 144115183814443007>
+	ret <1 x i64> %tmp1
+}
+
+define <16 x i8> @and8imm4s_lsl0(<16 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xff
+	%tmp1 = and <16 x i8> %a, < i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255>
+	ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @and8imm4s_lsl8(<16 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xff, lsl #8
+	%tmp1 = and <16 x i8> %a, < i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255>
+	ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @and8imm4s_lsl16(<16 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xff, lsl #16
+	%tmp1 = and <16 x i8> %a, < i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255, i8 255, i8 255, i8 0, i8 255>
+	ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @and8imm4s_lsl24(<16 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xfe, lsl #24
+	%tmp1 = and <16 x i8> %a, < i8 255, i8 255, i8 255, i8 1, i8 255, i8 255, i8 255, i8 1, i8 255, i8 255, i8 255, i8 1, i8 255, i8 255, i8 255, i8 1>
+	ret <16 x i8> %tmp1
+}
+
+define <8 x i16> @and16imm4s_lsl0(<8 x i16> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xff
+	%tmp1 = and <8 x i16> %a, < i16 65280, i16 65535, i16 65280, i16 65535, i16 65280, i16 65535, i16 65280, i16 65535>
+	ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @and16imm4s_lsl8(<8 x i16> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xff, lsl #8
+	%tmp1 = and <8 x i16> %a, < i16 255, i16 65535, i16 255, i16 65535, i16 255, i16 65535, i16 255, i16 65535>
+	ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @and16imm4s_lsl16(<8 x i16> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xff, lsl #16
+	%tmp1 = and <8 x i16> %a, < i16 65535, i16 65280, i16 65535, i16 65280, i16 65535, i16 65280, i16 65535, i16 65280>
+	ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @and16imm4s_lsl24(<8 x i16> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xfe, lsl #24
+	%tmp1 = and <8 x i16> %a, < i16 65535, i16 511, i16 65535, i16 511, i16 65535, i16 511, i16 65535, i16 511>
+	ret <8 x i16> %tmp1
+}
+
+define <2 x i64> @and64imm4s_lsl0(<2 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xff
+	%tmp1 = and <2 x i64> %a, < i64 -1095216660736, i64 -1095216660736>
+	ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @and64imm4s_lsl8(<2 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xff, lsl #8
+	%tmp1 = and <2 x i64> %a, < i64 -280375465148161, i64 -280375465148161>
+	ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @and64imm4s_lsl16(<2 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xff, lsl #16
+	%tmp1 = and <2 x i64> %a, < i64 -71776119077928961, i64 -71776119077928961>
+	ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @and64imm4s_lsl24(<2 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.4s, #0xfe, lsl #24
+	%tmp1 = and <2 x i64> %a, < i64 144115183814443007, i64 144115183814443007>
+	ret <2 x i64> %tmp1
+}
+
+define <8 x i8> @and8imm4h_lsl0(<8 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.4h, #0xff
+	%tmp1 = and <8 x i8> %a, < i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+	ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @and8imm4h_lsl8(<8 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.4h, #0xff, lsl #8
+	%tmp1 = and <8 x i8> %a, < i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
+	ret <8 x i8> %tmp1
+}
+
+define <2 x i32> @and16imm4h_lsl0(<2 x i32> %a) {
+;CHECK:  bic {{v[0-9]+}}.4h, #0xff
+	%tmp1 = and <2 x i32> %a, < i32 4278255360, i32 4278255360>
+	ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @and16imm4h_lsl8(<2 x i32> %a) {
+;CHECK:  bic {{v[0-9]+}}.4h, #0xff, lsl #8
+	%tmp1 = and <2 x i32> %a, < i32 16711935, i32 16711935>
+	ret <2 x i32> %tmp1
+}
+
+define <1 x i64> @and64imm4h_lsl0(<1 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.4h, #0xff
+	%tmp1 = and <1 x i64> %a, < i64 -71777214294589696>
+	ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @and64imm4h_lsl8(<1 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.4h, #0xff, lsl #8
+	%tmp1 = and <1 x i64> %a, < i64 71777214294589695>
+	ret <1 x i64> %tmp1
+}
+
+define <16 x i8> @and8imm8h_lsl0(<16 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.8h, #0xff
+	%tmp1 = and <16 x i8> %a, < i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255 >
+	ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @and8imm8h_lsl8(<16 x i8> %a) {
+;CHECK:  bic {{v[0-9]+}}.8h, #0xff, lsl #8
+	%tmp1 = and <16 x i8> %a, <i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0 >
+	ret <16 x i8> %tmp1
+}
+
+define <4 x i32> @and16imm8h_lsl0(<4 x i32> %a) {
+;CHECK:  bic {{v[0-9]+}}.8h, #0xff
+	%tmp1 = and <4 x i32> %a, < i32 4278255360, i32 4278255360, i32 4278255360, i32 4278255360>
+	ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @and16imm8h_lsl8(<4 x i32> %a) {
+;CHECK:  bic {{v[0-9]+}}.8h, #0xff, lsl #8
+	%tmp1 = and <4 x i32> %a, < i32 16711935, i32 16711935, i32 16711935, i32 16711935>
+	ret <4 x i32> %tmp1
+}
+
+define <2 x i64> @and64imm8h_lsl0(<2 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.8h, #0xff
+	%tmp1 = and <2 x i64> %a, < i64 -71777214294589696, i64 -71777214294589696>
+	ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @and64imm8h_lsl8(<2 x i64> %a) {
+;CHECK:  bic {{v[0-9]+}}.8h, #0xff, lsl #8
+	%tmp1 = and <2 x i64> %a, < i64 71777214294589695, i64 71777214294589695>
+	ret <2 x i64> %tmp1
+}
+
+define <8 x i8> @orr8imm2s_lsl0(<8 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff
+	%tmp1 = or <8 x i8> %a, < i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0>
+	ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @orr8imm2s_lsl8(<8 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff, lsl #8
+	%tmp1 = or <8 x i8> %a, < i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0>
+	ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @orr8imm2s_lsl16(<8 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff, lsl #16
+	%tmp1 = or <8 x i8> %a, < i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0>
+	ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @orr8imm2s_lsl24(<8 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff, lsl #24
+	%tmp1 = or <8 x i8> %a, < i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255>
+	ret <8 x i8> %tmp1
+}
+
+define <4 x i16> @orr16imm2s_lsl0(<4 x i16> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff
+	%tmp1 = or <4 x i16> %a, < i16 255, i16 0, i16 255, i16 0>
+	ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @orr16imm2s_lsl8(<4 x i16> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff, lsl #8
+	%tmp1 = or <4 x i16> %a, < i16 65280, i16 0, i16 65280, i16 0>
+	ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @orr16imm2s_lsl16(<4 x i16> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff, lsl #16
+	%tmp1 = or <4 x i16> %a, < i16 0, i16 255, i16 0, i16 255>
+	ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @orr16imm2s_lsl24(<4 x i16> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff, lsl #24
+	%tmp1 = or <4 x i16> %a, < i16 0, i16 65280, i16 0, i16 65280>
+	ret <4 x i16> %tmp1
+}
+
+define <1 x i64> @orr64imm2s_lsl0(<1 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff
+	%tmp1 = or <1 x i64> %a, < i64 1095216660735>
+	ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @orr64imm2s_lsl8(<1 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff, lsl #8
+	%tmp1 = or <1 x i64> %a, < i64 280375465148160>
+	ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @orr64imm2s_lsl16(<1 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff, lsl #16
+	%tmp1 = or <1 x i64> %a, < i64 71776119077928960>
+	ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @orr64imm2s_lsl24(<1 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.2s, #0xff, lsl #24
+	%tmp1 = or <1 x i64> %a, < i64 -72057589759737856>
+	ret <1 x i64> %tmp1
+}
+
+define <16 x i8> @orr8imm4s_lsl0(<16 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff
+	%tmp1 = or <16 x i8> %a, < i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0>
+	ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @orr8imm4s_lsl8(<16 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff, lsl #8
+	%tmp1 = or <16 x i8> %a, < i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0>
+	ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @orr8imm4s_lsl16(<16 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff, lsl #16
+	%tmp1 = or <16 x i8> %a, < i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0>
+	ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @orr8imm4s_lsl24(<16 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff, lsl #24
+	%tmp1 = or <16 x i8> %a, < i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255, i8 0, i8 0, i8 0, i8 255>
+	ret <16 x i8> %tmp1
+}
+
+define <8 x i16> @orr16imm4s_lsl0(<8 x i16> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff
+	%tmp1 = or <8 x i16> %a, < i16 255, i16 0, i16 255, i16 0, i16 255, i16 0, i16 255, i16 0>
+	ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @orr16imm4s_lsl8(<8 x i16> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff, lsl #8
+	%tmp1 = or <8 x i16> %a, < i16 65280, i16 0, i16 65280, i16 0, i16 65280, i16 0, i16 65280, i16 0>
+	ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @orr16imm4s_lsl16(<8 x i16> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff, lsl #16
+	%tmp1 = or <8 x i16> %a, < i16 0, i16 255, i16 0, i16 255, i16 0, i16 255, i16 0, i16 255>
+	ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @orr16imm4s_lsl24(<8 x i16> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff, lsl #24
+	%tmp1 = or <8 x i16> %a, < i16 0, i16 65280, i16 0, i16 65280, i16 0, i16 65280, i16 0, i16 65280>
+	ret <8 x i16> %tmp1
+}
+
+define <2 x i64> @orr64imm4s_lsl0(<2 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff
+	%tmp1 = or <2 x i64> %a, < i64 1095216660735, i64 1095216660735>
+	ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @orr64imm4s_lsl8(<2 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff, lsl #8
+	%tmp1 = or <2 x i64> %a, < i64 280375465148160, i64 280375465148160>
+	ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @orr64imm4s_lsl16(<2 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff, lsl #16
+	%tmp1 = or <2 x i64> %a, < i64 71776119077928960, i64 71776119077928960>
+	ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @orr64imm4s_lsl24(<2 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.4s, #0xff, lsl #24
+	%tmp1 = or <2 x i64> %a, < i64 -72057589759737856, i64 -72057589759737856>
+	ret <2 x i64> %tmp1
+}
+
+define <8 x i8> @orr8imm4h_lsl0(<8 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.4h, #0xff
+	%tmp1 = or <8 x i8> %a, < i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
+	ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @orr8imm4h_lsl8(<8 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.4h, #0xff, lsl #8
+	%tmp1 = or <8 x i8> %a, < i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+	ret <8 x i8> %tmp1
+}
+
+define <2 x i32> @orr16imm4h_lsl0(<2 x i32> %a) {
+;CHECK:  orr {{v[0-9]+}}.4h, #0xff
+	%tmp1 = or <2 x i32> %a, < i32 16711935, i32 16711935>
+	ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @orr16imm4h_lsl8(<2 x i32> %a) {
+;CHECK:  orr {{v[0-9]+}}.4h, #0xff, lsl #8
+	%tmp1 = or <2 x i32> %a, < i32 4278255360, i32 4278255360>
+	ret <2 x i32> %tmp1
+}
+
+define <1 x i64> @orr64imm4h_lsl0(<1 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.4h, #0xff
+	%tmp1 = or <1 x i64> %a, < i64 71777214294589695>
+	ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @orr64imm4h_lsl8(<1 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.4h, #0xff, lsl #8
+	%tmp1 = or <1 x i64> %a, < i64 -71777214294589696>
+	ret <1 x i64> %tmp1
+}
+
+define <16 x i8> @orr8imm8h_lsl0(<16 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.8h, #0xff
+	%tmp1 = or <16 x i8> %a, < i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
+	ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @orr8imm8h_lsl8(<16 x i8> %a) {
+;CHECK:  orr {{v[0-9]+}}.8h, #0xff, lsl #8
+	%tmp1 = or <16 x i8> %a, < i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+	ret <16 x i8> %tmp1
+}
+
+define <4 x i32> @orr16imm8h_lsl0(<4 x i32> %a) {
+;CHECK:  orr {{v[0-9]+}}.8h, #0xff
+	%tmp1 = or <4 x i32> %a, < i32 16711935, i32 16711935, i32 16711935, i32 16711935>
+	ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @orr16imm8h_lsl8(<4 x i32> %a) {
+;CHECK:  orr {{v[0-9]+}}.8h, #0xff, lsl #8
+	%tmp1 = or <4 x i32> %a, < i32 4278255360, i32 4278255360, i32 4278255360, i32 4278255360>
+	ret <4 x i32> %tmp1
+}
+
+define <2 x i64> @orr64imm8h_lsl0(<2 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.8h, #0xff
+	%tmp1 = or <2 x i64> %a, < i64 71777214294589695, i64 71777214294589695>
+	ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @orr64imm8h_lsl8(<2 x i64> %a) {
+;CHECK:  orr {{v[0-9]+}}.8h, #0xff, lsl #8
+	%tmp1 = or <2 x i64> %a, < i64 -71777214294589696, i64 -71777214294589696>
+	ret <2 x i64> %tmp1
+}
 