[llvm] cce24bb - [AArch64][GlobalISel] Add tests for pre-existing selection support for <4 x s16> arithmetic/bitwise ops.

Amara Emerson via llvm-commits <llvm-commits at lists.llvm.org>
Fri Sep 18 17:18:36 PDT 2020


Author: Amara Emerson
Date: 2020-09-18T17:13:55-07:00
New Revision: cce24bb38d97c352bf7ac40860f0ade33024735c

URL: https://github.com/llvm/llvm-project/commit/cce24bb38d97c352bf7ac40860f0ade33024735c
DIFF: https://github.com/llvm/llvm-project/commit/cce24bb38d97c352bf7ac40860f0ade33024735c.diff

LOG: [AArch64][GlobalISel] Add tests for pre-existing selection support for <4 x s16> arithmetic/bitwise ops.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir

Removed: 
    


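Context for the new tests (an aside, not part of the commit itself): the
<4 x s16> cases below exercise selection patterns that already existed in
the AArch64 instruction selector; this change only adds MIR coverage for
them. A minimal LLVM IR sketch that would reach the same selection path
through GlobalISel (hypothetical, for illustration only) is:

    ; add two <4 x i16> vectors; becomes G_ADD on <4 x s16> in GISel
    define <4 x i16> @add_v4i16(<4 x i16> %a, <4 x i16> %b) {
      %r = add <4 x i16> %a, %b
      ret <4 x i16> %r
    }

Compiled with something like llc -mtriple=aarch64 -global-isel, the
resulting G_ADD on <4 x s16> should select to ADDv4i16, which is what the
CHECK lines in the diff verify directly at the MIR level.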
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
index f6aa16784b25..da05ea0cea5a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
@@ -1,71 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
-
---- |
-  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-
-  define void @add_s32_gpr() { ret void }
-  define void @add_s64_gpr() { ret void }
-
-  define void @add_imm_s32_gpr() { ret void }
-  define void @add_imm_s64_gpr() { ret void }
-
-  define void @add_neg_s32_gpr() { ret void }
-  define void @add_neg_s64_gpr() { ret void }
-  define void @add_neg_invalid_immed_s32() { ret void }
-  define void @add_neg_invalid_immed_s64() { ret void }
-  define void @add_imm_0_s32() { ret void }
-  define void @add_imm_0_s64() { ret void }
-
-  define void @add_imm_s32_gpr_bb() { ret void }
-
-  define void @sub_s32_gpr() { ret void }
-  define void @sub_s64_gpr() { ret void }
-
-  define void @or_s32_gpr() { ret void }
-  define void @or_s64_gpr() { ret void }
-  define void @or_v2s32_fpr() { ret void }
-
-  define void @and_s32_gpr() { ret void }
-  define void @and_s64_gpr() { ret void }
-
-  define void @shl_s32_gpr() { ret void }
-  define void @shl_s64_gpr() { ret void }
-
-  define void @lshr_s32_gpr() { ret void }
-  define void @lshr_s64_gpr() { ret void }
-
-  define void @ashr_s32_gpr() { ret void }
-  define void @ashr_s64_gpr() { ret void }
-
-  define void @mul_s32_gpr() { ret void }
-  define void @mul_s64_gpr() { ret void }
-
-  define void @mulh_s64_gpr() { ret void }
-
-  define void @sdiv_s32_gpr() { ret void }
-  define void @sdiv_s64_gpr() { ret void }
-
-  define void @udiv_s32_gpr() { ret void }
-  define void @udiv_s64_gpr() { ret void }
-
-  define void @fadd_s32_fpr() { ret void }
-  define void @fadd_s64_fpr() { ret void }
-
-  define void @fsub_s32_fpr() { ret void }
-  define void @fsub_s64_fpr() { ret void }
-
-  define void @fmul_s32_fpr() { ret void }
-  define void @fmul_s64_fpr() { ret void }
-
-  define void @fdiv_s32_fpr() { ret void }
-  define void @fdiv_s64_fpr() { ret void }
-
-  define void @add_v8i16() { ret void }
-  define void @add_v16i8() { ret void }
-
-...
-
 ---
 # Check that we select a 32-bit GPR G_ADD into ADDWrr on GPR32.
 # Also check that we constrain the register class of the COPY to GPR32.
@@ -330,6 +264,7 @@ body:             |
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x80000000)
   ; CHECK:   [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
+  ; CHECK:   B %bb.1
   ; CHECK: bb.1:
   ; CHECK:   [[ADDWri:%[0-9]+]]:gpr32sp = ADDWri [[COPY]], 1, 0
   ; CHECK:   $w0 = COPY [[ADDWri]]
@@ -1127,3 +1062,91 @@ body:             |
     RET_ReallyLR implicit $q0
 
 ...
+---
+name:            add_v4i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $d0, $d1
+
+    ; CHECK-LABEL: name: add_v4i16
+    ; CHECK: liveins: $d0, $d1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK: [[ADDv4i16_:%[0-9]+]]:fpr64 = ADDv4i16 [[COPY]], [[COPY1]]
+    ; CHECK: $d0 = COPY [[ADDv4i16_]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:fpr(<4 x s16>) = COPY $d0
+    %1:fpr(<4 x s16>) = COPY $d1
+    %2:fpr(<4 x s16>) = G_ADD %0, %1
+    $d0 = COPY %2(<4 x s16>)
+    RET_ReallyLR implicit $d0
+...
+---
+name:            or_v4i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $d0, $d1
+
+    ; CHECK-LABEL: name: or_v4i16
+    ; CHECK: liveins: $d0, $d1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK: [[ORRv8i8_:%[0-9]+]]:fpr64 = ORRv8i8 [[COPY]], [[COPY1]]
+    ; CHECK: $d0 = COPY [[ORRv8i8_]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:fpr(<4 x s16>) = COPY $d0
+    %1:fpr(<4 x s16>) = COPY $d1
+    %2:fpr(<4 x s16>) = G_OR %0, %1
+    $d0 = COPY %2(<4 x s16>)
+    RET_ReallyLR implicit $d0
+...
+---
+name:            xor_v4i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $d0, $d1
+
+    ; CHECK-LABEL: name: xor_v4i16
+    ; CHECK: liveins: $d0, $d1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK: [[EORv8i8_:%[0-9]+]]:fpr64 = EORv8i8 [[COPY]], [[COPY1]]
+    ; CHECK: $d0 = COPY [[EORv8i8_]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:fpr(<4 x s16>) = COPY $d0
+    %1:fpr(<4 x s16>) = COPY $d1
+    %2:fpr(<4 x s16>) = G_XOR %0, %1
+    $d0 = COPY %2(<4 x s16>)
+    RET_ReallyLR implicit $d0
+...
+---
+name:            mul_v4i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1:
+    liveins: $d0, $d1
+
+    ; CHECK-LABEL: name: mul_v4i16
+    ; CHECK: liveins: $d0, $d1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK: [[MULv4i16_:%[0-9]+]]:fpr64 = MULv4i16 [[COPY]], [[COPY1]]
+    ; CHECK: $d0 = COPY [[MULv4i16_]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:fpr(<4 x s16>) = COPY $d0
+    %1:fpr(<4 x s16>) = COPY $d1
+    %2:fpr(<4 x s16>) = G_MUL %0, %1
+    $d0 = COPY %2(<4 x s16>)
+    RET_ReallyLR implicit $d0
+...

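Two notes on the expected output above. First, the G_OR and G_XOR cases
select to ORRv8i8/EORv8i8 rather than v4i16-specific forms: AArch64's
vector bitwise instructions operate on the whole 64-bit register and are
element-size agnostic, so the .8b encodings cover <4 x s16> as well.
Second, the CHECK lines are autogenerated (see the NOTE at the top of the
test). A typical regeneration invocation, assuming a built llc at
build/bin/llc, would be:

    llvm/utils/update_mir_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir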