[llvm] r363546 - Test forward references in IntrinsicEmitter on Neon LD(2|3|4)

Sander de Smalen via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 17 05:01:53 PDT 2019


Author: s.desmalen
Date: Mon Jun 17 05:01:53 2019
New Revision: 363546

URL: http://llvm.org/viewvc/llvm-project?rev=363546&view=rev
Log:
Test forward references in IntrinsicEmitter on Neon LD(2|3|4)

This patch tests the forward referencing added in D62995 by changing
some existing intrinsics to use forward references to overloadable
parameters rather than backward references.

This patch changes the TableGen definition/implementation of the
llvm.aarch64.neon.ld2 and llvm.aarch64.neon.ld2lane intrinsics
(and similarly for ld3 and ld4). The change is intended to be
non-functional, since the behaviour of the intrinsics is
expected to be the same.
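
For context, the IR-level signature derived from these definitions is
unchanged: the single overloadable vector type still determines every
member of the returned struct and the element type of the pointer
operand. A minimal sketch for ld2 overloaded at <4 x i32> (the mangled
pointer suffix is illustrative, following existing in-tree AArch64
tests):

; Illustrative declaration only: both members of the returned struct and
; the pointee type of the pointer operand are the overloaded <4 x i32>.
declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>*)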

Reviewers: arsenm, dmgreen, RKSimon, greened, rnk

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D63189

Added:
    llvm/trunk/test/Verifier/intrinsic-arg-overloading-struct-ret.ll
Modified:
    llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td

Modified: llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td?rev=363546&r1=363545&r2=363546&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td (original)
+++ llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td Mon Jun 17 05:01:53 2019
@@ -462,12 +462,12 @@ let TargetPrefix = "aarch64" in {  // Al
                 [IntrArgMemOnly, NoCapture<2>]>;
 
   class AdvSIMD_2Vec_Load_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+    : Intrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
                 [LLVMAnyPointerType<LLVMMatchType<0>>],
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_2Vec_Load_Lane_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
-                [LLVMMatchType<0>, LLVMMatchType<0>,
+    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>],
+                [LLVMMatchType<0>, llvm_anyvector_ty,
                  llvm_i64_ty, llvm_anyptr_ty],
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_2Vec_Store_Intrinsic
@@ -480,12 +480,12 @@ let TargetPrefix = "aarch64" in {  // Al
                 [IntrArgMemOnly, NoCapture<3>]>;
 
   class AdvSIMD_3Vec_Load_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
                 [LLVMAnyPointerType<LLVMMatchType<0>>],
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_3Vec_Load_Lane_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
-                [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty,
                  llvm_i64_ty, llvm_anyptr_ty],
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_3Vec_Store_Intrinsic
@@ -499,15 +499,15 @@ let TargetPrefix = "aarch64" in {  // Al
                 [IntrArgMemOnly, NoCapture<4>]>;
 
   class AdvSIMD_4Vec_Load_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
-                 LLVMMatchType<0>, LLVMMatchType<0>],
+    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
+                 LLVMMatchType<0>, llvm_anyvector_ty],
                 [LLVMAnyPointerType<LLVMMatchType<0>>],
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_4Vec_Load_Lane_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                  LLVMMatchType<0>, LLVMMatchType<0>],
                 [LLVMMatchType<0>, LLVMMatchType<0>,
-                 LLVMMatchType<0>, LLVMMatchType<0>,
+                 LLVMMatchType<0>, llvm_anyvector_ty,
                  llvm_i64_ty, llvm_anyptr_ty],
                 [IntrReadMem, IntrArgMemOnly]>;
   class AdvSIMD_4Vec_Store_Intrinsic

Added: llvm/trunk/test/Verifier/intrinsic-arg-overloading-struct-ret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Verifier/intrinsic-arg-overloading-struct-ret.ll?rev=363546&view=auto
==============================================================================
--- llvm/trunk/test/Verifier/intrinsic-arg-overloading-struct-ret.ll (added)
+++ llvm/trunk/test/Verifier/intrinsic-arg-overloading-struct-ret.ll Mon Jun 17 05:01:53 2019
@@ -0,0 +1,79 @@
+; RUN: not opt -verify -S < %s 2>&1 | FileCheck %s
+
+; LD2 and LD2LANE
+
+; CHECK: Intrinsic has incorrect return type
+; CHECK-NEXT: llvm.aarch64.neon.ld2.v4i32
+define { <4 x i64>, <4 x i32> } @test_ld2_ret(<4 x i32>* %ptr) {
+  %res = call { <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32(<4 x i32>* %ptr)
+  ret{ <4 x i64>, <4 x i32> } %res
+}
+declare { <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32(<4 x i32>* %ptr)
+
+; CHECK: Intrinsic has incorrect return type
+; CHECK-NEXT: llvm.aarch64.neon.ld2lane.v4i64
+define { <4 x i64>, <4 x i32> } @test_ld2lane_ret(i8* %ptr, <4 x i64> %a, <4 x i64> %b) {
+  %res = call { <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i64(<4 x i64> %a, <4 x i64> %b, i64 0, i8* %ptr)
+  ret{ <4 x i64>, <4 x i32> } %res
+}
+declare { <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i64(<4 x i64>, <4 x i64>, i64, i8*)
+
+; CHECK: Intrinsic has incorrect argument type
+; CHECK-NEXT: llvm.aarch64.neon.ld2lane.v4i32
+define { <4 x i32>, <4 x i32> } @test_ld2lane_arg(i8* %ptr, <4 x i64> %a, <4 x i32> %b) {
+  %res = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32(<4 x i64> %a, <4 x i32> %b, i64 0, i8* %ptr)
+  ret{ <4 x i32>, <4 x i32> } %res
+}
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32(<4 x i64>, <4 x i32>, i64, i8*)
+
+; LD3 and LD3LANE
+
+; CHECK: Intrinsic has incorrect return type
+; CHECK-NEXT: llvm.aarch64.neon.ld3.v4i32
+define { <4 x i32>, <4 x i64>, <4 x i32> } @test_ld3_ret(<4 x i32>* %ptr) {
+  %res = call { <4 x i32>, <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32(<4 x i32>* %ptr)
+  ret{ <4 x i32>, <4 x i64>, <4 x i32> } %res
+}
+declare { <4 x i32>, <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32(<4 x i32>* %ptr)
+
+; CHECK: Intrinsic has incorrect return type
+; CHECK-NEXT: llvm.aarch64.neon.ld3lane.v4i64
+define { <4 x i64>, <4 x i32>, <4 x i64> } @test_ld3lane_ret(i8* %ptr, <4 x i64> %a, <4 x i64> %b, <4 x i64> %c) {
+  %res = call { <4 x i64>, <4 x i32>, <4 x i64> } @llvm.aarch64.neon.ld3lane.v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, i64 0, i8* %ptr)
+  ret{ <4 x i64>, <4 x i32>, <4 x i64> } %res
+}
+declare { <4 x i64>, <4 x i32>, <4 x i64> } @llvm.aarch64.neon.ld3lane.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, i64, i8*)
+
+; CHECK: Intrinsic has incorrect argument type
+; CHECK-NEXT: llvm.aarch64.neon.ld3lane.v4i32
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_ld3lane_arg(i8* %ptr, <4 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+  %res = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32(<4 x i64> %a, <4 x i32> %b, <4 x i32> %c, i64 0, i8* %ptr)
+  ret{ <4 x i32>, <4 x i32>, <4 x i32> } %res
+}
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32(<4 x i64>, <4 x i32>, <4 x i32>, i64, i8*)
+
+; LD4 and LD4LANE
+
+; CHECK: Intrinsic has incorrect return type
+; CHECK-NEXT: llvm.aarch64.neon.ld4.v4i32
+define { <4 x i32>, <4 x i32>, <4 x i64>, <4 x i32> } @test_ld4_ret(<4 x i32>* %ptr) {
+  %res = call { <4 x i32>, <4 x i32>, <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32(<4 x i32>* %ptr)
+  ret{ <4 x i32>, <4 x i32>, <4 x i64>, <4 x i32> } %res
+}
+declare { <4 x i32>, <4 x i32>, <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32(<4 x i32>* %ptr)
+
+; CHECK: Intrinsic has incorrect return type
+; CHECK-NEXT: llvm.aarch64.neon.ld4lane.v4i64
+define { <4 x i64>, <4 x i64>, <4 x i32>, <4 x i64> } @test_ld4lane_ret(i8* %ptr, <4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
+  %res = call { <4 x i64>, <4 x i64>, <4 x i32>, <4 x i64> } @llvm.aarch64.neon.ld4lane.v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d, i64 0, i8* %ptr)
+  ret{ <4 x i64>, <4 x i64>, <4 x i32>, <4 x i64> } %res
+}
+declare { <4 x i64>, <4 x i64>, <4 x i32>, <4 x i64> } @llvm.aarch64.neon.ld4lane.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i64>, i64, i8*)
+
+; CHECK: Intrinsic has incorrect argument type
+; CHECK-NEXT: llvm.aarch64.neon.ld4lane.v4i32
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_ld4lane_arg(i8* %ptr, <4 x i64> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
+  %res = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32(<4 x i64> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, i64 0, i8* %ptr)
+  ret{ <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %res
+}
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32(<4 x i64>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i8*)
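
For contrast with the negative cases above, a call in which every
overloaded position agrees is accepted by the verifier. A minimal
well-formed ld2lane sketch (the mangled pointer suffix is illustrative,
following existing in-tree AArch64 tests):

; Well-formed counterpart: both members of the returned struct and both
; vector operands use the single overloaded type <4 x i32>.
define { <4 x i32>, <4 x i32> } @test_ld2lane_ok(i8* %ptr, <4 x i32> %a, <4 x i32> %b) {
  %res = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i8(<4 x i32> %a, <4 x i32> %b, i64 0, i8* %ptr)
  ret { <4 x i32>, <4 x i32> } %res
}
declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i8(<4 x i32>, <4 x i32>, i64, i8*)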



