[llvm] f113057 - [CodeGen] Fix warnings in DAGCombiner::visitSCALAR_TO_VECTOR

David Sherwood via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 1 10:47:36 PDT 2020


Author: David Sherwood
Date: 2020-07-01T18:47:13+01:00
New Revision: f11305780f08969488add6c84439fc91d18692dc

URL: https://github.com/llvm/llvm-project/commit/f11305780f08969488add6c84439fc91d18692dc
DIFF: https://github.com/llvm/llvm-project/commit/f11305780f08969488add6c84439fc91d18692dc.diff

LOG: [CodeGen] Fix warnings in DAGCombiner::visitSCALAR_TO_VECTOR

In visitSCALAR_TO_VECTOR we try to optimise cases such as:

  scalar_to_vector (extract_vector_elt %x)

into vector shuffles of %x. However, it led to numerous warnings
when %x is a scalable vector type, so for now I've changed the
code to only perform the combination on fixed length vectors.
Although we probably could change the code to work with scalable
vectors in certain cases, without a proper profit analysis it
doesn't seem worth it at the moment.

This change fixes up one of the warnings in:

  llvm/test/CodeGen/AArch64/sve-merging-stores.ll

I've also added a simplified version of the same test to:

  llvm/test/CodeGen/AArch64/sve-fp.ll

which already has checks for no warnings.

Differential Revision: https://reviews.llvm.org/D82872

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AArch64/sve-fp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index f028e3adf46c..18133a107e25 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -20267,7 +20267,9 @@ SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
 
   // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
   // with a VECTOR_SHUFFLE and possible truncate.
-  if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+  if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+      VT.isFixedLengthVector() &&
+      InVal->getOperand(0).getValueType().isFixedLengthVector()) {
     SDValue InVec = InVal->getOperand(0);
     SDValue EltNo = InVal->getOperand(1);
     auto InVecT = InVec.getValueType();

diff --git a/llvm/test/CodeGen/AArch64/sve-fp.ll b/llvm/test/CodeGen/AArch64/sve-fp.ll
index 05ca330b3668..c7cf917b2e64 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
 ; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
 
@@ -5,124 +6,158 @@
 
 define <vscale x 8 x half> @fadd_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: fadd_h:
-; CHECK: fadd z0.h, z0.h, z1.h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
   %res = fadd <vscale x 8 x half> %a, %b
   ret <vscale x 8 x half> %res
 }
 
 define <vscale x 4 x float> @fadd_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fadd_s:
-; CHECK: fadd z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
   %res = fadd <vscale x 4 x float> %a, %b
   ret <vscale x 4 x float> %res
 }
 
 define <vscale x 2 x double> @fadd_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: fadd_d:
-; CHECK: fadd z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
   %res = fadd <vscale x 2 x double> %a, %b
   ret <vscale x 2 x double> %res
 }
 
 define <vscale x 8 x half> @fsub_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: fsub_h:
-; CHECK: fsub z0.h, z0.h, z1.h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
   %res = fsub <vscale x 8 x half> %a, %b
   ret <vscale x 8 x half> %res
 }
 
 define <vscale x 4 x float> @fsub_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fsub_s:
-; CHECK: fsub z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
   %res = fsub <vscale x 4 x float> %a, %b
   ret <vscale x 4 x float> %res
 }
 
 define <vscale x 2 x double> @fsub_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: fsub_d:
-; CHECK: fsub z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
   %res = fsub <vscale x 2 x double> %a, %b
   ret <vscale x 2 x double> %res
 }
 
 define <vscale x 8 x half> @fmul_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: fmul_h:
-; CHECK: fmul z0.h, z0.h, z1.h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
   %res = fmul <vscale x 8 x half> %a, %b
   ret <vscale x 8 x half> %res
 }
 
 define <vscale x 4 x float> @fmul_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fmul_s:
-; CHECK: fmul z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
   %res = fmul <vscale x 4 x float> %a, %b
   ret <vscale x 4 x float> %res
 }
 
 define <vscale x 2 x double> @fmul_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: fmul_d:
-; CHECK: fmul z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
   %res = fmul <vscale x 2 x double> %a, %b
   ret <vscale x 2 x double> %res
 }
 
 define <vscale x 8 x half> @frecps_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: frecps_h:
-; CHECK: frecps z0.h, z0.h, z1.h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frecps z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.aarch64.sve.frecps.x.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
   ret <vscale x 8 x half> %res
 }
 
 define <vscale x 4 x float> @frecps_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: frecps_s:
-; CHECK: frecps z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frecps z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.aarch64.sve.frecps.x.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
   ret <vscale x 4 x float> %res
 }
 
 define <vscale x 2 x double> @frecps_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: frecps_d:
-; CHECK: frecps z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frecps z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.aarch64.sve.frecps.x.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
   ret <vscale x 2 x double> %res
 }
 
 define <vscale x 8 x half> @frsqrts_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: frsqrts_h:
-; CHECK: frsqrts z0.h, z0.h, z1.h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frsqrts z0.h, z0.h, z1.h
+; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.aarch64.sve.frsqrts.x.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
   ret <vscale x 8 x half> %res
 }
 
 define <vscale x 4 x float> @frsqrts_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: frsqrts_s:
-; CHECK: frsqrts z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frsqrts z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.aarch64.sve.frsqrts.x.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
   ret <vscale x 4 x float> %res
 }
 
 define <vscale x 2 x double> @frsqrts_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: frsqrts_d:
-; CHECK: frsqrts z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frsqrts z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.aarch64.sve.frsqrts.x.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
   ret <vscale x 2 x double> %res
 }
 
+%complex = type { { double, double } }
+
+define void @scalar_to_vector(%complex* %outval, <vscale x 2 x i1> %pred, <vscale x 2 x double> %in1, <vscale x 2 x double> %in2) {
+; CHECK-LABEL: scalar_to_vector:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    faddv d0, p0, z0.d
+; CHECK-NEXT:    faddv d1, p0, z1.d
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %realp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 0
+  %imagp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 1
+  %1 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %in1)
+  %2 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %in2)
+  store double %1, double* %realp, align 8
+  store double %2, double* %imagp, align 8
+  ret void
+}
+
 declare <vscale x 8 x half> @llvm.aarch64.sve.frecps.x.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float>  @llvm.aarch64.sve.frecps.x.nxv4f32(<vscale x 4 x float> , <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.frecps.x.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
@@ -130,3 +165,6 @@ declare <vscale x 2 x double> @llvm.aarch64.sve.frecps.x.nxv2f64(<vscale x 2 x d
 declare <vscale x 8 x half> @llvm.aarch64.sve.frsqrts.x.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.frsqrts.x.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.frsqrts.x.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+
+; Function Attrs: nounwind readnone
+declare double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>) #2


        


More information about the llvm-commits mailing list