[llvm] 2ed1587 - [X86] Ensure asm comments only print the constant values for the vector load's register width

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 17 06:30:55 PST 2023


Author: Simon Pilgrim
Date: 2023-11-17T14:30:30Z
New Revision: 2ed15877e7427801d1699611d0b29f23718b01ab

URL: https://github.com/llvm/llvm-project/commit/2ed15877e7427801d1699611d0b29f23718b01ab
DIFF: https://github.com/llvm/llvm-project/commit/2ed15877e7427801d1699611d0b29f23718b01ab.diff

LOG: [X86] Ensure asm comments only print the constant values for the vector load's register width

We were printing the entire Constant, so if the load came from a wider constant pool entry the asm comment was cluttered with upper bits that aren't actually part of the load result.
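
To illustrate the fix, here is a minimal standalone C++ sketch of the clamping arithmetic (buildComment, PoolElts and the sample values are hypothetical; the real code in X86MCInstLower.cpp works on llvm::Constant and the streamer comment machinery): the number of elements printed per lane is limited to BitWidth / CstEltSize, i.e. the number that actually fit in the loaded register width.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Standalone model of the clamp: a constant pool entry may hold more
    // elements than the destination register loads, so only
    // BitWidth / CstEltSize of them belong in the asm comment.
    static std::string buildComment(const std::vector<uint64_t> &PoolElts,
                                    int CstEltSize, int BitWidth,
                                    int NumLanes) {
      int NumElements = static_cast<int>(PoolElts.size());
      if ((BitWidth % CstEltSize) == 0)
        NumElements = std::min<int>(NumElements, BitWidth / CstEltSize);

      std::ostringstream CS;
      CS << "[";
      for (int l = 0; l != NumLanes; ++l)
        for (int i = 0; i < NumElements; ++i) {
          if (i != 0 || l != 0)
            CS << ",";
          CS << PoolElts[i];
        }
      CS << "]";
      return CS.str();
    }

    int main() {
      // A 256-bit (4 x i64) pool entry read by a 128-bit vmovdqa: only the
      // low two elements should appear in the comment.
      std::vector<uint64_t> Pool = {4, 5, 6, 7};
      std::cout << buildComment(Pool, /*CstEltSize=*/64, /*BitWidth=*/128,
                                /*NumLanes=*/1)
                << '\n'; // prints [4,5]
      return 0;
    }

Before the change the loop printed every element of the pool entry, which is exactly the <4,u,6,7> vs <4,u> difference visible in the insert-into-constant-vector.ll diff below; for the VBROADCAST cases NumLanes > 1 simply repeats the clamped run once per lane.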

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86MCInstLower.cpp
    llvm/test/CodeGen/X86/insert-into-constant-vector.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index bd64729e7bddc19..cbb012161524169 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1842,6 +1842,18 @@ static void addConstantComments(const MachineInstr *MI,
   case X86::VMOVUPS##Suffix##rm:                                               \
   case X86::VMOVUPD##Suffix##rm:
 
+#define CASE_128_MOV_RM()                                                      \
+  MOV_CASE(, )   /* SSE */                                                     \
+  MOV_CASE(V, )  /* AVX-128 */                                                 \
+  MOV_AVX512_CASE(Z128)
+
+#define CASE_256_MOV_RM()                                                      \
+  MOV_CASE(V, Y) /* AVX-256 */                                                 \
+  MOV_AVX512_CASE(Z256)
+
+#define CASE_512_MOV_RM()                                                      \
+  MOV_AVX512_CASE(Z)
+
 #define CASE_ALL_MOV_RM()                                                      \
   MOV_CASE(, )   /* SSE */                                                     \
   MOV_CASE(V, )  /* AVX-128 */                                                 \
@@ -1871,22 +1883,28 @@ static void addConstantComments(const MachineInstr *MI,
            "Unexpected number of operands!");
     if (auto *C = getConstantFromPool(*MI, MI->getOperand(1 + X86::AddrDisp))) {
       int NumLanes = 1;
-      // Override NumLanes for the broadcast instructions.
+      int BitWidth = 128;
+      int CstEltSize = C->getType()->getScalarSizeInBits();
+
+      // Get destination BitWidth + override NumLanes for the broadcasts.
       switch (MI->getOpcode()) {
-      case X86::VBROADCASTF128:        NumLanes = 2; break;
-      case X86::VBROADCASTI128:        NumLanes = 2; break;
-      case X86::VBROADCASTF32X4Z256rm: NumLanes = 2; break;
-      case X86::VBROADCASTF32X4rm:     NumLanes = 4; break;
-      case X86::VBROADCASTF32X8rm:     NumLanes = 2; break;
-      case X86::VBROADCASTF64X2Z128rm: NumLanes = 2; break;
-      case X86::VBROADCASTF64X2rm:     NumLanes = 4; break;
-      case X86::VBROADCASTF64X4rm:     NumLanes = 2; break;
-      case X86::VBROADCASTI32X4Z256rm: NumLanes = 2; break;
-      case X86::VBROADCASTI32X4rm:     NumLanes = 4; break;
-      case X86::VBROADCASTI32X8rm:     NumLanes = 2; break;
-      case X86::VBROADCASTI64X2Z128rm: NumLanes = 2; break;
-      case X86::VBROADCASTI64X2rm:     NumLanes = 4; break;
-      case X86::VBROADCASTI64X4rm:     NumLanes = 2; break;
+      CASE_128_MOV_RM()                NumLanes = 1; BitWidth = 128; break;
+      CASE_256_MOV_RM()                NumLanes = 1; BitWidth = 256; break;
+      CASE_512_MOV_RM()                NumLanes = 1; BitWidth = 512; break;
+      case X86::VBROADCASTF128:        NumLanes = 2; BitWidth = 128; break;
+      case X86::VBROADCASTI128:        NumLanes = 2; BitWidth = 128; break;
+      case X86::VBROADCASTF32X4Z256rm: NumLanes = 2; BitWidth = 128; break;
+      case X86::VBROADCASTF32X4rm:     NumLanes = 4; BitWidth = 128; break;
+      case X86::VBROADCASTF32X8rm:     NumLanes = 2; BitWidth = 256; break;
+      case X86::VBROADCASTF64X2Z128rm: NumLanes = 2; BitWidth = 128; break;
+      case X86::VBROADCASTF64X2rm:     NumLanes = 4; BitWidth = 128; break;
+      case X86::VBROADCASTF64X4rm:     NumLanes = 2; BitWidth = 256; break;
+      case X86::VBROADCASTI32X4Z256rm: NumLanes = 2; BitWidth = 128; break;
+      case X86::VBROADCASTI32X4rm:     NumLanes = 4; BitWidth = 128; break;
+      case X86::VBROADCASTI32X8rm:     NumLanes = 2; BitWidth = 256; break;
+      case X86::VBROADCASTI64X2Z128rm: NumLanes = 2; BitWidth = 128; break;
+      case X86::VBROADCASTI64X2rm:     NumLanes = 4; BitWidth = 128; break;
+      case X86::VBROADCASTI64X4rm:     NumLanes = 2; BitWidth = 256; break;
       }
 
       std::string Comment;
@@ -1894,10 +1912,12 @@ static void addConstantComments(const MachineInstr *MI,
       const MachineOperand &DstOp = MI->getOperand(0);
       CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
       if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
+        int NumElements = CDS->getNumElements();
+        if ((BitWidth % CstEltSize) == 0)
+          NumElements = std::min<int>(NumElements, BitWidth / CstEltSize);
         CS << "[";
         for (int l = 0; l != NumLanes; ++l) {
-          for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements;
-               ++i) {
+          for (int i = 0; i < NumElements; ++i) {
             if (i != 0 || l != 0)
               CS << ",";
             if (CDS->getElementType()->isIntegerTy())
@@ -1913,10 +1933,12 @@ static void addConstantComments(const MachineInstr *MI,
         CS << "]";
         OutStreamer.AddComment(CS.str());
       } else if (auto *CV = dyn_cast<ConstantVector>(C)) {
+        int NumOperands = CV->getNumOperands();
+        if ((BitWidth % CstEltSize) == 0)
+          NumOperands = std::min<int>(NumOperands, BitWidth / CstEltSize);
         CS << "<";
         for (int l = 0; l != NumLanes; ++l) {
-          for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands;
-               ++i) {
+          for (int i = 0; i < NumOperands; ++i) {
             if (i != 0 || l != 0)
               CS << ",";
             printConstant(CV->getOperand(i),

diff --git a/llvm/test/CodeGen/X86/insert-into-constant-vector.ll b/llvm/test/CodeGen/X86/insert-into-constant-vector.ll
index 0f113556652d41f..7ea4d71385c6bc1 100644
--- a/llvm/test/CodeGen/X86/insert-into-constant-vector.ll
+++ b/llvm/test/CodeGen/X86/insert-into-constant-vector.ll
@@ -412,7 +412,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ;
 ; X64-AVX1-LABEL: elt5_v8i64:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,u,6,7>
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,u>
 ; X64-AVX1-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; X64-AVX1-NEXT:    vmovaps {{.*#+}} ymm0 = [42,1,2,3]
@@ -429,7 +429,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
 ;
 ; X64-AVX2-LABEL: elt5_v8i64:
 ; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,u,6,7>
+; X64-AVX2-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,u>
 ; X64-AVX2-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; X64-AVX2-NEXT:    vmovaps {{.*#+}} ymm0 = [42,1,2,3]
@@ -478,7 +478,7 @@ define <8 x double> @elt1_v8f64(double %x) {
 ;
 ; X86-AVX1-LABEL: elt1_v8f64:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = <4.2E+1,u,2.0E+0,3.0E+0>
+; X86-AVX1-NEXT:    vmovaps {{.*#+}} xmm0 = <4.2E+1,u>
 ; X86-AVX1-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
 ; X86-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; X86-AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [4.0E+0,5.0E+0,6.0E+0,7.0E+0]
@@ -486,7 +486,7 @@ define <8 x double> @elt1_v8f64(double %x) {
 ;
 ; X64-AVX1-LABEL: elt1_v8f64:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vmovaps {{.*#+}} xmm1 = <4.2E+1,u,2.0E+0,3.0E+0>
+; X64-AVX1-NEXT:    vmovaps {{.*#+}} xmm1 = <4.2E+1,u>
 ; X64-AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; X64-AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [4.0E+0,5.0E+0,6.0E+0,7.0E+0]
@@ -494,7 +494,7 @@ define <8 x double> @elt1_v8f64(double %x) {
 ;
 ; X86-AVX2-LABEL: elt1_v8f64:
 ; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = <4.2E+1,u,2.0E+0,3.0E+0>
+; X86-AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = <4.2E+1,u>
 ; X86-AVX2-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
 ; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; X86-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = [4.0E+0,5.0E+0,6.0E+0,7.0E+0]
@@ -502,7 +502,7 @@ define <8 x double> @elt1_v8f64(double %x) {
 ;
 ; X64-AVX2-LABEL: elt1_v8f64:
 ; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vmovaps {{.*#+}} xmm1 = <4.2E+1,u,2.0E+0,3.0E+0>
+; X64-AVX2-NEXT:    vmovaps {{.*#+}} xmm1 = <4.2E+1,u>
 ; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; X64-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = [4.0E+0,5.0E+0,6.0E+0,7.0E+0]
@@ -510,7 +510,7 @@ define <8 x double> @elt1_v8f64(double %x) {
 ;
 ; X86-AVX512F-LABEL: elt1_v8f64:
 ; X86-AVX512F:       # %bb.0:
-; X86-AVX512F-NEXT:    vmovaps {{.*#+}} xmm0 = <4.2E+1,u,2.0E+0,3.0E+0,4.0E+0,5.0E+0,6.0E+0,7.0E+0>
+; X86-AVX512F-NEXT:    vmovaps {{.*#+}} xmm0 = <4.2E+1,u>
 ; X86-AVX512F-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
 ; X86-AVX512F-NEXT:    vmovaps {{.*#+}} zmm1 = <4.2E+1,u,2.0E+0,3.0E+0,4.0E+0,5.0E+0,6.0E+0,7.0E+0>
 ; X86-AVX512F-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
@@ -518,7 +518,7 @@ define <8 x double> @elt1_v8f64(double %x) {
 ;
 ; X64-AVX512F-LABEL: elt1_v8f64:
 ; X64-AVX512F:       # %bb.0:
-; X64-AVX512F-NEXT:    vmovaps {{.*#+}} xmm1 = <4.2E+1,u,2.0E+0,3.0E+0,4.0E+0,5.0E+0,6.0E+0,7.0E+0>
+; X64-AVX512F-NEXT:    vmovaps {{.*#+}} xmm1 = <4.2E+1,u>
 ; X64-AVX512F-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; X64-AVX512F-NEXT:    vmovaps {{.*#+}} zmm1 = <4.2E+1,u,2.0E+0,3.0E+0,4.0E+0,5.0E+0,6.0E+0,7.0E+0>
 ; X64-AVX512F-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
