[cfe-commits] r169693 - /cfe/trunk/test/CodeGenCXX/bitfield.cpp

Chandler Carruth chandlerc at gmail.com
Sun Dec 9 02:08:23 PST 2012


Author: chandlerc
Date: Sun Dec  9 04:08:22 2012
New Revision: 169693

URL: http://llvm.org/viewvc/llvm-project?rev=169693&view=rev
Log:
Add a test case that I've been using to clarify the bitfield layout for
both LE and BE targets.
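
For reference, the shift amounts asserted below follow directly from the
bit offsets of the packed bitfields (a worked sketch; 'off' is the sum
of the widths of the preceding fields, 'w' is the field's width, and the
whole struct packs into a single 64-bit unit):

  little endian (x86-64): (val >> off) & ((1 << w) - 1)
  big endian    (PPC64):  (val >> (64 - off - w)) & ((1 << w) - 1)

For example, b20 has off = 16 and w = 6, so it is read as
'(val >> 16) & 63' on x86-64 and '(val >> 42) & 63' on PPC64.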

AFAICT, Clang gets this correct for PPC64. I've compared it to GCC 4.8
output for PPC64 (thanks Roman!), and to the best of my limited ability
to read Power assembly, it looks functionally equivalent. It would be
really good to
fill in the assertions on this test case for x86-32, PPC32, ARM, etc.,
but I've reached the limit of my time and energy... Hopefully other
folks can chip in as it would be good to have this in place to test any
subsequent changes.

To those who care about PPC64 performance, a side note: there is some
*obnoxiously* bad code generated for these test cases. It would be worth
someone's time to sit down and teach the PPC backend to pattern match
these IR constructs better. It appears that things like '(shr %foo,
<imm>)' turn into 'rldicl R, R, 64-<imm>, <imm>' or some such. They
don't even get combined with *immediately adjacent* 'rldicl'
instructions. I'll add a couple of these patterns to the README, but
I think it would be better to look at all the patterns produced by this
and other bitfield access code, and systematically build up a collection
of patterns that efficiently reduce them to the minimal code.
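
To make that concrete (a hedged sketch; the register choice and exact
operands are illustrative, not copied from actual codegen): the PPC64
'read20' case below boils down to the IR pair

  %shr = lshr i64 %val, 42
  %and = and i64 %shr, 63

which a single rotate-and-mask could cover, something like

  rldicl 3, 3, 22, 58    # rotate left by 64-42, keep only the low 6 bits

rather than one rldicl for the shift followed by a second rldicl for
the mask.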

Modified:
    cfe/trunk/test/CodeGenCXX/bitfield.cpp

Modified: cfe/trunk/test/CodeGenCXX/bitfield.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/bitfield.cpp?rev=169693&r1=169692&r2=169693&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenCXX/bitfield.cpp (original)
+++ cfe/trunk/test/CodeGenCXX/bitfield.cpp Sun Dec  9 04:08:22 2012
@@ -6,6 +6,153 @@
 // Tests for bitfield access patterns in C++ with special attention to
 // conformance to C++11 memory model requirements.
 
+namespace N0 {
+  // Test basic bitfield layout access across interesting byte and word
+  // boundaries on both little endian and big endian platforms.
+  struct __attribute__((packed)) S {
+    unsigned b00 : 14;
+    unsigned b01 : 2;
+    unsigned b20 : 6;
+    unsigned b21 : 2;
+    unsigned b30 : 30;
+    unsigned b31 : 2;
+    unsigned b70 : 6;
+    unsigned b71 : 2;
+  };
+  unsigned read00(S* s) {
+    // CHECK-X86-64: define i32 @_ZN2N06read00
+    // CHECK-X86-64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-X86-64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-X86-64:   %[[and:.*]]   = and i64 %[[val]], 16383
+    // CHECK-X86-64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-X86-64:                   ret i32 %[[trunc]]
+    // CHECK-PPC64: define zeroext i32 @_ZN2N06read00
+    // CHECK-PPC64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-PPC64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-PPC64:   %[[shr:.*]]   = lshr i64 %[[val]], 50
+    // CHECK-PPC64:   %[[trunc:.*]] = trunc i64 %[[shr]] to i32
+    // CHECK-PPC64:                   ret i32 %[[trunc]]
+    return s->b00;
+  }
+  unsigned read01(S* s) {
+    // CHECK-X86-64: define i32 @_ZN2N06read01
+    // CHECK-X86-64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-X86-64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-X86-64:   %[[shr:.*]]   = lshr i64 %[[val]], 14
+    // CHECK-X86-64:   %[[and:.*]]   = and i64 %[[shr]], 3
+    // CHECK-X86-64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-X86-64:                   ret i32 %[[trunc]]
+    // CHECK-PPC64: define zeroext i32 @_ZN2N06read01
+    // CHECK-PPC64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-PPC64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-PPC64:   %[[shr:.*]]   = lshr i64 %[[val]], 48
+    // CHECK-PPC64:   %[[and:.*]]   = and i64 %[[shr]], 3
+    // CHECK-PPC64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-PPC64:                   ret i32 %[[trunc]]
+    return s->b01;
+  }
+  unsigned read20(S* s) {
+    // CHECK-X86-64: define i32 @_ZN2N06read20
+    // CHECK-X86-64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-X86-64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-X86-64:   %[[shr:.*]]   = lshr i64 %[[val]], 16
+    // CHECK-X86-64:   %[[and:.*]]   = and i64 %[[shr]], 63
+    // CHECK-X86-64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-X86-64:                   ret i32 %[[trunc]]
+    // CHECK-PPC64: define zeroext i32 @_ZN2N06read20
+    // CHECK-PPC64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-PPC64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-PPC64:   %[[shr:.*]]   = lshr i64 %[[val]], 42
+    // CHECK-PPC64:   %[[and:.*]]   = and i64 %[[shr]], 63
+    // CHECK-PPC64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-PPC64:                   ret i32 %[[trunc]]
+    return s->b20;
+  }
+  unsigned read21(S* s) {
+    // CHECK-X86-64: define i32 @_ZN2N06read21
+    // CHECK-X86-64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-X86-64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-X86-64:   %[[shr:.*]]   = lshr i64 %[[val]], 22
+    // CHECK-X86-64:   %[[and:.*]]   = and i64 %[[shr]], 3
+    // CHECK-X86-64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-X86-64:                   ret i32 %[[trunc]]
+    // CHECK-PPC64: define zeroext i32 @_ZN2N06read21
+    // CHECK-PPC64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-PPC64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-PPC64:   %[[shr:.*]]   = lshr i64 %[[val]], 40
+    // CHECK-PPC64:   %[[and:.*]]   = and i64 %[[shr]], 3
+    // CHECK-PPC64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-PPC64:                   ret i32 %[[trunc]]
+    return s->b21;
+  }
+  unsigned read30(S* s) {
+    // CHECK-X86-64: define i32 @_ZN2N06read30
+    // CHECK-X86-64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-X86-64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-X86-64:   %[[shr:.*]]   = lshr i64 %[[val]], 24
+    // CHECK-X86-64:   %[[and:.*]]   = and i64 %[[shr]], 1073741823
+    // CHECK-X86-64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-X86-64:                   ret i32 %[[trunc]]
+    // CHECK-PPC64: define zeroext i32 @_ZN2N06read30
+    // CHECK-PPC64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-PPC64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-PPC64:   %[[shr:.*]]   = lshr i64 %[[val]], 10
+    // CHECK-PPC64:   %[[and:.*]]   = and i64 %[[shr]], 1073741823
+    // CHECK-PPC64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-PPC64:                   ret i32 %[[trunc]]
+    return s->b30;
+  }
+  unsigned read31(S* s) {
+    // CHECK-X86-64: define i32 @_ZN2N06read31
+    // CHECK-X86-64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-X86-64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-X86-64:   %[[shr:.*]]   = lshr i64 %[[val]], 54
+    // CHECK-X86-64:   %[[and:.*]]   = and i64 %[[shr]], 3
+    // CHECK-X86-64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-X86-64:                   ret i32 %[[trunc]]
+    // CHECK-PPC64: define zeroext i32 @_ZN2N06read31
+    // CHECK-PPC64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-PPC64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-PPC64:   %[[shr:.*]]   = lshr i64 %[[val]], 8
+    // CHECK-PPC64:   %[[and:.*]]   = and i64 %[[shr]], 3
+    // CHECK-PPC64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-PPC64:                   ret i32 %[[trunc]]
+    return s->b31;
+  }
+  unsigned read70(S* s) {
+    // CHECK-X86-64: define i32 @_ZN2N06read70
+    // CHECK-X86-64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-X86-64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-X86-64:   %[[shr:.*]]   = lshr i64 %[[val]], 56
+    // CHECK-X86-64:   %[[and:.*]]   = and i64 %[[shr]], 63
+    // CHECK-X86-64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-X86-64:                   ret i32 %[[trunc]]
+    // CHECK-PPC64: define zeroext i32 @_ZN2N06read70
+    // CHECK-PPC64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-PPC64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-PPC64:   %[[shr:.*]]   = lshr i64 %[[val]], 2
+    // CHECK-PPC64:   %[[and:.*]]   = and i64 %[[shr]], 63
+    // CHECK-PPC64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-PPC64:                   ret i32 %[[trunc]]
+    return s->b70;
+  }
+  unsigned read71(S* s) {
+    // CHECK-X86-64: define i32 @_ZN2N06read71
+    // CHECK-X86-64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-X86-64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-X86-64:   %[[shr:.*]]   = lshr i64 %[[val]], 62
+    // CHECK-X86-64:   %[[trunc:.*]] = trunc i64 %[[shr]] to i32
+    // CHECK-X86-64:                   ret i32 %[[trunc]]
+    // CHECK-PPC64: define zeroext i32 @_ZN2N06read71
+    // CHECK-PPC64:   %[[ptr:.*]]   = bitcast %{{.*}}* %{{.*}} to i64*
+    // CHECK-PPC64:   %[[val:.*]]   = load i64* %[[ptr]]
+    // CHECK-PPC64:   %[[and:.*]]   = and i64 %[[val]], 3
+    // CHECK-PPC64:   %[[trunc:.*]] = trunc i64 %[[and]] to i32
+    // CHECK-PPC64:                   ret i32 %[[trunc]]
+    return s->b71;
+  }
+}
+
 namespace N1 {
 // Ensure that neither loads nor stores to bitfields are widened into
   // other memory locations. (PR13691)
