@@ -99,10 +99,25 @@ struct CGRecordLowering {
   MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
     return MemberInfo(Offset, MemberInfo::Field, Data);
   }
-  bool useMSABI() {
+
+  /// The Microsoft bitfield layout rule allocates discrete storage
+  /// units of the field's formal type and only combines adjacent
+  /// fields of the same formal type. We want to emit a layout with
+  /// these discrete storage units instead of combining them into a
+  /// continuous run.
+  bool isDiscreteBitFieldABI() {
     return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
            D->isMsStruct(Context);
   }
+
+  /// The Itanium base layout rule allows virtual bases to overlap
+  /// other bases, which complicates layout in specific ways.
+  ///
+  /// Note specifically that the ms_struct attribute doesn't change this.
+  bool isOverlappingVBaseABI() {
+    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
+  }
+
   /// \brief Wraps llvm::Type::getIntNTy with some implicit arguments.
   llvm::Type *getIntNType(uint64_t NumBits) {
     return llvm::Type::getIntNTy(Types.getLLVMContext(),
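
A quick illustration of the rule the new comment describes (not part of the patch; the struct and field names are hypothetical): under the Microsoft rule, adjacent bitfields share a storage unit only when their formal types match, whereas the Itanium rule packs them into a continuous run.

// Hypothetical example, for illustration only.
struct Example {
  char  a : 4;  // Microsoft rule: placed in its own char-sized storage unit
  short b : 4;  // different formal type, so a new short-sized unit is started;
                // under the Itanium rule, 'a' and 'b' can share one packed run
};
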
@@ -119,8 +134,9 @@ struct CGRecordLowering {
   /// for itanium bitfields that are smaller than their declared type.
   llvm::Type *getStorageType(const FieldDecl *FD) {
     llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
-    return useMSABI() || !FD->isBitField() ? Type :
-      getIntNType(std::min(FD->getBitWidthValue(Context),
+    if (!FD->isBitField()) return Type;
+    if (isDiscreteBitFieldABI()) return Type;
+    return getIntNType(std::min(FD->getBitWidthValue(Context),
        (unsigned)Context.toBits(getSize(Type))));
   }
   /// \brief Gets the llvm BaseSubobject type from a CXXRecordDecl.
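
A rough model of the storage-width decision above (a hypothetical standalone helper, not clang's API): non-bitfields and discrete-layout (Microsoft-style) bitfields keep the width of their formal type, while Itanium bitfields are clamped to their declared bit width.

#include <algorithm>

// Hypothetical sketch of the decision made in getStorageType():
// returns the number of bits used for a field's storage type.
unsigned storageBits(unsigned formalTypeBits, unsigned declaredBitWidth,
                     bool isBitField, bool discreteBitFieldABI) {
  if (!isBitField || discreteBitFieldABI)
    return formalTypeBits;                           // keep the formal type's width
  return std::min(declaredBitWidth, formalTypeBits); // e.g. 'unsigned x : 3' -> 3 bits
}
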
@@ -365,7 +381,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
   // used to determine if the ASTRecordLayout is treating these two bitfields as
   // contiguous. StartBitOffset is offset of the beginning of the Run.
   uint64_t StartBitOffset, Tail = 0;
-  if (useMSABI()) {
+  if (isDiscreteBitFieldABI()) {
     for (; Field != FieldEnd; ++Field) {
       uint64_t BitOffset = getFieldBitOffset(*Field);
       // Zero-width bitfields end runs.
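
The "zero-width bitfields end runs" case can be seen with a declaration like the following (illustrative only, not taken from the patch):

// Illustration: a zero-width bitfield terminates the current run, so under
// the discrete (Microsoft-style) layout 'b' begins a fresh storage unit
// instead of sharing one with 'a'.
struct Runs {
  int a : 4;
  int   : 0;  // zero-width bitfield: ends the run
  int b : 4;  // starts a new run / storage unit
};
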
@@ -465,7 +481,7 @@ void CGRecordLowering::accumulateVBases() {
   // smaller than the nvsize. Here we check to see if such a base is placed
   // before the nvsize and set the scissor offset to that, instead of the
   // nvsize.
-  if (!useMSABI())
+  if (isOverlappingVBaseABI())
     for (const auto &Base : RD->vbases()) {
       const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
       if (BaseDecl->isEmpty())
@@ -486,7 +502,8 @@ void CGRecordLowering::accumulateVBases() {
     CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
     // If the vbase is a primary virtual base of some base, then it doesn't
     // get its own storage location but instead lives inside of that base.
-    if (!useMSABI() && Context.isNearlyEmpty(BaseDecl) &&
+    if (isOverlappingVBaseABI() &&
+        Context.isNearlyEmpty(BaseDecl) &&
         !hasOwnStorage(RD, BaseDecl)) {
       Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                    BaseDecl));
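
To illustrate the primary-virtual-base case handled here (hypothetical classes, assuming the Itanium C++ ABI): a nearly empty class can serve as the primary base of a class that inherits from it virtually, and a virtual base laid out that way is not given a storage location of its own in further-derived classes.

// Hypothetical illustration, assuming the Itanium C++ ABI.
struct A { virtual void f(); };             // nearly empty: essentially just a vptr
struct B : virtual A { virtual void g(); }; // A can act as B's primary virtual base
struct C : B { int x; };                    // in C, the A subobject is expected to
                                            // live inside B rather than get a
                                            // storage location of its own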