@@ -13,7 +13,6 @@
 
 #include "CodeGenFunction.h"
 #include "CGCall.h"
-#include "CGRecordLayout.h"
 #include "CodeGenModule.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
@@ -37,69 +36,34 @@ namespace {
     CharUnits LValueAlign;
     TypeEvaluationKind EvaluationKind;
     bool UseLibcall;
-    LValue LVal;
-    CGBitFieldInfo BFI;
   public:
-    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
-        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0), UseLibcall(true) {
-      assert(!lvalue.isGlobalReg());
+    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
+      assert(lvalue.isSimple());
+
+      AtomicTy = lvalue.getType();
+      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
+      EvaluationKind = CGF.getEvaluationKind(ValueTy);
+
       ASTContext &C = CGF.getContext();
-      if (lvalue.isSimple()) {
-        AtomicTy = lvalue.getType();
-        if (auto *ATy = AtomicTy->getAs<AtomicType>())
-          ValueTy = ATy->getValueType();
-        else
-          ValueTy = AtomicTy;
-        EvaluationKind = CGF.getEvaluationKind(ValueTy);
-
-        uint64_t ValueAlignInBits;
-        uint64_t AtomicAlignInBits;
-        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
-        ValueSizeInBits = ValueTI.Width;
-        ValueAlignInBits = ValueTI.Align;
-
-        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
-        AtomicSizeInBits = AtomicTI.Width;
-        AtomicAlignInBits = AtomicTI.Align;
-
-        assert(ValueSizeInBits <= AtomicSizeInBits);
-        assert(ValueAlignInBits <= AtomicAlignInBits);
-
-        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
-        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
-        if (lvalue.getAlignment().isZero())
-          lvalue.setAlignment(AtomicAlign);
-
-        LVal = lvalue;
-      } else if (lvalue.isBitField()) {
-        auto &OrigBFI = lvalue.getBitFieldInfo();
-        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
-        AtomicSizeInBits = C.toBits(
-            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
-                .RoundUpToAlignment(lvalue.getAlignment()));
-        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
-        auto OffsetInChars =
-            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
-            lvalue.getAlignment();
-        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
-            VoidPtrAddr, OffsetInChars.getQuantity());
-        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
-            VoidPtrAddr,
-            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
-            "atomic_bitfield_base");
-        BFI = OrigBFI;
-        BFI.Offset = Offset;
-        BFI.StorageSize = AtomicSizeInBits;
-        LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
-                                    lvalue.getAlignment());
-      } else if (lvalue.isVectorElt()) {
-        AtomicSizeInBits = C.getTypeSize(lvalue.getType());
-        LVal = lvalue;
-      } else {
-        assert(lvalue.isExtVectorElt());
-        AtomicSizeInBits = C.getTypeSize(lvalue.getType());
-        LVal = lvalue;
-      }
+
+      uint64_t ValueAlignInBits;
+      uint64_t AtomicAlignInBits;
+      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
+      ValueSizeInBits = ValueTI.Width;
+      ValueAlignInBits = ValueTI.Align;
+
+      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
+      AtomicSizeInBits = AtomicTI.Width;
+      AtomicAlignInBits = AtomicTI.Align;
+
+      assert(ValueSizeInBits <= AtomicSizeInBits);
+      assert(ValueAlignInBits <= AtomicAlignInBits);
+
+      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
+      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
+      if (lvalue.getAlignment().isZero())
+        lvalue.setAlignment(AtomicAlign);
+
       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
     }
@@ -112,7 +76,6 @@ namespace {
     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
     bool shouldUseLibcall() const { return UseLibcall; }
-    const LValue &getAtomicLValue() const { return LVal; }
 
     /// Is the atomic size larger than the underlying value type?
     ///
@@ -124,7 +87,7 @@ namespace {
       return (ValueSizeInBits != AtomicSizeInBits);
     }
 
-    bool emitMemSetZeroIfNecessary() const;
+    bool emitMemSetZeroIfNecessary(LValue dest) const;
 
     llvm::Value *getAtomicSizeValue() const {
       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
@@ -147,17 +110,16 @@ namespace {
                              SourceLocation Loc) const;
 
     /// Copy an atomic r-value into atomic-layout memory.
-    void emitCopyIntoMemory(RValue rvalue) const;
+    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;
 
     /// Project an l-value down to the value field.
-    LValue projectValue() const {
-      assert(LVal.isSimple());
-      llvm::Value *addr = LVal.getAddress();
+    LValue projectValue(LValue lvalue) const {
+      llvm::Value *addr = lvalue.getAddress();
       if (hasPadding())
         addr = CGF.Builder.CreateStructGEP(addr, 0);
 
-      return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
-                              CGF.getContext(), LVal.getTBAAInfo());
+      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
+                              CGF.getContext(), lvalue.getTBAAInfo());
     }
 
     /// Materialize an atomic r-value in atomic-layout memory.
@@ -210,15 +172,14 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
   llvm_unreachable("bad evaluation kind");
 }
 
-bool AtomicInfo::emitMemSetZeroIfNecessary() const {
-  assert(LVal.isSimple());
-  llvm::Value *addr = LVal.getAddress();
+bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
+  llvm::Value *addr = dest.getAddress();
   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
     return false;
 
   CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                            AtomicSizeInBits / 8,
-                           LVal.getAlignment().getQuantity());
+                           dest.getAlignment().getQuantity());
   return true;
 }
 
@@ -941,34 +902,21 @@ llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
 RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                        AggValueSlot resultSlot,
                                        SourceLocation loc) const {
-  if (LVal.isSimple()) {
-    if (EvaluationKind == TEK_Aggregate)
-      return resultSlot.asRValue();
-
-    // Drill into the padding structure if we have one.
-    if (hasPadding())
-      addr = CGF.Builder.CreateStructGEP(addr, 0);
-
-    // Otherwise, just convert the temporary to an r-value using the
-    // normal conversion routine.
-    return CGF.convertTempToRValue(addr, getValueType(), loc);
-  } else if (LVal.isBitField())
-    return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
-        addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
-  else if (LVal.isVectorElt())
-    return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
-                                                      LVal.getType(),
-                                                      LVal.getAlignment()),
-                                loc);
-  assert(LVal.isExtVectorElt());
-  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
-      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
+  if (EvaluationKind == TEK_Aggregate)
+    return resultSlot.asRValue();
+
+  // Drill into the padding structure if we have one.
+  if (hasPadding())
+    addr = CGF.Builder.CreateStructGEP(addr, 0);
+
+  // Otherwise, just convert the temporary to an r-value using the
+  // normal conversion routine.
+  return CGF.convertTempToRValue(addr, getValueType(), loc);
 }
 
 RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
                                      AggValueSlot ResultSlot,
                                      SourceLocation Loc) const {
-  assert(LVal.isSimple());
   // Try not to in some easy cases.
   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
   if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
@@ -1010,43 +958,25 @@ RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                        AggValueSlot resultSlot) {
   AtomicInfo atomics(*this, src);
-  LValue LVal = atomics.getAtomicLValue();
-  llvm::Value *SrcAddr = nullptr;
-  llvm::AllocaInst *NonSimpleTempAlloca = nullptr;
-  if (LVal.isSimple())
-    SrcAddr = LVal.getAddress();
-  else {
-    if (LVal.isBitField())
-      SrcAddr = LVal.getBitFieldAddr();
-    else if (LVal.isVectorElt())
-      SrcAddr = LVal.getVectorAddr();
-    else {
-      assert(LVal.isExtVectorElt());
-      SrcAddr = LVal.getExtVectorAddr();
-    }
-    NonSimpleTempAlloca = CreateTempAlloca(
-        SrcAddr->getType()->getPointerElementType(), "atomic-load-temp");
-    NonSimpleTempAlloca->setAlignment(getContext().toBits(src.getAlignment()));
-  }
 
   // Check whether we should use a library call.
   if (atomics.shouldUseLibcall()) {
     llvm::Value *tempAddr;
-    if (LVal.isSimple()) {
-      if (!resultSlot.isIgnored()) {
-        assert(atomics.getEvaluationKind() == TEK_Aggregate);
-        tempAddr = resultSlot.getAddr();
-      } else
-        tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
-    } else
-      tempAddr = NonSimpleTempAlloca;
+    if (!resultSlot.isIgnored()) {
+      assert(atomics.getEvaluationKind() == TEK_Aggregate);
+      tempAddr = resultSlot.getAddr();
+    } else {
+      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
+    }
 
     // void __atomic_load(size_t size, void *mem, void *return, int order);
     CallArgList args;
     args.add(RValue::get(atomics.getAtomicSizeValue()),
              getContext().getSizeType());
-    args.add(RValue::get(EmitCastToVoidPtr(SrcAddr)), getContext().VoidPtrTy);
-    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)), getContext().VoidPtrTy);
+    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
+             getContext().VoidPtrTy);
+    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
+             getContext().VoidPtrTy);
     args.add(RValue::get(llvm::ConstantInt::get(
                  IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
              getContext().IntTy);
@@ -1057,7 +987,7 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
   }
 
   // Okay, we're doing this natively.
-  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(SrcAddr);
+  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
   llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
   load->setAtomic(llvm::SequentiallyConsistent);
 
@@ -1073,46 +1003,40 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
     return RValue::getAggregate(nullptr, false);
 
   // Okay, turn that back into the original value type.
-  if (src.isSimple())
-    return atomics.convertIntToValue(load, resultSlot, loc);
-
-  auto *IntAddr = atomics.emitCastToAtomicIntPointer(NonSimpleTempAlloca);
-  Builder.CreateAlignedStore(load, IntAddr, src.getAlignment().getQuantity());
-  return atomics.convertTempToRValue(NonSimpleTempAlloca, resultSlot, loc);
+  return atomics.convertIntToValue(load, resultSlot, loc);
 }
 
 
 
 /// Copy an r-value into memory as part of storing to an atomic type.
 /// This needs to create a bit-pattern suitable for atomic operations.
-void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
-  assert(LVal.isSimple());
+void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
   // If we have an r-value, the rvalue should be of the atomic type,
   // which means that the caller is responsible for having zeroed
   // any padding.  Just do an aggregate copy of that type.
   if (rvalue.isAggregate()) {
-    CGF.EmitAggregateCopy(LVal.getAddress(),
+    CGF.EmitAggregateCopy(dest.getAddress(),
                           rvalue.getAggregateAddr(),
                           getAtomicType(),
                           (rvalue.isVolatileQualified()
-                           || LVal.isVolatileQualified()),
-                          LVal.getAlignment());
+                           || dest.isVolatileQualified()),
+                          dest.getAlignment());
     return;
   }
 
   // Okay, otherwise we're copying stuff.
 
   // Zero out the buffer if necessary.
-  emitMemSetZeroIfNecessary();
+  emitMemSetZeroIfNecessary(dest);
 
   // Drill past the padding if present.
-  LValue TempLVal = projectValue();
+  dest = projectValue(dest);
 
   // Okay, store the rvalue in.
   if (rvalue.isScalar()) {
-    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
+    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
   } else {
-    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
+    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
   }
 }
 
@@ -1127,10 +1051,8 @@ llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
 
   // Otherwise, make a temporary and materialize into it.
   llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
-  LValue tempLV =
-      CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
-  AtomicInfo Atomics(CGF, tempLV);
-  Atomics.emitCopyIntoMemory(rvalue);
+  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
+  emitCopyIntoMemory(rvalue, tempLV);
   return temp;
 }
 
@@ -1176,7 +1098,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
 
   // If this is an initialization, just put the value there normally.
   if (isInit) {
-    atomics.emitCopyIntoMemory(rvalue);
+    atomics.emitCopyIntoMemory(rvalue, dest);
     return;
   }
 
@@ -1292,13 +1214,13 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
   switch (atomics.getEvaluationKind()) {
   case TEK_Scalar: {
     llvm::Value *value = EmitScalarExpr(init);
-    atomics.emitCopyIntoMemory(RValue::get(value));
+    atomics.emitCopyIntoMemory(RValue::get(value), dest);
     return;
   }
 
   case TEK_Complex: {
     ComplexPairTy value = EmitComplexExpr(init);
-    atomics.emitCopyIntoMemory(RValue::getComplex(value));
+    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
     return;
   }
 
@@ -1307,8 +1229,8 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
     // of atomic type.
     bool Zeroed = false;
     if (!init->getType()->isAtomicType()) {
-      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
-      dest = atomics.projectValue();
+      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
+      dest = atomics.projectValue(dest);
    }
 
     // Evaluate the expression directly into the destination.
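
Taken as a whole, the diff drops AtomicInfo's cached LVal/BFI state (and with it the bit-field, vector-element, and ext-vector-element paths), and instead threads the simple LValue through emitMemSetZeroIfNecessary, emitCopyIntoMemory, and projectValue as an explicit parameter, with EmitAtomicLoad reading straight from src.getAddress(). Below is a minimal, self-contained sketch of that parameter-threading shape outside of Clang; Slot, CopyHelper, and all member names here are hypothetical stand-ins for LValue/AtomicInfo, not Clang APIs.

// Sketch: helpers take the destination as an explicit parameter instead of
// caching it in a member at construction time. Names are hypothetical.
#include <cassert>
#include <cstddef>
#include <cstring>
#include <iostream>

struct Slot {                 // stand-in for a simple LValue
  char *Addr;
  std::size_t SizeInBytes;
};

class CopyHelper {            // stand-in for AtomicInfo
  unsigned ValueSize;         // payload size in bytes
  unsigned StorageSize;       // padded storage size in bytes
public:
  CopyHelper(unsigned ValueSize, unsigned StorageSize)
      : ValueSize(ValueSize), StorageSize(StorageSize) {}

  bool hasPadding() const { return ValueSize != StorageSize; }

  // Mirrors emitMemSetZeroIfNecessary(LValue): the destination is passed in.
  bool zeroIfNecessary(Slot dest) const {
    if (!hasPadding())
      return false;
    std::memset(dest.Addr, 0, StorageSize);
    return true;
  }

  // Mirrors emitCopyIntoMemory(RValue, LValue): zero the padding, then store.
  void copyIntoMemory(const char *src, Slot dest) const {
    assert(dest.SizeInBytes >= StorageSize);
    zeroIfNecessary(dest);
    std::memcpy(dest.Addr, src, ValueSize);
  }
};

int main() {
  char storage[8] = {};
  CopyHelper helper(/*ValueSize=*/4, /*StorageSize=*/8);
  helper.copyIntoMemory("abcd", Slot{storage, sizeof(storage)});
  std::cout << storage << "\n";   // prints "abcd"; the padding bytes stay zeroed
}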