Commit aad6bc8
IRGen: Pass large explosions as indirect arguments.
It's not worth burning more than three registers on a parameter, and doing so causes code size issues for large structs and enums. Make it so that values with more than three explosion members get passed indirectly, just like they get returned indirectly.

This time, modify emitPartialApplyForwarder not to attempt to tail-call the original function when indirect arguments get alloca'ed on the stack, which is UB, and don't use "byval", as suggested by John.

Swift SVN r29032
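As a rough illustration of the new rule, here is a minimal sketch (hypothetical type and function names, not code from this commit): a value whose explosion has more than three scalar members is now received through a pointer in the Swift convention, while smaller explosions are still passed directly. The approximate IR signatures in the comments mirror the FileCheck expectations updated in test/IRGen/abitypes.swift below.

// Illustrative sketch only; the IR shapes in the comments are approximations.
struct FourFloats {            // explodes to 4 scalars: above the 3-scalar limit,
  var a, b, c, d: Float        // so it is now passed as a pointer to a stack copy
}

struct TwoFloats {             // explodes to 2 scalars: still passed directly,
  var a, b: Float              // as before this commit
}

func sumBig(v: FourFloats) -> Float {   // approx. IR: define float @sumBig(%FourFloats*)
  return v.a + v.b + v.c + v.d
}

func sumSmall(v: TwoFloats) -> Float {  // approx. IR: define float @sumSmall(float, float)
  return v.a + v.b
}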
1 parent bb78a4d · commit aad6bc8

16 files changed, +286 -74 lines

lib/IRGen/CallEmission.h

+3 -1

@@ -77,7 +77,9 @@ class CallEmission {
   }

   /// Set the arguments to the function from an explosion.
-  void setArgs(Explosion &arg, WitnessMetadata *witnessMetadata = nullptr);
+  void setArgs(Explosion &arg,
+               ArrayRef<SILParameterInfo> params,
+               WitnessMetadata *witnessMetadata = nullptr);

   void addAttribute(unsigned Index, llvm::Attribute::AttrKind Attr);

lib/IRGen/Explosion.h

+3

@@ -233,6 +233,7 @@ class ExplosionSchema {
     return Elements[index];
   }

+  bool requiresIndirectParameter(IRGenModule &IGM) const;
   bool requiresIndirectResult(IRGenModule &IGM) const;

   typedef SmallVectorImpl<Element>::iterator iterator;
@@ -258,6 +259,8 @@ class ExplosionSchema {
   /// Treating the types in this schema as potential arguments to a
   /// function call, add them to the end of the given vector of types.
   void addToArgTypes(IRGenModule &IGM,
+                     const TypeInfo &TI,
+                     llvm::AttributeSet &Attrs,
                      SmallVectorImpl<llvm::Type*> &types) const;
 };

lib/IRGen/GenFunc.cpp

+100 -15

@@ -121,6 +121,12 @@ bool ExplosionSchema::requiresIndirectResult(IRGenModule &IGM) const {
          size() > IGM.TargetInfo.MaxScalarsForDirectResult;
 }

+bool ExplosionSchema::requiresIndirectParameter(IRGenModule &IGM) const {
+  // For now, use the same condition as requiresIndirectSchema. We may want
+  // to diverge at some point.
+  return requiresIndirectResult(IGM);
+}
+
 llvm::Type *ExplosionSchema::getScalarResultType(IRGenModule &IGM) const {
   if (size() == 0) {
     return IGM.VoidTy;
@@ -134,7 +140,14 @@ llvm::Type *ExplosionSchema::getScalarResultType(IRGenModule &IGM) const {
 }

 void ExplosionSchema::addToArgTypes(IRGenModule &IGM,
+                                    const TypeInfo &TI,
+                                    llvm::AttributeSet &Attrs,
                                     SmallVectorImpl<llvm::Type*> &types) const {
+  // Pass indirect arguments as byvals.
+  if (requiresIndirectParameter(IGM)) {
+    types.push_back(TI.getStorageType()->getPointerTo());
+    return;
+  }
   for (auto &elt : *this) {
     if (elt.isAggregate())
       types.push_back(elt.getAggregateType()->getPointerTo());
@@ -1271,8 +1284,9 @@ void SignatureExpansion::expand(SILParameterInfo param) {
     return;
   }
   case SILFunctionLanguage::Swift: {
-    auto schema = IGM.getSchema(param.getSILType());
-    schema.addToArgTypes(IGM, ParamIRTypes);
+    auto &ti = IGM.getTypeInfo(param.getSILType());
+    auto schema = ti.getSchema();
+    schema.addToArgTypes(IGM, ti, Attrs, ParamIRTypes);
     return;
   }
   }
@@ -2824,17 +2838,54 @@ static void externalizeArguments(IRGenFunction &IGF, const Callee &callee,
   }
 }

+namespace {
+  enum IsIndirectValueArgument_t: bool {
+    IsNotIndirectValueArgument = false,
+    IsIndirectValueArgument = true,
+  };
+}
+
+static IsIndirectValueArgument_t addNativeArgument(IRGenFunction &IGF,
+                                                    Explosion &in,
+                                                    const TypeInfo &ti,
+                                                    ParameterConvention convention,
+                                                    Explosion &out) {
+  // Addresses consist of a single pointer argument.
+  if (isIndirectParameter(convention)) {
+    out.add(in.claimNext());
+    return IsNotIndirectValueArgument;
+  }
+
+  auto &loadableTI = cast<LoadableTypeInfo>(ti);
+  auto schema = ti.getSchema();
+
+  if (schema.requiresIndirectParameter(IGF.IGM)) {
+    // Pass the argument indirectly.
+    auto buf = IGF.createAlloca(ti.getStorageType(),
+                                loadableTI.getFixedAlignment(), "");
+    loadableTI.initialize(IGF, in, buf);
+    out.add(buf.getAddress());
+    return IsIndirectValueArgument;
+  } else {
+    // Pass the argument explosion directly.
+    loadableTI.reexplode(IGF, in, out);
+    return IsNotIndirectValueArgument;
+  }
+}
+
 /// Add a new set of arguments to the function.
-void CallEmission::setArgs(Explosion &arg, WitnessMetadata *witnessMetadata) {
+void CallEmission::setArgs(Explosion &arg,
+                           ArrayRef<SILParameterInfo> params,
+                           WitnessMetadata *witnessMetadata) {
   // Convert arguments to a representation appropriate to the calling
   // convention.
+  Explosion adjustedArg;
+
   switch (getCallee().getRepresentation()) {
   case SILFunctionTypeRepresentation::CFunctionPointer:
   case SILFunctionTypeRepresentation::ObjCMethod:
   case SILFunctionTypeRepresentation::Block: {
-    Explosion externalized;
-    externalizeArguments(IGF, getCallee(), arg, externalized);
-    arg = std::move(externalized);
+    externalizeArguments(IGF, getCallee(), arg, adjustedArg);
     break;
   }

@@ -2848,19 +2899,26 @@ void CallEmission::setArgs(Explosion &arg, WitnessMetadata *witnessMetadata) {
   case SILFunctionTypeRepresentation::Method:
   case SILFunctionTypeRepresentation::Thin:
   case SILFunctionTypeRepresentation::Thick:
-    // Nothing to do.
+    // Check for value arguments that need to be passed indirectly.
+    for (auto param : params) {
+      addNativeArgument(IGF, arg,
+                        IGF.getTypeInfoForLowered(param.getType()),
+                        param.getConvention(),
+                        adjustedArg);
+    }
+    adjustedArg.add(arg.claimAll());
     break;
   }

   // Add the given number of arguments.
-  assert(LastArgWritten >= arg.size());
+  assert(LastArgWritten >= adjustedArg.size());

-  size_t targetIndex = LastArgWritten - arg.size();
+  size_t targetIndex = LastArgWritten - adjustedArg.size();
   assert(targetIndex <= 1);
   LastArgWritten = targetIndex;

   auto argIterator = Args.begin() + targetIndex;
-  for (auto value : arg.claimAll()) {
+  for (auto value : adjustedArg.claimAll()) {
     *argIterator++ = value;
   }
 }
@@ -3180,7 +3238,23 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,

   // Reemit the parameters as unsubstituted.
   for (unsigned i = 0; i < outType->getParameters().size(); ++i) {
-    emitApplyArgument(subIGF, origType->getParameters()[i],
+    Explosion arg;
+    auto origParamInfo = origType->getParameters()[i];
+    auto &ti = IGM.getTypeInfoForLowered(origParamInfo.getType());
+    auto schema = ti.getSchema();
+
+    // Forward the address of indirect value params.
+    if (!isIndirectParameter(origParamInfo.getConvention())
+        && schema.requiresIndirectParameter(IGM)) {
+      auto addr = origParams.claimNext();
+      if (addr->getType() != ti.getStorageType()->getPointerTo())
+        addr = subIGF.Builder.CreateBitCast(addr,
+                                            ti.getStorageType()->getPointerTo());
+      args.add(addr);
+      continue;
+    }
+
+    emitApplyArgument(subIGF, origParamInfo,
                       outType->getParameters()[i],
                       origParams, args);
   }
@@ -3195,6 +3269,7 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,

   bool dependsOnContextLifetime = false;
   bool consumesContext;
+  bool needsAllocas = false;

   switch (outType->getCalleeConvention()) {
   case ParameterConvention::Direct_Owned:
@@ -3260,12 +3335,14 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,
     auto fieldConvention = conventions[nextCapturedField];
     Address fieldAddr = fieldLayout.project(subIGF, data, offsets);
     auto &fieldTI = fieldLayout.getType();
+    auto fieldSchema = fieldTI.getSchema();

     Explosion param;
     switch (fieldConvention) {
     case ParameterConvention::Indirect_In: {
       // The +1 argument is passed indirectly, so we need to copy into a
       // temporary.
+      needsAllocas = true;
       auto caddr = fieldTI.allocateStack(subIGF, fieldTy, "arg.temp");
       fieldTI.initializeWithCopy(subIGF, caddr.getAddress(), fieldAddr,
                                  fieldTy);
@@ -3314,14 +3391,21 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,

     // Reemit the capture params as unsubstituted.
     if (origParamI < origType->getParameters().size()) {
-      emitApplyArgument(subIGF,
-                        origType->getParameters()[origParamI],
+      Explosion origParam;
+      auto origParamInfo = origType->getParameters()[origParamI];
+      emitApplyArgument(subIGF, origParamInfo,
                         substType->getParameters()[origParamI],
-                        param, args);
+                        param, origParam);
+
+      needsAllocas |= addNativeArgument(subIGF, origParam,
+                        IGM.getTypeInfoForLowered(origParamInfo.getType()),
+                        origParamInfo.getConvention(),
+                        args);
       ++origParamI;
     } else {
       args.add(param.claimAll());
     }
+
   }

   // If the parameters can live independent of the context, release it now
@@ -3407,7 +3491,8 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,
     // "C" calling convention, but that may change.
     call->setAttributes(origAttrs);
   }
-  if (!consumesContext || !dependsOnContextLifetime)
+  if (addressesToDeallocate.empty() && !needsAllocas &&
+      (!consumesContext || !dependsOnContextLifetime))
     call->setTailCall();

   // Deallocate everything we allocated above.

lib/IRGen/GenObjC.cpp

+2 -2

@@ -726,7 +726,7 @@ llvm::Value *irgen::emitObjCAllocObjectCall(IRGenFunction &IGF,
     args.add(self);
     args.add(IGF.emitObjCSelectorRefLoad("allocWithZone:"));
     args.add(llvm::ConstantPointerNull::get(IGF.IGM.Int8PtrTy));
-    emission.setArgs(args);
+    emission.setArgs(args, {});
   }

   // Emit the call.
@@ -824,7 +824,7 @@ static llvm::Function *emitObjCPartialApplicationForwarder(IRGenModule &IGM,
   Explosion args;
   addObjCMethodCallImplicitArguments(subIGF, args, method, self, SILType());
   args.add(params.claimAll());
-  emission.setArgs(args);
+  emission.setArgs(args, {});

   // Cleanup that always has to occur after the function call.
   auto cleanup = [&]{

lib/IRGen/IRGenSIL.cpp

+17 -6

@@ -946,20 +946,31 @@ static void bindParameter(IRGenSILFunction &IGF,
   auto &paramTI = IGF.getTypeInfo(param->getType());

   // If the SIL parameter isn't passed indirectly, we need to map it
-  // to an explosion. Fortunately, in this case we have a guarantee
-  // that it's passed directly in IR.
+  // to an explosion.
   if (param->getType().isObject()) {
     Explosion paramValues;
-    cast<LoadableTypeInfo>(paramTI).reexplode(IGF, allParamValues, paramValues);
+    auto &loadableTI = cast<LoadableTypeInfo>(paramTI);
+    // If the explosion must be passed indirectly, load the value from the
+    // indirect address.
+    if (loadableTI.getSchema().requiresIndirectParameter(IGF.IGM)) {
+      Address paramAddr
+        = loadableTI.getAddressForPointer(allParamValues.claimNext());
+      loadableTI.loadAsTake(IGF, paramAddr, paramValues);
+    } else {
+      // Otherwise, we can just take the exploded arguments.
+      // FIXME: It doesn't necessarily make sense to pass all types using their
+      // explosion schema.
+      loadableTI.reexplode(IGF, allParamValues, paramValues);
+    }
     IGF.setLoweredExplosion(SILValue(param, 0), paramValues);
     return;
   }

   // Okay, the type is passed indirectly in SIL, so we need to map
   // it to an address.
   // FIXME: that doesn't mean we should physically pass it
-  // indirectly at this explosion level, but SIL currently gives us
-  // no ability to distinguish between an l-value and a byval argument.
+  // indirectly at this resilience expansion. An @in or @in_guaranteed parameter
+  // could be passed by value in the right resilience domain.
   Address paramAddr
     = paramTI.getAddressForPointer(allParamValues.claimNext());
   IGF.setLoweredAddress(SILValue(param, 0), paramAddr);
@@ -1882,7 +1893,7 @@ void IRGenSILFunction::visitFullApplySite(FullApplySite site) {
   }

   // Add all those arguments.
-  emission.setArgs(llArgs, &witnessMetadata);
+  emission.setArgs(llArgs, params, &witnessMetadata);

   SILInstruction *i = site.getInstruction();


test/IRGen/abitypes.swift

+16 -16

@@ -35,40 +35,40 @@ class Foo {
   }


-  // x86_64-macosx: define hidden double @_TFC8abitypes3Foo14getXFromNSRect{{.*}}(double, double, double, double, %C8abitypes3Foo*) {{.*}} {
+  // x86_64-macosx: define hidden double @_TFC8abitypes3Foo14getXFromNSRect{{.*}}(%VSC6CGRect*, %C8abitypes3Foo*) {{.*}} {
   // x86_64-macosx: define hidden double @_TToFC8abitypes3Foo14getXFromNSRect{{.*}}(i8*, i8*, %VSC6CGRect* byval align 8) unnamed_addr {{.*}} {
-  // armv7-ios: define hidden double @_TFC8abitypes3Foo14getXFromNSRect{{.*}}(float, float, float, float, %C8abitypes3Foo*) {{.*}} {
+  // armv7-ios: define hidden double @_TFC8abitypes3Foo14getXFromNSRect{{.*}}(%VSC6CGRect*, %C8abitypes3Foo*) {{.*}} {
   // armv7-ios: define hidden double @_TToFC8abitypes3Foo14getXFromNSRect{{.*}}(i8*, i8*, [4 x i32]) unnamed_addr {{.*}} {
   dynamic func getXFromNSRect(r: NSRect) -> Double {
     return Double(r.origin.x)
   }

-  // x86_64-macosx: define hidden float @_TFC8abitypes3Foo12getXFromRect{{.*}}(float, float, float, float, %C8abitypes3Foo*) {{.*}} {
+  // x86_64-macosx: define hidden float @_TFC8abitypes3Foo12getXFromRect{{.*}}(%VSC6MyRect*, %C8abitypes3Foo*) {{.*}} {
   // x86_64-macosx: define hidden float @_TToFC8abitypes3Foo12getXFromRect{{.*}}(i8*, i8*, <2 x float>, <2 x float>) unnamed_addr {{.*}} {
-  // armv7-ios: define hidden float @_TFC8abitypes3Foo12getXFromRect{{.*}}(float, float, float, float, %C8abitypes3Foo*) {{.*}} {
+  // armv7-ios: define hidden float @_TFC8abitypes3Foo12getXFromRect{{.*}}(%VSC6MyRect*, %C8abitypes3Foo*) {{.*}} {
   // armv7-ios: define hidden float @_TToFC8abitypes3Foo12getXFromRect{{.*}}(i8*, i8*, [4 x i32]) unnamed_addr {{.*}} {
   dynamic func getXFromRect(r: MyRect) -> Float {
     return r.x
   }

   // Call from Swift entrypoint with exploded Rect to @objc entrypoint
   // with unexploaded ABI-coerced type.
-  // x86_64-macosx: define hidden float @_TFC8abitypes3Foo17getXFromRectSwift{{.*}}(float, float, float, float, [[SELF:%.*]]*) {{.*}} {
+  // x86_64-macosx: define hidden float @_TFC8abitypes3Foo17getXFromRectSwift{{.*}}(%VSC6MyRect*, [[SELF:%.*]]*) {{.*}} {
   // x86_64-macosx: [[COERCED:%.*]] = alloca [[MYRECT:%.*MyRect.*]], align 4
   // x86_64-macosx: [[SEL:%.*]] = load i8*, i8** @"\01L_selector(getXFromRect:)", align 8
   // x86_64-macosx: [[CAST:%.*]] = bitcast [[MYRECT]]* [[COERCED]] to { <2 x float>, <2 x float> }*
   // x86_64-macosx: [[T0:%.*]] = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* [[CAST]], i32 0, i32 0
   // x86_64-macosx: [[FIRST_HALF:%.*]] = load <2 x float>, <2 x float>* [[T0]]
   // x86_64-macosx: [[T0:%.*]] = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* [[CAST]], i32 0, i32 1
   // x86_64-macosx: [[SECOND_HALF:%.*]] = load <2 x float>, <2 x float>* [[T0]]
-  // x86_64-macosx: [[SELFCAST:%.*]] = bitcast [[SELF]]* %4 to i8*
+  // x86_64-macosx: [[SELFCAST:%.*]] = bitcast [[SELF]]* %1 to i8*
   // x86_64-macosx: [[RESULT:%.*]] = call float bitcast (void ()* @objc_msgSend to float (i8*, i8*, <2 x float>, <2 x float>)*)(i8* [[SELFCAST]], i8* [[SEL]], <2 x float> [[FIRST_HALF]], <2 x float> [[SECOND_HALF]])
-  // armv7-ios: define hidden float @_TFC8abitypes3Foo17getXFromRectSwift{{.*}}(float, float, float, float, [[SELF:%.*]]*) {{.*}} {
+  // armv7-ios: define hidden float @_TFC8abitypes3Foo17getXFromRectSwift{{.*}}(%VSC6MyRect*, [[SELF:%.*]]*) {{.*}} {
   // armv7-ios: [[COERCED:%.*]] = alloca [[MYRECT:%.*MyRect.*]], align 4
   // armv7-ios: [[SEL:%.*]] = load i8*, i8** @"\01L_selector(getXFromRect:)", align 4
   // armv7-ios: [[CAST:%.*]] = bitcast [[MYRECT]]* [[COERCED]] to [4 x i32]*
   // armv7-ios: [[LOADED:%.*]] = load [4 x i32], [4 x i32]* [[CAST]]
-  // armv7-ios: [[SELFCAST:%.*]] = bitcast [[SELF]]* %4 to i8*
+  // armv7-ios: [[SELFCAST:%.*]] = bitcast [[SELF]]* %1 to i8*
  // armv7-ios: [[RESULT:%.*]] = call float bitcast (void ()* @objc_msgSend to float (i8*, i8*, [4 x i32])*)(i8* [[SELFCAST]], i8* [[SEL]], [4 x i32] [[LOADED]])
   func getXFromRectSwift(r: MyRect) -> Float {
     return getXFromRect(r)
@@ -85,7 +85,7 @@ class Foo {
   }

   // Make sure the caller-side from Swift also uses indirect-byval for the argument
-  // x86_64-macosx: define hidden float @_TFC8abitypes3Foo25getXFromRectIndirectSwift{{.*}}(float, float, float, float, %C8abitypes3Foo*) {{.*}} {
+  // x86_64-macosx: define hidden float @_TFC8abitypes3Foo25getXFromRectIndirectSwift{{.*}}(%VSC6MyRect*, %C8abitypes3Foo*) {{.*}} {
   func getXFromRectIndirectSwift(r: MyRect) -> Float {
     let f : Float = 1.0;
     // x86_64-macosx: [[TEMP:%.*]] = alloca [[TEMPTYPE:%.*]], align 4
@@ -361,10 +361,10 @@ class Foo {
     x.alDente()
   }

-  // arm64-ios: define hidden void @_TFC8abitypes3Foo14callJustReturn{{.*}}(%VSC9BigStruct* noalias sret, %CSo13StructReturns*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %C8abitypes3Foo*) {{.*}} {
+  // arm64-ios: define hidden void @_TFC8abitypes3Foo14callJustReturn{{.*}}(%VSC9BigStruct* noalias sret, %CSo13StructReturns*, %VSC9BigStruct*, %C8abitypes3Foo*) {{.*}} {
   // arm64-ios: define hidden void @_TToFC8abitypes3Foo14callJustReturnfS0_FTCSo13StructReturns4withVSC9BigStruct_S2_(%VSC9BigStruct* noalias sret, i8*, i8*, [[OPAQUE:.*]]*, %VSC9BigStruct*) unnamed_addr {{.*}} {
   //
-  // arm64-tvos: define hidden void @_TFC8abitypes3Foo14callJustReturn{{.*}}(%VSC9BigStruct* noalias sret, %CSo13StructReturns*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %C8abitypes3Foo*) {{.*}} {
+  // arm64-tvos: define hidden void @_TFC8abitypes3Foo14callJustReturn{{.*}}(%VSC9BigStruct* noalias sret, %CSo13StructReturns*, %VSC9BigStruct*, %C8abitypes3Foo*) {{.*}} {
   // arm64-tvos: define hidden void @_TToFC8abitypes3Foo14callJustReturnfS0_FTCSo13StructReturns4withVSC9BigStruct_S2_(%VSC9BigStruct* noalias sret, i8*, i8*, [[OPAQUE:.*]]*, %VSC9BigStruct*) unnamed_addr {{.*}} {
   dynamic func callJustReturn(r: StructReturns, with v: BigStruct) -> BigStruct {
     return r.justReturn(v)
@@ -380,12 +380,12 @@ class Foo {
 // armv7-ios: define internal void @makeOne(%struct.One* noalias sret %agg.result, float %f, float %s)

 // rdar://17631440 - Expand direct arguments that are coerced to aggregates.
-// x86_64-macosx: define float @_TF8abitypes13testInlineAggFVSC6MyRectSf(float, float, float, float) {{.*}} {
+// x86_64-macosx: define float @_TF8abitypes13testInlineAggFVSC6MyRectSf(%VSC6MyRect*) {{.*}} {
 // x86_64-macosx: [[COERCED:%.*]] = alloca %VSC6MyRect, align 4
-// x86_64-macosx: store float %0,
-// x86_64-macosx: store float %1,
-// x86_64-macosx: store float %2,
-// x86_64-macosx: store float %3,
+// x86_64-macosx: store float %
+// x86_64-macosx: store float %
+// x86_64-macosx: store float %
+// x86_64-macosx: store float %
 // x86_64-macosx: [[CAST:%.*]] = bitcast %VSC6MyRect* [[COERCED]] to { <2 x float>, <2 x float> }*
 // x86_64-macosx: [[T0:%.*]] = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* [[CAST]], i32 0, i32 0
 // x86_64-macosx: [[FIRST_HALF:%.*]] = load <2 x float>, <2 x float>* [[T0]], align 4
