Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
87 changes: 87 additions & 0 deletions Benchmarks/Benchmarks/DataIO/DataIO.swift
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
//===----------------------------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2022-2023 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//

import Benchmark
import func Benchmark.blackHole
@testable import FoundationEssentials

#if canImport(Glibc)
import Glibc
#endif
#if canImport(Darwin)
import Darwin
#endif

/// Returns a unique, not-yet-existing path inside the temporary directory,
/// suitable for scratch files that a benchmark creates and later deletes.
func testPath() -> String {
    // A fresh UUID per call keeps repeated/concurrent runs from colliding.
    let uniqueName = "testfile-\(UUID().uuidString)"
    return String.temporaryDirectoryPath.appendingPathComponent(uniqueName)
}

/// Builds the shared 16 MB payload used by the file I/O benchmarks.
///
/// The buffer is zero-initialized except for the first 128 bytes, which are
/// set to small random non-zero values so the file is guaranteed not to be
/// all zeros. Ownership of the allocation is transferred to the returned
/// `Data` via the `.free` deallocator.
func generateTestData() -> Data {
    // 16 MB file, big enough to trigger things like chunking
    let count = 1 << 24

    // Use calloc rather than malloc so the tail of the buffer is
    // deterministically zero instead of ~16 MB of uninitialized memory.
    let memory = calloc(count, 1)!
    let ptr = memory.bindMemory(to: UInt8.self, capacity: count)

    // Set a few bytes so we're sure to not be all zeros
    let buf = UnsafeMutableBufferPointer(start: ptr, count: count)
    for i in 0..<128 {
        buf[i] = UInt8.random(in: 1..<42)
    }

    return Data(bytesNoCopy: ptr, count: count, deallocator: .free)
}

/// Best-effort removal of the file at `path`.
///
/// Failures — for example the file never having been created — are
/// deliberately ignored, since benchmarks only need the scratch file gone.
func cleanup(at path: String) {
    if unlink(path) != 0 {
        // Ignore any errors
    }
}

// Shared fixtures: one 16 MB payload reused by every benchmark, and a stable
// path used by the read benchmark's setup/teardown closures.
let data = generateTestData()
let readMe = testPath()

let benchmarks = {
Benchmark.defaultConfiguration.maxIterations = 1_000_000_000
Benchmark.defaultConfiguration.maxDuration = .seconds(3)
Benchmark.defaultConfiguration.scalingFactor = .kilo
// Benchmark.defaultConfiguration.metrics = .arc + [.cpuTotal, .wallClock, .mallocCountTotal, .throughput] // use ARC to see traffic
// Benchmark.defaultConfiguration.metrics = [.cpuTotal, .wallClock, .mallocCountTotal, .throughput] // skip ARC as it has some overhead
Benchmark.defaultConfiguration.metrics = .all // Use all metrics to easily see which ones are of interest for this benchmark suite

Benchmark("read-write-emptyFile") { benchmark in
    // Round-trips an empty Data value to measure the fixed per-call file
    // I/O overhead (open/close, metadata) with no payload cost.
    let path = testPath()
    let data = Data()
    try data.write(to: path)
    // Feed the read result to blackHole so the optimizer cannot discard it
    // (consistent with read-regularFile; also removes the unused-variable
    // warning the previous `let read =` produced).
    blackHole(try Data(contentsOf: path, options: []))
    cleanup(at: path)
}

// Writes the shared 16 MB payload to a fresh temporary file each iteration,
// then deletes it — measures raw Data.write(to:) throughput.
Benchmark("write-regularFile") { benchmark in
let path = testPath()
try data.write(to: path)
cleanup(at: path)
}

Benchmark("read-regularFile",
configuration: .init(
setup: {
try! data.write(to: readMe)
},
teardown: {
cleanup(at: readMe)
}
)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There's an initializer with trailing setup/teardown closures that you probably would prefer for readability...

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

By the way, do you have any advice on one-target-many-benchmarks vs many-targets-one-benchmark?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That's a good question and the answer is "it depends", but a few reflections:

I think it in general makes sense to group benchmarks together in one target which you want to check together (i.e. they depend on the same class, set of classes, or module / component which is under test) just as a convenience for the engineer who wants to validate performance iteratively while making changes to a specific part of the software. For CI the structure just doesn't matter as one would typically run all of the benchmarks anyway, so the structure is just optimising for a) ease-of-running during interactive development and b) ease-of-updating/changing benchmarks as they are related.

It is similar to unit tests in nature that you usually want to have tests that are related to each other in a "suite" (one target for benchmarks).

The Benchmark package supports filtering/skipping of both targets and benchmarks with regexes:

--filter <filter>
Benchmarks matching the regexp filter that should be run
--skip <skip>
Benchmarks matching the regexp filter that should be skipped
--target <target>
Benchmark targets matching the regexp filter that should be run
--skip-target <skip-target>
Benchmark targets matching the regexp filter that should be skipped

Which makes it easy to run partial sets of targets/benchmarks if using a thoughtful naming convention too.

One difference is that you might have benchmarks that are both similar to unit tests, but also much more complex benchmarks that are more similar to integration tests, so the answer depends a bit also on the flavour of benchmarks you do.

For longer more complex benchmarks "integration test"-style, it's a good idea to do as swift-nio did and just break out the actual code under test into separate files/targets and just have the benchmark file itself be benchmark configuration and basically a driver - see e.g. https://github.com/apple/swift-nio/blob/main/Benchmarks/Benchmarks/NIOPosixBenchmarks/Benchmarks.swift

While simpler benchmarks can be done inline...

So more concretely for Foundation, I'd probably consider a structure with targets with a naming convention, something along the lines:

FoundationEssentials
FoundationEssentials-Date
FoundationEssentials-Serialization
FoundationEssentials-Predicate
FoundationInternationalization
FoundationNetworking
FoundationNetworking-URLSession

Etc. — basically a common target (e.g. FoundationEssentials) where most benchmarks for that module reside, but then having additional targets for whatever types might need more complex sets of benchmarks (I just picked some examples above of how it could look) - this allows for regex filtering if wanting to run benchmarks for a given part of Foundation easily with e.g. swift package benchmark --target "FoundationEssentials.*", but doesn't make the benchmarks balloon out of control in terms of size in a single file. But that's me, YMMV :-)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for that detailed response. I think the approach we'll take in the short term is to just focus on getting a base level of tests in. Once we see how/if/when we use them, we can always refactor it to match requirements for CI or easier at-desk usage.

) { benchmark in
blackHole(try Data(contentsOf: readMe))
}
}
12 changes: 12 additions & 0 deletions Benchmarks/Package.swift
Original file line number Diff line number Diff line change
Expand Up @@ -44,5 +44,17 @@ let package = Package(
.plugin(name: "BenchmarkPlugin", package: "package-benchmark")
]
),
.executableTarget(
name: "DataIOBenchmarks",
dependencies: [
.product(name: "FoundationEssentials", package: "swift-foundation-local"),
.product(name: "Benchmark", package: "package-benchmark"),
],
path: "Benchmarks/DataIO",
plugins: [
.plugin(name: "BenchmarkPlugin", package: "package-benchmark")
]
),

]
)
12 changes: 10 additions & 2 deletions Sources/FoundationEssentials/CocoaError.swift
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ public struct CocoaError : CustomNSError, _StoredError, Hashable {
public let code: Code
public let userInfo: [String: AnyHashable]

public init(code: Code, userInfo: [String: AnyHashable]) {
public init(_ code: Code, userInfo: [String: AnyHashable] = [:]) {
self.code = code
self.userInfo = userInfo
}
Expand Down Expand Up @@ -187,7 +187,7 @@ extension CocoaError {
if let url = url {
info["NSURLErrorKey"] = url
}
return CocoaError(code: code, userInfo: info)
return CocoaError(code, userInfo: info)
}
#endif
}
Expand Down Expand Up @@ -275,3 +275,11 @@ public extension Error {
#endif


#if !FOUNDATION_FRAMEWORK
// Stand-ins for the NSError user-info keys that the framework declares via
// the NSErrorUserInfoKey typedef for String; loosely typed here as plain
// string constants so non-framework builds can populate userInfo the same way.
internal let NSUnderlyingErrorKey = "NSUnderlyingErrorKey"
internal let NSUserStringVariantErrorKey = "NSUserStringVariantErrorKey"
internal let NSFilePathErrorKey = "NSFilePathErrorKey"
internal let NSURLErrorKey = "NSURLErrorKey"
#endif
94 changes: 94 additions & 0 deletions Sources/FoundationEssentials/Data/Data+Error.swift
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
//===----------------------------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//

#if FOUNDATION_FRAMEWORK
// For Logger
@_implementationOnly import os
@_implementationOnly import _ForSwiftFoundation
@_implementationOnly import _CShims
#else
package import _CShims
#endif

#if canImport(Darwin)
import Darwin
#elseif canImport(Glibc)
import Glibc
#endif

/// Builds the `CocoaError` corresponding to a failed file read or write.
///
/// - Parameters:
///   - posixErrno: The `errno` value reported by the failing syscall.
///   - path: The file involved, recorded in the user info as either a path
///     string (`NSFilePathErrorKey`) or a URL (`NSURLErrorKey`), if provided.
///   - reading: `true` maps the errno onto the `.fileRead*` codes,
///     `false` onto the `.fileWrite*` codes.
///   - variant: Optional operation variant; stored wrapped in an array under
///     `NSUserStringVariantErrorKey`.
///   - extraUserInfo: Additional entries merged into the user info,
///     overriding any keys assembled here on conflict.
/// - Returns: A `CocoaError` carrying the mapped code and assembled user info.
internal func fileReadingOrWritingError(posixErrno: Int32, path: PathOrURL?, reading: Bool, variant: String? = nil, extraUserInfo: [String: AnyHashable] = [:]) -> Error {
    // Translate the raw errno into the closest CocoaError code for the
    // direction of the operation.
    let code: CocoaError.Code = {
        if reading {
            switch posixErrno {
            case EFBIG:
                return .fileReadTooLarge
            case ENOENT:
                return .fileReadNoSuchFile
            case EPERM, EACCES:
                return .fileReadNoPermission
            case ENAMETOOLONG:
                return .fileReadInvalidFileName
            default:
                return .fileReadUnknown
            }
        } else {
            switch posixErrno {
            case ENOENT:
                return .fileNoSuchFile
            case EPERM, EACCES:
                return .fileWriteNoPermission
            case ENAMETOOLONG:
                return .fileWriteInvalidFileName
            case EDQUOT, ENOSPC:
                return .fileWriteOutOfSpace
            case EROFS:
                return .fileWriteVolumeReadOnly
            case EEXIST:
                return .fileWriteFileExists
            default:
                return .fileWriteUnknown
            }
        }
    }()

    var userInfo: [String: AnyHashable] = [:]

    // Preserve the originating POSIX error when the errno is a known code.
    if let posixError = POSIXErrorCode(rawValue: posixErrno) {
        userInfo[NSUnderlyingErrorKey] = POSIXError(posixError)
    }

    if let variant {
        userInfo[NSUserStringVariantErrorKey] = [variant]
    }

    // Record the location under the key matching its representation.
    switch path {
    case .path(let pathString):
        userInfo[NSFilePathErrorKey] = pathString
    case .url(let url):
        userInfo[NSURLErrorKey] = url
    case nil:
        break
    }

    // Caller-supplied entries win over anything assembled above.
    userInfo.merge(extraUserInfo) { _, extra in extra }

    return CocoaError(code, userInfo: userInfo)
}

/// Logs a failed file-I/O syscall: the errno value plus its strerror(3)
/// description, tagged with the call site named by `place`.
///
/// Compiles to a no-op outside the Foundation framework build (and on
/// bridgeOS, where the os Logger path is unavailable).
internal func logFileIOErrno(_ err: Int32, at place: String) {
#if FOUNDATION_FRAMEWORK && !os(bridgeOS)
let errnoDesc = String(cString: strerror(err))
Logger(_NSOSLog()).error("Encountered \(place) failure \(err) \(errnoDesc)")
#endif
}
Loading