diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index 260ac9a..0000000 Binary files a/.DS_Store and /dev/null differ diff --git a/.gitignore b/.gitignore index 330d167..4f20167 100644 --- a/.gitignore +++ b/.gitignore @@ -88,3 +88,4 @@ fastlane/test_output # https://github.com/johnno1962/injectionforxcode iOSInjectionProject/ +.DS_Store diff --git a/.spi.yml b/.spi.yml new file mode 100644 index 0000000..544a002 --- /dev/null +++ b/.spi.yml @@ -0,0 +1,4 @@ +version: 1 +builder: + configs: + - documentation_targets: [openai-async-image-swiftui] diff --git a/LICENSE b/LICENSE index b2bea10..f148400 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023 Igor +Copyright (c) 2023 Igor Shelopaev Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Package.resolved b/Package.resolved index c686312..6d6537d 100644 --- a/Package.resolved +++ b/Package.resolved @@ -3,19 +3,28 @@ { "identity" : "async-http-client", "kind" : "remoteSourceControl", - "location" : "https://github.com/The-Igor/async-http-client.git", + "location" : "https://github.com/swiftuiux/async-http-client.git", "state" : { - "revision" : "8be7d1c1f036350a8109b075052a71d3353ad366", - "version" : "1.4.4" + "revision" : "936a6e953d3c3e05a14c3d852fea9955e57c9854", + "version" : "1.5.0" + } + }, + { + "identity" : "async-task", + "kind" : "remoteSourceControl", + "location" : "https://github.com/swiftuiux/async-task.git", + "state" : { + "revision" : "d05dc1ec967813392da38e3501dfe666098baaec", + "version" : "1.2.5" } }, { "identity" : "retry-policy-service", "kind" : "remoteSourceControl", - "location" : "https://github.com/The-Igor/retry-policy-service.git", + "location" : "https://github.com/swiftuiux/retry-policy-service.git", "state" : { - "revision" : "46ded002161a95c6b08ddd02c3f319891c773d14", - "version" : "1.0.0" + "revision" : "2a6a1f057fbf77337dfc73db98bd3d538127b3e2", + "version" : "1.0.1" } } ], diff --git a/Package.swift b/Package.swift index 793984b..2dde9bb 100644 --- a/Package.swift +++ b/Package.swift @@ -14,14 +14,15 @@ let package = Package( ], dependencies: [ // Dependencies declare other packages that this package depends on. - .package(url: "https://github.com/The-Igor/async-http-client.git", from: "1.4.4") + .package(url: "https://github.com/swiftuiux/async-http-client.git", from: "1.5.0"), + .package(url: "https://github.com/swiftuiux/async-task.git", from: "1.2.5") ], targets: [ // Targets are the basic building blocks of a package. A target can define a module or a test suite. // Targets can depend on other targets in this package, and on products in packages this package depends on. .target( name: "openai-async-image-swiftui", - dependencies: ["async-http-client"]), + dependencies: ["async-http-client", "async-task"]), .testTarget( name: "openai-async-image-swiftuiTests", dependencies: ["openai-async-image-swiftui"]), diff --git a/README.md b/README.md index fe539a0..f2fba21 100644 --- a/README.md +++ b/README.md @@ -2,20 +2,22 @@ SwiftUI view that asynchronously loads and displays an OpenAI image from open API -You just type in any your idea and AI will give you an art solution +### Please star the repository if you believe continuing the development of this package is worthwhile. This will help me understand which package deserves more effort. 
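For context, a minimal sketch of how a consuming project might declare this package in its own manifest, mirroring the dependency style used in the `Package.swift` hunk above. The consumer target name, product name, and version requirement are illustrative assumptions, not taken from this diff:

```swift
// swift-tools-version:5.7
// Hypothetical consumer manifest — names and version requirements are placeholders.
import PackageDescription

let package = Package(
    name: "MyApp",
    platforms: [.iOS(.v15), .macOS(.v12), .tvOS(.v15), .watchOS(.v8)],
    dependencies: [
        // The package this diff modifies; version is illustrative.
        .package(url: "https://github.com/swiftuiux/openai-async-image-swiftui.git", from: "1.0.0")
    ],
    targets: [
        .target(
            name: "MyApp",
            dependencies: [
                // Product name assumed to match the target name declared above.
                .product(name: "openai-async-image-swiftui",
                         package: "openai-async-image-swiftui")
            ]
        )
    ]
)
```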
-DALL-E and DALL-E 2 are deep learning models developed by OpenAI to generate digital images from natural language descriptions, called "prompts" +[![](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2Fswiftuiux%2Fopenai-async-image-swiftui%2Fbadge%3Ftype%3Dplatforms)](https://swiftpackageindex.com/swiftuiux/openai-async-image-swiftui) + +## [Example for the package](https://github.com/swiftuiux/openai-async-image-swiftui-example) +## [Documentation(API)](https://swiftpackageindex.com/swiftuiux/openai-async-image-swiftui/main/documentation/openai_async_image_swiftui) -[![](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2FThe-Igor%2Fopenai-async-image-swiftui%2Fbadge%3Ftype%3Dswift-versions)](https://swiftpackageindex.com/The-Igor/openai-async-image-swiftui) -[![](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2FThe-Igor%2Fopenai-async-image-swiftui%2Fbadge%3Ftype%3Dplatforms)](https://swiftpackageindex.com/The-Igor/openai-async-image-swiftui) ## Features -- [x] Multiplatform iOS, macOS, watchOS and tvOS -- [x] Customizable in term of SwiftUI Image specs [renderingMode, resizable, antialiased...] -- [x] Customizable in term of the transport layer [Loader] -- [x] Based on interfaces not implementations +- [x] Supports multiple platforms: iOS, macOS, watchOS, and tvOS +- [x] Customizable with SwiftUI Image properties (e.g., `renderingMode`, `resizable`, `antialiased`) +- [x] Configurable transport layer via custom `Loader` +- [x] Designed with interfaces, not implementations +- [x] Fully leverages Swift's new concurrency model - ![OpenAI AsyncImage SwiftUI](https://github.com/The-Igor/openai-async-image-swiftui/blob/main/image/sun_watch.png) + ![OpenAI AsyncImage SwiftUI](https://github.com/swiftuiux/openai-async-image-swiftui/blob/main/image/sun_watch.png) ## How to use @@ -59,25 +61,27 @@ or with custom **ViewBuilder** | tpl | Custom view builder tpl | | loader | Custom loader if you need something specific| - ![OpenAI AsyncImage SwiftUI](https://github.com/The-Igor/openai-async-image-swiftui/blob/main/image/appletv_art.png) + ![OpenAI AsyncImage SwiftUI](https://github.com/swiftuiux/openai-async-image-swiftui/blob/main/image/appletv_art.png) ## Documentation(API) - You need to have Xcode 13 installed in order to have access to Documentation Compiler (DocC) - Go to Product > Build Documentation or **โŒƒโ‡งโŒ˜ D** -## SwiftUI example for the package - -[OpenAI AsyncImage SwiftUI example](https://github.com/The-Igor/openai-async-image-swiftui-example) - -![OpenAI AsyncImage SwiftUI](https://github.com/The-Igor/openai-async-image-swiftui/blob/main/image/sun_11.png) +![OpenAI AsyncImage SwiftUI](https://github.com/swiftuiux/openai-async-image-swiftui/blob/main/image/sun_11.png) +## More Stable Diffusion examples -## Replicate toolkit for swift. Set of diffusion models +### Replicate toolkit for swift. Set of diffusion models Announced in 2022, OpenAI's text-to-image model DALL-E 2 is a recent example of diffusion models. It uses diffusion models for both the model's prior (which produces an image embedding given a text caption) and the decoder that generates the final image. In machine learning, diffusion models, also known as diffusion probabilistic models, are a class of latent variable models. They are Markov chains trained using variational inference. 
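A minimal usage sketch of the component with the custom **ViewBuilder** template described in the parameter table above. The initializer and `ImageState` cases are taken from `OpenAIAsyncImage.swift` later in this diff; the surrounding view, prompt text, and module import name (derived from the target name) are illustrative:

```swift
import SwiftUI
import openai_async_image_swiftui

struct ArtView: View {
    // Illustrative prompt; the API accepts up to 1000 characters.
    @State private var prompt = "A lighthouse at sunset, watercolor"

    var body: some View {
        // Uses the default loader from the environment; it must be configured
        // with an OpenAI API key (see the endpoint and environment key below),
        // or pass a custom loader as sketched at the end of this diff.
        OpenAIAsyncImage(prompt: $prompt, size: .dpi1024, model: .dalle3) { state in
            switch state {
            case .loading:
                ProgressView()
            case .loaded(let image):
                image
                    .resizable()
                    .scaledToFit()
            case .loadError(let error):
                Text(error.localizedDescription)
            }
        }
    }
}
```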
The goal of diffusion models is to learn the latent structure of a dataset by modeling the way in which data points diffuse through the latent space. Diffusion models can be applied to a variety of tasks, including image denoising, inpainting, super-resolution, and image generation. For example, an image generation model would start with a random noise image and then, after having been trained reversing the diffusion process on natural images, the model would be able to generate new natural images. -[Replicate kit](https://github.com/The-Igor/replicate-kit-swift) +[Replicate kit](https://github.com/swiftuiux/replicate-kit-swift) + + +![The concept](https://github.com/swiftuiux/replicate-kit-swift/raw/main/img/image_02.png) +### CoreML Stable Diffusion +[The example app](https://github.com/swiftuiux/coreml-stable-diffusion-swift-example) for running text-to-image or image-to-image models to generate images using Apple's Core ML Stable Diffusion implementation -![The concept](https://github.com/The-Igor/replicate-kit-swift/raw/main/img/image_02.png) +![The concept](https://github.com/swiftuiux/coreml-stable-diffusion-swift-example/blob/main/img/img_01.png) diff --git a/Sources/openai-async-image-swiftui/OpenAIAsyncImage.swift b/Sources/openai-async-image-swiftui/OpenAIAsyncImage.swift index ce59921..dc37f41 100644 --- a/Sources/openai-async-image-swiftui/OpenAIAsyncImage.swift +++ b/Sources/openai-async-image-swiftui/OpenAIAsyncImage.swift @@ -1,64 +1,69 @@ // // OpenAIAsyncImage.swift -// +// // // Created by Igor on 18.02.2023. // import SwiftUI +import async_task fileprivate typealias ImageSize = OpenAIImageSize +fileprivate typealias TaskModel = Async.SingleTask /// Async image component to load and show OpenAI image from OpenAI image API @available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) public struct OpenAIAsyncImage: View { - /// Custom view builder tpl + /// Task model for managing image loading cycle + @StateObject private var taskModel = TaskModel(errorMapper: errorMapper) + + /// Custom view builder template type alias public typealias ImageProcess = (ImageState) -> Content - /// Default loader + /// Default loader, injected from environment @Environment(\.openAIDefaultLoader) var defaultLoader : OpenAIDefaultLoader - - // MARK: - Private properties - - /// OpenAI image - @State private var image: Image? - - /// Error - @State private var error: Error? - - /// Current task - @State private var task : Task? - + // MARK: - Config - /// A text description of the desired image(s). The maximum length is 1000 characters + /// A binding to the text prompt describing the desired image. The maximum length is 1000 characters @Binding var prompt : String - /// Custom loader + /// Optional custom loader conforming to `IOpenAILoader` protocol let loader : T? - /// Image size + /// The size of the image to be generated let size : OpenAIImageSize - /// Custom view builder tpl + /// Optional custom view builder template let tpl : ImageProcess? - // MARK: - Life circle + /// Dall-e model type + let model : DalleModel + + // MARK: - Life cycle + /// Initializes a view model for generating images using the OpenAI API with customizable parameters. /// - Parameters: - /// - prompt: A text description of the desired image(s). The maximum length is 1000 characters - /// - size: The size of the generated images. 
Must be one of 256x256, 512x512, or 1024x1024 - /// - tpl: Custom view builder tpl - /// - loader: Custom loader + /// - prompt: A `Binding` to a `String` that represents a text description of the desired image(s). + /// The maximum length for the prompt is 1000 characters. + /// - size: The size of the generated images, specified as an `OpenAIImageSize`. + /// Defaults to `.dpi256`. Must be one of `.dpi256` (256x256), `.dpi512` (512x512), or `.dpi1024` (1024x1024). + /// - model: The `DalleModel` specifying which model to use for generating the image(s). + /// Defaults to `.dalle2`. + /// - tpl: A custom SwiftUI `ViewBuilder` template for processing or rendering the generated image(s). + /// - loader: A custom loader conforming to the `IOpenAILoader` protocol, responsible for handling + /// the image generation process, such as communicating with the OpenAI API. public init( - prompt : Binding, - size : OpenAIImageSize = .dpi256, - @ViewBuilder tpl : @escaping ImageProcess, - loader : T - ){ + prompt: Binding, + size: OpenAIImageSize = .dpi256, + model: DalleModel = .dalle2, + @ViewBuilder tpl: @escaping ImageProcess, + loader: T + ) { self._prompt = prompt self.size = size + self.model = model self.tpl = tpl self.loader = loader } @@ -74,97 +79,91 @@ public struct OpenAIAsyncImage: View { } } .onChange(of: prompt){ _ in - cancelTask() - clear() - task = getTask() + start() } .onAppear { - task = getTask() + start() } .onDisappear{ - cancelTask() + cancel() } } - // MARK: - Private + // MARK: - Private methods - /// - Returns: Current image state status + /// - Returns: The current image state status private func getState () -> ImageState{ - if let image { return .loaded(image) } - else if let error { return .loadError(error)} + if let image = taskModel.value { return .loaded(image) } + else if let error = taskModel.error { return .loadError(error)} return .loading } - /// Load using default loader - /// - Returns: OpenAI image - private func loadImageDefault(_ prompt : String, with size : ImageSize) async throws -> Image{ - try await defaultLoader.load(prompt, with: size) + /// Loads an image using the default loader. + /// - Parameters: + /// - prompt: The text prompt describing the desired image content. + /// - size: The dimensions of the generated image, specified as `ImageSize`. + /// - model: The `DalleModel` specifying the AI model to use for image generation. + /// - Returns: A generated `Image` object if successful. + /// - Throws: An error if the image generation fails. + private func loadImageDefault( + _ prompt: String, + with size: ImageSize, + model: DalleModel + ) async throws -> Image { + try await defaultLoader.load(prompt, with: size, model: model) } - - /// Load image by text + + /// Loads an image using a provided loader, or falls back to the default loader if none is provided. /// - Parameters: - /// - prompt: Text - /// - size: Image size - /// - Returns: Open AI Image - private func loadImage(_ prompt : String, with size : ImageSize) async -> Image?{ - do{ - if let loader = loader{ - return try await loader.load(prompt, with: size) + /// - prompt: The text prompt describing the desired image content. + /// - size: The dimensions of the generated image, specified as `ImageSize`. + /// - model: The `DalleModel` specifying the AI model to use for image generation. + /// - Returns: An `Image` object if successful, or `nil` if the operation fails or is cancelled. 
+ private func loadImage( + _ prompt: String, + with size: ImageSize, + model: DalleModel + ) async throws -> Image? { + if let loader = loader { + return try await loader.load(prompt, with: size, model: model) } - - return try await loadImageDefault(prompt, with: size) - }catch{ - if !Task.isCancelled{ - self.error = error - } - - return nil - } + return try await loadImageDefault(prompt, with: size, model: model) } - - /// - Parameter value: OpenAI image - @MainActor - private func setImage(_ value : Image){ - image = value - } - - /// Clear properties - @MainActor - private func clear(){ - image = nil - error = nil - } - - private func cancelTask(){ - task?.cancel() - task = nil + + /// Creates and returns a task to fetch the OpenAI image + /// - Returns: A task that fetches the OpenAI image + private func start(){ + taskModel.start{ + try await loadImage(prompt, with: size, model: model) + } } - /// - Returns: Task to fetch OpenAI image - private func getTask() -> Task{ - Task{ - if let image = await loadImage(prompt, with: size){ - await setImage(image) - } - } + /// Cancel task + private func cancel(){ + taskModel.cancel() } } -// MARK: - Extension public - +// MARK: - Public extensions - public extension OpenAIAsyncImage where Content == EmptyView, T == OpenAIDefaultLoader{ + /// Convenience initializer for creating an instance with the default loader and no custom view template. /// - Parameters: - /// - prompt: Text - /// - size: Image size + /// - prompt: A `Binding` to a `String` containing the text prompt that describes the desired image content. + /// - size: The desired size of the generated image, specified as an `OpenAIImageSize`. + /// Defaults to `.dpi256`. + /// - model: The `DalleModel` specifying the AI model to use for image generation. Defaults to `.dalle2`. init( - prompt : Binding, - size : OpenAIImageSize = .dpi256 - ){ + prompt: Binding, + size: OpenAIImageSize = .dpi256, + model: DalleModel = .dalle2 + ) { self._prompt = prompt self.size = size + self.model = model self.tpl = nil self.loader = nil } @@ -172,29 +171,52 @@ public extension OpenAIAsyncImage where Content == EmptyView, T == OpenAIDefault public extension OpenAIAsyncImage where T == OpenAIDefaultLoader{ + /// Convenience initializer for creating an instance with the default loader and a custom view template. /// - Parameters: - /// - prompt: Text - /// - size: Image size - /// - tpl: View tpl + /// - prompt: A `Binding` to a `String` containing the text prompt that describes the desired image content. + /// - size: The desired size of the generated image, specified as an `OpenAIImageSize`. Defaults to `.dpi256`. + /// - model: The `DalleModel` specifying the AI model to use for image generation. Defaults to `.dalle2`. + /// - tpl: A SwiftUI `@ViewBuilder` closure that provides a custom view template for processing or rendering the generated image. init( - prompt : Binding, - size : OpenAIImageSize = .dpi256, - @ViewBuilder tpl : @escaping ImageProcess - ){ + prompt: Binding, + size: OpenAIImageSize = .dpi256, + model: DalleModel = .dalle2, + @ViewBuilder tpl: @escaping ImageProcess + ) { self._prompt = prompt self.size = size + self.model = model self.tpl = tpl self.loader = nil } } -// MARK: - File private - +// MARK: - File private functions - +/// A function that builds the appropriate view for a given `ImageState`. +/// - Parameter state: The current state of the image. +/// - Returns: A SwiftUI view representing the current state of the image. 
@ViewBuilder -fileprivate func imageTpl(_ state : ImageState) -> some View{ - switch state{ - case .loaded(let image) : image.resizable() - case .loadError(let error) : Text(error.localizedDescription) - case .loading : ProgressView() +fileprivate func imageTpl(_ state: ImageState) -> some View { + switch state { + case .loaded(let image): + image.resizable() + case .loadError(let error): + Text(error.localizedDescription) + case .loading: + ProgressView() + } +} + +/// Maps an error to a corresponding `AsyncImageErrors` type. +/// - Parameter error: The error to map, which may be `nil`. +/// - Returns: An `AsyncImageErrors` value if the error can be mapped; otherwise, `nil`. +@Sendable +fileprivate func errorMapper(_ error: Error?) -> AsyncImageErrors? { + if error is CancellationError { + return .cancellationError } + + // Return nil for other errors + return nil } diff --git a/Sources/openai-async-image-swiftui/enum/AsyncImageErrors.swift b/Sources/openai-async-image-swiftui/enum/AsyncImageErrors.swift index 11055d4..140be38 100644 --- a/Sources/openai-async-image-swiftui/enum/AsyncImageErrors.swift +++ b/Sources/openai-async-image-swiftui/enum/AsyncImageErrors.swift @@ -1,23 +1,80 @@ // // AsyncImageErrors.swift -// +// // // Created by Igor on 18.02.2023. // import Foundation +import async_http_client -/// Set of errors for ``OpenAIAsyncImage`` @available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) -enum AsyncImageErrors: Error, Equatable{ - - /// Could not create Image from uiImage - case imageInit - - /// Client not found - the reason url in not valid - case clientIsNotDefined - - /// response returned no images - case returnedNoImages +/// Enum representing different errors that can occur when loading images asynchronously +enum AsyncImageErrors: Error { + case imageInit // Error initializing an image from data + case clientIsNotDefined // HTTP client is not defined + case returnedNoImages // No images were returned in the response + case httpStatus(String) // HTTP status error with a message + case responseError(Error) // Generic response error + case cancellationError +} + +@available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) +extension AsyncImageErrors: LocalizedError { + public var errorDescription: String? { + switch self { + case .imageInit: + return NSLocalizedString("Unable to create image from the provided data.", comment: "") + case .clientIsNotDefined: + return NSLocalizedString("Client not found. The URL might be invalid.", comment: "") + case .returnedNoImages: + return NSLocalizedString("The response did not contain any images.", comment: "") + case .httpStatus(let description): + return NSLocalizedString(description, comment: "") + case .responseError(let error): + return error.localizedDescription + case .cancellationError: + return NSLocalizedString("Cancellation error.", comment: "") + } + } +} + +@available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) +extension AsyncImageErrors { + /// Handles errors that occur during the request + /// - Parameter error: The error that occurred + /// - Returns: An instance of `AsyncImageErrors` + static func handleRequest(_ error: Error) -> AsyncImageErrors { + if let httpError = error as? 
Http.Errors, + case let .status(_, _, data) = httpError, + let responseData = data { + return decodeErrorResponse(from: responseData) + } + return .responseError(error) + } +} + +/// Decodes the error response data +/// - Parameter responseData: The response data to decode +/// - Returns: An instance of `AsyncImageErrors` with a decoded message +fileprivate func decodeErrorResponse(from responseData: Data) -> AsyncImageErrors { + if let apiResponse = try? JSONDecoder().decode(ErrorResponseWrapper.self, from: responseData) { + return .httpStatus(apiResponse.error.message) + } + let dataString = String(data: responseData, encoding: .utf8) ?? "Unable to decode data" + return .httpStatus(dataString) +} + +/// Defines the structure for the inner "error" object in the API response +fileprivate struct ErrorResponse: Decodable { + let code: String? + let message: String + let param: String? + let type: String +} + +/// Defines the structure for the overall response wrapper containing the error object +fileprivate struct ErrorResponseWrapper: Decodable { + let error: ErrorResponse } diff --git a/Sources/openai-async-image-swiftui/enum/DalleModel.swift b/Sources/openai-async-image-swiftui/enum/DalleModel.swift new file mode 100644 index 0000000..43dbcac --- /dev/null +++ b/Sources/openai-async-image-swiftui/enum/DalleModel.swift @@ -0,0 +1,13 @@ +// +// DalleModel.swift +// openai-async-image-swiftui +// +// Created by Igor on 26.11.24. +// + +public enum DalleModel: String{ + + case dalle2 = "dall-e-2" + + case dalle3 = "dall-e-3" +} diff --git a/Sources/openai-async-image-swiftui/enum/ImageState.swift b/Sources/openai-async-image-swiftui/enum/ImageState.swift index c302014..e10ecb9 100644 --- a/Sources/openai-async-image-swiftui/enum/ImageState.swift +++ b/Sources/openai-async-image-swiftui/enum/ImageState.swift @@ -7,17 +7,16 @@ import SwiftUI -/// Set of states for ``OpenAIAsyncImage`` +/// Enumeration representing the various states of `OpenAIAsyncImage` @available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) -public enum ImageState{ +public enum ImageState { - /// Loading currently + /// State when the image is currently being loaded case loading - /// Loaded + /// State when the image has been successfully loaded case loaded(Image) - /// There's an error happened while fetching + /// State when an error occurred during image fetching case loadError(Error) - } diff --git a/Sources/openai-async-image-swiftui/enum/OpenAIImageSize.swift b/Sources/openai-async-image-swiftui/enum/OpenAIImageSize.swift index 8139966..f9b2998 100644 --- a/Sources/openai-async-image-swiftui/enum/OpenAIImageSize.swift +++ b/Sources/openai-async-image-swiftui/enum/OpenAIImageSize.swift @@ -16,4 +16,8 @@ public enum OpenAIImageSize: String, Encodable{ case dpi512 = "512x512" case dpi1024 = "1024x1024" + + case dpi1792x1024 = "1792x1024" + + case dpi1024x1792 = "1024x1792" } diff --git a/Sources/openai-async-image-swiftui/environmentKey/OpenAIAsyncImageLoaderKey.swift b/Sources/openai-async-image-swiftui/environmentKey/OpenAIAsyncImageLoaderKey.swift index 98fe531..d08a514 100644 --- a/Sources/openai-async-image-swiftui/environmentKey/OpenAIAsyncImageLoaderKey.swift +++ b/Sources/openai-async-image-swiftui/environmentKey/OpenAIAsyncImageLoaderKey.swift @@ -12,7 +12,7 @@ import SwiftUI public struct OpenAIDefaultLoaderKey : EnvironmentKey{ public typealias Value = OpenAIDefaultLoader - public static var defaultValue = OpenAIDefaultLoader(endpoint: OpenAIImageEndpoint.get(with: "")) + public static let defaultValue = 
OpenAIDefaultLoader(endpoint: OpenAIImageEndpoint.get(with: "")) } public extension EnvironmentValues{ diff --git a/Sources/openai-async-image-swiftui/model/Input.swift b/Sources/openai-async-image-swiftui/model/Input.swift index 42a2aa6..17c1118 100644 --- a/Sources/openai-async-image-swiftui/model/Input.swift +++ b/Sources/openai-async-image-swiftui/model/Input.swift @@ -12,6 +12,9 @@ import Foundation /// Given a prompt and/or an input image, the model will generate a new image @available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) struct Input: Encodable{ + + /// dall-e model + let model : String /// A text description of the desired image(s). The maximum length is 1000 characters let prompt: String diff --git a/Sources/openai-async-image-swiftui/model/Output.swift b/Sources/openai-async-image-swiftui/model/Output.swift index b107e52..5919549 100644 --- a/Sources/openai-async-image-swiftui/model/Output.swift +++ b/Sources/openai-async-image-swiftui/model/Output.swift @@ -7,22 +7,24 @@ import Foundation -/// Output format for OpenAI API +/// Structure representing the output format for the OpenAI API response @available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) -struct Output: Decodable{ +struct Output: Decodable { - /// Date and time - let created : Int + /// The creation date and time of the response in UNIX timestamp format + let created: Int - /// Set of images + /// An array of base64 encoded images let data: [Base64] - /// Fist image from the received data set - var firstImage : String?{ + /// The first image from the received data set, if available + var firstImage: String? { data.first?.b64_json } } -struct Base64: Decodable{ - let b64_json : String +/// Structure representing a base64 encoded image +struct Base64: Decodable { + /// The base64 encoded image data in JSON format + let b64_json: String } diff --git a/Sources/openai-async-image-swiftui/net/OpenAIImageEndpoint.swift b/Sources/openai-async-image-swiftui/net/OpenAIImageEndpoint.swift index a038334..c9242ce 100644 --- a/Sources/openai-async-image-swiftui/net/OpenAIImageEndpoint.swift +++ b/Sources/openai-async-image-swiftui/net/OpenAIImageEndpoint.swift @@ -7,45 +7,47 @@ import Foundation -/// Set of specs for access to OpenAPI image resource +/// Struct providing specifications for accessing the OpenAI image resource @available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) -public struct OpenAIImageEndpoint: IOpenAIImageEndpoint{ +public struct OpenAIImageEndpoint: IOpenAIImageEndpoint { - // MARK: - Static + // MARK: - Static Properties - /// Base url to OpenAPI image resource - public static var urlString = "https://api.openai.com" + /// Static base URL for the OpenAI image resource + public static let urlString = "https://api.openai.com" - /// Path to the point - public static var path = "/v1/images/generations" + /// Static path to the specific endpoint for generating images + public static let path = "/v1/images/generations" - /// - Parameter apiKey: Api key for access - /// - Returns: Endpoint - static public func get(with apiKey: String) -> Self{ + /// Creates an instance of `OpenAIImageEndpoint` with the provided API key + /// - Parameter apiKey: API key for accessing the OpenAI API + /// - Returns: Configured instance of `OpenAIImageEndpoint` + public static func get(with apiKey: String) -> Self { .init( urlString: Self.urlString, apiKey: apiKey, - path: Self.path) + path: Self.path + ) } - // MARK: - Config + // MARK: - Instance Properties - /// Base url to OpenAPI image resource + /// Base URL 
for the OpenAI image resource public let urlString: String - /// Path to the point - public let path : String + /// Path to the specific endpoint + public let path: String - /// Api key for access - public let apiKey : String + /// API key for authentication and access to the OpenAI API + public let apiKey: String - // MARK: - Life circle + // MARK: - Initializer + /// Initializes a new instance of `OpenAIImageEndpoint` /// - Parameters: - /// - urlString: Base url to OpenAPI image resource - /// - httpMethod: Http method - /// - apiKey: Api key for access - /// - path: Path to the point + /// - urlString: Base URL for the OpenAI image resource + /// - apiKey: API key for accessing the OpenAI API + /// - path: Path to the specific endpoint public init(urlString: String, apiKey: String, path: String) { self.urlString = urlString self.apiKey = apiKey diff --git a/Sources/openai-async-image-swiftui/protocol/IOpenAIImageEndpoint.swift b/Sources/openai-async-image-swiftui/protocol/IOpenAIImageEndpoint.swift index 0b30dac..8bf9894 100644 --- a/Sources/openai-async-image-swiftui/protocol/IOpenAIImageEndpoint.swift +++ b/Sources/openai-async-image-swiftui/protocol/IOpenAIImageEndpoint.swift @@ -7,18 +7,17 @@ import Foundation -/// Defines access API to OpenAI image API +/// Protocol defining access to the OpenAI image API @available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) -public protocol IOpenAIImageEndpoint{ +public protocol IOpenAIImageEndpoint: Sendable { - /// Base url to OpenAPI image resource - var urlString : String { get } + /// Base URL for the OpenAI image resource + var urlString: String { get } - /// Path to the point - var path : String { get } - - /// Api key for access - var apiKey : String { get } + /// Path to the specific endpoint within the OpenAI API + var path: String { get } + /// API key for authentication and access to the OpenAI API + var apiKey: String { get } } diff --git a/Sources/openai-async-image-swiftui/protocol/IOpenAILoader.swift b/Sources/openai-async-image-swiftui/protocol/IOpenAILoader.swift index d4ee384..f94745e 100644 --- a/Sources/openai-async-image-swiftui/protocol/IOpenAILoader.swift +++ b/Sources/openai-async-image-swiftui/protocol/IOpenAILoader.swift @@ -7,14 +7,17 @@ import SwiftUI -/// Loader for getting images +/// Protocol defining the loader for fetching images from the OpenAI API @available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) -public protocol IOpenAILoader{ +public protocol IOpenAILoader { - /// Load image by text + /// Asynchronously generates an image using a given text prompt, size, and model. /// - Parameters: - /// - prompt: Text - /// - size: Image size - /// - Returns: Open AI Image - func load(_ prompt : String, with size : OpenAIImageSize) async throws -> Image + /// - prompt: A descriptive text prompt that defines the content of the desired image. + /// - size: The dimensions of the generated image, specified as an `OpenAIImageSize`. + /// - model: The `DalleModel` used for image generation. + /// - Returns: A generated `Image` based on the provided prompt and size. + /// - Throws: An error if the image generation process fails, such as issues with the prompt, model, or network. 
+ func load(_ prompt: String, with size: OpenAIImageSize, + model: DalleModel) async throws -> Image } diff --git a/Sources/openai-async-image-swiftui/viewModel/OpenAIDefaultLoader.swift b/Sources/openai-async-image-swiftui/viewModel/OpenAIDefaultLoader.swift index 584c2ec..665b4cc 100644 --- a/Sources/openai-async-image-swiftui/viewModel/OpenAIDefaultLoader.swift +++ b/Sources/openai-async-image-swiftui/viewModel/OpenAIDefaultLoader.swift @@ -1,6 +1,6 @@ // // OpenAIViewModel.swift -// +// // // Created by Igor on 28.02.2023. // @@ -17,20 +17,20 @@ import AppKit.NSImage #endif @available(iOS 15.0, macOS 12.0, tvOS 15.0, watchOS 8.0, *) -public final class OpenAIDefaultLoader : IOpenAILoader{ +public final class OpenAIDefaultLoader: IOpenAILoader, Sendable { - /// Http async client - private let client : Http.Proxy? + /// HTTP async client to handle requests + private let client: Http.Proxy? - /// Set of params for making requests - private let endpoint : IOpenAIImageEndpoint + /// Endpoint parameters required for making requests + private let endpoint: IOpenAIImageEndpoint - /// - Parameter endpoint: Set of params for making requests - public init(endpoint : IOpenAIImageEndpoint) { - + /// Initializes the loader with endpoint parameters + /// - Parameter endpoint: Set of parameters for making requests + public init(endpoint: IOpenAIImageEndpoint) { self.endpoint = endpoint - guard let url = URL(string: endpoint.urlString) else{ + guard let url = URL(string: endpoint.urlString) else { client = nil return } @@ -38,70 +38,88 @@ public final class OpenAIDefaultLoader : IOpenAILoader{ client = Http.Proxy(baseURL: url) } - /// Load image by text + /// Asynchronously loads an image from the OpenAI API using a text prompt and specified parameters. /// - Parameters: - /// - prompt: Text - /// - size: Image size - /// - Returns: Open AI Image + /// - prompt: The text prompt describing the desired image content. + /// - size: The dimensions of the generated image, specified as `OpenAIImageSize`. + /// - model: The `DalleModel` used for generating the image. + /// - Returns: A generated `Image` object based on the prompt and size. + /// - Throws: An `AsyncImageErrors` if the client is undefined, the request fails, + /// or the OpenAI API returns an error. public func load( - _ prompt : String, - with size : OpenAIImageSize - ) async throws -> Image{ - - let body = Input(prompt: prompt, size: size, response_format: .b64, n: 1) - - let headers = ["Authorization": "Bearer \(endpoint.apiKey)"] - let path = endpoint.path + _ prompt: String, + with size: OpenAIImageSize, + model: DalleModel + ) async throws -> Image { - guard let client = client else{ + guard let client = client else { throw AsyncImageErrors.clientIsNotDefined } - let result: Http.Response = try await client.post(path: path, body: body, headers: headers) - - return try imageBase64(from: result.value) + do { + let (path, body, headers) = prepareRequest(prompt: prompt, size: size, model: model) + let result: Http.Response = try await client.post(path: path, body: body, headers: headers) + return try imageBase64(from: result.value) + + } catch { + throw AsyncImageErrors.handleRequest(error) + } } + + /// Prepares the API request for generating an image with the given parameters. + /// - Parameters: + /// - prompt: The descriptive text prompt for generating the image. + /// - size: The dimensions of the image to be generated, as `OpenAIImageSize`. + /// - model: The `DalleModel` specifying the AI model to use for generation. 
+ /// - Returns: A tuple containing: + /// - `path`: The API endpoint path as a `String`. + /// - `body`: The request payload as an `Input` object, containing model, prompt, size, and other parameters. + /// - `headers`: A dictionary of HTTP headers required for the request. + private func prepareRequest(prompt: String, size: OpenAIImageSize, model: DalleModel) -> (String, Input, [String: String]) { + let body = Input(model: model.rawValue, prompt: prompt, size: size, response_format: .b64, n: 1) + let headers = ["Content-Type": "application/json", "Authorization": "Bearer \(endpoint.apiKey)"] + let path = endpoint.path + return (path, body, headers) + } + - /// Decode base64 to Data - /// - Parameter output: Received format from the endpoint - /// - Returns: Decoded data - private func decodeBase64(from output: Output) throws -> Data?{ - guard let base64 = output.firstImage else { + /// Decodes base64 encoded string to Data + /// - Parameter output: The output received from the endpoint + /// - Returns: Decoded Data + private func decodeBase64(from output: Output) throws -> Data? { + guard let base64 = output.firstImage else { throw AsyncImageErrors.returnedNoImages } return Data(base64Encoded: base64) } - -#if os(iOS) || os(watchOS) || os(tvOS) - /// Base64 encoder for iOS + +#if os(macOS) + /// Converts base64 encoded string to NSImage for macOS /// - Parameter output: OpenAI response type - /// - Returns: UIImage + /// - Returns: NSImage private func imageBase64(from output: Output) throws -> Image { - let data = try decodeBase64(from: output) - if let data, let image = UIImage(data: data){ - return Image(uiImage: image) + if let data, let image = NSImage(data: data) { + return Image(nsImage: image) } throw AsyncImageErrors.imageInit } -#endif - -#if os(macOS) - /// Base64 encoder for macOS +#else + /// Converts base64 encoded string to UIImage for iOS /// - Parameter output: OpenAI response type - /// - Returns: NSImage + /// - Returns: UIImage private func imageBase64(from output: Output) throws -> Image { - let data = try decodeBase64(from: output) - if let data, let image = NSImage(data: data){ - return Image(nsImage: image) + if let data, let image = UIImage(data: data) { + return Image(uiImage: image) } throw AsyncImageErrors.imageInit } #endif + } diff --git a/Tests/openai-async-image-swiftuiTests/openai_async_image_swiftuiTests.swift b/Tests/openai-async-image-swiftuiTests/openai_async_image_swiftuiTests.swift index 44051a3..453239c 100644 --- a/Tests/openai-async-image-swiftuiTests/openai_async_image_swiftuiTests.swift +++ b/Tests/openai-async-image-swiftuiTests/openai_async_image_swiftuiTests.swift @@ -3,9 +3,6 @@ import XCTest final class openai_async_image_swiftuiTests: XCTestCase { func testExample() throws { - // This is an example of a functional test case. - // Use XCTAssert and related functions to verify your tests produce the correct - // results. - // XCTAssertEqual(openai_async_image_swiftui().text, "Hello, World!") + } }
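To tie the pieces together, a rough end-to-end sketch that builds the default loader explicitly with an API key and passes it through the `loader:` parameter, exercising the new `DalleModel` and one of the added image sizes. The endpoint, loader, and initializer are those declared in this diff; the view, prompt, and placeholder key are illustrative:

```swift
import SwiftUI
import openai_async_image_swiftui

struct GalleryView: View {
    // Placeholder; supply a real OpenAI API key at runtime.
    private let apiKey = "sk-..."

    @State private var prompt = "A fox reading a book, oil painting"

    var body: some View {
        // Loader configured with the endpoint defined in OpenAIImageEndpoint.swift.
        let loader = OpenAIDefaultLoader(
            endpoint: OpenAIImageEndpoint.get(with: apiKey)
        )

        // DALL-E 3 with one of the newly added landscape sizes.
        OpenAIAsyncImage(
            prompt: $prompt,
            size: .dpi1792x1024,
            model: .dalle3,
            tpl: { state in
                switch state {
                case .loading:
                    ProgressView()
                case .loaded(let image):
                    image.resizable().scaledToFit()
                case .loadError(let error):
                    Text(error.localizedDescription)
                }
            },
            loader: loader
        )
    }
}
```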