diff --git a/Sources/OpenAI/OpenAI.swift b/Sources/OpenAI/OpenAI.swift index 49715c8e..a0d65ae6 100644 --- a/Sources/OpenAI/OpenAI.swift +++ b/Sources/OpenAI/OpenAI.swift @@ -112,8 +112,8 @@ final public class OpenAI: OpenAIProtocol { performRequest(request: MultipartFormDataRequest<AudioTranslationResult>(body: query, url: buildURL(path: .audioTranslations)), completion: completion) } - public func audioCreateSpeech(query: AudioSpeechQuery, completion: @escaping (Result<AudioSpeechResult, Error>) -> Void) { - performSpeechRequest(request: JSONRequest<AudioSpeechResult>(body: query, url: buildURL(path: .audioSpeech)), completion: completion) + public func audioCreateSpeech(query: AudioSpeechQuery, completion: @escaping (Result<Data, Error>) -> Void) { + performRequest(request: JSONRequest<Data>(body: query, url: buildURL(path: .audioSpeech)), completion: completion) } } @@ -136,7 +136,11 @@ extension OpenAI { do { completion(.success(try decoder.decode(ResultType.self, from: data))) } catch { - completion(.failure((try? decoder.decode(APIErrorResponse.self, from: data)) ?? 
error)) + if ResultType.self == Data.self { + completion(.success(data as! ResultType)) + } else { + completion(.failure((try? decoder.decode(APIErrorResponse.self, from: data)) ?? error)) + } } } task.resume() @@ -167,28 +171,6 @@ extension OpenAI { completion?(error) } } - - func performSpeechRequest(request: any URLRequestBuildable, completion: @escaping (Result<AudioSpeechResult, Error>) -> Void) { - do { - let request = try request.build(token: configuration.token, - organizationIdentifier: configuration.organizationIdentifier, - timeoutInterval: configuration.timeoutInterval) - - let task = session.dataTask(with: request) { data, _, error in - if let error = error { - return completion(.failure(error)) - } - guard let data = data else { - return completion(.failure(OpenAIError.emptyData)) - } - - completion(.success(AudioSpeechResult(audio: data))) - } - task.resume() - } catch { - completion(.failure(error)) - } - } } extension OpenAI { diff --git a/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Async.swift b/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Async.swift index b515a234..b8c24b11 100644 --- a/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Async.swift +++ b/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Async.swift @@ -191,7 +191,7 @@ public extension OpenAIProtocol { audioCreateSpeech(query: query) { result in switch result { case let .success(success): - return continuation.resume(returning: success) + return continuation.resume(returning: AudioSpeechResult(audio: success)) case let .failure(failure): return continuation.resume(throwing: failure) } diff --git a/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Combine.swift b/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Combine.swift index c5142044..1999026c 100644 --- a/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Combine.swift +++ b/Sources/OpenAI/Public/Protocols/OpenAIProtocol+Combine.swift @@ -8,6 +8,7 @@ #if canImport(Combine) import Combine +import Foundation @available(iOS 13.0, *) @available(tvOS 13.0, *) @@ -113,8 +114,8 @@ public extension OpenAIProtocol { .eraseToAnyPublisher() } - func audioCreateSpeech(query: AudioSpeechQuery) -> AnyPublisher<AudioSpeechResult, Error> { - Future<AudioSpeechResult, Error> { + func 
audioCreateSpeech(query: AudioSpeechQuery) -> AnyPublisher<Data, Error> { + Future<Data, Error> { audioCreateSpeech(query: query, completion: $0) } .eraseToAnyPublisher() diff --git a/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift b/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift index 8c65b190..4b41f4d9 100644 --- a/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift +++ b/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift @@ -228,8 +228,8 @@ public protocol OpenAIProtocol { - query: An `AudioSpeechQuery` object containing the parameters for the API request. This includes the Text-to-Speech model to be used, input text, voice to be used for generating the audio, the desired audio format, and the speed of the generated audio. - completion: A closure which receives the result. The closure's parameter, `Result<AudioSpeechResult, Error>`, will either contain the `AudioSpeechResult` object with the audio data or an error if the request failed. */ - func audioCreateSpeech(query: AudioSpeechQuery, completion: @escaping (Result<AudioSpeechResult, Error>) -> Void) - + func audioCreateSpeech(query: AudioSpeechQuery, completion: @escaping (Result<Data, Error>) -> Void) + /** Transcribes audio data using OpenAI's audio transcription API and completes the operation asynchronously.