
update doc comments #166


Merged 1 commit on May 8, 2024
11 changes: 9 additions & 2 deletions Sources/GoogleAI/Chat.swift
@@ -30,7 +30,11 @@ public class Chat {
/// model. This will be provided to the model for each message sent as context for the discussion.
public var history: [ModelContent]

/// See ``sendMessage(_:)-3ify5``.
/// Sends a message using the existing history of this chat as context. If successful, the message
/// and response will be added to the history. If unsuccessful, history will remain unchanged.
/// - Parameter parts: The new content to send as a single chat message.
/// - Returns: The model's response if no error occurred.
/// - Throws: A ``GenerateContentError`` if an error occurred.
public func sendMessage(_ parts: any ThrowingPartsRepresentable...) async throws
-> GenerateContentResponse {
return try await sendMessage([ModelContent(parts: parts)])
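For reference, a minimal usage sketch of the non-streaming chat call documented above; the model name, API key, and prompt are placeholders, and a surrounding async context is assumed:

// Sketch: send a single chat message and read the reply (inside an async context).
// Assumes a valid API key and network access; names are illustrative only.
import GoogleGenerativeAI

let model = GenerativeModel(name: "gemini-1.5-pro-latest", apiKey: "YOUR_API_KEY")
let chat = model.startChat()
do {
  let response = try await chat.sendMessage("Hello, how are you?")
  print(response.text ?? "No text in response")
} catch {
  // sendMessage throws GenerateContentError on failure; the chat history is left unchanged.
  print("Chat error: \(error)")
}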
@@ -76,7 +80,10 @@ public class Chat {
return result
}

/// See ``sendMessageStream(_:)-4abs3``.
/// Sends a message using the existing history of this chat as context. If successful, the message
/// and response will be added to the history. If unsuccessful, history will remain unchanged.
/// - Parameter parts: The new content to send as a single chat message.
/// - Returns: A stream containing the model's response or an error if an error occurred.
@available(macOS 12.0, *)
public func sendMessageStream(_ parts: any ThrowingPartsRepresentable...)
-> AsyncThrowingStream<GenerateContentResponse, Error> {
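Likewise, a hedged sketch of consuming the streaming variant by iterating the returned AsyncThrowingStream; the prompt text is a placeholder:

// Sketch: stream a chat reply chunk by chunk (inside an async context).
let chat = model.startChat()
do {
  for try await chunk in chat.sendMessageStream("Tell me a short story.") {
    print(chunk.text ?? "", terminator: "")
  }
} catch {
  // Errors surface while iterating the stream; history stays unchanged on failure.
  print("Streaming error: \(error)")
}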
3 changes: 2 additions & 1 deletion Sources/GoogleAI/GenerativeAISwift.swift
@@ -17,10 +17,11 @@ import Foundation
#warning("Only iOS, macOS, and Catalyst targets are currently fully supported.")
#endif

/// Constants associated with the GenerativeAISwift SDK
/// Constants associated with the GenerativeAISwift SDK.
@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, *)
public enum GenerativeAISwift {
/// String value of the SDK version
public static let version = "0.5.2"
/// The Google AI backend endpoint URL.
static let baseURL = "https://generativelanguage.googleapis.com"
}
12 changes: 6 additions & 6 deletions Sources/GoogleAI/GenerativeModel.swift
@@ -48,14 +48,14 @@ public final class GenerativeModel {
/// Initializes a new remote model with the given parameters.
///
/// - Parameters:
/// - name: The name of the model to use, e.g., `"gemini-1.5-pro-latest"`; see
/// - name: The name of the model to use, for example `"gemini-1.5-pro-latest"`; see
/// [Gemini models](https://ai.google.dev/models/gemini) for a list of supported model names.
/// - apiKey: The API key for your project.
/// - generationConfig: The content generation parameters your model should use.
/// - safetySettings: A value describing what types of harmful content your model should allow.
/// - tools: A list of ``Tool`` objects that the model may use to generate the next response.
/// - systemInstruction: Instructions that direct the model to behave a certain way; currently
/// only text content is supported, e.g.,
/// only text content is supported, for example
/// `ModelContent(role: "system", parts: "You are a cat. Your name is Neko.")`.
/// - toolConfig: Tool configuration for any `Tool` specified in the request.
/// - requestOptions: Configuration parameters for sending requests to the backend.
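As a rough illustration of these initializer parameters (not an exhaustive configuration; the argument order and values are assumed from the doc comment above, and omitted parameters keep their defaults):

// Sketch: construct a remote model with optional configuration.
// API key and settings are illustrative placeholders.
let model = GenerativeModel(
  name: "gemini-1.5-pro-latest",
  apiKey: "YOUR_API_KEY",
  generationConfig: GenerationConfig(temperature: 0.4, maxOutputTokens: 256),
  safetySettings: [SafetySetting(harmCategory: .dangerousContent, threshold: .blockOnlyHigh)],
  systemInstruction: ModelContent(role: "system", parts: "You are a cat. Your name is Neko.")
)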
@@ -154,7 +154,7 @@ public final class GenerativeModel {
/// [zero-shot](https://developers.google.com/machine-learning/glossary/generative#zero-shot-prompting)
/// or "direct" prompts. For
/// [few-shot](https://developers.google.com/machine-learning/glossary/generative#few-shot-prompting)
/// prompts, see ``generateContent(_:)-58rm0``.
/// prompts, see `generateContent(_ content: @autoclosure () throws -> [ModelContent])`.
///
/// - Parameter content: The input(s) given to the model as a prompt (see
/// ``ThrowingPartsRepresentable``
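A brief zero-shot sketch for the variadic convenience overload described here; the prompt text is a placeholder:

// Sketch: single "direct" text prompt (inside an async context).
do {
  let response = try await model.generateContent("Why is the sky blue?")
  print(response.text ?? "No text in response")
} catch {
  // Throws GenerateContentError on failure.
  print("Generation error: \(error)")
}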
@@ -213,7 +213,7 @@ public final class GenerativeModel {
/// [zero-shot](https://developers.google.com/machine-learning/glossary/generative#zero-shot-prompting)
/// or "direct" prompts. For
/// [few-shot](https://developers.google.com/machine-learning/glossary/generative#few-shot-prompting)
/// prompts, see ``generateContent(_:)-58rm0``.
/// prompts, see `generateContent(_ content: @autoclosure () throws -> [ModelContent])`.
///
/// - Parameter content: The input(s) given to the model as a prompt (see
/// ``ThrowingPartsRepresentable``
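This hunk appears to document the streaming counterpart; assuming it is `generateContentStream(_:)` as elsewhere in this SDK, a usage sketch with a placeholder prompt:

// Sketch: stream partial responses as they arrive (inside an async context).
do {
  for try await chunk in model.generateContentStream("Write a poem about the sea.") {
    print(chunk.text ?? "", terminator: "")
  }
} catch {
  print("Streaming error: \(error)")
}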
@@ -302,7 +302,7 @@ public final class GenerativeModel {
/// [zero-shot](https://developers.google.com/machine-learning/glossary/generative#zero-shot-prompting)
/// or "direct" prompts. For
/// [few-shot](https://developers.google.com/machine-learning/glossary/generative#few-shot-prompting)
/// input, see ``countTokens(_:)-9spwl``.
/// input, see `countTokens(_ content: @autoclosure () throws -> [ModelContent])`.
///
/// - Parameter content: The input(s) given to the model as a prompt (see
/// ``ThrowingPartsRepresentable``
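A small token-counting sketch for the same kind of input, assuming the `totalTokens` field on the response:

// Sketch: count tokens for a prompt before sending it (inside an async context).
do {
  let result = try await model.countTokens("Why is the sky blue?")
  print("Prompt uses \(result.totalTokens) tokens")
} catch {
  print("Token counting error: \(error)")
}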
@@ -360,7 +360,7 @@ public final class GenerativeModel {
}
}

/// See ``GenerativeModel/countTokens(_:)-9spwl``.
/// An error thrown in `GenerativeModel.countTokens(_:)`.
@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, *)
public enum CountTokensError: Error {
case internalError(underlying: Error)
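To show how this error surfaces, a hedged catch sketch (only the `internalError` case shown above is matched, since other cases may exist outside this hunk):

// Sketch: unwrap the underlying error from a failed countTokens call (inside an async context).
do {
  _ = try await model.countTokens("Some prompt")
} catch let error as CountTokensError {
  if case let .internalError(underlying) = error {
    print("countTokens failed with underlying error: \(underlying)")
  }
} catch {
  print("Unexpected error: \(error)")
}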