Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Sources/SwiftCommitGen/CommitGenOptions.swift
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ struct CommitGenOptions {
case detailed

#if canImport(FoundationModels)
@available(macOS 26.0, *)
var promptRepresentation: Prompt {
Prompt { styleGuidance }
}
Expand Down
6 changes: 5 additions & 1 deletion Sources/SwiftCommitGen/CommitGenTool.swift
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,11 @@ struct CommitGenTool {
self.llmClient = OllamaClient(configuration: config)
#if canImport(FoundationModels)
case .foundationModels:
self.llmClient = FoundationModelsClient()
if #available(macOS 26.0, *) {
self.llmClient = FoundationModelsClient()
} else {
fatalError("FoundationModels backend requires macOS 26.0 or newer")
}
#else
case .foundationModels:
fatalError("FoundationModels is not available on this platform")
Expand Down
47 changes: 13 additions & 34 deletions Sources/SwiftCommitGen/Core/BatchCombinationPromptBuilder.swift
Original file line number Diff line number Diff line change
Expand Up @@ -57,43 +57,22 @@ struct BatchCombinationPromptBuilder {
"Produce one final commit subject (<= 50 characters) and an optional body that summarizes the full change set. Avoid repeating the batch headings—present the combined commit message only."
)

#if canImport(FoundationModels)
let userPrompt = Prompt {
for line in userLines {
line
}
}

let systemPrompt = Instructions {
"""
You are an AI assistant merging multiple partial commit drafts into a single, well-structured commit message.
Preserve all important intent from the inputs, avoid redundancy, and keep the final subject concise (<= 50 characters).
The title should succinctly describe the change in a specific and informative manner.
Provide an optional body only when useful for additional context.
If a body is present, it should describe the _purpose_ of the change, not just _what_ was changed: focus on the reasoning behind the changes rather than a file-by-file summary.

Be clear and concise, but do not omit critical information.
"""
""
metadata.style.styleGuidance
}
#else
let userPrompt = PromptContent(userLines.joined(separator: "\n"))
// Always use PromptContent - LLM client will convert if needed
let userPrompt = PromptContent(userLines.joined(separator: "\n"))

let systemPrompt = PromptContent(
"""
You are an AI assistant merging multiple partial commit drafts into a single, well-structured commit message.
Preserve all important intent from the inputs, avoid redundancy, and keep the final subject concise (<= 50 characters).
The title should succinctly describe the change in a specific and informative manner.
Provide an optional body only when useful for additional context.
If a body is present, it should describe the _purpose_ of the change, not just _what_ was changed: focus on the reasoning behind the changes rather than a file-by-file summary.
let systemPrompt = PromptContent(
"""
You are an AI assistant merging multiple partial commit drafts into a single, well-structured commit message.
Preserve all important intent from the inputs, avoid redundancy, and keep the final subject concise (<= 50 characters).
The title should succinctly describe the change in a specific and informative manner.
Provide an optional body only when useful for additional context.
If a body is present, it should describe the _purpose_ of the change, not just _what_ was changed: focus on the reasoning behind the changes rather than a file-by-file summary.

Be clear and concise, but do not omit critical information.
Be clear and concise, but do not omit critical information.

\(metadata.style.styleGuidance)
"""
)
#endif
\(metadata.style.styleGuidance)
"""
)

let characterCount = userLines.reduce(0) { $0 + $1.count + 1 }
let estimatedTokens = PromptDiagnostics.tokenEstimate(forCharacterCount: characterCount)
Expand Down
3 changes: 3 additions & 0 deletions Sources/SwiftCommitGen/Core/CommitGenError.swift
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ enum CommitGenError: Error {
case modelTimedOut(timeout: TimeInterval)
case modelGenerationFailed(message: String)
case llmRequestFailed(reason: String)
case invalidBackend(String)
case notImplemented
}

Expand All @@ -33,6 +34,8 @@ extension CommitGenError: LocalizedError {
message
case .llmRequestFailed(let reason):
"LLM request failed: \(reason)"
case .invalidBackend(let message):
message
case .notImplemented:
"Commit generation is not implemented yet; future phases will add this capability."
}
Expand Down
2 changes: 2 additions & 0 deletions Sources/SwiftCommitGen/Core/DiffSummarizer.swift
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ struct ChangeSummary: Hashable, Codable {
}

#if canImport(FoundationModels)
@available(macOS 26.0, *)
var promptRepresentation: Prompt {
Prompt {
for line in promptLines() {
Expand Down Expand Up @@ -198,6 +199,7 @@ struct ChangeSummary: Hashable, Codable {
}

#if canImport(FoundationModels)
@available(macOS 26.0, *)
var promptRepresentation: Prompt {
Prompt {
for line in promptLines() {
Expand Down
92 changes: 69 additions & 23 deletions Sources/SwiftCommitGen/Core/LLMClient.swift
Original file line number Diff line number Diff line change
Expand Up @@ -9,25 +9,9 @@ protocol LLMClient {
func generateCommitDraft(from prompt: PromptPackage) async throws -> LLMGenerationResult
}

#if canImport(FoundationModels)
@Generable(description: "A commit for changes made in a git repository.")
#endif
/// Model representation for the subject/body pair returned by the language model.
struct CommitDraft: Hashable, Codable, Sendable {
#if canImport(FoundationModels)
@Guide(
description:
"The title of a commit. It should be no longer than 50 characters and should summarize the contents of the changeset for other developers reading the commit history. It should describe WHAT was changed."
)
#endif
var subject: String

#if canImport(FoundationModels)
@Guide(
description:
"A detailed description of the the purposes of the changes. It should describe WHY the changes were made."
)
#endif
var body: String?

init(subject: String = "", body: String? = nil) {
Expand Down Expand Up @@ -77,6 +61,35 @@ struct CommitDraft: Hashable, Codable, Sendable {
}
}

#if canImport(FoundationModels)
// Manual Generable conformance for CommitDraft, gated to macOS 26+ where the
// FoundationModels framework is available. This replaces the @Generable/@Guide
// macro annotations so the struct itself can stay available on older targets.
// NOTE(review): the hand-written schema below declares only property names and
// types — the rich @Guide descriptions ("<= 50 characters", "describe WHY", …)
// from the macro version are not carried over; confirm the model still receives
// enough guidance, or re-add descriptions if GenerationSchema.Property supports
// them. TODO confirm against the FoundationModels API.
@available(macOS 26.0, *)
extension CommitDraft: Generable {
    // Schema the language model uses when generating a CommitDraft:
    // a required "subject" string and an optional "body" string.
    static var generationSchema: GenerationSchema {
        GenerationSchema(
            type: Self.self,
            description: "A commit for changes made in a git repository.",
            properties: [
                .init(name: "subject", type: String.self),
                .init(name: "body", type: String?.self),
            ]
        )
    }

    // Rebuilds a CommitDraft from model-generated content.
    // NOTE(review): this always reads "body" via value(forProperty:), while
    // generatedContent below omits "body" entirely when it is nil — verify
    // that value(forProperty:) tolerates a missing key for an optional
    // property rather than throwing, so a round trip of a body-less draft
    // cannot fail. TODO confirm.
    init(_ content: GeneratedContent) throws {
        self.subject = try content.value(forProperty: "subject")
        self.body = try content.value(forProperty: "body")
    }

    // Serializes the draft back into GeneratedContent; "body" is only
    // included when present (asymmetric with init(_:) above — see note).
    var generatedContent: GeneratedContent {
        var props: [(String, any ConvertibleToGeneratedContent)] = [("subject", subject)]
        if let body = body {
            props.append(("body", body))
        }
        // uniquingKeysWith is effectively unreachable here (keys are distinct);
        // it is supplied only to satisfy the initializer's signature.
        return GeneratedContent(properties: props, uniquingKeysWith: { _, second in second })
    }
}
#endif

/// Wraps a generated draft alongside diagnostics gathered during inference.
struct LLMGenerationResult: Sendable {
var draft: CommitDraft
Expand All @@ -85,6 +98,7 @@ struct LLMGenerationResult: Sendable {

#if canImport(FoundationModels)
/// Concrete LLM client backed by Apple's FoundationModels framework.
@available(macOS 26.0, *)
struct FoundationModelsClient: LLMClient {
/// Controls retry behavior and timeouts for generation requests.
struct Configuration {
Expand Down Expand Up @@ -125,17 +139,26 @@ struct LLMGenerationResult: Sendable {
throw CommitGenError.modelUnavailable(reason: reason)
}

// Convert PromptContent to FoundationModels types
guard let systemPromptContent = prompt.systemPrompt as? PromptContent,
let userPromptContent = prompt.userPrompt as? PromptContent
else {
throw CommitGenError.invalidBackend(
"FoundationModels backend expected PromptContent but received: System: \(type(of: prompt.systemPrompt)), User: \(type(of: prompt.userPrompt))"
)
}

let session = LanguageModelSession(
model: model,
instructions: prompt.systemPrompt
instructions: { Instructions { systemPromptContent.content } }
)

var diagnostics = prompt.diagnostics
let response = try await session.respond(
generating: CommitDraft.self,
options: generationOptions
) {
prompt.userPrompt
Prompt { userPromptContent.content }
}

let usage = analyzeTranscriptEntries(response.transcriptEntries)
Expand Down Expand Up @@ -279,16 +302,39 @@ struct OllamaClient: LLMClient {
request.httpMethod = "POST"
request.setValue("application/json", forHTTPHeaderField: "Content-Type")

// Extract string content from prompts
#if canImport(FoundationModels)
// When FoundationModels is available, prompts could be either PromptContent or FoundationModels types
// depending on macOS version and runtime availability
let systemContent: String
let userContent: String

if let systemPromptContent = prompt.systemPrompt as? PromptContent,
let userPromptContent = prompt.userPrompt as? PromptContent
{
systemContent = systemPromptContent.content
userContent = userPromptContent.content
} else {
// This should not happen if the builder correctly uses PromptContent for non-macOS 26+ systems
throw CommitGenError.invalidBackend(
"Ollama backend requires PromptContent, but received FoundationModels types (System: \(type(of: prompt.systemPrompt)), User: \(type(of: prompt.userPrompt))). Use FoundationModels backend instead."
)
}
#else
let systemContent = prompt.systemPrompt.content
let userContent = prompt.userPrompt.content
#endif

// Structure the messages properly
let messages: [[String: String]] = [
[
"role": "system",
"content": prompt.systemPrompt.content,
"content": systemContent,
],
[
"role": "user",
"content": """
\(prompt.userPrompt.content)
\(userContent)

IMPORTANT: You must respond with ONLY valid JSON in this exact format, with no additional text before or after:
{
Expand All @@ -314,8 +360,8 @@ struct OllamaClient: LLMClient {

// Log the request if verbose logging is enabled
configuration.logger?.debug {
let systemPreview = prompt.systemPrompt.content.prefix(200)
let userPreview = prompt.userPrompt.content.prefix(800)
let systemPreview = systemContent.prefix(200)
let userPreview = userContent.prefix(800)
return """
📤 Ollama Request to \(configuration.model):
┌─ System Prompt (first 200 chars):
Expand Down Expand Up @@ -367,7 +413,7 @@ struct OllamaClient: LLMClient {
promptTokens = promptEvalCount
} else {
promptTokens = PromptDiagnostics.tokenEstimate(
forCharacterCount: prompt.systemPrompt.content.count + prompt.userPrompt.content.count
forCharacterCount: systemContent.count + userContent.count
)
}

Expand Down
Loading