SwiftFM is a beginner-first Swift wrapper around Apple Foundation Models.
Version 2.0.0 keeps the original power-user features, but makes the package feel much more like SwiftUI:
- modifier-style config chains
- modifier-style request chains
- modifier-style prompt chains
- dynamic schemas and structured streaming
- locale helpers, token counting, and feedback attachment export
- custom adapter helpers
If you already use the older Config(...) and RequestConfig(...) style, it still works.
- Swift 6.2+
- Xcode 26+
- iOS 26+
- macOS 26+
- visionOS 26+
- Apple Intelligence enabled on supported hardware
Add the package with Swift Package Manager:
.package(url: "https://github.com/ricky-stone/SwiftFM.git", from: "2.0.0")

import SwiftFM
let fm = SwiftFM()
let text = try await fm.generateText(
for: "Explain a snooker century break in one sentence."
)
print(text)

This is the new 2.0 feel.
You start from SwiftFM.configuration(), SwiftFM.request(), or SwiftFM.prompt(...), then chain small modifiers.
import SwiftFM
let fm = SwiftFM(
config: SwiftFM.configuration()
.system("You are clear, friendly, and concise.")
.model(.general)
.temperature(0.3)
.maximumResponseTokens(180)
.postProcessing(.readableParagraphs)
)
let text = try await fm.generateText(
for: "Write a short beginner explanation of snooker safety play."
)

Use SwiftFM.request() when you only want to change one call.
let text = try await fm.generateText(
for: "Write a short match preview.",
request: SwiftFM.request()
.temperature(0.2)
.maximumResponseTokens(120)
.postProcessing(.readableParagraphs)
)

TextPostProcessing is still here, and now it chains nicely too.
let fm = SwiftFM(
config: SwiftFM.configuration()
.postProcessing(
.none
.trimmingWhitespace()
.collapsingSpacesAndTabs()
.limitingConsecutiveNewlines(to: 2)
.roundingFloatingPointNumbers(to: 0)
)
)

PromptSpec now chains cleanly too.
let spec = SwiftFM.prompt("Write a pre-match analysis.")
.rule("Use plain text only")
.rule("Do not use markdown")
.requirement("Exactly 3 short paragraphs")
.tone("Professional and engaging")
let text = try await fm.generateText(from: spec)
print(text)

If your app already has Swift models, pass them directly.
struct MatchVision: Codable, Sendable {
let home: String
let away: String
let venue: String
let bestOfFrames: Int
}
let vision = MatchVision(
home: "Judd Trump",
away: "Mark Allen",
venue: "Alexandra Palace",
bestOfFrames: 11
)
let summary = try await fm.generateText(
for: "Write a short neutral preview using only this data.",
context: vision,
request: SwiftFM.request()
.postProcessing(.readableParagraphs)
)

You can still control how the JSON is embedded in the prompt.
let text = try await fm.generateText(
for: "Summarize this payload for a beginner.",
context: vision,
request: SwiftFM.request()
.contextOptions(
.init()
.heading("Match Payload")
.jsonFormatting(.compactSorted)
)
)

SwiftFM still supports both full snapshots and delta chunks.
for try await snapshot in await fm.streamText(
for: "Explain snooker break-building in three short paragraphs."
) {
print(snapshot)
}

var text = ""
for try await delta in await fm.streamTextDeltas(
for: "Explain snooker break-building in three short paragraphs."
) {
text += delta
}

import SwiftUI
import SwiftFM
struct HomeView: View {
@State private var text = ""
@State private var isLoading = true
private let fm = SwiftFM(
config: .beginnerFriendly
.system("You explain things simply.")
.temperature(0.3)
)
var body: some View {
ZStack {
ScrollView {
Text(text)
.frame(maxWidth: .infinity, alignment: .leading)
.padding()
}
if isLoading {
ProgressView("Thinking...")
}
}
.task {
do {
for try await delta in await fm.streamTextDeltas(
from: SwiftFM.prompt("Explain one snooker safety drill.")
.requirement("Exactly 2 short paragraphs")
) {
if isLoading { isLoading = false }
text += delta
}
} catch {
isLoading = false
text = "Error: \(error.localizedDescription)"
}
}
}
}

import SwiftFM
import FoundationModels
@Generable
struct MatchPrediction: Decodable, Sendable {
@Guide(description: "Home player")
let home: String
@Guide(description: "Away player")
let away: String
@Guide(description: "Predicted winner")
let winner: String
@Guide(description: "Confidence from 0.0 to 1.0")
let confidence: Double
}
let prediction = try await fm.generateJSON(
for: "Predict this match and return home, away, winner, and confidence.",
as: MatchPrediction.self
)

New in 2.0: you can stream partial typed snapshots, not just text.
for try await partial in await fm.streamJSON(
for: "Generate a snooker match prediction.",
as: MatchPrediction.self
) {
print(partial.winner ?? "Waiting...")
}

If you want to use Apple's newer explicit-nil generation behavior, you can do that directly with Foundation Models and still use SwiftFM normally:
@Generable(representNilExplicitlyInGeneratedContent: true)
struct OptionalNote: Decodable, Sendable {
let title: String
let subtitle: String?
}

New in 2.0: you can generate runtime-structured content without creating a Swift type first.
import FoundationModels
let schema = DynamicGenerationSchema(
name: "SnookerNote",
properties: [
.init(
name: "title",
description: "Short title",
schema: .init(type: String.self)
),
.init(
name: "frameCount",
description: "Likely number of frames",
schema: .init(type: Int.self, guides: [.range(1 ... 35)])
)
]
)
let content = try await fm.generateContent(
for: "Generate a snooker match note with a title and likely frame count.",
dynamicSchema: schema
)
let title = try content.value(String.self, forProperty: "title")
let frames = try content.value(Int.self, forProperty: "frameCount")

for try await snapshot in await fm.streamContent(
for: "Generate a short structured match note.",
dynamicSchema: schema
) {
print(snapshot.jsonString)
}

Use tools when the model should fetch live data or call app logic.
import SwiftFM
import FoundationModels
@Generable
struct MatchLookupArgs: Decodable, Sendable {
@Guide(description: "Match id to fetch")
let id: String
}
struct MatchLookupTool: Tool {
let name = "match_lookup"
let description = "Fetches match JSON by id"
func call(arguments: MatchLookupArgs) async throws -> String {
"""
{"id":"\(arguments.id)","home":"Player A","away":"Player B","venue":"Main Arena"}
"""
}
}
let text = try await fm.generateText(
for: "Use match_lookup for id 123, then write a short neutral preview.",
request: SwiftFM.request()
.tool(MatchLookupTool())
)

If you want more control, the existing sampling features are still available.
let fm = SwiftFM(
config: SwiftFM.configuration()
.model(.general)
.temperature(0.2)
.maximumResponseTokens(250)
.sampling(.greedy)
)

SwiftFM model options:
- .default
- .general
- .contentTagging
- .custom(SystemLanguageModel)
let summary = try await fm.generateText(
for: "Give one tactical snooker tip.",
using: .general
)
let label = try await fm.generateText(
for: "Return one label only: billing, support, bug. Text: app crashes at launch.",
using: .contentTagging
)

If you want the raw Apple surface, that is still supported too.
import FoundationModels
let customModel = SystemLanguageModel(
useCase: .general,
guardrails: .default
)
let fm = SwiftFM(
config: SwiftFM.configuration()
.model(.custom(customModel))
)

New in 2.0: adapter helpers make Apple adapter usage easier to discover.
let fm = SwiftFM(
config: .beginnerFriendly
.model(try .adapter(named: "MyAdapter"))
)

You can also load an adapter from disk:
let model = try SwiftFM.Model.adapter(fileURL: adapterURL)
let fm = SwiftFM(config: .init(model: model))

if SwiftFM.isModelAvailable && SwiftFM.supportsCurrentLocale() {
print("Ready")
} else {
print("Unavailable: \(SwiftFM.modelAvailability)")
}
let languages = SwiftFM.supportedLanguages(for: .default)
print(languages)

Apple added token counting in 26.4, and SwiftFM now exposes it.
if #available(iOS 26.4, macOS 26.4, visionOS 26.4, *) {
let count = try await fm.tokenCount(
from: SwiftFM.prompt("Explain a snooker safety shot.")
.requirement("One sentence only")
)
print("Prompt tokens:", count)
}

There are also static helpers for tools, schemas, and transcript entries:
if #available(iOS 26.4, macOS 26.4, visionOS 26.4, *) {
let count = try await SwiftFM.tokenCount(for: schema)
print(count)
}

Apple recommends exporting feedback attachments when a response is poor or guardrails trigger unexpectedly.
New in 2.0: you can export that attachment directly from the current session.
let attachment = await fm.feedbackAttachment(
sentiment: .negative,
issues: [
.init(category: .didNotFollowInstructions, explanation: "It ignored the output format.")
],
desiredResponseText: "A short plain-text answer in exactly two sentences."
)
print("Attachment bytes:", attachment.count)

let fm = SwiftFM(
config: .beginnerFriendly
.system("You are concise.")
)
await fm.prewarm(promptPrefix: "Match analysis")
let busy = await fm.isBusy
let transcript = await fm.transcript
await fm.resetConversation()

What these do:
- prewarm(promptPrefix:): reduce first-response latency
- isBusy: true while the session is generating
- transcript: inspect the current conversation history
- resetConversation(): clear the session and start fresh with the same base config
do {
let text = try await fm.generateText(for: "Analyze this match.")
print(text)
} catch let error as SwiftFM.SwiftFMError {
print(error.localizedDescription)
if let generationError = error.generationError {
print("Foundation Models error:", generationError)
}
} catch {
print(error.localizedDescription)
}

2.0.0 adds fluent builder-style usage, but it does not remove the current feature set.
These still work:
- SwiftFM(config: .init(...))
- RequestConfig(...)
- PromptSpec(...)
- generateText
- streamText
- streamTextDeltas
- generateJSON
- request-scoped tools
- context embedding options
- post-processing options
- custom SystemLanguageModel

Current source version: 2.0.0
SwiftFM is licensed under the MIT License. See LICENSE.