Supercharge Your Applications with Local AI
Offlyn uses on-device AI — your data never leaves your computer.
-
We believe AI should work for you privately. Offlyn runs AI models directly on your device so your resume, work history, and personal info stay 100% yours.
+
We believe AI should work for you privately. Offlyn runs AI models directly on your device so your resume, work history, and personal info stay 100% yours. Once downloaded, the model runs fully offline — no data is ever sent to a server and there is no per-query cost.
diff --git a/apps/extension-firefox/src/background.ts b/apps/extension-firefox/src/background.ts
index 9c91435..172753b 100644
--- a/apps/extension-firefox/src/background.ts
+++ b/apps/extension-firefox/src/background.ts
@@ -182,7 +182,9 @@ browser.runtime.onMessage.addListener(async (message: unknown, sender: browser.r
if (legacyResult.status === 'rejected') warn('Legacy parser failed:', legacyResult.reason);
if (!ragProfile && !legacyProfile) {
- throw new Error('Both parsers failed — no profile could be extracted');
+ const ragErr = ragResult.status === 'rejected' ? String(ragResult.reason) : 'no result';
+ const legacyErr = legacyResult.status === 'rejected' ? String(legacyResult.reason) : 'no result';
+ throw new Error(`Both parsers failed — RAG: ${ragErr} | Legacy: ${legacyErr}`);
}
let profile: any;
diff --git a/apps/extension-firefox/src/content.ts b/apps/extension-firefox/src/content.ts
index 9787263..83a2c60 100644
--- a/apps/extension-firefox/src/content.ts
+++ b/apps/extension-firefox/src/content.ts
@@ -266,7 +266,7 @@ function detectPage(): void {
console.log('[OA] Page state:', classification.state, '|', classification.detectionReason);
if (classification.state === 'NOT_JOB_PAGE') {
- console.warn('[OA] Not a job page — skipping');
+ console.log('[OA] Not a job page — skipping');
return;
}
diff --git a/apps/extension-firefox/src/onboarding/onboarding.ts b/apps/extension-firefox/src/onboarding/onboarding.ts
index bae1731..e67f23f 100644
--- a/apps/extension-firefox/src/onboarding/onboarding.ts
+++ b/apps/extension-firefox/src/onboarding/onboarding.ts
@@ -421,7 +421,7 @@ async function checkOllamaConnection(): Promise
{
}
}
} else {
- console.warn('[Ollama Setup] Connection failed:', result.error);
+ console.log('[Ollama Setup] Connection failed:', result.error);
showOllamaUIState('not-installed');
checkNativeHelper().then(installed => updateHelperSubstate(installed));
}
@@ -430,9 +430,11 @@ async function checkOllamaConnection(): Promise {
// ── Native Messaging helpers ──────────────────────────────────────────────
const HELPER_INSTALL_BASE =
- 'https://raw.githubusercontent.com/joelnishanth/offlyn-apply/main/scripts/native-host';
+ 'https://raw.githubusercontent.com/rahulraonatarajan/offlyn-apply/Windows-ollama-setup/scripts/native-host';
const HELPER_PKG_URL =
- 'https://github.com/joelnishanth/offlyn-apply/releases/download/v0.5.0/offlyn-helper.pkg';
+ 'https://raw.githubusercontent.com/rahulraonatarajan/offlyn-apply/Windows-ollama-setup/scripts/native-host/install-mac-linux.sh';
+const HELPER_WIN_BAT_URL =
+ 'https://raw.githubusercontent.com/rahulraonatarajan/offlyn-apply/Windows-ollama-setup/scripts/native-host/install-win.bat';
function detectOS(): 'mac' | 'windows' | 'linux' {
const ua = navigator.userAgent.toLowerCase();
@@ -472,7 +474,18 @@ function populateHelperInstructions(): void {
class="btn btn-primary"
style="display:inline-flex;align-items:center;gap:8px;font-size:14px;padding:10px 20px;text-decoration:none;margin-bottom:14px;">
${DOWNLOAD_SVG}
- Download Installer
+ Download for Mac
+
+ `;
+ } else if (os === 'windows') {
+ container.innerHTML = `
+ Download and double-click the installer — it takes about 10 seconds. No terminal required.
+
+ ${DOWNLOAD_SVG}
+ Download for Windows (.bat)
`;
} else {
@@ -603,7 +616,21 @@ function setupOllamaStepListeners(): void {
class="btn btn-primary"
style="display:inline-flex;align-items:center;gap:8px;font-size:13px;padding:9px 16px;text-decoration:none;margin-bottom:10px;">
${DOWNLOAD_SVG}
- Download Installer
+ Download for Mac
+
+ After the installer finishes, click Re-test Connection below.
+ `;
+ } else if (os === 'windows') {
+ corsWrap.innerHTML = `
+
+ To fix CORS automatically, install the Offlyn Helper first — it configures Ollama permissions in one step.
+
+
+ ${DOWNLOAD_SVG}
+ Download for Windows (.bat)
After the installer finishes, click Re-test Connection below.
`;
diff --git a/apps/extension-firefox/src/shared/mastra-agent.ts b/apps/extension-firefox/src/shared/mastra-agent.ts
index f9fb3d1..8d05566 100644
--- a/apps/extension-firefox/src/shared/mastra-agent.ts
+++ b/apps/extension-firefox/src/shared/mastra-agent.ts
@@ -3,8 +3,6 @@
* Browser-compatible implementation using ollama-ai-provider-v2
*/
-import { generateText } from 'ai';
-import { ollama } from 'ollama-ai-provider-v2';
import type { Message } from './types';
// Browser API shim for environments that don't have it
@@ -36,19 +34,45 @@ class MastraAgentService {
private baseUrl: string;
private model: string;
private embeddingModel: string;
- private ollamaProvider: ReturnType<typeof ollama>;
/** Set to false after the first embedding failure so callers can skip embedding-based retrieval */
embeddingsAvailable = true;
- constructor(baseUrl = 'http://localhost:11434', model = 'llama3.2') {
+ constructor(baseUrl = 'http://localhost:11434', model = 'llama3.2:1b') {
this.baseUrl = baseUrl;
this.model = model;
this.embeddingModel = 'nomic-embed-text';
+ }
- // Initialize Ollama provider with AI SDK (browser-compatible)
- this.ollamaProvider = ollama(this.model, {
- baseURL: this.baseUrl,
+ /**
+ * Direct fetch to Ollama /api/generate — avoids AI SDK version incompatibilities.
+ */
+ private async ollamaGenerate(
+ system: string,
+ prompt: string,
+ options: { temperature?: number; maxTokens?: number } = {}
+ ): Promise<string> {
+ const response = await fetch(`${this.baseUrl}/api/generate`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ model: this.model,
+ system,
+ prompt,
+ stream: false,
+ options: {
+ temperature: options.temperature ?? 0.1,
+ num_predict: options.maxTokens ?? 4096,
+ },
+ }),
});
+
+ if (!response.ok) {
+ const errText = await response.text().catch(() => response.statusText);
+ throw new Error(`Ollama /api/generate returned ${response.status}: ${errText}`);
+ }
+
+ const data = await response.json();
+ return (data.response as string) ?? '';
}
/**
@@ -97,23 +121,15 @@ class MastraAgentService {
messageCount: messages.length,
});
- // Convert messages to AI SDK format
const systemMessage = messages.find((m) => m.role === 'system');
const userMessages = messages.filter((m) => m.role !== 'system');
-
- // Combine all user/assistant messages into a prompt
const prompt = userMessages.map((msg) => msg.content).join('\n\n');
- // Generate text using AI SDK — default 4096 to avoid truncating long JSON responses
- const result = await generateText({
- model: this.ollamaProvider,
- system: systemMessage?.content,
- prompt: prompt,
- temperature: options?.temperature ?? 0.1,
- maxTokens: options?.maxTokens ?? 4096,
- });
-
- const content = result.text;
+ const content = await this.ollamaGenerate(
+ systemMessage?.content ?? '',
+ prompt,
+ { temperature: options?.temperature, maxTokens: options?.maxTokens }
+ );
if (!content) {
throw new Error('No response content from AI agent');
@@ -224,17 +240,11 @@ Be thorough and capture ALL details. Return ONLY valid JSON with no markdown for
const userPrompt = `${prompts[sectionType]}\n\nResume text:\n${sectionText}\n\nReturn ONLY the JSON (no markdown, no explanations):`;
- const response = await generateText({
- model: this.ollamaProvider,
- system: systemPrompt,
- prompt: userPrompt,
- temperature: 0.1,
- maxTokens: 3000,
- });
+ const rawText = await this.ollamaGenerate(systemPrompt, userPrompt, { temperature: 0.1, maxTokens: 3000 });
// Extract JSON with repair fallback for malformed LLM output
- const repaired = repairJSON(response.text);
- if (repaired !== null && repaired !== undefined && !(Array.isArray(repaired) && (repaired as unknown[]).length === 0 && response.text.trim().length > 10)) {
+ const repaired = repairJSON(rawText);
+ if (repaired !== null && repaired !== undefined && !(Array.isArray(repaired) && (repaired as unknown[]).length === 0 && rawText.trim().length > 10)) {
return repaired;
}
console.warn(`[AIAgent] No valid JSON found for ${sectionType}`);
@@ -370,14 +380,12 @@ Be thorough and capture ALL details. Return ONLY valid JSON with no markdown for
// Generate summary
onProgress?.('Generating summary...', 95);
try {
- const summaryResult = await generateText({
- model: this.ollamaProvider,
- system: 'Create a brief professional summary (2-3 sentences) from this resume text.',
- prompt: chunks[0],
- temperature: 0.3,
- maxTokens: 200,
- });
- profile.summary = summaryResult.text.trim();
+ const summaryText = await this.ollamaGenerate(
+ 'Create a brief professional summary (2-3 sentences) from this resume text.',
+ chunks[0],
+ { temperature: 0.3, maxTokens: 200 }
+ );
+ profile.summary = summaryText.trim();
} catch {
profile.summary = 'Professional with diverse experience and skills.';
}
@@ -444,23 +452,17 @@ ${resumeText}
Return ONLY the JSON object, nothing else:`;
- const response = await generateText({
- model: this.ollamaProvider,
- system: systemPrompt,
- prompt: userPrompt,
- temperature: 0.1,
- maxTokens: 4000,
- });
+ const rawText = await this.ollamaGenerate(systemPrompt, userPrompt, { temperature: 0.1, maxTokens: 4000 });
- console.log('[AIAgent] Raw response:', response.text.substring(0, 500));
+ console.log('[AIAgent] Raw response:', rawText.substring(0, 500));
// Extract JSON with repair fallback for malformed LLM output
- const repaired = repairJSON(response.text);
+ const repaired = repairJSON(rawText);
if (repaired !== null && repaired !== undefined) {
console.log('[AIAgent] Successfully parsed JSON');
return repaired;
}
- console.error('[AIAgent] JSON repair failed for response:', response.text.substring(0, 200));
+ console.error('[AIAgent] JSON repair failed for response:', rawText.substring(0, 200));
throw new Error('Could not parse JSON from response after repair attempt');
}
}
diff --git a/apps/extension-firefox/src/shared/ollama-client.ts b/apps/extension-firefox/src/shared/ollama-client.ts
index 16d3ddb..1f674a2 100644
--- a/apps/extension-firefox/src/shared/ollama-client.ts
+++ b/apps/extension-firefox/src/shared/ollama-client.ts
@@ -27,7 +27,7 @@ export interface ChatCompletionResponse {
export class OllamaClient {
private baseUrl = 'http://localhost:11434';
- private model = 'llama3.2';
+ private model = 'llama3.2:1b';
private embeddingModel = 'nomic-embed-text'; // Optimized for embeddings
constructor(baseUrl?: string, model?: string) {
@@ -40,12 +40,11 @@ export class OllamaClient {
private async loadStoredConfig(): Promise<void> {
try {
const config = await getOllamaConfig();
- if (config.enabled) {
- this.baseUrl = config.endpoint;
- this.model = config.chatModel;
- this.embeddingModel = config.embeddingModel;
- console.log('[Ollama] Loaded config from storage:', config.endpoint, config.chatModel);
- }
+ // Always load model names from stored config regardless of enabled state
+ this.baseUrl = config.endpoint;
+ this.model = config.chatModel;
+ this.embeddingModel = config.embeddingModel;
+ console.log('[Ollama] Loaded config from storage:', config.endpoint, config.chatModel);
} catch (err) {
console.warn('[Ollama] Failed to load stored config, using defaults:', err);
}
diff --git a/apps/extension-firefox/src/shared/ollama-config.ts b/apps/extension-firefox/src/shared/ollama-config.ts
index 631f610..c0cf864 100644
--- a/apps/extension-firefox/src/shared/ollama-config.ts
+++ b/apps/extension-firefox/src/shared/ollama-config.ts
@@ -13,7 +13,7 @@ export interface OllamaConfig {
export const DEFAULT_OLLAMA_CONFIG: OllamaConfig = {
endpoint: 'http://localhost:11434',
- chatModel: 'llama3.2',
+ chatModel: 'llama3.2:1b',
embeddingModel: 'nomic-embed-text',
lastChecked: 0,
enabled: false,
diff --git a/apps/extension-firefox/src/shared/ollama-service.ts b/apps/extension-firefox/src/shared/ollama-service.ts
index 20faa20..568cc78 100644
--- a/apps/extension-firefox/src/shared/ollama-service.ts
+++ b/apps/extension-firefox/src/shared/ollama-service.ts
@@ -39,7 +39,7 @@ export async function checkOllamaConnection(): Promise {
*/
export async function analyzeFieldsWithOllama(
prompt: string,
- model: string = 'llama3.2'
+ model: string = 'llama3.2:1b'
): Promise {
try {
const response = await fetch(`${OLLAMA_BASE_URL}/api/generate`, {
@@ -284,7 +284,7 @@ Respond with ONLY the value, nothing else. No quotes, no explanation, no preambl
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
- model: 'llama3.2',
+ model: 'llama3.2:1b',
prompt,
stream: useStreaming
})
diff --git a/scripts/native-host/host.ps1 b/scripts/native-host/host.ps1
index e57ad76..76a391b 100644
--- a/scripts/native-host/host.ps1
+++ b/scripts/native-host/host.ps1
@@ -7,7 +7,7 @@
# { "cmd": "run_setup" } -> { "type": "done", "ok": true/false, "output": "..." }
$VERSION = "1.0.0"
-$SCRIPT_BASE = "https://raw.githubusercontent.com/joelnishanth/offlyn-apply/main/scripts/setup-ollama"
+$SCRIPT_BASE = "https://raw.githubusercontent.com/rahulraonatarajan/offlyn-apply/Windows-ollama-setup/scripts/setup-ollama"
function Read-NativeMessage {
$stdin = [Console]::OpenStandardInput()
@@ -32,13 +32,21 @@ function Send-NativeMessage($obj) {
}
function Invoke-Setup {
- $url = "$SCRIPT_BASE/setup-win.ps1"
+ $offlyn_dir = Join-Path $env:USERPROFILE ".offlyn"
+ $local_script = Join-Path $offlyn_dir "setup-win.ps1"
$logFile = "$env:TEMP\offlyn-setup-$(Get-Date -Format 'yyyyMMddHHmmss').log"
try {
- $process = Start-Process powershell.exe `
- -ArgumentList "-ExecutionPolicy", "Bypass", "-Command", "irm '$url' | iex" `
- -Wait -PassThru -RedirectStandardOutput $logFile -RedirectStandardError $logFile
+ if (Test-Path $local_script) {
+ $process = Start-Process powershell.exe `
+ -ArgumentList "-ExecutionPolicy", "Bypass", "-File", $local_script `
+ -Wait -PassThru -RedirectStandardOutput $logFile -RedirectStandardError $logFile
+ } else {
+ $url = "$SCRIPT_BASE/setup-win.ps1"
+ $process = Start-Process powershell.exe `
+ -ArgumentList "-ExecutionPolicy", "Bypass", "-Command", "irm '$url' | iex" `
+ -Wait -PassThru -RedirectStandardOutput $logFile -RedirectStandardError $logFile
+ }
$output = if (Test-Path $logFile) { Get-Content $logFile -Raw } else { "" }
Remove-Item $logFile -Force -ErrorAction SilentlyContinue
diff --git a/scripts/native-host/install-win.bat b/scripts/native-host/install-win.bat
index 12c94b9..9645ff3 100644
--- a/scripts/native-host/install-win.bat
+++ b/scripts/native-host/install-win.bat
@@ -1,60 +1,169 @@
@echo off
-:: Offlyn Helper Installer — Windows
-:: Registers the native messaging host so the Offlyn extension can run Ollama
-:: setup with a single button click (no terminal needed afterwards).
-:: No administrator rights required.
+:: Offlyn Apply - Windows All-in-One Installer
+:: Installs everything Offlyn needs to run AI features locally:
+:: - Offlyn Helper (lets the extension talk to your computer)
+:: - Ollama (runs AI models privately on your device, ~1.6 GB download)
+:: - llama3.2:1b AI chat model (~637 MB)
+:: - nomic-embed-text embedding model (~274 MB)
+:: No administrator rights required. Your data never leaves your computer.
setlocal EnableDelayedExpansion
set HOST_NAME=ai.offlyn.helper
set OFFLYN_DIR=%USERPROFILE%\.offlyn
-set HOST_SCRIPT=%OFFLYN_DIR%\helper.ps1
-set RAW_BASE=https://raw.githubusercontent.com/joelnishanth/offlyn-apply/main/scripts/native-host
+set HOST_PS1=%OFFLYN_DIR%\helper.ps1
+set HOST_EXE=%OFFLYN_DIR%\helper.exe
+set RAW_BASE=https://raw.githubusercontent.com/rahulraonatarajan/offlyn-apply/Windows-ollama-setup/scripts/native-host
+set SETUP_BASE=https://raw.githubusercontent.com/rahulraonatarajan/offlyn-apply/Windows-ollama-setup/scripts/setup-ollama
set CHROME_EXT_ID=bjllpojjllhfghiemokcoknfmhpmfbph
+set CHROME_DEV_EXT_ID=nfflflctcndcpdmoclbcasiblbgjng
set FIREFOX_EXT_ID={e0857c2d-15a6-4d0c-935e-57761715dc3d}
+set CHAT_MODEL=llama3.2:1b
+set EMBED_MODEL=nomic-embed-text
+set ORIGINS=chrome-extension://*,moz-extension://*
+set MANIFEST_FILE=%OFFLYN_DIR%\%HOST_NAME%.json
echo.
-echo Installing Offlyn Helper...
+echo ============================================================
+echo Offlyn Apply - Windows Setup
+echo ============================================================
+echo.
+echo This window will stay open while everything downloads
+echo and installs automatically. No clicks needed from you.
+echo.
+echo Total download size: roughly 2.5 GB
+echo - Ollama AI runtime .......... ~1.6 GB (one-time)
+echo - llama3.2:1b AI model ....... ~637 MB (one-time)
+echo - nomic-embed-text model ..... ~274 MB (one-time)
+echo.
+echo Estimated time: 10-40 minutes depending on your internet.
+echo Feel free to use your computer while this runs.
+echo ============================================================
echo.
-:: Create directory
+:: ---- Step 1: Create .offlyn directory ----------------------------------------
if not exist "%OFFLYN_DIR%" mkdir "%OFFLYN_DIR%"
-:: Download the PowerShell host script
-powershell -ExecutionPolicy Bypass -Command ^
- "Invoke-WebRequest -Uri '%RAW_BASE%/host.ps1' -OutFile '%HOST_SCRIPT%' -UseBasicParsing"
-if %errorlevel% neq 0 (
- echo ERROR: Failed to download helper script.
- echo Please check your internet connection and try again.
- pause
- exit /b 1
-)
-
-:: Write the manifest JSON
-set MANIFEST_FILE=%OFFLYN_DIR%\%HOST_NAME%.json
-(
- echo {
- echo "name": "%HOST_NAME%",
- echo "description": "Offlyn AI Setup Helper",
- echo "path": "%HOST_SCRIPT%",
- echo "type": "stdio",
- echo "allowed_origins": ["chrome-extension://%CHROME_EXT_ID%/"],
- echo "allowed_extensions": ["%FIREFOX_EXT_ID%"]
- echo }
-) > "%MANIFEST_FILE%"
-
-:: Register for Chrome (user-level, no admin required)
-reg add "HKCU\Software\Google\Chrome\NativeMessagingHosts\%HOST_NAME%" ^
- /ve /t REG_SZ /d "%MANIFEST_FILE%" /f >nul 2>&1
-
-:: Register for Firefox (user-level, no admin required)
-reg add "HKCU\Software\Mozilla\NativeMessagingHosts\%HOST_NAME%" ^
- /ve /t REG_SZ /d "%MANIFEST_FILE%" /f >nul 2>&1
-
-echo Offlyn Helper installed!
-echo.
-echo Return to the Offlyn extension and click
-echo the 'Set Up AI' button -- it will handle
-echo everything from here.
+:: ---- Step 2: Download helper.ps1 ---------------------------------------------
+echo [1/5] Setting up Offlyn Helper (small download, takes ~10 seconds)...
+powershell -ExecutionPolicy Bypass -Command "Invoke-WebRequest -Uri '%RAW_BASE%/host.ps1' -OutFile '%HOST_PS1%' -UseBasicParsing"
+if %errorlevel% neq 0 goto :download_error
+
+:: ---- Step 3: Compile helper.exe ----------------------------------------------
+:: Chrome uses CreateProcess and cannot launch .bat/.ps1 directly.
+:: This small C# relay bridges Chrome stdin/stdout to helper.ps1.
+echo [2/5] Building browser bridge (takes ~30 seconds)...
+powershell -ExecutionPolicy Bypass -NoProfile -Command "$s='using System;using System.Diagnostics;using System.IO;using System.Reflection;using System.Threading.Tasks;class P{static int Main(){var d=Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location);var p1=Path.Combine(d,\"helper.ps1\");var i=new ProcessStartInfo{FileName=\"powershell.exe\",Arguments=\"-NoLogo -NoProfile -ExecutionPolicy Bypass -File \\\"\"+p1+\"\\\"\",UseShellExecute=false,RedirectStandardInput=true,RedirectStandardOutput=true};using(var p=Process.Start(i)){var si=Console.OpenStandardInput();var so=Console.OpenStandardOutput();var t1=Task.Run(()=>{try{si.CopyTo(p.StandardInput.BaseStream);}catch{}try{p.StandardInput.Close();}catch{}});var t2=Task.Run(()=>{try{p.StandardOutput.BaseStream.CopyTo(so);so.Flush();}catch{}});Task.WaitAny(t1,t2);p.WaitForExit();return p.ExitCode;}}}'; Add-Type -TypeDefinition $s -OutputAssembly '%HOST_EXE%' -OutputType ConsoleApplication 2>&1"
+if not exist "%HOST_EXE%" goto :compile_error
+echo helper.exe OK
+
+:: ---- Step 4: Write manifest and register native messaging host ---------------
+echo [3/5] Registering Offlyn with Chrome and Firefox...
+powershell -ExecutionPolicy Bypass -Command "$m=[ordered]@{name='%HOST_NAME%';description='Offlyn AI Setup Helper';path='%HOST_EXE%';type='stdio';allowed_origins=@('chrome-extension://%CHROME_EXT_ID%/','chrome-extension://%CHROME_DEV_EXT_ID%/');allowed_extensions=@('%FIREFOX_EXT_ID%')}; $m | ConvertTo-Json | Set-Content '%MANIFEST_FILE%' -Encoding UTF8"
+reg add "HKCU\Software\Google\Chrome\NativeMessagingHosts\%HOST_NAME%" /ve /t REG_SZ /d "%MANIFEST_FILE%" /f >nul 2>&1
+reg add "HKCU\Software\Mozilla\NativeMessagingHosts\%HOST_NAME%" /ve /t REG_SZ /d "%MANIFEST_FILE%" /f >nul 2>&1
+echo Registered OK
+
+:: ---- Step 5: Install Ollama if needed ----------------------------------------
+echo [4/5] Checking Ollama (the local AI engine)...
+where ollama >nul 2>&1
+if %errorlevel% equ 0 goto :ollama_already_installed
+
+echo Ollama not found.
+echo.
+echo Downloading Ollama installer (~1.6 GB).
+echo This is the largest step -- please be patient.
+echo The progress bar above will show download speed.
+echo Do NOT close this window.
+echo.
+powershell -ExecutionPolicy Bypass -Command "Invoke-WebRequest 'https://ollama.com/download/OllamaSetup.exe' -OutFile '%TEMP%\OllamaSetup.exe' -UseBasicParsing"
+echo.
+echo Download complete! Installing now (takes about 1 minute)...
+"%TEMP%\OllamaSetup.exe" /S
+del "%TEMP%\OllamaSetup.exe" >nul 2>&1
+:: Refresh PATH so ollama command is available in this session
+powershell -ExecutionPolicy Bypass -Command "$p=[System.Environment]::GetEnvironmentVariable('PATH','Machine')+';'+[System.Environment]::GetEnvironmentVariable('PATH','User'); [System.Environment]::SetEnvironmentVariable('PATH',$p,'Process')"
+for /f "usebackq tokens=*" %%P in (`powershell -NoProfile -Command "[System.Environment]::GetEnvironmentVariable('PATH','Machine')+';'+[System.Environment]::GetEnvironmentVariable('PATH','User')"`) do set PATH=%%P
+echo Ollama installed successfully!
+goto :configure_cors
+
+:ollama_already_installed
+echo Ollama is already installed -- skipping download.
+
+:: ---- Step 6: Configure CORS --------------------------------------------------
+:configure_cors
+powershell -ExecutionPolicy Bypass -Command "[System.Environment]::SetEnvironmentVariable('OLLAMA_ORIGINS','%ORIGINS%','User')"
+set OLLAMA_ORIGINS=%ORIGINS%
+echo Extension access configured OK
+
+:: ---- Step 7: Start Ollama and wait -------------------------------------------
+echo [5/5] Starting Ollama and downloading AI models...
+echo (This step downloads ~911 MB total. Progress shown below.)
+echo.
+taskkill /IM ollama.exe /F >nul 2>&1
+timeout /t 2 /nobreak >nul
+start /B ollama serve
+
+echo Starting Ollama engine, please wait...
+powershell -ExecutionPolicy Bypass -Command "for($i=0;$i-lt 30;$i++){try{$null=Invoke-WebRequest 'http://localhost:11434/api/version' -UseBasicParsing -TimeoutSec 2;exit 0}catch{Start-Sleep 1}};exit 1"
+if %errorlevel% neq 0 goto :ollama_start_error
+echo Ollama engine is running!
+echo.
+echo Downloading AI chat model: %CHAT_MODEL% (~637 MB)...
+echo (This is the model that fills out your job applications)
+ollama pull %CHAT_MODEL%
+echo.
+echo Downloading AI search model: %EMBED_MODEL% (~274 MB)...
+echo (This helps Offlyn understand your resume)
+ollama pull %EMBED_MODEL%
+
+:: ---- Done --------------------------------------------------------------------
+echo.
+echo ============================================================
+echo All done! Offlyn AI is ready on your computer.
+echo ============================================================
+echo.
+echo NEXT STEPS (takes 30 seconds):
+echo.
+echo 1. Fully close Chrome (File - Exit, or right-click the
+echo Chrome icon in your taskbar and choose "Exit").
+echo.
+echo 2. Reopen Chrome.
+echo.
+echo 3. Click the Offlyn extension icon and then
+echo click "Test Connection" -- it should say Connected!
+echo.
+echo Your data stays 100%% on your computer. Nothing is uploaded.
+echo ============================================================
echo.
pause
+exit /b 0
+
+:download_error
+echo.
+echo ============================================================
+echo ERROR: Could not download a required file.
+echo.
+echo Please check your internet connection and try again.
+echo If the problem persists, visit: https://github.com/rahulraonatarajan/offlyn-apply
+echo ============================================================
+pause & exit /b 1
+
+:compile_error
+echo.
+echo ============================================================
+echo ERROR: Could not build the browser bridge (helper.exe).
+echo.
+echo Make sure you are running Windows 10 or later and try again.
+echo ============================================================
+pause & exit /b 1
+
+:ollama_start_error
+echo.
+echo ============================================================
+echo ERROR: Ollama installed but did not start in time.
+echo.
+echo Try restarting your computer and running this installer again.
+echo Or open a new Command Prompt window and type: ollama serve
+echo ============================================================
+pause & exit /b 1
diff --git a/scripts/setup-ollama/setup-win.ps1 b/scripts/setup-ollama/setup-win.ps1
index 08448d7..24aa4de 100644
--- a/scripts/setup-ollama/setup-win.ps1
+++ b/scripts/setup-ollama/setup-win.ps1
@@ -60,11 +60,11 @@ Write-Host "OK Ollama is running"
# ── Step 4: Pull models ────────────────────────────────────────────────────
$modelList = & ollama list 2>&1
-if ($modelList -notmatch "llama3\.2") {
- Write-Host "-> Downloading llama3.2 (~2.2 GB — this takes several minutes)..."
- & ollama pull llama3.2
+if ($modelList -notmatch "llama3\.2:1b") {
+ Write-Host "-> Downloading llama3.2:1b (~637 MB)..."
+ & ollama pull llama3.2:1b
} else {
- Write-Host "OK llama3.2 already downloaded"
+ Write-Host "OK llama3.2:1b already downloaded"
}
if ($modelList -notmatch "nomic-embed-text") {
diff --git a/test-cors.ps1 b/test-cors.ps1
new file mode 100644
index 0000000..35219b3
--- /dev/null
+++ b/test-cors.ps1
@@ -0,0 +1,31 @@
+# Simulate exactly what the Chrome extension background worker sends
+$origin = "chrome-extension://nfflflctcndcpdmoclbcasiblbgjng"
+
+# Test 1: CORS preflight (OPTIONS)
+Write-Host "=== CORS Preflight OPTIONS ==="
+try {
+ $preflight = Invoke-WebRequest -Uri "http://127.0.0.1:11434/api/generate" -Method OPTIONS -Headers @{
+ "Origin" = $origin
+ "Access-Control-Request-Method" = "POST"
+ "Access-Control-Request-Headers" = "content-type"
+ } -UseBasicParsing
+ Write-Host "Status: $($preflight.StatusCode)"
+ Write-Host "ACAO: $($preflight.Headers['Access-Control-Allow-Origin'])"
+ Write-Host "ACAM: $($preflight.Headers['Access-Control-Allow-Methods'])"
+} catch {
+ Write-Host "Preflight FAILED: $($_.Exception.Message)"
+}
+
+# Test 2: Actual POST (as extension would send)
+Write-Host ""
+Write-Host "=== POST /api/generate ==="
+$body = '{"model":"llama3.2:1b","system":"You are helpful.","prompt":"Say: WORKS","stream":false,"options":{"temperature":0.1,"num_predict":20}}'
+try {
+ $r = Invoke-WebRequest -Uri "http://127.0.0.1:11434/api/generate" -Method POST -ContentType "application/json" -Headers @{"Origin"=$origin} -Body $body -UseBasicParsing
+ Write-Host "Status: $($r.StatusCode)"
+ Write-Host "ACAO: $($r.Headers['Access-Control-Allow-Origin'])"
+ $content = $r.Content | ConvertFrom-Json
+ Write-Host "Response: $($content.response)"
+} catch {
+ Write-Host "POST FAILED: $($_.Exception.Message)"
+}