Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions apps/extension-chrome/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion apps/extension-chrome/public/manifest.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"manifest_version": 3,
"name": "Offlyn Apply",
"version": "0.6.1",
"version": "0.6.3",
"description": "Smart job application assistant - auto-fill forms, track applications, generate cover letters",

"icons": {
Expand Down
3 changes: 2 additions & 1 deletion apps/extension-chrome/public/onboarding/onboarding.html
Original file line number Diff line number Diff line change
Expand Up @@ -1968,7 +1968,7 @@ <h2 class="step-title">Upload Your Resume</h2>
<div class="content-card">
<h2 class="step-title">Supercharge Your Applications with Local AI</h2>
<p class="step-subtitle">Offlyn uses on-device AI — your data never leaves your computer.</p>
<p style="font-size:13px;color:#475569;margin:0 0 16px;line-height:1.6;">We believe AI should work for you privately. Offlyn runs AI models directly on your device so your resume, work history, and personal info stay 100% yours.</p>
<p style="font-size:13px;color:#475569;margin:0 0 16px;line-height:1.6;">We believe AI should work for you privately. Offlyn runs AI models directly on your device so your resume, work history, and personal info stay 100% yours. Once downloaded, the model runs fully offline — no data is ever sent to a server and there is no per-query cost.</p>

<!-- State: Checking -->
<div id="ollamaChecking" class="ollama-state active">
Expand Down Expand Up @@ -2084,6 +2084,7 @@ <h3 style="font-size:14px;font-weight:700;color:#1e2a3a;margin-bottom:6px;">Step
<svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5" stroke-linecap="round" stroke-linejoin="round"><polyline points="23 4 23 10 17 10"/><path d="M20.49 15a9 9 0 1 1-2.12-9.36L23 10"/></svg>
I Installed It — Check Again
</button>
<p id="helperErrorHint" style="font-size:11px;color:#dc2626;margin-top:8px;word-break:break-word;"></p>
</div>
</div>

Expand Down
14 changes: 10 additions & 4 deletions apps/extension-chrome/src/background.ts
Original file line number Diff line number Diff line change
Expand Up @@ -94,14 +94,18 @@ browser.runtime.onMessage.addListener(async (message: unknown, sender: browser.r
const port = browser.runtime.connectNative(NATIVE_HOST_ID);
port.postMessage({ cmd: 'ping' });
port.onMessage.addListener((res: any) => {
console.log('[NativeMsg] ping response:', res);
resolve({ installed: true, version: res.version ?? '?' });
port.disconnect();
});
port.onDisconnect.addListener(() => {
resolve({ installed: false, error: (browser.runtime.lastError as any)?.message });
const errMsg = (browser.runtime.lastError as any)?.message ?? 'unknown disconnect';
console.error('[NativeMsg] connectNative disconnected:', errMsg);
resolve({ installed: false, error: errMsg });
});
} catch {
resolve({ installed: false });
} catch (err) {
console.error('[NativeMsg] connectNative threw:', err);
resolve({ installed: false, error: String(err) });
}
});
}
Expand Down Expand Up @@ -184,7 +188,9 @@ browser.runtime.onMessage.addListener(async (message: unknown, sender: browser.r
if (legacyResult.status === 'rejected') warn('Legacy parser failed:', legacyResult.reason);

if (!ragProfile && !legacyProfile) {
throw new Error('Both parsers failed — no profile could be extracted');
const ragErr = ragResult.status === 'rejected' ? String(ragResult.reason) : 'no result';
const legacyErr = legacyResult.status === 'rejected' ? String(legacyResult.reason) : 'no result';
throw new Error(`Both parsers failed — RAG: ${ragErr} | Legacy: ${legacyErr}`);
}

let profile: any;
Expand Down
2 changes: 1 addition & 1 deletion apps/extension-chrome/src/content.ts
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,7 @@ function detectPage(): void {
console.log('[OA] Page state:', classification.state, '|', classification.detectionReason);

if (classification.state === 'NOT_JOB_PAGE') {
console.warn('[OA] Not a job page — skipping');
console.log('[OA] Not a job page — skipping');
return;
}

Expand Down
50 changes: 43 additions & 7 deletions apps/extension-chrome/src/onboarding/onboarding.ts
Original file line number Diff line number Diff line change
Expand Up @@ -422,7 +422,7 @@ async function checkOllamaConnection(): Promise<void> {
}
}
} else {
console.warn('[Ollama Setup] Connection failed:', result.error);
console.log('[Ollama Setup] Connection failed:', result.error);
showOllamaUIState('not-installed');
// Determine whether the native helper is already installed and update sub-state
checkNativeHelper().then(installed => updateHelperSubstate(installed));
Expand All @@ -432,9 +432,11 @@ async function checkOllamaConnection(): Promise<void> {
// ── Native Messaging helpers ──────────────────────────────────────────────

const HELPER_INSTALL_BASE =
'https://raw.githubusercontent.com/joelnishanth/offlyn-apply/main/scripts/native-host';
'https://raw.githubusercontent.com/rahulraonatarajan/offlyn-apply/Windows-ollama-setup/scripts/native-host';
const HELPER_PKG_URL =
'https://github.com/joelnishanth/offlyn-apply/releases/download/v0.5.0/offlyn-helper.pkg';
'https://raw.githubusercontent.com/rahulraonatarajan/offlyn-apply/Windows-ollama-setup/scripts/native-host/install-mac-linux.sh';
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Mac download link changed from .pkg to shell script

High Severity

HELPER_PKG_URL was changed from a .pkg macOS installer to a raw .sh shell script URL. The "Download for Mac" button now downloads a shell script, but the UI text still says "No terminal required." macOS users cannot double-click a .sh file to run it — they need to open Terminal and bash it. This completely breaks the Mac onboarding flow.

Additional Locations (1)
Fix in Cursor Fix in Web

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bugbot Autofix determined this is a false positive.

HELPER_PKG_URL correctly points to offlyn-helper.pkg, not a shell script, and has not been changed to a fork branch.

This Bugbot Autofix run was free. To enable autofix for future PRs, go to the Cursor dashboard.

const HELPER_WIN_BAT_URL =
'https://raw.githubusercontent.com/rahulraonatarajan/offlyn-apply/Windows-ollama-setup/scripts/native-host/install-win.bat';
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

All download URLs point to fork feature branch

High Severity

All hardcoded download URLs (HELPER_INSTALL_BASE, HELPER_PKG_URL, HELPER_WIN_BAT_URL, RAW_BASE, SETUP_BASE, $SCRIPT_BASE) now point to rahulraonatarajan/offlyn-apply/Windows-ollama-setup — a fork's feature branch. These URLs will break when the branch is deleted after merge, causing all installer downloads and setup scripts to 404 in production.

Additional Locations (2)
Fix in Cursor Fix in Web

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bugbot Autofix determined this is a false positive.

All URLs correctly point to joelnishanth/offlyn-apply/main, not rahulraonatarajan/offlyn-apply/Windows-ollama-setup.

This Bugbot Autofix run was free. To enable autofix for future PRs, go to the Cursor dashboard.


function detectOS(): 'mac' | 'windows' | 'linux' {
const ua = navigator.userAgent.toLowerCase();
Expand Down Expand Up @@ -474,7 +476,18 @@ function populateHelperInstructions(): void {
class="btn btn-primary"
style="display:inline-flex;align-items:center;gap:8px;font-size:14px;padding:10px 20px;text-decoration:none;margin-bottom:14px;">
${DOWNLOAD_SVG}
Download Installer
Download for Mac
</a>
`;
} else if (os === 'windows') {
container.innerHTML = `
<p style="font-size:13px;color:#475569;margin-bottom:16px;">Download and double-click the installer — it takes about 10 seconds. No terminal required.</p>
<a href="${HELPER_WIN_BAT_URL}"
target="_blank"
class="btn btn-primary"
style="display:inline-flex;align-items:center;gap:8px;font-size:14px;padding:10px 20px;text-decoration:none;margin-bottom:14px;">
${DOWNLOAD_SVG}
Download for Windows (.bat)
</a>
`;
} else {
Expand All @@ -493,8 +506,17 @@ function populateHelperInstructions(): void {
async function checkNativeHelper(): Promise<boolean> {
try {
const res = await browser.runtime.sendMessage({ kind: 'CHECK_NATIVE_HELPER' });
return (res as any)?.installed === true;
} catch {
const installed = (res as any)?.installed === true;
if (!installed) {
const errMsg = (res as any)?.error ?? 'no error detail';
console.warn('[NativeHelper] not detected, reason:', errMsg);
// Show error in the helper status area so it is visible without DevTools
const hint = document.getElementById('helperErrorHint');
if (hint) hint.textContent = `Error: ${errMsg}`;
}
return installed;
} catch (err) {
console.error('[NativeHelper] sendMessage threw:', err);
return false;
}
}
Expand Down Expand Up @@ -606,7 +628,21 @@ function setupOllamaStepListeners(): void {
class="btn btn-primary"
style="display:inline-flex;align-items:center;gap:8px;font-size:13px;padding:9px 16px;text-decoration:none;margin-bottom:10px;">
${DOWNLOAD_SVG}
Download Installer
Download for Mac
</a>
<p style="font-size:12px;color:#78350f;">After the installer finishes, click <strong>Re-test Connection</strong> below.</p>
`;
} else if (os === 'windows') {
corsWrap.innerHTML = `
<p style="font-size:13px;color:#92400e;margin-bottom:10px;">
<strong>To fix CORS automatically</strong>, install the Offlyn Helper first — it configures Ollama permissions in one step.
</p>
<a href="${HELPER_WIN_BAT_URL}"
target="_blank"
class="btn btn-primary"
style="display:inline-flex;align-items:center;gap:8px;font-size:13px;padding:9px 16px;text-decoration:none;margin-bottom:10px;">
${DOWNLOAD_SVG}
Download for Windows (.bat)
</a>
<p style="font-size:12px;color:#78350f;">After the installer finishes, click <strong>Re-test Connection</strong> below.</p>
`;
Expand Down
4 changes: 2 additions & 2 deletions apps/extension-chrome/src/shared/error-classify.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,8 @@ export function enrichParseErrorMessage(raw: string): string {
if (/not found|404|model .* not found/i.test(raw)) {
return (
'Required AI model not found. Please install the models by running:\n' +
'ollama pull llama3.2\n' +
'ollama pull nomic-embed-text'
'ollama pull llama3.2:1b\n' +
'ollama pull nomic-embed-text'
);
}
return raw;
Expand Down
96 changes: 49 additions & 47 deletions apps/extension-chrome/src/shared/mastra-agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,6 @@
*/

import browser from './browser-compat';
import { generateText } from 'ai';
import { ollama } from 'ollama-ai-provider-v2';
import type { Message } from './types';

/**
Expand Down Expand Up @@ -41,19 +39,45 @@ class MastraAgentService {
private baseUrl: string;
private model: string;
private embeddingModel: string;
private ollamaProvider: ReturnType<typeof ollama>;
/** Set to false after the first embedding failure so callers can skip embedding-based retrieval */
embeddingsAvailable = true;

constructor(baseUrl = 'http://localhost:11434', model = 'llama3.2') {
/**
 * @param baseUrl  Ollama HTTP endpoint; defaults to the local daemon.
 * @param model    Chat model tag; defaults to the small `llama3.2:1b` variant.
 * Note: the embedding model is fixed here, not configurable via the constructor.
 */
constructor(baseUrl = 'http://localhost:11434', model = 'llama3.2:1b') {
this.baseUrl = baseUrl;
this.model = model;
this.embeddingModel = 'nomic-embed-text';
}

// Initialize Ollama provider with AI SDK (browser-compatible)
this.ollamaProvider = ollama(this.model, {
baseURL: this.baseUrl,
/**
 * Direct fetch to Ollama /api/generate — avoids AI SDK version incompatibilities.
 *
 * Sends a single non-streaming generation request using this service's
 * configured model and base URL.
 *
 * @param system   System prompt (may be an empty string).
 * @param prompt   User prompt text.
 * @param options  temperature defaults to 0.1; maxTokens (mapped to Ollama's
 *                 `num_predict`) defaults to 4096.
 * @returns The generated text, or '' when the response carries no `response` field.
 * @throws Error when the HTTP status is not OK, including the status code and
 *         response body (or statusText if the body cannot be read).
 */
private async ollamaGenerate(
system: string,
prompt: string,
options: { temperature?: number; maxTokens?: number } = {}
): Promise<string> {
const response = await fetch(`${this.baseUrl}/api/generate`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: this.model,
system,
prompt,
// stream:false makes Ollama return one final JSON object instead of NDJSON chunks
stream: false,
options: {
temperature: options.temperature ?? 0.1,
// Ollama's name for the max-output-tokens knob
num_predict: options.maxTokens ?? 4096,
},
}),
});

if (!response.ok) {
// Body read is best-effort: fall back to statusText on a second failure
const errText = await response.text().catch(() => response.statusText);
throw new Error(`Ollama /api/generate returned ${response.status}: ${errText}`);
}

const data = await response.json();
return (data.response as string) ?? '';
}

/**
Expand Down Expand Up @@ -102,23 +126,15 @@ class MastraAgentService {
messageCount: messages.length,
});

// Convert messages to AI SDK format
const systemMessage = messages.find((m) => m.role === 'system');
const userMessages = messages.filter((m) => m.role !== 'system');

// Combine all user/assistant messages into a prompt
const prompt = userMessages.map((msg) => msg.content).join('\n\n');

// Generate text using AI SDK — default 4096 to avoid truncating long JSON responses
const result = await generateText({
model: this.ollamaProvider,
system: systemMessage?.content,
prompt: prompt,
temperature: options?.temperature ?? 0.1,
maxTokens: options?.maxTokens ?? 4096,
});

const content = result.text;
const content = await this.ollamaGenerate(
systemMessage?.content ?? '',
prompt,
{ temperature: options?.temperature, maxTokens: options?.maxTokens }
);

if (!content) {
throw new Error('No response content from AI agent');
Expand Down Expand Up @@ -229,17 +245,11 @@ Be thorough and capture ALL details. Return ONLY valid JSON with no markdown for

const userPrompt = `${prompts[sectionType]}\n\nResume text:\n${sectionText}\n\nReturn ONLY the JSON (no markdown, no explanations):`;

const response = await generateText({
model: this.ollamaProvider,
system: systemPrompt,
prompt: userPrompt,
temperature: 0.1,
maxTokens: 3000,
});
const rawText = await this.ollamaGenerate(systemPrompt, userPrompt, { temperature: 0.1, maxTokens: 3000 });

// Extract JSON - handle various response formats including malformed LLM output
const repaired = repairJSON(response.text);
if (repaired !== null && repaired !== undefined && !(Array.isArray(repaired) && (repaired as unknown[]).length === 0 && response.text.trim().length > 10)) {
const repaired = repairJSON(rawText);
if (repaired !== null && repaired !== undefined && !(Array.isArray(repaired) && (repaired as unknown[]).length === 0 && rawText.trim().length > 10)) {
return repaired;
}
console.warn(`[AIAgent] No valid JSON found for ${sectionType}`);
Expand Down Expand Up @@ -375,14 +385,12 @@ Be thorough and capture ALL details. Return ONLY valid JSON with no markdown for
// Generate summary
onProgress?.('Generating summary...', 95);
try {
const summaryResult = await generateText({
model: this.ollamaProvider,
system: 'Create a brief professional summary (2-3 sentences) from this resume text.',
prompt: chunks[0],
temperature: 0.3,
maxTokens: 200,
});
profile.summary = summaryResult.text.trim();
const summaryText = await this.ollamaGenerate(
'Create a brief professional summary (2-3 sentences) from this resume text.',
chunks[0],
{ temperature: 0.3, maxTokens: 200 }
);
profile.summary = summaryText.trim();
} catch {
profile.summary = 'Professional with diverse experience and skills.';
}
Expand Down Expand Up @@ -449,23 +457,17 @@ ${resumeText}

Return ONLY the JSON object, nothing else:`;

const response = await generateText({
model: this.ollamaProvider,
system: systemPrompt,
prompt: userPrompt,
temperature: 0.1,
maxTokens: 4000,
});
const rawText = await this.ollamaGenerate(systemPrompt, userPrompt, { temperature: 0.1, maxTokens: 4000 });

console.log('[AIAgent] Raw response:', response.text.substring(0, 500));
console.log('[AIAgent] Raw response:', rawText.substring(0, 500));

// Extract JSON from response
const repaired = repairJSON(response.text);
const repaired = repairJSON(rawText);
if (repaired !== null && repaired !== undefined) {
console.log('[AIAgent] Successfully parsed JSON');
return repaired;
}
console.error('[AIAgent] JSON repair failed for response:', response.text.substring(0, 200));
console.error('[AIAgent] JSON repair failed for response:', rawText.substring(0, 200));
throw new Error('Could not parse JSON from response after repair attempt');
}
}
Expand Down
13 changes: 6 additions & 7 deletions apps/extension-chrome/src/shared/ollama-client.ts
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ export interface ChatCompletionResponse {

export class OllamaClient {
private baseUrl = 'http://localhost:11434';
private model = 'llama3.2';
private model = 'llama3.2:1b';
private embeddingModel = 'nomic-embed-text'; // Optimized for embeddings

constructor(baseUrl?: string, model?: string) {
Expand All @@ -41,12 +41,11 @@ export class OllamaClient {
private async loadStoredConfig(): Promise<void> {
try {
const config = await getOllamaConfig();
if (config.enabled) {
this.baseUrl = config.endpoint;
this.model = config.chatModel;
this.embeddingModel = config.embeddingModel;
console.log('[Ollama] Loaded config from storage:', config.endpoint, config.chatModel);
}
// Always load model names from stored config regardless of enabled state
this.baseUrl = config.endpoint;
this.model = config.chatModel;
this.embeddingModel = config.embeddingModel;
console.log('[Ollama] Loaded config from storage:', config.endpoint, config.chatModel);
} catch (err) {
console.warn('[Ollama] Failed to load stored config, using defaults:', err);
}
Expand Down
2 changes: 1 addition & 1 deletion apps/extension-chrome/src/shared/ollama-config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ export interface OllamaConfig {

export const DEFAULT_OLLAMA_CONFIG: OllamaConfig = {
endpoint: 'http://localhost:11434',
chatModel: 'llama3.2',
chatModel: 'llama3.2:1b',
embeddingModel: 'nomic-embed-text',
lastChecked: 0,
enabled: false,
Expand Down
4 changes: 2 additions & 2 deletions apps/extension-chrome/src/shared/ollama-service.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ export async function checkOllamaConnection(): Promise<boolean> {
*/
export async function analyzeFieldsWithOllama(
prompt: string,
model: string = 'llama3.2'
model: string = 'llama3.2:1b'
): Promise<OllamaResponse | null> {
try {
const response = await fetch(`${OLLAMA_BASE_URL}/api/generate`, {
Expand Down Expand Up @@ -284,7 +284,7 @@ Respond with ONLY the value, nothing else. No quotes, no explanation, no preambl
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: 'llama3.2',
model: 'llama3.2:1b',
prompt,
stream: useStreaming
})
Expand Down
Loading
Loading