Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 22 additions & 0 deletions src/config/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,29 @@ export class Config implements ConfigInterface {

public async loadConfig(configPath: string): Promise<boolean> {
this.configFilePath = path.join(configPath, "config.json");
const isTestMode = process.env.MOCHA_ENV === "test";

if (!fs.existsSync(this.configFilePath)) {
// In test mode, skip interactive prompts and use default configuration
if (isTestMode) {
this.set("openai.model", DEFAULT_OPENAI_MODEL);
this.set("ollama.model", DEFAULT_OLLAMA_MODEL);
this.set("github-copilot.model", DEFAULT_GITHUB_COPILOT_MODEL);
this.set("mode", "default");

// Use OpenAI if API key is available, otherwise use mock LLM for tests
const apiKey = process.env.LOZ_OPENAI_API_KEY || process.env.OPENAI_API_KEY;
if (apiKey) {
this.set("model", DEFAULT_OPENAI_MODEL);
this.set("api", "openai");
this.set("openai.apikey", apiKey);
} else {
this.set("model", DEFAULT_OLLAMA_MODEL);
this.set("api", "mock");
}
return false;
}

const rl = readlinePromises.createInterface({
input: process.stdin,
output: process.stdout,
Expand Down
56 changes: 56 additions & 0 deletions src/llm/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -316,3 +316,59 @@ export class GitHubCopilotAPI extends LLMService {
return this.auth;
}
}

export class MockLLMAPI extends LLMService {
  constructor() {
    super();
    this.api = null;
  }

  /**
   * Returns a canned JSON command string for a known test prompt.
   * The incoming prompt is the full system prompt; the user's request is
   * extracted from its "Input: ... Response:" section before matching.
   */
  private getCommandForPrompt(prompt: string): string {
    // Pull the user's request out of the wrapping system prompt, if present.
    const inputMatch = prompt.match(/Input:\s*(.+?)(?:\nResponse:|$)/);
    const userPrompt = inputMatch ? inputMatch[1].trim() : prompt;

    // Ordered keyword → canned-response table (first match wins, same order
    // as the original if/else chain).
    const cannedResponses: ReadonlyArray<[string, string]> = [
      ["Detect GPUs", '{ "commands": ["lspci | grep -i vga"] }'],
      ["current date and time", '{ "commands": ["date"] }'],
      ["available memory", '{ "commands": ["free -h"] }'],
      [
        "largest file",
        '{ "commands": ["find . -type f -exec ls -l {} + | sort -k 5 -nr | head -n 1"] }',
      ],
      ["apache2", '{ "commands": ["systemctl status apache2"] }'],
      ["Find sfsdfef", '{ "commands": ["grep \'sfsdfef\' *"] }'],
    ];

    for (const [keyword, response] of cannedResponses) {
      if (userPrompt.includes(keyword)) {
        return response;
      }
    }
    // Fallback: a harmless echo that does not embed any user input.
    return '{ "commands": ["echo Mock LLM - command not recognized"] }';
  }

  /**
   * Mock completion: resolves immediately with a canned command for the
   * prompt in `params.prompt`, echoing back the requested model name.
   */
  public async completion(
    params: LLMSettings,
  ): Promise<{ content: string; model: string }> {
    if (DEBUG) {
      console.log("Mock LLM completion");
      console.log("Model: " + params.model);
    }
    return {
      content: this.getCommandForPrompt(params.prompt),
      model: params.model,
    };
  }

  /**
   * Mock streaming: no real stream is produced; the single completion is
   * wrapped in a one-shot async iterator shaped like an OpenAI stream chunk.
   */
  public async completionStream(params: LLMSettings): Promise<any> {
    const completion = await this.completion(params);
    return (async function* () {
      yield { choices: [{ delta: { content: completion.content } }] };
    })();
  }
}
28 changes: 20 additions & 8 deletions src/loz.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ import * as fs from "fs";
import * as path from "path";
import * as os from "os";
import * as readlinePromises from "readline/promises";
import { OpenAiAPI, OllamaAPI, GitHubCopilotAPI, LLMSettings } from "./llm";
import { OpenAiAPI, OllamaAPI, GitHubCopilotAPI, MockLLMAPI, LLMSettings } from "./llm";
import { CommandLinePrompt } from "./prompt";
import { ChatHistoryManager, PromptAndAnswer } from "./history";
import { runCommand, runShellCommand, checkGitRepo } from "./utils";
Expand Down Expand Up @@ -82,15 +82,27 @@ export class Loz {
await this.config.loadConfig(this.configPath);

const api = this.checkAPI() || "openai";
const isTestMode = process.env.MOCHA_ENV === "test";

if (api === "mock") {
// Use mock LLM for testing
this.llmAPI = new MockLLMAPI();
this.defaultSettings.model =
this.config.get("model")?.value || DEFAULT_OLLAMA_MODEL;
return;
}

if (api === "ollama") {
const result = await runShellCommand("ollama --version");
if (DEBUG) console.log(result);
if (result.indexOf("ollama") === -1) {
console.log(
"Please install ollama first: see https://ollama.ai/download \n",
);
process.exit(1);
// Skip ollama version check in test mode
if (!isTestMode) {
const result = await runShellCommand("ollama --version");
if (DEBUG) console.log(result);
if (result.indexOf("ollama") === -1) {
console.log(
"Please install ollama first: see https://ollama.ai/download \n",
);
process.exit(1);
}
}
this.llmAPI = new OllamaAPI();
this.defaultSettings.model =
Expand Down
Loading