Skip to content

Commit 91fc233

Browse files
committed
Add HuggingFace model import — pull any GGUF model during setup
1 parent 8279c54 commit 91fc233

File tree

2 files changed

+87
-6
lines changed

2 files changed

+87
-6
lines changed

README.md

Lines changed: 37 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,43 @@ HackCode auto-selects the best uncensored model for your hardware. All models ru
210210

211211
The 35B MoE model uses only 3B active parameters per token — so it runs fast — while having 35B total parameters for high-quality output. Best of both worlds.
212212

213-
You can also use any other Ollama model: `llama3`, `deepseek-coder`, `codestral`, `mistral` — HackCode works with all of them.
213+
### Pull Any Model from HuggingFace
214+
215+
You aren't limited to the built-in list. During setup, press **`[h]`** to pull any GGUF model directly from [HuggingFace](https://huggingface.co):
216+
217+
```
218+
[Step 2/3] AI Model
219+
220+
[a] Qwen3.5-4B Uncensored ~3GB
221+
[b] Qwen3.5-8B Uncensored ~5GB
222+
...
223+
[h] Pull any model from HuggingFace
224+
[s] Skip model download
225+
226+
> h
227+
228+
HuggingFace Model Import
229+
Paste a HuggingFace model URL or repo ID.
230+
231+
HuggingFace model> https://huggingface.co/dealignai/Gemma-4-31B-JANG_4M-CRACK
232+
233+
Pulling hf.co/dealignai/Gemma-4-31B-JANG_4M-CRACK from HuggingFace...
234+
```
235+
236+
Works with any model on HuggingFace — jailbroken, uncensored, fine-tuned, experimental. Paste the URL or just the repo ID. Some community favorites:
237+
238+
```bash
239+
# Jailbroken models
240+
ollama pull hf.co/dealignai/Gemma-4-31B-JANG_4M-CRACK
241+
242+
# Uncensored coding models
243+
ollama pull hf.co/bartowski/Qwen3-30B-A3B-GGUF
244+
245+
# Reasoning models
246+
ollama pull hf.co/unsloth/DeepSeek-R1-0528-GGUF
247+
```
248+
249+
If it's on HuggingFace and it's GGUF, HackCode can run it.
214250

215251
---
216252

rust/crates/rusty-claude-cli/src/setup.rs

Lines changed: 50 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -172,18 +172,51 @@ pub fn run_setup() -> Result<(), Box<dyn std::error::Error>> {
172172
};
173173
println!(" {BOLD}[{}]{RESET} {:35} {DIM}{:8} min {}GB RAM{RESET}{rec}", m.key, m.name, m.size, m.min_ram);
174174
}
175+
println!(" {BOLD}[h]{RESET} Pull any model from HuggingFace");
175176
println!(" {BOLD}[s]{RESET} Skip model download");
176177
println!();
177178

178179
let choice = ask(&format!(" {GREEN}>{RESET} "));
179180
let choice = if choice.is_empty() { recommended.key.to_string() } else { choice };
180181

181-
let model_id = MODELS.iter()
182-
.find(|m| m.key == choice)
183-
.map(|m| m.id)
184-
.unwrap_or(recommended.id);
182+
let model_id: String;
183+
184+
if choice == "h" {
185+
println!();
186+
println!(" {BOLD}HuggingFace Model Import{RESET}");
187+
println!(" {DIM}Paste a HuggingFace model URL or repo ID.{RESET}");
188+
println!(" {DIM}Examples:{RESET}");
189+
println!(" {DIM}https://huggingface.co/dealignai/Gemma-4-31B-JANG_4M-CRACK{RESET}");
190+
println!(" {DIM}bartowski/Qwen3-30B-A3B-GGUF{RESET}");
191+
println!(" {DIM}unsloth/DeepSeek-R1-0528-GGUF{RESET}");
192+
println!();
193+
let hf_input = ask(&format!(" {GREEN}HuggingFace model>{RESET} "));
194+
// Strip full URL to repo ID: huggingface.co/user/model -> user/model
195+
let hf_repo = hf_input
196+
.trim()
197+
.replace("https://huggingface.co/", "")
198+
.replace("http://huggingface.co/", "")
199+
.trim_end_matches('/')
200+
.to_string();
201+
202+
if hf_repo.is_empty() {
203+
println!(" {RED}No model specified, using recommended model.{RESET}");
204+
model_id = recommended.id.to_string();
205+
} else {
206+
let hf_ollama_id = format!("hf.co/{hf_repo}");
207+
println!("\n Pulling {BOLD}{hf_ollama_id}{RESET} from HuggingFace...");
208+
println!(" {DIM}This may take a while depending on model size.{RESET}");
209+
run_cmd(&format!("ollama pull \"{hf_ollama_id}\""));
210+
model_id = hf_ollama_id;
211+
}
212+
} else {
213+
model_id = MODELS.iter()
214+
.find(|m| m.key == choice)
215+
.map(|m| m.id.to_string())
216+
.unwrap_or_else(|| recommended.id.to_string());
217+
};
185218

186-
if choice != "s" && which("ollama") {
219+
if choice != "s" && choice != "h" && which("ollama") {
187220
println!("\n Pulling {BOLD}{model_id}{RESET}...");
188221
run_cmd(&format!("ollama pull \"{model_id}\""));
189222

@@ -196,6 +229,18 @@ pub fn run_setup() -> Result<(), Box<dyn std::error::Error>> {
196229
run_cmd(&format!("ollama create hackcode-uncensored -f \"{}\"", modelfile_path.display()));
197230
println!(" {GREEN}✓{RESET} Model ready as {BOLD}hackcode-uncensored{RESET}");
198231
}
232+
233+
// For HuggingFace models, create alias with the pulled model
234+
if choice == "h" && which("ollama") && !model_id.is_empty() {
235+
let modelfile = format!(
236+
"FROM {model_id}\nPARAMETER temperature 0.7\nPARAMETER num_ctx 32768\n"
237+
);
238+
let modelfile_path = config_dir().join("Modelfile");
239+
let _ = fs::write(&modelfile_path, &modelfile);
240+
run_cmd(&format!("ollama create hackcode-uncensored -f \"{}\"", modelfile_path.display()));
241+
println!(" {GREEN}✓{RESET} Model ready as {BOLD}hackcode-uncensored{RESET}");
242+
}
243+
199244
let model_id = "hackcode-uncensored";
200245
println!();
201246

0 commit comments

Comments
 (0)