diff --git a/qwen3-fine-tuning/.python-version b/qwen3-fine-tuning/.python-version new file mode 100644 index 00000000..902b2c90 --- /dev/null +++ b/qwen3-fine-tuning/.python-version @@ -0,0 +1 @@ +3.11 \ No newline at end of file diff --git a/qwen3-fine-tuning/README.md b/qwen3-fine-tuning/README.md new file mode 100644 index 00000000..e69de29b diff --git a/qwen3-fine-tuning/fine_tune_qwen3.py b/qwen3-fine-tuning/fine_tune_qwen3.py new file mode 100644 index 00000000..b6fc0abd --- /dev/null +++ b/qwen3-fine-tuning/fine_tune_qwen3.py @@ -0,0 +1,340 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +################################################################# +# IMPORTS AND SETUP # +################################################################# +import unsloth +import torch +from unsloth import FastModel +from datasets import load_dataset +from trl import SFTTrainer, SFTConfig +from transformers import TextStreamer, GenerationConfig +import re + +# from huggingface_hub import login # Uncomment for Hugging Face Hub push + + +################################################################# +# LOAD MODEL AND TOKENIZER # +################################################################# +print("Loading Qwen3 model and tokenizer...") +model, tokenizer = FastModel.from_pretrained( + model_name="unsloth/Qwen3-14B", + max_seq_length=2048, # Choose any for long context + load_in_4bit=True, # 4 bit quantization to reduce memory + full_finetuning=False, +) + + +################################################################# +# LOAD DATASET # +################################################################# +print("Loading Bullet Echo Wiki QA dataset...") +dataset_name = "bexgboost/bullet-echo-wiki-qa" +full_dataset = load_dataset(dataset_name, trust_remote_code=True) + +# Split dataset into training and validation sets (90% train, 10% validation) +train_val_split = full_dataset["train"].train_test_split( + test_size=0.1, seed=42, shuffle=True +) +train_dataset = train_val_split["train"] +val_dataset = train_val_split["test"] # This becomes our validation set + +print( + f"Training examples: {len(train_dataset)}, Validation examples: {len(val_dataset)}" +) + + +################################################################# +# FORMAT DATASET # +################################################################# +print("Formatting datasets with Qwen3 chat template...") +EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN + + +def format_data(example): + # Qwen3 uses a chat template, so we'll format it accordingly + messages = [ + {"role": "user", "content": example["question"]}, + {"role": "assistant", "content": example["answer"] + EOS_TOKEN}, + ] + # The tokenizer.apply_chat_template handles special tokens for Qwen3 + return {"text": tokenizer.apply_chat_template(messages, tokenize=False)} + + +# Format both training and validation datasets +formatted_train_dataset = train_dataset.map(format_data) +formatted_val_dataset = val_dataset.map(format_data) + + +################################################################# +# TOKENIZE DATASET # +################################################################# +print("Tokenizing datasets...") + + +def tokenize_function(examples): + # padding=False because SFTTrainer will handle padding + return tokenizer( + examples["text"], + padding=False, + truncation=True, + max_length=model.config.max_position_embeddings, + ) + + +# Process both datasets +processed_train_dataset = formatted_train_dataset.map( + tokenize_function, + batched=True, + 
remove_columns=["id", "question", "answer", "text"], + desc="Tokenizing training dataset", +) + +processed_val_dataset = formatted_val_dataset.map( + tokenize_function, + batched=True, + remove_columns=["id", "question", "answer", "text"], + desc="Tokenizing validation dataset", +) + + +################################################################# +# SETUP PEFT MODEL # +################################################################# +print("Setting up PEFT model with LoRA...") +model = FastModel.get_peft_model( + model, + r=8, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "gate_proj", + "up_proj", + "down_proj", + ], + finetune_vision_layers=False, # Turn off for just text! + finetune_language_layers=True, + finetune_attention_modules=True, + finetune_mlp_modules=True, + lora_alpha=8, + lora_dropout=0, + bias="none", + use_gradient_checkpointing="unsloth", + random_state=1000, + use_rslora=False, +) + + +################################################################# +# CONFIGURE TRAINER # +################################################################# +print("Configuring SFTTrainer with evaluation...") +trainer = SFTTrainer( + model=model, + tokenizer=tokenizer, + train_dataset=processed_train_dataset, + eval_dataset=processed_val_dataset, # Add validation dataset + args=SFTConfig( + dataset_text_field="text", + per_device_train_batch_size=2, + per_device_eval_batch_size=2, # Batch size for evaluation + gradient_accumulation_steps=4, + warmup_steps=5, + num_train_epochs=3, + # max_steps=100, # For quick testing + learning_rate=2e-4, + logging_steps=100, + optim="adamw_8bit", + weight_decay=0.01, + lr_scheduler_type="linear", + seed=3407, + output_dir="outputs", + eval_strategy="steps", # Changed from evaluation_strategy + eval_steps=200, # Evaluate every 200 steps + save_strategy="steps", # Save checkpoints based on evaluation + save_steps=200, # Save every 200 steps + load_best_model_at_end=True, # Load best model at the end of training + metric_for_best_model="eval_loss", # Use evaluation loss to determine best model + greater_is_better=False, # Lower loss is better + save_total_limit=3, # Keep only the 3 best checkpoints + ), +) + + +################################################################# +# TRAIN MODEL # +################################################################# +print("Starting fine-tuning process with validation...") +training_results = trainer.train() + +# Print evaluation metrics +print("Training completed!") +print(f"Final training metrics: {training_results.metrics}") + + +################################################################# +# MODEL INFERENCE # +################################################################# +print("Setting up model for inference...") +unsloth.FastModel.for_inference(model) # Enable native 2x faster inference + +# Import needed for generation configuration + + +def generate_response( + model, tokenizer, query, temperature=0.7, top_p=0.9, max_new_tokens=256 +): + """ + Generate a response from the fine-tuned model. 
+ + Args: + model: The fine-tuned model + tokenizer: The tokenizer + query: The user query/question + temperature: Controls randomness in generation (lower = more deterministic) + top_p: Nucleus sampling parameter (lower = more focused) + max_new_tokens: Maximum new tokens to generate + + Returns: + Generated response text + """ + # Format the query as a chat message + messages = [{"role": "user", "content": query}] + + # Prepare model inputs + inputs = tokenizer.apply_chat_template( + messages, add_generation_prompt=True, return_tensors="pt" + ).to("cuda") + + # Create attention mask (all 1s) with the same shape as inputs + attention_mask = torch.ones_like(inputs).to("cuda") + + # Configure generation parameters + generation_config = GenerationConfig( + temperature=temperature, + top_p=top_p, + do_sample=True, + max_new_tokens=max_new_tokens, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + remove_invalid_values=True, + # Disable thinking tags + suppression_tokens=( + [ + tokenizer.encode("", add_special_tokens=False)[0], + tokenizer.encode("", add_special_tokens=False)[0], + ] + if len(tokenizer.encode("", add_special_tokens=False)) > 0 + else None + ), + ) + + # Custom text filtering function + def filter_thinking(text): + # Remove anything between and tags + text = re.sub(r".*?", "", text, flags=re.DOTALL) + # Remove any remaining or tags + text = re.sub(r"|", "", text) + return text + + # Custom streamer class to filter thinking tags + class FilteredTextStreamer(TextStreamer): + def on_finalized_text(self, text: str, stream_end: bool = False): + filtered_text = filter_thinking(text) + if filtered_text.strip(): # Only print non-empty text + print(filtered_text, end="", flush=True) + + # Initialize filtered text streamer + streamer = FilteredTextStreamer( + tokenizer, skip_prompt=True, skip_special_tokens=True + ) + + # Display query + print(f"User: {query}") + print("Assistant:") + + # Generate response + output = model.generate( + inputs, + attention_mask=attention_mask, + generation_config=generation_config, + streamer=streamer, + return_dict_in_generate=True, + output_scores=False, + ) + + # For non-streaming use (optional): + # output_text = tokenizer.decode(output.sequences[0], skip_special_tokens=True) + # return filter_thinking(output_text) + + print("\n") # Add a newline after generation + return None # Since we're streaming, we don't return the output + + +# Test the model with sample queries +print("\n--- Testing Model Responses ---") + +test_queries = [ + "What happens to a hero's movement when their special ability is activated in Bullet Echo?", + "How does the Technician's passive benefit allies during gameplay?", +] + +for query in test_queries: + generate_response(model, tokenizer, query) + + +################################################################# +# SAVE MODEL # +################################################################# +print("\nSaving fine-tuned model...") +output_model_name = "qwen3-bullet-echo-qa-lora" +model.save_pretrained(output_model_name) +tokenizer.save_pretrained(output_model_name) +print(f"Model successfully saved to: ./{output_model_name}") + +# Optional: Push to Hugging Face Hub +# from huggingface_hub import login +# login() +# hub_model_id = f"your-hf-username/{output_model_name}" +# model.push_to_hub(hub_model_id) +# tokenizer.push_to_hub(hub_model_id) +# print(f"Model pushed to Hugging Face Hub: {hub_model_id}") + +print("\n🦥 Fine-tuning script completed successfully! 
🦥") + +################################################################# +# LOAD SAVED MODEL # +################################################################# +print("\n--- Loading Saved Fine-tuned Model ---") + +# Load the saved model and tokenizer +saved_model_path = output_model_name # "qwen3-bullet-echo-qa-lora" +loaded_model, loaded_tokenizer = FastModel.from_pretrained( + model_name=output_model_name, + max_seq_length=2048, + load_in_4bit=True, + full_finetuning=False, +) + +# Enable faster inference +unsloth.FastModel.for_inference(loaded_model) + +print("Model successfully loaded for inference!") + +# Test with new queries +print("\n--- Testing Loaded Model Responses ---") + +new_test_queries = [ + "What's the best strategy for Cyclops in Bullet Echo?", + "How does the Stalker's invisibility work in the game?", + "Which heroes are effective against Bastion in Bullet Echo?", +] + +for query in new_test_queries: + generate_response(loaded_model, loaded_tokenizer, query, temperature=0.2) + +print("\n🦥 Model loading and inference testing completed! 🦥") diff --git a/qwen3-fine-tuning/images/vram-calculator.png b/qwen3-fine-tuning/images/vram-calculator.png new file mode 100644 index 00000000..b41aa910 Binary files /dev/null and b/qwen3-fine-tuning/images/vram-calculator.png differ diff --git a/qwen3-fine-tuning/notebook.ipynb b/qwen3-fine-tuning/notebook.ipynb new file mode 100644 index 00000000..9c00a703 --- /dev/null +++ b/qwen3-fine-tuning/notebook.ipynb @@ -0,0 +1,1056 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Step-by-Step Fine-tuning Qwen 3 on a Custom Dataset With Unsloth and Firecrawl" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Introduction\n", + "\n", + "Qwen 3, released in April 2025 by Alibaba Cloud, is a family of open-source language models ranging from 0.6B to 235B parameters. Available under the Apache 2.0 license, these models have gained widespread adoption with over 300 million downloads and 100,000+ derivative models on platforms like Hugging Face.\n", + "\n", + "Qwen 3 performs exceptionally well in reasoning-intensive tasks such as mathematics, coding, and logical analysis. Even smaller models like Qwen3-4B compete with much larger predecessors, making advanced AI capabilities accessible across various computational environments.\n", + "\n", + "Fine-tuning Qwen 3 for specific domains enhances its performance in specialized areas, improving accuracy and relevance for particular use cases. In this guide, we'll walk through the process of fine-tuning Qwen3-14B on a custom question-answer dataset built from scratch using Firecrawl. We'll optimize the training process with Unsloth to achieve efficient and effective results.\n", + "\n", + "> Find the companion [notebook and script with full code](https://github.com/mendableai/firecrawl-app-examples/tree/main/qwen3-fine-tuning) for this article through our GitHub repository." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Qwen 3 Architecture and Capabilities\n", + "\n", + "Let's explore the technical architecture and capabilities of Qwen 3 models before diving into the fine-tuning process.\n", + "\n", + "### Technical specifications\n", + "\n", + "Qwen 3 offers both dense and Mixture-of-Expert (MoE) architectures to accommodate different needs:\n", + "\n", + "**Dense Models:**\n", + "\n", + "| Model | Layers | Heads (Key/Value) | Context Length | Parameters |\n", + "|-------|--------|-------------------|----------------|------------|\n", + "| Qwen3-0.6B | 28 | 16/8 | 32K | 0.6B |\n", + "| Qwen3-1.7B | 28 | 16/8 | 32K | 1.7B |\n", + "| Qwen3-4B | 36 | 32/8 | 32K | 4B |\n", + "| Qwen3-8B | 36 | 32/8 | 128K | 8B |\n", + "| Qwen3-14B | 40 | 40/8 | 128K | 14B |\n", + "| Qwen3-32B | 64 | 64/8 | 128K | 32B |\n", + "\n", + "The dense models vary in size and capability. Smaller models (Qwen3-0.6B and Qwen3-1.7B) are appropriate for edge devices and applications with latency constraints, with some limitations in reasoning complexity. Mid-range models (Qwen3-4B and Qwen3-8B) provide a balance between performance and resource usage, suitable for many production applications. Larger models (Qwen3-14B and Qwen3-32B) are better equipped for complex reasoning tasks where sophisticated problem-solving is required.\n", + "\n", + "**MoE Models:**\n", + "\n", + "| Model | Layers | Heads (Key/Value) | Experts | Context Length | Parameters (Total/Activated) |\n", + "|-------|--------|-------------------|---------|----------------|------------------------------|\n", + "| Qwen3-30B-A3B | 48 | 32/4 | 128/8 | 128K | 30B/3B |\n", + "| Qwen3-235B-A22B | 94 | 64/4 | 128/8 | 128K | 235B/22B |\n", + "\n", + "MoE models use a different approach where only a subset of parameters (experts) are activated for each input token. This design allows for larger total parameter counts while keeping computational requirements manageable. The Qwen3-30B-A3B model activates approximately 3B parameters during inference from its total 30B parameters, making it suitable for applications requiring advanced reasoning with moderate computational resources. The Qwen3-235B-A22B model activates about 22B of its 235B parameters, providing high performance across various tasks while maintaining reasonable inference costs.\n", + "\n", + "All these models were trained on 36 trillion tokens through a structured three-stage process, followed by additional post-training to enhance reasoning and instruction-following capabilities.\n", + "\n", + "### Chat templating and response formatting\n", + "\n", + "Qwen 3 uses a structured chat template that defines clear roles (user/assistant) and manages conversation flow. A notable feature is its \"thinking mode,\" which allows the model to show its reasoning process before providing an answer. This is especially useful for complex tasks where step-by-step thinking improves answer quality.\n", + "\n", + "Understanding this template structure is essential for our fine-tuning process, as we'll need to format our custom dataset to match Qwen 3's expected input format. 
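As a rough illustration, a single question-answer pair rendered through this template looks something like the sketch below (the exact special tokens come from `tokenizer.apply_chat_template`, so treat this as illustrative rather than literal output):

```text
<|im_start|>user
How does the Stalker's invisibility work in the game?<|im_end|>
<|im_start|>assistant
<think>
...optional internal reasoning...
</think>
The Stalker uses a special ability called invisibility, which makes the character temporarily undetectable by opponents.<|im_end|>
```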
The template handles:\n", + "- Role assignments in conversations\n", + "- Turn management\n", + "- Special token placement\n", + "- Integration of thinking tags when needed\n", + "\n", + "When we implement our fine-tuning pipeline, we'll need to ensure our question-answer pairs are properly formatted according to these conventions to achieve optimal results.\n", + "\n", + "In this guide, we'll fine-tune Qwen3-14B on a domain-specific question-answer dataset built from scratch using Firecrawl's web scraping capabilities. We'll extract information from the Bullet Echo game wiki and transform it into a structured QA format suitable for training the model to become a specialized game assistant.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1: Creating a Custom Dataset with Firecrawl\n", + "\n", + "There are [many datasets on HuggingFace](https://huggingface.co/datasets) to fine-tune models like Qwen 3:\n", + "\n", + "![](https://www.firecrawl.dev/images/blog/llama4-fine-tune/datasets.png)\n", + "\n", + "However, most fine-tuning scenarios require building custom datasets, which can be time-consuming. You often need to integrate content from various sources like documents, images, videos, or websites. If your project involves web content, Firecrawl can be particularly helpful. For this tutorial, we'll fine-tune Qwen 3 using an online knowledge base for the Bullet Echo mobile game.\n", + "\n", + "Bullet Echo is a multiplayer game featuring short battle royale matches that take only 2-3 minutes to complete. Advancing through the ranks requires tactical skill, as players control heroes with unique abilities in a top-down 2D shooting environment:\n", + "\n", + "![](https://cdn6.aptoide.com/imgs/0/6/f/06f695064ae703821206bebc8943feac_screen.png)\n", + "\n", + "The game's comprehensive information is documented and maintained by the community on the Bullet Echo Fandom wiki:\n", + "\n", + "![](https://www.firecrawl.dev/images/blog/llama4-fine-tune/wiki.png)\n", + "\n", + "The wiki contains approximately 180 articles, and we'll need to scrape all of them to create a structured dataset in the format shown below:\n", + "\n", + "```json\n", + "{\n", + " {\n", + " \"id\": \"c7296197-34a1-4eba-bc54-f2044c01af15\",\n", + " \"question\": \"Why might a player choose a tank character like Ramsay or Leviathan in Bullet Echo?\",\n", + " \"answer\": \"A player might choose a tank character like Ramsay or Leviathan in Bullet Echo because tanks are designed to be more durable and absorb more damage, helping protect their team and lead pushes during gameplay.\"\n", + " },\n", + " {\n", + " \"id\": \"b1b148c6-e0d7-491b-a87a-f299824cb934\",\n", + " \"question\": \"What are base stats in Bullet Echo and how do they affect gameplay?\",\n", + " \"answer\": \"Base stats in Bullet Echo refer to the fundamental characteristics of a hero, such as health, damage, speed, and armor. These stats influence how effectively a hero can survive, move, and engage in combat, making them crucial for gameplay performance.\"\n", + " },\n", + " ...\n", + "}\n", + "```\n", + "\n", + "All QA pairs combined must teach an LLM the game trivia in its entirety. Let's see how to go from an initial catalogue of articles to a JSON dataset step-by-step.\n", + "\n", + "### Using Firecrawl for targeted web scraping\n", + "\n", + "The first step is to extract all article links from the wiki. 
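The snippets that follow assume the Firecrawl Python SDK is installed and a client has already been created, roughly as sketched here (the key and variable names are placeholders):

```python
from typing import List

from firecrawl import FirecrawlApp  # pip install firecrawl-py
from pydantic import BaseModel

# Create the client; replace the placeholder with your own API key management
app = FirecrawlApp(api_key="fc-YOUR_API_KEY")
```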
Firecrawl's schema-based extraction makes this straightforward:\n", + "\n", + "```python\n", + "# Define data models\n", + "class Article(BaseModel):\n", + " url: str\n", + " title: str\n", + "\n", + "class ArticleList(BaseModel):\n", + " articles: List[Article]\n", + "\n", + "# Scrape the wiki pages list\n", + "result = app.scrape_url(\n", + " \"https://bullet-echo.fandom.com/wiki/Special:AllPages\",\n", + " params={\n", + " \"formats\": [\"extract\"],\n", + " \"extract\": {\"schema\": ArticleList.model_json_schema()}\n", + " }\n", + ")\n", + "```\n", + "\n", + "Instead of writing brittle CSS selectors or XPath expressions, Firecrawl uses AI to understand what data you want based on your schema definition. This approach works even when websites change their structure, as the AI focuses on content semantics rather than DOM specifics.\n", + "\n", + "### Converting the Bullet Echo wiki into structured markdown\n", + "\n", + "After collecting all article URLs, we use Firecrawl's batch scraping to convert HTML content to clean markdown:\n", + "\n", + "```python\n", + "# Batch scrape all articles\n", + "batch_id = app.batch_scrape(\n", + " urls=[article.url for article in articles],\n", + " params={\"formats\": [\"markdown\"], \"onlyMainContent\": True}\n", + ")\n", + "\n", + "# Get results\n", + "while True:\n", + " status = app.check_batch_status(batch_id)\n", + " if status[\"status\"] == \"completed\":\n", + " break\n", + " time.sleep(10) # Wait and check again\n", + "```\n", + "\n", + "The `onlyMainContent` parameter is particularly valuable, as it automatically filters out navigation elements, ads, footers, and other non-essential content. This results in clean documentation focused only on the game knowledge we need.\n", + "\n", + "### Processing raw content into LLMs.txt format\n", + "\n", + "To prepare for QA pair generation, we need to chunk the documents into manageable pieces:\n", + "\n", + "```python\n", + "# Chunk processing function\n", + "def chunk_document(doc_path, chunk_size=800, overlap=200):\n", + " text = Path(doc_path).read_text()\n", + " chunks = []\n", + " \n", + " tokens = text.split()\n", + " for i in range(0, len(tokens), chunk_size - overlap):\n", + " chunk = \" \".join(tokens[i:i + chunk_size])\n", + " if len(chunk) > 100: # Only keep substantive chunks\n", + " chunks.append(chunk)\n", + " \n", + " return chunks\n", + "\n", + "# Apply chunking to all markdown files\n", + "all_chunks = []\n", + "for file in markdown_files:\n", + " chunks = chunk_document(file)\n", + " for chunk in chunks:\n", + " doc_info = {\"source\": file.stem, \"content\": chunk}\n", + " all_chunks.append(doc_info)\n", + "```\n", + "\n", + "This chunking process breaks down longer articles into overlapping segments, maintaining context while creating pieces that are the right size for our QA generation step.\n", + "\n", + "### Generating high-quality QA pairs from web content\n", + "\n", + "The final step uses a large language model to generate question-answer pairs from our structured content:\n", + "\n", + "```python\n", + "from openai import OpenAI\n", + "client = OpenAI()\n", + "\n", + "def generate_qa_pairs(chunk, num_pairs=3):\n", + " prompt = f\"\"\"Generate {num_pairs} high-quality question-answer pairs based on this text:\n", + " \n", + " {chunk['content']}\n", + " \n", + " The questions should be diverse and cover different aspects of the information.\n", + " Return in JSON format with 'question' and 'answer' fields.\"\"\"\n", + " \n", + " response = client.chat.completions.create(\n", 
+ " model=\"gpt-4\",\n", + " messages=[{\"role\": \"user\", \"content\": prompt}],\n", + " response_format={\"type\": \"json_object\"}\n", + " )\n", + " \n", + " return json.loads(response.choices[0].message.content)[\"pairs\"]\n", + "```\n", + "\n", + "By applying this function to our chunked content, we generate approximately 3,000 question-answer pairs covering all aspects of the Bullet Echo game. The resulting dataset is then saved to JSONL format and uploaded to HuggingFace for fine-tuning.\n", + "\n", + "The complete dataset creation pipeline demonstrates how Firecrawl's natural language-based extraction simplifies complex web scraping tasks. Instead of dealing with HTML parsing headaches, developers can focus on describing the data they need, allowing Firecrawl's AI to handle the technical details.\n", + "\n", + "For the complete implementation details, check out the [full Llama 4 fine-tuning article](https://www.firecrawl.dev/blog/fine-tuning-llama4-custom-dataset-firecrawl) and the [associated GitHub repository](https://github.com/mendableai/firecrawl-app-examples/tree/main/llama4-fine-tuning).\n", + "\n", + "Also, don't forget to [sign up for Firecrawl](https://firecrawl.dev) and [go through the quickstart documentation](https://docs.firecrawl.dev/introduction) before you dive in." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Step 2: Environment Setup for Fine-tuning\n", + "\n", + "After creating our dataset, the next crucial step is setting up the right environment for fine-tuning. This involves selecting appropriate hardware and configuring the necessary software components to efficiently train our model.\n", + "\n", + "### Hardware considerations\n", + "\n", + "Fine-tuning a 14B parameter model like Qwen3-14B requires significant GPU resources. For our setup, a single NVIDIA L40 GPU with 48GB VRAM is sufficient when using QLoRA and 4-bit quantization, costing approximately $0.90 per hour on RunPod.\n", + "\n", + "![QLoRA VRAM Calculator showing memory requirements for Qwen3-14B fine-tuning](images/vram-calculator.png)\n", + "\n", + "The image above shows the VRAM calculator from [apxml.com/tools/vram-calculator](https://apxml.com/tools/vram-calculator) demonstrating that fine-tuning Qwen3-14B with 4-bit quantization, LoRA rank 16, batch size 4, and sequence length of 8,192 tokens requires approximately 24% (11.59 GB) of a 48GB GPU. This tool is invaluable for planning your hardware requirements before starting fine-tuning.\n", + "\n", + "The memory breakdown includes:\n", + "- Base model weights (4-bit): ~7.0 GB\n", + "- LoRA adapters: ~0.08 GB\n", + "- Activations: ~1.68 GB\n", + "- QLoRA buffers: ~1.78 GB\n", + "- Framework overhead: ~1.07 GB\n", + "\n", + "### Fine-tuning approaches: Full, LoRA, and QLoRA\n", + "\n", + "When fine-tuning large language models like Qwen3, we have several approaches to consider:\n", + "\n", + "- **Full fine-tuning**: Updates all model parameters, providing maximum adaptation but requiring enormous computational resources and memory. For a 14B parameter model, this approach is impractical without access to multiple high-end GPUs.\n", + "- **LoRA (Low-Rank Adaptation)**: Adds small trainable rank decomposition matrices to existing weights while keeping original parameters frozen. 
This reduces trainable parameters to less than 1% of the original model.\n", + "- **QLoRA (Quantized LoRA)**: Combines LoRA with 4-bit quantization of the base model weights, dramatically reducing memory requirements while maintaining performance.\n", + "\n", + "For our Bullet Echo game assistant, QLoRA is ideal as it enables fine-tuning Qwen3-14B on a single GPU while preserving most of the model's capabilities. Given our specialized dataset's focused domain, this approach provides the perfect balance of efficiency and performance.\n", + "\n", + "### Setting up RunPod for fine-tuning\n", + "\n", + "RunPod offers on-demand GPU resources that are perfect for fine-tuning large language models. Here's how to set up a RunPod instance for our Qwen 3 fine-tuning:\n", + "\n", + "1. **Create a RunPod account**: Sign up at [runpod.io](https://runpod.io/) if you don't already have an account.\n", + "\n", + "2. **Select a GPU**: From the RunPod dashboard, click \"Deploy\" and choose a single L40 GPU (48GB). This is sufficient for our fine-tuning needs thanks to QLoRA optimization.\n", + "\n", + "3. **Choose a Docker template**: Select a PyTorch template with version 2.7 or higher and CUDA support.\n", + "\n", + "4. **Configure storage**: \n", + " - Set container disk to at least 40GB\n", + " - Add a volume of 40-50GB for storing the model and dataset\n", + "\n", + "5. **Deploy the pod**: Once configured, click \"Deploy\" to launch your GPU instance.\n", + "\n", + "6. **Connect to your pod**: After deployment (usually takes 1-2 minutes), click \"Connect\" and select \"JupyterLab\" to access the development environment.\n", + "\n", + "7. **Create a new notebook**: In JupyterLab, click the \"+\" button in the file browser and select \"Python 3\" to create a new notebook for our fine-tuning code.\n", + "\n", + "### Required libraries and dependencies\n", + "\n", + "Once your JupyterLab environment is running, install the necessary libraries in the first cell of your notebook:\n", + "\n", + "```python\n", + "!pip install --no-deps bitsandbytes accelerate xformers==0.0.29.post3 peft trl==0.15.2 triton cut_cross_entropy unsloth_zoo\n", + "!pip install sentencepiece protobuf datasets huggingface_hub hf_transfer\n", + "!pip install --no-deps unsloth\n", + "!pip install regex transformers rich\n", + "```\n", + "\n", + "The core components of our setup include:\n", + "- **unsloth**: Provides 2x faster fine-tuning capabilities\n", + "- **transformers**: Hugging Face's library for working with pre-trained models\n", + "- **peft**: Implements Parameter-Efficient Fine-Tuning techniques like LoRA\n", + "- **bitsandbytes**: Enables 4-bit quantization for memory efficiency\n", + "- **trl**: Training and fine-tuning framework with SFTTrainer\n", + "\n", + "After installation, import the necessary modules:\n", + "\n", + "```python\n", + "import unsloth\n", + "import torch\n", + "from unsloth import FastModel\n", + "from datasets import load_dataset\n", + "from trl import SFTTrainer, SFTConfig\n", + "from transformers import TextStreamer, GenerationConfig\n", + "import re\n", + "```\n", + "\n", + "With this environment setup complete, we're ready to move on to loading and preparing our Qwen 3 model for fine-tuning." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 3: Loading and Preparing the Qwen 3 Model\n", + "\n", + "With our environment set up, we can now proceed to load and prepare the Qwen 3 model for fine-tuning. 
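Before loading anything, it can be worth confirming that PyTorch actually sees the GPU and how much memory it has — a quick optional check, assuming a single CUDA device:

```python
import torch

# Optional sanity check before loading a 14B model
assert torch.cuda.is_available(), "No CUDA device visible - check your pod and drivers"
props = torch.cuda.get_device_properties(0)
print(f"GPU: {torch.cuda.get_device_name(0)}, VRAM: {props.total_memory / 1e9:.1f} GB")
```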
This step involves initializing the model with appropriate quantization settings, configuring the tokenizer, and implementing memory optimization techniques.\n", + "\n", + "### Model initialization with quantization\n", + "\n", + "The first step is loading the pre-trained Qwen3-14B model with quantization to reduce memory usage. We use Unsloth's `FastModel` class, which provides optimized loading and fine-tuning capabilities:\n", + "\n", + "```python\n", + "print(\"Loading Qwen3 model and tokenizer...\")\n", + "model, tokenizer = FastModel.from_pretrained(\n", + " model_name=\"unsloth/Qwen3-14B\",\n", + " max_seq_length=2048, # Choose any value for context length\n", + " load_in_4bit=True, # 4-bit quantization to reduce memory\n", + " full_finetuning=False,\n", + ")\n", + "```\n", + "\n", + "This code loads both the model and tokenizer in a single step. Let's examine each parameter:\n", + "\n", + "- `model_name`: Specifies which model to load. Here we're using Unsloth's optimized version of Qwen3-14B.\n", + "- `max_seq_length`: Sets the maximum sequence length the model can handle. For most fine-tuning tasks, 2048 tokens is sufficient, but you can adjust this based on your specific needs.\n", + "- `load_in_4bit`: Enables 4-bit quantization, which significantly reduces memory usage while maintaining most of the model's performance.\n", + "- `full_finetuning`: Set to `False` since we're using LoRA/QLoRA instead of full fine-tuning.\n", + "\n", + "When adapting this for your own projects, consider:\n", + "- For longer text inputs, increase `max_seq_length` (up to the model's context limit)\n", + "- For smaller GPUs, keep `load_in_4bit=True`\n", + "- For tasks requiring extreme precision, consider `load_in_8bit=True` instead (uses more memory)\n", + "\n", + "### Tokenizer setup\n", + "\n", + "Unsloth's `from_pretrained` method automatically loads the appropriate tokenizer for the model. For Qwen 3, this tokenizer handles several special aspects:\n", + "\n", + "1. **Chat formatting**: Qwen 3's tokenizer incorporates chat templates that properly format conversations with user/assistant roles.\n", + "2. **Special tokens**: Manages model-specific tokens like BOS (beginning of sequence), EOS (end of sequence), and role indicators.\n", + "3. **Thinking tags**: Supports Qwen 3's special `` tags for internal reasoning processes.\n", + "\n", + "The tokenizer is critical for both training and inference. During training, it will convert your text data into token IDs that the model can process. During inference, it handles both the input formatting and output decoding.\n", + "\n", + "### Memory optimization techniques\n", + "\n", + "Efficient memory usage is crucial when fine-tuning large models like Qwen3-14B. The approach we're using incorporates several optimization techniques:\n", + "\n", + "1. **4-bit quantization**: Reduces model weight precision from 16-bit (or 32-bit) to 4-bit, shrinking memory requirements by 4-8x with minimal performance impact.\n", + "\n", + "2. **Sequential loading**: Unsloth loads model weights in sequence rather than all at once, reducing peak memory usage during initialization.\n", + "\n", + "3. **Gradient checkpointing**: Will be enabled when we configure PEFT, reducing memory requirements by recomputing some activations during backpropagation rather than storing them.\n", + "\n", + "4. 
**Parameter freezing**: With LoRA, we keep most of the model's parameters frozen, greatly reducing memory needed for storing gradients.\n", + "\n", + "These optimizations allow us to fit a 14B parameter model into a single GPU's memory while maintaining most of its capabilities. When working with different model sizes, you may need to adjust these settings:\n", + "\n", + "- For smaller models (below 7B parameters), 8-bit quantization might offer a better precision/memory trade-off\n", + "- For larger models (above 20B parameters), you might need multiple GPUs or more aggressive optimizations\n", + "- For edge cases with very limited memory, consider smaller models like Qwen3-4B instead\n", + "\n", + "With the model and tokenizer loaded and optimized, we're ready to move on to preparing our dataset for fine-tuning. The next step will involve formatting the data according to Qwen 3's specific requirements and processing it for efficient training." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Step 4: Dataset Preparation\n", + "\n", + "Now that we have our model loaded and optimized, we need to prepare our dataset for fine-tuning. As mentioned in Step 1, we've already created and uploaded a Bullet Echo game QA dataset to Hugging Face using Firecrawl. The complete process for creating this dataset is detailed in the [Llama 4 fine-tuning article](https://www.firecrawl.dev/blog/fine-tuning-llama4-custom-dataset-firecrawl), but now we'll focus on loading and preparing this dataset specifically for Qwen 3 fine-tuning.\n", + "\n", + "### Loading the Bullet Echo QA dataset\n", + "\n", + "We start by loading our pre-existing dataset from Hugging Face and splitting it into training and validation sets:\n", + "\n", + "```python\n", + "print(\"Loading Bullet Echo Wiki QA dataset...\")\n", + "dataset_name = \"bexgboost/bullet-echo-wiki-qa\"\n", + "full_dataset = load_dataset(dataset_name, trust_remote_code=True)\n", + "\n", + "# Split dataset into training and validation sets (90% train, 10% validation)\n", + "train_val_split = full_dataset[\"train\"].train_test_split(\n", + " test_size=0.1, seed=42, shuffle=True\n", + ")\n", + "train_dataset = train_val_split[\"train\"]\n", + "val_dataset = train_val_split[\"test\"] # This becomes our validation set\n", + "\n", + "print(\n", + " f\"Training examples: {len(train_dataset)}, Validation examples: {len(val_dataset)}\"\n", + ")\n", + "```\n", + "\n", + "We use a 90/10 split for training and validation data, which is a common practice for fine-tuning. The validation set will help us monitor the model's performance on unseen data during training.\n", + "\n", + "When adapting this to your own projects:\n", + "- Use a dataset that matches your target application domain\n", + "- Consider the size of your validation set based on your dataset size (5-20% is typical)\n", + "- Ensure your dataset is properly formatted with high-quality question-answer pairs\n", + "- If your dataset is very large, you might consider using a smaller subset for faster iterations\n", + "\n", + "### Formatting with Qwen 3's chat template\n", + "\n", + "One critical aspect of fine-tuning Qwen 3 is properly formatting the data using its chat template. 
Different models use different formats, and Qwen 3 has specific requirements for how conversations should be structured:\n", + "\n", + "```python\n", + "print(\"Formatting datasets with Qwen3 chat template...\")\n", + "EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN\n", + "\n", + "def format_data(example):\n", + " # Qwen3 uses a chat template, so we'll format it accordingly\n", + " messages = [\n", + " {\"role\": \"user\", \"content\": example[\"question\"]},\n", + " {\"role\": \"assistant\", \"content\": example[\"answer\"] + EOS_TOKEN},\n", + " ]\n", + " # The tokenizer.apply_chat_template handles special tokens for Qwen3\n", + " return {\"text\": tokenizer.apply_chat_template(messages, tokenize=False)}\n", + "\n", + "# Format both training and validation datasets\n", + "formatted_train_dataset = train_dataset.map(format_data)\n", + "formatted_val_dataset = val_dataset.map(format_data)\n", + "```\n", + "\n", + "This formatting function:\n", + "1. Takes each example from our dataset\n", + "2. Structures it as a conversation with user (question) and assistant (answer) roles\n", + "3. Adds the end-of-sequence token to the assistant's response\n", + "4. Applies Qwen 3's chat template to format it correctly\n", + "\n", + "The `tokenizer.apply_chat_template()` method handles all the model-specific formatting, including adding special tokens and structuring the conversation properly. For your own projects, you'll need to adapt this function based on:\n", + "- The structure of your dataset (it might not be in QA format)\n", + "- The specific conversation structure you want the model to learn\n", + "- Any additional context or system prompts you want to include\n", + "\n", + "### Tokenization and processing\n", + "\n", + "Next, we tokenize our formatted datasets to convert the text into token IDs that the model can process:\n", + "\n", + "```python\n", + "print(\"Tokenizing datasets...\")\n", + "\n", + "def tokenize_function(examples):\n", + " # padding=False because SFTTrainer will handle padding\n", + " return tokenizer(\n", + " examples[\"text\"],\n", + " padding=False,\n", + " truncation=True,\n", + " max_length=model.config.max_position_embeddings,\n", + " )\n", + "\n", + "# Process both datasets\n", + "processed_train_dataset = formatted_train_dataset.map(\n", + " tokenize_function,\n", + " batched=True,\n", + " remove_columns=[\"id\", \"question\", \"answer\", \"text\"],\n", + " desc=\"Tokenizing training dataset\",\n", + ")\n", + "\n", + "processed_val_dataset = formatted_val_dataset.map(\n", + " tokenize_function,\n", + " batched=True,\n", + " remove_columns=[\"id\", \"question\", \"answer\", \"text\"],\n", + " desc=\"Tokenizing validation dataset\",\n", + ")\n", + "```\n", + "\n", + "This tokenization process:\n", + "1. Converts text to token IDs using the model's vocabulary\n", + "2. Applies truncation if sequences exceed the maximum length\n", + "3. Removes the original columns since we only need the tokenized input for training\n", + "4. 
Processes data in batches for efficiency\n", + "\n", + "For your own projects, consider:\n", + "- Adjusting the `max_length` parameter based on your sequence lengths\n", + "- Setting appropriate `remove_columns` based on your dataset structure\n", + "- Using `batched=True` for faster processing on large datasets\n", + "- Setting an appropriate `num_proc` parameter if working with very large datasets to use parallel processing\n", + "\n", + "### Train-validation splitting\n", + "\n", + "We've already implemented the train-validation split earlier in our code, but it's worth noting that validation is crucial for monitoring training progress and preventing overfitting. The validation set provides an unbiased evaluation of the model during training and helps determine when to stop training or which checkpoint to select as the final model.\n", + "\n", + "For fine-tuning projects with different needs:\n", + "- If your dataset is very small, consider using cross-validation instead of a simple split\n", + "- If you have a multi-class dataset, ensure your validation set has a representative distribution of all classes\n", + "- For production models, consider adding a separate test set that's never used during training\n", + "\n", + "> To learn more about preparing datasets for fine-tuning in Unsloth, read [the official guide from the documentation](https://docs.unsloth.ai/basics/datasets-guide).\n", + "\n", + "With our dataset now properly loaded, formatted, tokenized, and split, we're ready to move on to configuring the Parameter-Efficient Fine-Tuning (PEFT) approach with LoRA in the next step.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 5: Configuring PEFT with LoRA\n", + "\n", + "Now that our dataset is prepared, we need to configure Parameter-Efficient Fine-Tuning (PEFT) using LoRA. This approach allows us to fine-tune a large model like Qwen3-14B with minimal memory requirements by adding small, trainable adapters to key layers while keeping most of the original model frozen.\n", + "\n", + "### Setting up LoRA parameters\n", + "\n", + "We use Unsloth's `get_peft_model` function to configure our model for PEFT:\n", + "\n", + "```python\n", + "print(\"Setting up PEFT model with LoRA...\")\n", + "model = FastModel.get_peft_model(\n", + " model,\n", + " r=8, # LoRA rank\n", + " target_modules=[ # Which layers to apply LoRA to\n", + " \"q_proj\",\n", + " \"k_proj\",\n", + " \"v_proj\",\n", + " \"o_proj\",\n", + " \"gate_proj\",\n", + " \"up_proj\",\n", + " \"down_proj\",\n", + " ],\n", + " finetune_vision_layers=False, # Turn off for just text!\n", + " finetune_language_layers=True,\n", + " finetune_attention_modules=True,\n", + " finetune_mlp_modules=True,\n", + " lora_alpha=8, # LoRA scaling factor\n", + " lora_dropout=0, # Dropout probability for LoRA layers\n", + " bias=\"none\", # Whether to train bias parameters\n", + " use_gradient_checkpointing=\"unsloth\", # Memory optimization\n", + " random_state=1000, # For reproducibility\n", + " use_rslora=False, # Regular LoRA vs Rank-Stabilized LoRA\n", + ")\n", + "```\n", + "\n", + "Let's break down these parameters in detail:\n", + "\n", + "- **r (rank)**: Determines the size of the LoRA matrices. Higher values give more learning capacity but require more memory. For our Bullet Echo task, a rank of 8 provides a good balance between capacity and efficiency.\n", + "\n", + "- **target_modules**: Specifies which layers in the model receive LoRA adapters. 
We target attention layers (`q_proj`, `k_proj`, `v_proj`, `o_proj`) and MLP components (`gate_proj`, `up_proj`, `down_proj`). This comprehensive coverage ensures effective learning across all key model components.\n", + "\n", + "- **finetune_*_layers**: Controls which types of layers to fine-tune. We enable adaptation for language and core transformer components while disabling vision layers since our task is purely text-based.\n", + "\n", + "- **lora_alpha**: Scaling factor for LoRA updates. Usually set equal to or slightly higher than rank. This influences how much the LoRA adapters contribute to the final output.\n", + "\n", + "- **lora_dropout**: Regularization for LoRA layers to prevent overfitting. We use 0 for this task, but values between 0.05-0.2 can help with larger datasets or longer training runs.\n", + "\n", + "- **use_gradient_checkpointing**: Memory optimization technique that trades computation for memory by recomputing some values during backpropagation rather than storing them. The \"unsloth\" setting uses Unsloth's optimized implementation.\n", + "\n", + "### Adapting LoRA configuration for different tasks\n", + "\n", + "When adapting this configuration for your own projects, consider these guidelines:\n", + "\n", + "**For smaller datasets (1,000-5,000 examples):**\n", + "- Use a lower rank (4-8) to prevent overfitting\n", + "- Consider adding lora_dropout (0.05-0.1)\n", + "- Target fewer modules if very limited data is available\n", + "\n", + "**For larger datasets (10,000+ examples):**\n", + "- Increase rank (16-32) for more learning capacity\n", + "- Target more modules for comprehensive adaptation\n", + "- Potentially use longer training with more epochs\n", + "\n", + "**For specialized domains (technical, medical, legal):**\n", + "- Higher rank may help capture domain-specific knowledge\n", + "- More comprehensive module targeting is beneficial\n", + "- Consider longer training to develop domain expertise\n", + "\n", + "**For memory-constrained environments:**\n", + "- Lower rank (4) reduces adapter memory requirements\n", + "- Reduce batch size and gradient accumulation steps\n", + "- Ensure gradient checkpointing is enabled\n", + "\n", + "### Understanding LoRA's efficiency benefits\n", + "\n", + "LoRA dramatically reduces the number of trainable parameters compared to full fine-tuning. For our Qwen3-14B model with rank 8, we're training only about 0.23% of the model's parameters:\n", + "\n", + "```text\n", + "Trainable parameters = 32,112,640/14,000,000,000 (0.23% trained)\n", + "```\n", + "\n", + "This approach offers several advantages:\n", + "\n", + "1. **Memory efficiency**: Fine-tuning fits on a single GPU instead of requiring multiple high-end GPUs\n", + "2. **Training speed**: Fewer parameters mean faster training iterations\n", + "3. **Reduced overfitting**: Less risk of catastrophic forgetting of the base model's capabilities\n", + "4. **Storage efficiency**: The trained adapters are only a few MB compared to tens of GB for a full model\n", + "\n", + "With our PEFT configuration complete, we're ready to set up the training process with appropriate evaluation strategies and hyperparameters in the next step.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 6: Setting Up the Training Process\n", + "\n", + "After configuring our model with LoRA, the next step is to set up the training process. 
This involves configuring the SFTTrainer with appropriate hyperparameters, evaluation strategies, and optimization settings to ensure efficient and effective fine-tuning.\n", + "\n", + "### SFTTrainer configuration\n", + "\n", + "We use the `SFTTrainer` from the TRL (Transformer Reinforcement Learning) library, which streamlines the fine-tuning process for language models. Here's how we configure it:\n", + "\n", + "```python\n", + "print(\"Configuring SFTTrainer with evaluation...\")\n", + "trainer = SFTTrainer(\n", + " model=model,\n", + " tokenizer=tokenizer,\n", + " train_dataset=processed_train_dataset,\n", + " eval_dataset=processed_val_dataset, # Add validation dataset\n", + " args=SFTConfig(\n", + " dataset_text_field=\"text\",\n", + " per_device_train_batch_size=2,\n", + " per_device_eval_batch_size=2, # Batch size for evaluation\n", + " gradient_accumulation_steps=4,\n", + " warmup_steps=5,\n", + " num_train_epochs=3,\n", + " # max_steps=100, # For quick testing\n", + " learning_rate=2e-4,\n", + " logging_steps=200,\n", + " optim=\"adamw_8bit\",\n", + " weight_decay=0.01,\n", + " lr_scheduler_type=\"linear\",\n", + " seed=3407,\n", + " output_dir=\"outputs\",\n", + " eval_strategy=\"steps\",\n", + " eval_steps=200, # Evaluate every 200 steps\n", + " save_strategy=\"steps\", # Save checkpoints based on evaluation\n", + " save_steps=200, # Save every 200 steps\n", + " load_best_model_at_end=True, # Load best model at the end of training\n", + " metric_for_best_model=\"eval_loss\", # Use evaluation loss to determine best model\n", + " greater_is_better=False, # Lower loss is better\n", + " save_total_limit=3, # Keep only the 3 best checkpoints\n", + " ),\n", + ")\n", + "```\n", + "\n", + "Let's examine key parameters and their significance:\n", + "\n", + "- **Batch sizes**: `per_device_train_batch_size=2` and `per_device_eval_batch_size=2` specify the number of examples processed together. Small batch sizes help manage memory usage with large models.\n", + "\n", + "- **Gradient accumulation**: With `gradient_accumulation_steps=4`, we accumulate gradients across multiple batches before updating the model, effectively increasing the batch size to 8 (2 × 4) without increasing memory usage.\n", + "\n", + "- **Training duration**: `num_train_epochs=3` sets the number of complete passes through the training dataset, which is typically sufficient for LoRA fine-tuning.\n", + "\n", + "- **Learning rate**: `learning_rate=2e-4` sets how quickly the model parameters are updated. 
This slightly higher value (compared to full fine-tuning) works well with LoRA as we're updating fewer parameters.\n", + "\n", + "- **Evaluation strategy**: `eval_strategy=\"steps\"` and `eval_steps=200` configure the trainer to evaluate the model every 200 training steps on the validation dataset.\n", + "\n", + "- **Checkpoint saving**: `save_strategy=\"steps\"`, `save_steps=200`, and `save_total_limit=3` ensure we save checkpoints regularly while keeping only the best ones to save disk space.\n", + "\n", + "- **Model selection**: `load_best_model_at_end=True` and `metric_for_best_model=\"eval_loss\"` configure the trainer to automatically select the checkpoint with the lowest validation loss as the final model.\n", + "\n", + "### Adapting training configuration for your projects\n", + "\n", + "When adapting this configuration to your own fine-tuning projects, consider these adjustments:\n", + "\n", + "**For larger datasets:**\n", + "- Increase `logging_steps` and `eval_steps` to evaluate less frequently \n", + "- Consider decreasing `num_train_epochs` to prevent overfitting\n", + "- You might need to adjust `learning_rate` downward for more stable training\n", + "\n", + "**For smaller datasets:**\n", + "- Decrease `eval_steps` to get more frequent validation results\n", + "- Consider increasing `num_train_epochs` to allow more learning\n", + "- Use a higher `weight_decay` value to prevent overfitting\n", + "\n", + "**For memory-constrained environments:**\n", + "- Decrease `per_device_train_batch_size` (possibly to 1)\n", + "- Increase `gradient_accumulation_steps` to maintain effective batch size\n", + "- Consider using `fp16=True` for additional memory savings\n", + "\n", + "**For production-quality models:**\n", + "- Implement early stopping with `early_stopping_patience`\n", + "- Use cross-validation instead of a single validation split\n", + "- Consider a more sophisticated learning rate scheduler like `cosine`\n", + "\n", + "The effective batch size in our configuration is 8 (2 × 4), which balances learning stability and memory usage. For different tasks, you might need to adjust this based on your dataset size and available GPU memory.\n", + "\n", + "With our training process properly configured, we're ready to begin the actual fine-tuning of our Qwen 3 model in the next step." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Step 7: Training the Model\n", + "\n", + "With our model and training process configured, we're now ready to begin the fine-tuning. 
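As a quick sanity check on the configuration above: with roughly 2,711 training examples and an effective batch size of 8, one epoch is about 339 optimizer steps, so three epochs come to roughly 1,017 steps — the total you'll see reported once training starts.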
This step involves executing the training loop, monitoring the metrics, and interpreting the results to ensure our model is learning effectively.\n", + "\n", + "### Executing the training loop\n", + "\n", + "Starting the training process is straightforward with the SFTTrainer:\n", + "\n", + "```python\n", + "print(\"Starting fine-tuning process with validation...\")\n", + "training_results = trainer.train()\n", + "\n", + "# Print evaluation metrics\n", + "print(\"Training completed!\")\n", + "print(f\"Final training metrics: {training_results.metrics}\")\n", + "```\n", + "\n", + "When you execute this code, the trainer takes care of:\n", + "- Loading and batching the dataset\n", + "- Computing forward and backward passes\n", + "- Updating the model parameters\n", + "- Evaluating on the validation set at specified intervals\n", + "- Saving checkpoints based on performance\n", + "- Logging metrics for monitoring\n", + "\n", + "### Monitoring metrics and progress\n", + "\n", + "During training, you'll see a progress bar with key information:\n", + "\n", + "```\n", + "==((====))== Unsloth - 2x faster free finetuning | Num GPUs used = 1\n", + " \\\\ /| Num examples = 2,711 | Num Epochs = 3 | Total steps = 1,017\n", + "O^O/ \\\\_/ \\\\ Batch size per device = 2 | Gradient accumulation steps = 4\n", + "\\\\ / Data Parallel GPUs = 1 | Total batch size (2 x 4 x 1) = 8\n", + " \"-____-\" Trainable parameters = 32,112,640/14,000,000,000 (0.23% trained)\n", + "```\n", + "\n", + "This header provides a summary of your training configuration, including:\n", + "- The number of examples being trained on\n", + "- Total epochs and training steps\n", + "- Batch size and gradient accumulation configuration\n", + "- The percentage of parameters being trained (0.23% in our case)\n", + "\n", + "As training progresses, a table shows the training and validation metrics at regular intervals:\n", + "\n", + "```\n", + "Step Training Loss Validation Loss\n", + "200 1.571900 1.287612\n", + "400 1.211400 1.214332\n", + "600 1.081000 1.182069\n", + "800 0.960400 1.207953\n", + "1000 0.879500 1.197931\n", + "```\n", + "\n", + "### Interpreting evaluation results\n", + "\n", + "The key metrics to monitor are:\n", + "\n", + "1. **Training loss**: This should generally decrease over time. If it plateaus early, you might need to increase your learning rate or model capacity.\n", + "\n", + "2. **Validation loss**: This is crucial for detecting overfitting. It should decrease along with training loss. If validation loss starts increasing while training loss continues to decrease, your model is likely overfitting.\n", + "\n", + "In our example, training loss decreases steadily from 1.57 to 0.88, showing that the model is learning from the data. 
The validation loss initially decreases but then fluctuates slightly, which is normal as the model fine-tunes its parameters.\n", + "\n", + "Final metrics after training might look like:\n", + "\n", + "```python\n", + "Final training metrics: {'train_runtime': 1461.829, 'train_samples_per_second': 5.564, 'train_steps_per_second': 0.696, 'total_flos': 5.716715497264128e+16, 'train_loss': 1.136481964013804}\n", + "```\n", + "\n", + "These metrics provide:\n", + "- Total training time (1461 seconds in this case)\n", + "- Processing speed (samples and steps per second)\n", + "- Computational resource usage (FLOPS)\n", + "- Average training loss across all training steps\n", + "\n", + "### Troubleshooting common issues\n", + "\n", + "When fine-tuning your own models, you might encounter these common issues:\n", + "\n", + "**Overfitting**:\n", + "- Symptoms: Validation loss increases while training loss continues to decrease\n", + "- Solutions: Add dropout, reduce training epochs, increase weight decay, or use a smaller LoRA rank\n", + "\n", + "**Underfitting**:\n", + "- Symptoms: Both training and validation loss remain high\n", + "- Solutions: Increase training epochs, use a larger LoRA rank, target more modules, or increase learning rate\n", + "\n", + "**Memory errors**:\n", + "- Symptoms: CUDA out of memory errors\n", + "- Solutions: Reduce batch size, increase gradient accumulation steps, use 4-bit instead of 8-bit quantization, or choose a smaller model\n", + "\n", + "**Slow convergence**:\n", + "- Symptoms: Loss decreases very slowly\n", + "- Solutions: Increase learning rate, adjust warmup steps, or check data quality\n", + "\n", + "**Poor inference results**:\n", + "- Symptoms: Model outputs don't match expectations\n", + "- Solutions: Check data quality, ensure proper chat template formatting, or increase dataset size\n", + "\n", + "For our Bullet Echo game assistant, the training completes successfully with a steady decrease in training loss, indicating that the model has effectively learned from our dataset. With training complete, we can now move on to testing the model and evaluating its performance on new queries in the next step.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Step 8: Inference and Testing\n", + "\n", + "After successfully training our model, we need to test its capabilities by running inference on new questions. 
This step involves setting up the model for generation, configuring the response parameters, and evaluating the quality of outputs.\n", + "\n", + "### Setting up model for inference\n", + "\n", + "First, we need to prepare our model for inference by optimizing it for generation:\n", + "\n", + "```python\n", + "print(\"Setting up model for inference...\")\n", + "unsloth.FastModel.for_inference(model) # Enable native 2x faster inference\n", + "```\n", + "\n", + "This one line enables Unsloth's optimized inference mode, which can provide up to 2x faster generation compared to standard implementations.\n", + "\n", + "### Custom generation configuration\n", + "\n", + "Next, we create a function to generate responses from our fine-tuned model:\n", + "\n", + "```python\n", + "def generate_response(\n", + " model, tokenizer, query, temperature=0.7, top_p=0.9, max_new_tokens=256\n", + "):\n", + " \"\"\"\n", + " Generate a response from the fine-tuned model.\n", + " \"\"\"\n", + " # Format the query as a chat message\n", + " messages = [{\"role\": \"user\", \"content\": query}]\n", + "\n", + " # Prepare model inputs\n", + " inputs = tokenizer.apply_chat_template(\n", + " messages, add_generation_prompt=True, return_tensors=\"pt\"\n", + " ).to(\"cuda\")\n", + "\n", + " # Create attention mask (all 1s) with the same shape as inputs\n", + " attention_mask = torch.ones_like(inputs).to(\"cuda\")\n", + "\n", + " # Configure generation parameters\n", + " generation_config = GenerationConfig(\n", + " temperature=temperature,\n", + " top_p=top_p,\n", + " do_sample=True,\n", + " max_new_tokens=max_new_tokens,\n", + " pad_token_id=tokenizer.pad_token_id,\n", + " eos_token_id=tokenizer.eos_token_id,\n", + " remove_invalid_values=True,\n", + " # Disable thinking tags\n", + " suppression_tokens=(\n", + " [\n", + " tokenizer.encode(\"\", add_special_tokens=False)[0],\n", + " tokenizer.encode(\"\", add_special_tokens=False)[0],\n", + " ]\n", + " if len(tokenizer.encode(\"\", add_special_tokens=False)) > 0\n", + " else None\n", + " ),\n", + " )\n", + "```\n", + "\n", + "This configuration specifies important generation parameters:\n", + "\n", + "- **temperature**: Controls randomness in generation. Higher values (e.g., 1.0) produce more diverse outputs, while lower values (e.g., 0.2) make responses more deterministic.\n", + "- **top_p**: Controls nucleus sampling, where only tokens with cumulative probability less than top_p are considered. This helps focus the generation while maintaining diversity.\n", + "- **max_new_tokens**: Sets the maximum length of the generated response.\n", + "- **suppression_tokens**: Prevents certain tokens (like thinking tags) from appearing in the output.\n", + "\n", + "### Implementing thinking tag filtering\n", + "\n", + "Qwen 3 models sometimes use \"thinking\" tags to show their reasoning process. 
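In raw generations, this reasoning appears between literal `<think>` and `</think>` markers ahead of the final answer, roughly like this (an illustrative sketch, not verbatim model output):

```text
<think>
The question is about the Stalker's ability. The wiki notes that it grants temporary invisibility...
</think>
The Stalker uses a special ability called invisibility, which makes the character temporarily undetectable by opponents.
```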
+ "\n", + "### Implementing thinking tag filtering\n", + "\n", + "Qwen 3 models sometimes wrap their reasoning in `<think>` and `</think>` tags to show the thought process behind an answer. While this is useful for complex problems, we may want to hide this internal reasoning in our final output:\n", + "\n",
+ "```python\n", + "# Custom text filtering function\n", + "def filter_thinking(text):\n", + "    # Remove anything between <think> and </think> tags\n", + "    text = re.sub(r\"<think>.*?</think>\", \"\", text, flags=re.DOTALL)\n", + "    # Remove any remaining <think> or </think> tags\n", + "    text = re.sub(r\"<think>|</think>\", \"\", text)\n", + "    return text\n", + "\n",
+ "# Custom streamer class to filter thinking tags\n", + "class FilteredTextStreamer(TextStreamer):\n", + "    def on_finalized_text(self, text: str, stream_end: bool = False):\n", + "        filtered_text = filter_thinking(text)\n", + "        if filtered_text.strip():  # Only print non-empty text\n", + "            print(filtered_text, end=\"\", flush=True)\n", + "```\n", + "\n",
+ "This custom streamer filters out the thinking process in real time during generation, providing a cleaner user experience.\n", + "\n",
+ "### Testing with sample queries\n", + "\n", + "With our generation function set up, we can test the model with some Bullet Echo game questions:\n", + "\n",
+ "```python\n", + "# Test the model with sample queries\n", + "print(\"\\n--- Testing Model Responses ---\")\n", + "\n", + "test_queries = [\n", + "    \"What's the best strategy for Cyclops in Bullet Echo?\",\n", + "    \"How does the Stalker's invisibility work in the game?\",\n", + "    \"Which heroes are effective against Bastion in Bullet Echo?\",\n", + "]\n", + "\n", + "for query in test_queries:\n", + "    generate_response(model, tokenizer, query)\n", + "```\n", + "\n",
+ "Sample outputs might look like:\n", + "\n", + "```\n", + "User: What's the best strategy for Cyclops in Bullet Echo?\n", + "Assistant: The best strategy for Cyclops is to stay hidden, as this is his greatest strength. He excels in ambush tactics, allowing him to surprise enemies and maximize his effectiveness in stealthy encounters.\n", + "\n", + "User: How does the Stalker's invisibility work in the game?\n", + "Assistant: The Stalker uses a special ability called invisibility, which makes the character temporarily undetectable by opponents. This is often used for stealth movements and surprise attacks.\n", + "```\n", + "\n",
+ "### Adapting inference for different applications\n", + "\n", + "When implementing inference for your own projects, consider these variations:\n", + "\n",
+ "**For interactive applications:**\n", + "- Use streaming generation to show responses as they're generated\n", + "- Consider lower temperature values (0.3-0.5) for more deterministic answers\n", + "- Implement a maximum length cutoff appropriate to your UI\n", + "\n",
+ "**For batch processing:**\n", + "- Disable streaming for faster processing (a non-streaming sketch follows these lists)\n", + "- Consider using higher batch sizes if memory allows\n", + "- Store complete outputs rather than printing them\n", + "\n",
+ "**For specialized domains:**\n", + "- Adjust temperature based on how creative vs. factual responses should be\n", + "- Consider adding domain-specific post-processing to validate outputs\n", + "- You might want to keep thinking tags visible for complex reasoning tasks\n",
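+ "\n", + "For batch-style use, a non-streaming variant of the same logic can collect answers instead of printing them. The helper below is a minimal sketch under the assumptions of this guide (it reuses the loaded `model`/`tokenizer` and the `filter_thinking` function from above; the function name is ours):\n", + "\n",
+ "```python\n", + "def generate_batch_answers(model, tokenizer, queries, max_new_tokens=256):\n", + "    \"\"\"Generate and return answers without streaming (illustrative sketch).\"\"\"\n", + "    answers = []\n", + "    for query in queries:\n", + "        messages = [{\"role\": \"user\", \"content\": query}]\n", + "        inputs = tokenizer.apply_chat_template(\n", + "            messages, add_generation_prompt=True, return_tensors=\"pt\"\n", + "        ).to(\"cuda\")\n", + "        output = model.generate(\n", + "            inputs,\n", + "            attention_mask=torch.ones_like(inputs),\n", + "            max_new_tokens=max_new_tokens,\n", + "            do_sample=False,  # deterministic output for batch jobs\n", + "        )\n", + "        # Decode only the newly generated tokens and strip any thinking tags\n", + "        text = tokenizer.decode(output[0][inputs.shape[1]:], skip_special_tokens=True)\n", + "        answers.append(filter_thinking(text))\n", + "    return answers\n", + "```\n", + "\n",
+ "Calling it with the `test_queries` list from the previous section would return plain strings that you can store or post-process instead of streaming them to the console.\n",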
+ "\n", + "## Step 9: Saving and Deploying the Model\n", + "\n", + "Our fine-tuned model is performing well, so it's time to save it for future use and deployment. This step covers saving the model locally, options for sharing via Hugging Face Hub, and verifying that the saved model works correctly.\n", + "\n",
+ "### Saving the fine-tuned model locally\n", + "\n", + "First, we save the fine-tuned model and tokenizer to local storage:\n", + "\n",
+ "```python\n", + "print(\"\\nSaving fine-tuned model...\")\n", + "output_model_name = \"qwen3-bullet-echo-qa-lora\"\n", + "model.save_pretrained(output_model_name)\n", + "tokenizer.save_pretrained(output_model_name)\n", + "print(f\"Model successfully saved to: ./{output_model_name}\")\n", + "```\n", + "\n",
+ "This creates a directory containing all the necessary files:\n", + "- The LoRA adapter weights (much smaller than the full model)\n", + "- Configuration files specifying the model architecture and parameters\n", + "- Tokenizer files including vocabulary and special token mappings\n", + "\n",
+ "### Hugging Face Hub integration (optional)\n", + "\n", + "For sharing your model with others or deploying to production, you can push it to Hugging Face Hub:\n", + "\n",
+ "```python\n", + "# Optional: Push to Hugging Face Hub\n", + "# from huggingface_hub import login\n", + "# login()\n", + "# hub_model_id = f\"your-hf-username/{output_model_name}\"\n", + "# model.push_to_hub(hub_model_id)\n", + "# tokenizer.push_to_hub(hub_model_id)\n", + "# print(f\"Model pushed to Hugging Face Hub: {hub_model_id}\")\n", + "```\n", + "\n",
+ "This makes your model accessible to others and integrates with various deployment platforms that support Hugging Face models.\n", + "\n",
+ "### Loading and verifying the saved model\n", + "\n", + "To ensure everything was saved correctly, we load the model back and test it:\n", + "\n",
+ "```python\n", + "print(\"\\n--- Loading Saved Fine-tuned Model ---\")\n", + "\n", + "# Load the saved model and tokenizer\n", + "saved_model_path = output_model_name  # \"qwen3-bullet-echo-qa-lora\"\n", + "loaded_model, loaded_tokenizer = FastModel.from_pretrained(\n", + "    model_name=saved_model_path,\n", + "    max_seq_length=2048,\n", + "    load_in_4bit=True,\n", + "    full_finetuning=False,\n", + ")\n", + "\n",
+ "# Enable faster inference\n", + "unsloth.FastModel.for_inference(loaded_model)\n", + "\n", + "print(\"Model successfully loaded for inference!\")\n", + "\n",
+ "# Test with new queries\n", + "print(\"\\n--- Testing Loaded Model Responses ---\")\n", + "\n", + "new_test_queries = [\n", + "    \"What's the best strategy for Cyclops in Bullet Echo?\",\n", + "    \"How does the Stalker's invisibility work in the game?\",\n", + "    \"Which heroes are effective against Bastion in Bullet Echo?\",\n", + "]\n", + "\n", + "for query in new_test_queries:\n", + "    generate_response(loaded_model, loaded_tokenizer, query, temperature=0.2)\n", + "```\n", + "\n",
+ "Notice we use a lower temperature (0.2) here to get more deterministic responses for easier comparison.\n", + "\n",
+ "### Deployment considerations\n", + "\n", + "When deploying your fine-tuned model for real-world use, consider these approaches:\n", + "\n",
+ "**For web applications:**\n", + "- Use the Hugging Face Inference API for managed hosting\n", + "- Deploy as a container with FastAPI or Flask for more control\n", + "- Consider quantizing to INT8 or INT4 for production efficiency\n", + "\n",
+ "**For local applications:**\n", + "- Export to ONNX format for faster CPU inference\n", + "- Use llama.cpp for optimized deployment on edge devices\n", + "- Consider merging the LoRA weights into the base model for simplified deployment (a sketch follows these lists)\n", + "\n",
+ "**For scaling considerations:**\n", + "- Use vLLM or text-generation-inference for higher throughput\n", + "- Implement caching for common queries\n", + "- Consider distilling into a smaller model for resource-constrained environments\n",
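+ "\n", + "For the merge option above, one possible route is PEFT's standard merge API, sketched below. This is an illustrative snippet, not the workflow used elsewhere in this guide: it loads the full-precision base model into memory (which needs substantial RAM for a 14B model), and the `-merged` output directory name is just an example. Unsloth also ships its own merged-save helpers if you prefer to stay within its API.\n", + "\n",
+ "```python\n", + "from peft import AutoPeftModelForCausalLM\n", + "from transformers import AutoTokenizer\n", + "\n",
+ "# Load the saved adapter on top of its base model, then fold the LoRA weights in.\n", + "merged = AutoPeftModelForCausalLM.from_pretrained(\n", + "    \"qwen3-bullet-echo-qa-lora\", torch_dtype=\"auto\"\n", + ").merge_and_unload()\n", + "\n",
+ "# Save a standalone model that no longer needs the adapter files.\n", + "merged.save_pretrained(\"qwen3-bullet-echo-qa-merged\")\n", + "AutoTokenizer.from_pretrained(\"qwen3-bullet-echo-qa-lora\").save_pretrained(\n", + "    \"qwen3-bullet-echo-qa-merged\"\n", + ")\n", + "```\n",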
+ "\n", + "By following these steps, you've successfully fine-tuned a powerful Qwen 3 model to create a specialized assistant for the Bullet Echo game. The resulting model can now answer domain-specific questions with accuracy and relevance, while maintaining the general capabilities of the base model.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion\n", + "\n",
+ "In this step-by-step guide, we've walked through the complete process of fine-tuning Qwen 3 on a custom dataset. We started by creating a specialized question-answer dataset from the Bullet Echo wiki using Firecrawl's AI-powered extraction capabilities, then prepared our training environment with appropriate hardware and memory optimizations. Through Parameter-Efficient Fine-Tuning with QLoRA, we were able to adapt a 14B parameter model while training only 0.23% of its parameters, making the process feasible on a single GPU. Our implementation of proper validation strategies, optimization techniques, and inference configuration resulted in a model that can accurately answer domain-specific questions about the Bullet Echo game.\n", + "\n",
+ "The techniques demonstrated here can be applied to create specialized AI assistants for virtually any domain. Whether you're building a customer support bot, a technical documentation assistant, or a domain-specific knowledge base, the combination of Firecrawl for dataset creation and Unsloth for optimized fine-tuning provides a powerful toolkit for customizing large language models. To create your own custom datasets for fine-tuning, consider exploring [Firecrawl's AI-powered extraction](https://firecrawl.dev) capabilities, which eliminate the need for complex web scraping code and make dataset creation accessible even without extensive technical knowledge. As language models continue to evolve, the ability to efficiently adapt them to specialized domains will remain a key competitive advantage for developers and organizations.\n", + "\n",
+ "> Don't forget to check out [the full code](https://github.com/mendableai/firecrawl-app-examples/tree/main/qwen3-fine-tuning) for this article from our GitHub repository."
+ ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/qwen3-fine-tuning/pyproject.toml b/qwen3-fine-tuning/pyproject.toml new file mode 100644 index 00000000..b6da56b1 --- /dev/null +++ b/qwen3-fine-tuning/pyproject.toml @@ -0,0 +1,7 @@ +[project] +name = "qwen3-fine-tuning" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.11" +dependencies = [] diff --git a/qwen3-fine-tuning/qwen3-fine-tune.ipynb b/qwen3-fine-tuning/qwen3-fine-tune.ipynb new file mode 100644 index 00000000..1ce1bfe4 --- /dev/null +++ b/qwen3-fine-tuning/qwen3-fine-tune.ipynb @@ -0,0 +1,838 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ce7ee550-9cc3-4725-8487-d9995378affc", + "metadata": {}, + "source": [ + "# Fine-tuning Qwen 3 on a Custom Dataset" + ] + }, + { + "cell_type": "markdown", + "id": "ec49a9fc-ba24-4583-bc07-9bbdb652d46c", + "metadata": {}, + "source": [ + "## Imports and setup" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "1247d75d-a193-4b69-8711-09ab4ebca7bc", + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install --no-deps bitsandbytes accelerate xformers==0.0.29.post3 peft trl==0.15.2 triton cut_cross_entropy unsloth_zoo\n", + "!pip install sentencepiece protobuf datasets huggingface_hub hf_transfer\n", + "!pip install --no-deps unsloth\n", + "!pip install regex transformers rich" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "842abacc-c719-4d84-97ab-42f0577a243b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING[XFORMERS]: xFormers can't load C++/CUDA extensions. xFormers was built for:\n", + " PyTorch 2.6.0+cu124 with CUDA 1204 (you have 2.8.0.dev20250319+cu128)\n", + " Python 3.11.11 (you have 3.11.11)\n", + " Please reinstall xformers (see https://github.com/facebookresearch/xformers#installing-xformers)\n", + " Memory-efficient attention, SwiGLU, sparse and more won't be available.\n", + " Set XFORMERS_MORE_DETAILS=1 for more details\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🦥 Unsloth Zoo will now patch everything to make training faster!\n" + ] + } + ], + "source": [ + "import unsloth\n", + "import torch\n", + "from unsloth import FastModel\n", + "from datasets import load_dataset\n", + "from trl import SFTTrainer, SFTConfig\n", + "from transformers import TextStreamer, GenerationConfig\n", + "import re" + ] + }, + { + "cell_type": "markdown", + "id": "2b315379-d5d2-43fa-ad0c-0bbec0dec8a5", + "metadata": {}, + "source": [ + "## Load model and tokenizer" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "a2d66215-3349-40ab-882d-b98fc974ca6e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loading Qwen3 model and tokenizer...\n", + "==((====))== Unsloth 2025.5.2: Fast Qwen3 patching. Transformers: 4.51.3.\n", + " \\\\ /| NVIDIA L40S. Num GPUs = 1. Max memory: 44.521 GB. Platform: Linux.\n", + "O^O/ \\_/ \\ Torch: 2.8.0.dev20250319+cu128. CUDA: 8.9. CUDA Toolkit: 12.8. Triton: 3.3.0\n", + "\\ / Bfloat16 = TRUE. FA [Xformers = None. 
FA2 = False]\n", + " \"-____-\" Free license: http://github.com/unslothai/unsloth\n", + "Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2c4727f7ba7b4e129932afaac8fa18b7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Loading checkpoint shards: 0%| | 0/3 [00:00\n", + " \n", + " \n", + " [1017/1017 24:13, Epoch 3/3]\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
StepTraining LossValidation Loss
2001.5719001.287612
4001.2114001.214332
6001.0810001.182069
8000.9604001.207953
10000.8795001.197931

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Unsloth: Not an error, but Qwen3ForCausalLM does not accept `num_items_in_batch`.\n", + "Using gradient accumulation will be very slightly less accurate.\n", + "Read more on gradient accumulation issues here: https://unsloth.ai/blog/gradient\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Training completed!\n" + ] + } + ], + "source": [ + "print(\"Starting fine-tuning process with validation...\")\n", + "training_results = trainer.train()\n", + "\n", + "# Print evaluation metrics\n", + "print(\"Training completed!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "4c8b5924-bb35-44b5-8c08-a25b745bb83b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Final training metrics: {'train_runtime': 1461.829, 'train_samples_per_second': 5.564, 'train_steps_per_second': 0.696, 'total_flos': 5.716715497264128e+16, 'train_loss': 1.136481964013804}\n" + ] + } + ], + "source": [ + "print(f\"Final training metrics: {training_results.metrics}\")" + ] + }, + { + "cell_type": "markdown", + "id": "5afb684e-c9fe-4501-be87-99867236582b", + "metadata": {}, + "source": [ + "## Model inference" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "acc1f03a-9ab1-4c16-86d6-6d1a1867132e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Setting up model for inference...\n" + ] + } + ], + "source": [ + "print(\"Setting up model for inference...\")\n", + "unsloth.FastModel.for_inference(model) # Enable native 2x faster inference\n", + "\n", + "\n", + "def generate_response(\n", + " model, tokenizer, query, temperature=0.7, top_p=0.9, max_new_tokens=256\n", + "):\n", + " \"\"\"\n", + " Generate a response from the fine-tuned model.\n", + "\n", + " Args:\n", + " model: The fine-tuned model\n", + " tokenizer: The tokenizer\n", + " query: The user query/question\n", + " temperature: Controls randomness in generation (lower = more deterministic)\n", + " top_p: Nucleus sampling parameter (lower = more focused)\n", + " max_new_tokens: Maximum new tokens to generate\n", + "\n", + " Returns:\n", + " Generated response text\n", + " \"\"\"\n", + " # Format the query as a chat message\n", + " messages = [{\"role\": \"user\", \"content\": query}]\n", + "\n", + " # Prepare model inputs\n", + " inputs = tokenizer.apply_chat_template(\n", + " messages, add_generation_prompt=True, return_tensors=\"pt\"\n", + " ).to(\"cuda\")\n", + "\n", + " # Create attention mask (all 1s) with the same shape as inputs\n", + " attention_mask = torch.ones_like(inputs).to(\"cuda\")\n", + "\n", + " # Configure generation parameters\n", + " generation_config = GenerationConfig(\n", + " temperature=temperature,\n", + " top_p=top_p,\n", + " do_sample=True,\n", + " max_new_tokens=max_new_tokens,\n", + " pad_token_id=tokenizer.pad_token_id,\n", + " eos_token_id=tokenizer.eos_token_id,\n", + " remove_invalid_values=True,\n", + " # Disable thinking tags\n", + " suppression_tokens=(\n", + " [\n", + " tokenizer.encode(\"\", add_special_tokens=False)[0],\n", + " tokenizer.encode(\"\", add_special_tokens=False)[0],\n", + " ]\n", + " if len(tokenizer.encode(\"\", add_special_tokens=False)) > 0\n", + " else None\n", + " ),\n", + " )\n", + "\n", + " # Custom text filtering function\n", + " def filter_thinking(text):\n", + " # Remove anything 
between and tags\n", + " text = re.sub(r\".*?\", \"\", text, flags=re.DOTALL)\n", + " # Remove any remaining or tags\n", + " text = re.sub(r\"|\", \"\", text)\n", + " return text\n", + "\n", + " # Custom streamer class to filter thinking tags\n", + " class FilteredTextStreamer(TextStreamer):\n", + " def on_finalized_text(self, text: str, stream_end: bool = False):\n", + " filtered_text = filter_thinking(text)\n", + " if filtered_text.strip(): # Only print non-empty text\n", + " print(filtered_text, end=\"\", flush=True)\n", + "\n", + " # Initialize filtered text streamer\n", + " streamer = FilteredTextStreamer(\n", + " tokenizer, skip_prompt=True, skip_special_tokens=True\n", + " )\n", + "\n", + " # Display query\n", + " print(f\"User: {query}\")\n", + " print(\"Assistant:\")\n", + "\n", + " # Generate response\n", + " output = model.generate(\n", + " inputs,\n", + " attention_mask=attention_mask,\n", + " generation_config=generation_config,\n", + " streamer=streamer,\n", + " return_dict_in_generate=True,\n", + " output_scores=False,\n", + " )\n", + "\n", + " # For non-streaming use (optional):\n", + " # output_text = tokenizer.decode(output.sequences[0], skip_special_tokens=True)\n", + " # return filter_thinking(output_text)\n", + "\n", + " print(\"\\n\") # Add a newline after generation\n", + " return None # Since we're streaming, we don't return the output" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "5cbd2416-8b12-4c8e-86c0-a671d2b8f7a4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--- Testing Model Responses ---\n", + "User: What's the best strategy for Cyclops in Bullet Echo?\n", + "Assistant:\n", + "The best strategy for Cyclops is to stay hidden, as this is his greatest strength. He excels in ambush tactics, allowing him to surprise enemies and maximize his effectiveness in stealthy encounters.\n", + "\n", + "User: How does the Stalker's invisibility work in the game?\n", + "Assistant:\n", + "The Stalker uses a special ability called invisibility, which makes the character temporarily undetectable by opponents. This is often used for stealth movements and surprise attacks.\n", + "\n", + "User: Which heroes are effective against Bastion in Bullet Echo?\n", + "Assistant:\n", + "Heroes with high damage and quick movement, such as Levi, Blot, and Lynx, are effective against Bastion due to their ability to deal quick hits and outmaneuver his slow but powerful attacks.\n", + "\n" + ] + } + ], + "source": [ + "# Test the model with sample queries\n", + "print(\"\\n--- Testing Model Responses ---\")\n", + "\n", + "test_queries = [\n", + " \"What's the best strategy for Cyclops in Bullet Echo?\",\n", + " \"How does the Stalker's invisibility work in the game?\",\n", + " \"Which heroes are effective against Bastion in Bullet Echo?\",\n", + "]\n", + "\n", + "for query in test_queries:\n", + " generate_response(model, tokenizer, query)" + ] + }, + { + "cell_type": "markdown", + "id": "85578bd7-3e0c-4424-a4ec-ac58385f32d8", + "metadata": {}, + "source": [ + "## Save model" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "992a07bb-2d65-4fd4-acbc-cca8eafdeae7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Saving fine-tuned model...\n", + "Model successfully saved to: ./qwen3-bullet-echo-qa-lora\n", + "\n", + "🦥 Fine-tuning script completed successfully! 
🦥\n" + ] + } + ], + "source": [ + "print(\"\\nSaving fine-tuned model...\")\n", + "output_model_name = \"qwen3-bullet-echo-qa-lora\"\n", + "model.save_pretrained(output_model_name)\n", + "tokenizer.save_pretrained(output_model_name)\n", + "print(f\"Model successfully saved to: ./{output_model_name}\")\n", + "\n", + "# Optional: Push to Hugging Face Hub\n", + "# from huggingface_hub import login\n", + "# login()\n", + "# hub_model_id = f\"your-hf-username/{output_model_name}\"\n", + "# model.push_to_hub(hub_model_id)\n", + "# tokenizer.push_to_hub(hub_model_id)\n", + "# print(f\"Model pushed to Hugging Face Hub: {hub_model_id}\")\n", + "\n", + "print(\"\\n🦥 Fine-tuning script completed successfully! 🦥\")" + ] + }, + { + "cell_type": "markdown", + "id": "91a89ca4-a37b-4f0f-aebf-e0a9202ea45b", + "metadata": {}, + "source": [ + "## Load saved model" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "26bcfd58-7dd0-458b-a80d-4fd5dccf33ec", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--- Loading Saved Fine-tuned Model ---\n", + "==((====))== Unsloth 2025.5.2: Fast Qwen3 patching. Transformers: 4.51.3.\n", + " \\\\ /| NVIDIA L40S. Num GPUs = 1. Max memory: 44.521 GB. Platform: Linux.\n", + "O^O/ \\_/ \\ Torch: 2.8.0.dev20250319+cu128. CUDA: 8.9. CUDA Toolkit: 12.8. Triton: 3.3.0\n", + "\\ / Bfloat16 = TRUE. FA [Xformers = None. FA2 = False]\n", + " \"-____-\" Free license: http://github.com/unslothai/unsloth\n", + "Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "f2c5052a639e46c7913e00107d193279", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Loading checkpoint shards: 0%| | 0/3 [00:00