From 63be80aee1824327e51dde0cdcd1e3d014d4f707 Mon Sep 17 00:00:00 2001
From: yjianpen
Date: Sat, 23 Aug 2025 17:23:17 -0700
Subject: [PATCH] Add problem 140: implement LoRA (low-rank adaptation) from
 scratch

---
 Problems/140_low_rank_adaptation/learn.md    | 25 +++++++++++++++
 Problems/140_low_rank_adaptation/solution.py | 62 ++++++++++++++++++++++++++++
 2 files changed, 87 insertions(+)
 create mode 100644 Problems/140_low_rank_adaptation/learn.md
 create mode 100644 Problems/140_low_rank_adaptation/solution.py

diff --git a/Problems/140_low_rank_adaptation/learn.md b/Problems/140_low_rank_adaptation/learn.md
new file mode 100644
index 00000000..afd84213
--- /dev/null
+++ b/Problems/140_low_rank_adaptation/learn.md
@@ -0,0 +1,25 @@
+# Learn Section
+
+### Low-Rank Adaptation (LoRA)
+
+Fine-tuning every weight of a large pretrained model is expensive: a single
+linear layer with weight $W \in \mathbb{R}^{d_{in} \times d_{out}}$ already
+has $d_{in} \cdot d_{out}$ trainable parameters. LoRA (Hu et al., 2021)
+freezes the pretrained weights and instead learns a low-rank update.
+
+1. **The low-rank update**:
+   - Rather than learning a full correction $\Delta W \in \mathbb{R}^{d_{in} \times d_{out}}$, LoRA factorizes it as $\Delta W = A B$ with $A \in \mathbb{R}^{d_{in} \times r}$ and $B \in \mathbb{R}^{r \times d_{out}}$, where the rank $r \ll \min(d_{in}, d_{out})$.
+   - This cuts the number of trainable parameters from $d_{in} \cdot d_{out}$ down to $r \, (d_{in} + d_{out})$.
+
+2. **Forward pass**:
+   - The adapted layer computes
+   $$
+   y = x W + \frac{\alpha}{r} \, x A B
+   $$
+   where $\alpha$ is a constant scaling hyperparameter and $W$ stays frozen.
+
+3. **Initialization**:
+   - $A$ is initialized with scaled Gaussian noise and $B$ with zeros, so $\Delta W = A B = 0$ at the start of training and the adapted layer initially behaves exactly like the frozen one.
+
+4. **Why it works**:
+   - The LoRA paper hypothesizes that the updates learned during fine-tuning have low intrinsic rank, so a rank-$r$ factorization captures most of the adaptation at a small fraction of the parameter and optimizer-state cost.
diff --git a/Problems/140_low_rank_adaptation/solution.py b/Problems/140_low_rank_adaptation/solution.py
new file mode 100644
index 00000000..86d570d8
--- /dev/null
+++ b/Problems/140_low_rank_adaptation/solution.py
@@ -0,0 +1,62 @@
+import torch
+import torch.nn as nn
+
+
+class LoRALinear(nn.Module):
+    def __init__(self, original_layer_shape: tuple, rank: int, alpha: float, init_scaling_factor: float, random_seed: int):
+        super().__init__()
+        # Build the original (pretrained) layer and freeze its weights.
+        # The shape tuple is (in_features, out_features).
+        in_features, out_features = original_layer_shape
+        self.original_layer = nn.Linear(in_features, out_features)
+        self.original_layer.requires_grad_(False)
+
+        # Initialize the low-rank matrices A and B. A starts as scaled
+        # Gaussian noise and B starts at zero, so the update A @ B is
+        # zero at initialization and training begins from the frozen
+        # layer's behavior.
+        g = torch.Generator()
+        g.manual_seed(random_seed)
+        self.lora_A = nn.Parameter(torch.randn(in_features, rank, generator=g) * init_scaling_factor)
+        self.lora_B = nn.Parameter(torch.zeros(rank, out_features))
+
+        # The scaling factor alpha / rank keeps the magnitude of the
+        # update comparable across different choices of rank.
+        self.scaling = alpha / rank
+
+    def forward(self, x):
+        # Accept array-like input; leave tensors untouched so autograd
+        # history is preserved.
+        if not isinstance(x, torch.Tensor):
+            x = torch.tensor(x, dtype=torch.float32)
+
+        # Original (frozen) output
+        original_output = self.original_layer(x)
+
+        # Low-rank update: (x @ self.lora_A) has shape (..., rank) and
+        # (x @ self.lora_A @ self.lora_B) has shape (..., out_features).
+        lora_update = x @ self.lora_A @ self.lora_B
+
+        # Final output is the sum of the original output and the scaled
+        # LoRA update.
+        return original_output + lora_update * self.scaling
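+
+
+if __name__ == "__main__":
+    # A minimal smoke test, illustrative only: the layer sizes, rank,
+    # alpha, and seed below are arbitrary choices, not part of the
+    # problem statement.
+    layer = LoRALinear(original_layer_shape=(8, 4), rank=2, alpha=4.0,
+                       init_scaling_factor=0.01, random_seed=0)
+
+    x = torch.randn(3, 8)
+    y = layer(x)
+    print(y.shape)  # torch.Size([3, 4])
+
+    # Only the LoRA matrices should be trainable.
+    trainable = [n for n, p in layer.named_parameters() if p.requires_grad]
+    print(trainable)  # ['lora_A', 'lora_B']
+
+    # With B initialized to zero, the LoRA update vanishes at the start,
+    # so the adapted layer matches the frozen layer exactly.
+    assert torch.allclose(y, layer.original_layer(x))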