diff --git a/.github/classroom/autograding.json b/.github/classroom/autograding.json
new file mode 100644
index 0000000..d6ec51d
--- /dev/null
+++ b/.github/classroom/autograding.json
@@ -0,0 +1,54 @@
+{
+ "tests": [
+ {
+ "name": "Task 4.1 - 1D Convolution",
+ "setup": "pip install -e .",
+ "run": "python -m pytest -m task4_1 --tb=no -q",
+ "input": "",
+ "output": "",
+ "comparison": "included",
+ "timeout": 15,
+ "points": 25
+ },
+ {
+ "name": "Task 4.2 - 2D Convolution",
+ "setup": "",
+ "run": "python -m pytest -m task4_2 --tb=no -q",
+ "input": "",
+ "output": "",
+ "comparison": "included",
+ "timeout": 15,
+ "points": 25
+ },
+ {
+ "name": "Task 4.3 - Pooling Operations",
+ "setup": "",
+ "run": "python -m pytest -m task4_3 --tb=no -q",
+ "input": "",
+ "output": "",
+ "comparison": "included",
+ "timeout": 15,
+ "points": 25
+ },
+ {
+ "name": "Task 4.4 - Advanced NN Functions",
+ "setup": "",
+ "run": "python -m pytest -m task4_4 --tb=no -q",
+ "input": "",
+ "output": "",
+ "comparison": "included",
+ "timeout": 15,
+ "points": 15
+ },
+ {
+ "name": "Style Check",
+ "setup": "",
+ "run": "python -m ruff check . && python -m pyright",
+ "input": "",
+ "output": "",
+ "comparison": "included",
+ "timeout": 10,
+ "points": 10
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/workflows/classroom.yaml b/.github/workflows/classroom.yaml
new file mode 100644
index 0000000..43af394
--- /dev/null
+++ b/.github/workflows/classroom.yaml
@@ -0,0 +1,78 @@
+name: Autograding Tests
+'on':
+- push
+- repository_dispatch
+permissions:
+ checks: write
+ actions: read
+ contents: read
+jobs:
+ run-autograding-tests:
+ runs-on: ubuntu-latest
+ if: github.actor != 'github-classroom[bot]'
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ - name: Setup Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.9'
+ - name: Install dependencies and package
+ run: |
+ python -m pip install --upgrade pip
+ pip install -e ".[dev,extra]"
+ - name: Task 4.1 - 1D Convolution
+ id: task-4-1-1d-convolution
+ uses: classroom-resources/autograding-command-grader@v1
+ with:
+ test-name: Task 4.1 - 1D Convolution
+ setup-command: ''
+ command: python -m pytest -m task4_1 --tb=no -q
+ timeout: 15
+ max-score: 25
+ - name: Task 4.2 - 2D Convolution
+ id: task-4-2-2d-convolution
+ uses: classroom-resources/autograding-command-grader@v1
+ with:
+ test-name: Task 4.2 - 2D Convolution
+ setup-command: ''
+ command: python -m pytest -m task4_2 --tb=no -q
+ timeout: 15
+ max-score: 25
+ - name: Task 4.3 - Pooling Operations
+ id: task-4-3-pooling-operations
+ uses: classroom-resources/autograding-command-grader@v1
+ with:
+ test-name: Task 4.3 - Pooling Operations
+ setup-command: ''
+ command: python -m pytest -m task4_3 --tb=no -q
+ timeout: 15
+ max-score: 25
+ - name: Task 4.4 - Advanced NN Functions
+ id: task-4-4-advanced-nn-functions
+ uses: classroom-resources/autograding-command-grader@v1
+ with:
+ test-name: Task 4.4 - Advanced NN Functions
+ setup-command: ''
+ command: python -m pytest -m task4_4 --tb=no -q
+ timeout: 15
+ max-score: 15
+ - name: Style Check
+ id: style-check
+ uses: classroom-resources/autograding-command-grader@v1
+ with:
+ test-name: Style Check
+ setup-command: ''
+ command: python -m ruff check . && python -m pyright
+ timeout: 10
+ max-score: 10
+ - name: Autograding Reporter
+ uses: classroom-resources/autograding-grading-reporter@v1
+ env:
+ TASK-4-1-1D-CONVOLUTION_RESULTS: "${{steps.task-4-1-1d-convolution.outputs.result}}"
+ TASK-4-2-2D-CONVOLUTION_RESULTS: "${{steps.task-4-2-2d-convolution.outputs.result}}"
+ TASK-4-3-POOLING-OPERATIONS_RESULTS: "${{steps.task-4-3-pooling-operations.outputs.result}}"
+ TASK-4-4-ADVANCED-NN-FUNCTIONS_RESULTS: "${{steps.task-4-4-advanced-nn-functions.outputs.result}}"
+ STYLE-CHECK_RESULTS: "${{steps.style-check.outputs.result}}"
+ with:
+ runners: task-4-1-1d-convolution,task-4-2-2d-convolution,task-4-3-pooling-operations,task-4-4-advanced-nn-functions,style-check
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d8ea226..f8642d8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,44 +1,15 @@
-# To use:
-#
-# pre-commit run -a
-#
-# Or:
-#
-# pre-commit install # (runs every time you commit in git)
-#
-# To update this file:
-#
-# pre-commit autoupdate
-#
-# See https://github.com/pre-commit/pre-commit
-
repos:
-# Standard hooks
-- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.3.0
- hooks:
- - id: check-added-large-files
- - id: check-case-conflict
- - id: check-docstring-first
- - id: check-merge-conflict
- - id: check-symlinks
- - id: check-toml
- - id: debug-statements
- - id: mixed-line-ending
- - id: requirements-txt-fixer
- - id: trailing-whitespace
-
-- repo: https://github.com/astral-sh/ruff-pre-commit
- # Ruff version.
- rev: v0.6.1
- hooks:
- # Run the linter.
- - id: ruff
- args: [ --fix ]
- # Run the formatter.
- - id: ruff-format
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ # Ruff version.
+ rev: v0.8.4
+ hooks:
+ # Run the linter.
+ - id: ruff
+ args: [ --fix ]
+ # Run the formatter.
+ - id: ruff-format
-- repo: https://github.com/RobertCraigie/pyright-python
- rev: v1.1.376
- hooks:
- - id: pyright
+ - repo: https://github.com/RobertCraigie/pyright-python
+ rev: v1.1.390
+ hooks:
+ - id: pyright
\ No newline at end of file
diff --git a/README.md b/README.md
index 50ccd77..051deb1 100644
--- a/README.md
+++ b/README.md
@@ -1,36 +1,139 @@
-# MiniTorch Module 4
+# MiniTorch Module 4: Neural Networks and Convolutions
+[![Autograding Tests](https://github.com/minitorch/minitorch/actions/workflows/classroom.yaml/badge.svg)](https://github.com/minitorch/minitorch/actions)
+
* Docs: https://minitorch.github.io/
* Overview: https://minitorch.github.io/module4.html
-This module requires `fast_ops.py`, `cuda_ops.py`, `scalar.py`, `tensor_functions.py`, `tensor_data.py`, `tensor_ops.py`, `operators.py`, `module.py`, and `autodiff.py` from Module 3.
+Module 4 extends MiniTorch with convolutional neural network operations to build an image recognition system. You'll implement a version of LeNet on MNIST for digit recognition and 1D convolution for NLP sentiment classification:
+- **1D and 2D convolution** for feature extraction and image processing
+- **Pooling operations** for spatial down-sampling and dimension reduction
+- **Advanced NN functions** including softmax, dropout, and max operations
+- **MNIST digit classification** and **SST2 sentiment analysis** with CNNs
-Additionally you will need to install and download the MNist library.
+## Quick Start
-(On Mac, this may require installing the `wget` command)
+```bash
+# Install dependencies
+pip install -e ".[dev,extra]"
-```
+# Set up MNIST dataset
pip install python-mnist
mnist_get_data.sh
+
+# Sync files from Module 3
+python sync_previous_module.py ../Module-3 .
+
+# Run Module 4 tests
+pytest -m task4_1
+pytest -m task4_2
+pytest -m task4_3
+pytest -m task4_4
+
+# Train CNN models
+python project/run_mnist_multiclass.py # MNIST digits
+python project/run_sentiment.py # SST2 sentiment
+
+# Run style checks
+pre-commit run --all-files
```
+## Tasks Overview
+
+### Task 4.1: 1D Convolution
+**File**: `minitorch/fast_conv.py`
+- Implement `_tensor_conv1d()` function
+- Support forward and backward convolution with parallel processing
+
+### Task 4.2: 2D Convolution
+**File**: `minitorch/fast_conv.py`
+- Implement `_tensor_conv2d()` function
+- Optimize for image processing with efficient memory access
+
+### Task 4.3: Pooling Operations
+**File**: `minitorch/nn.py`
+- Implement `tile()` tensor reshaping function
+- Implement `avgpool2d()` for average pooling
+
+### Task 4.4: Advanced Neural Network Functions
+**File**: `minitorch/nn.py`
+- Implement `max()`, `softmax()`, `logsoftmax()`, and `dropout()`
+- Implement `maxpool2d()` for max pooling operations
+- Add property tests and ensure gradient computation correctness
+
+### Task 4.4b: Extra Credit (CUDA Convolution)
+**File**: `minitorch/cuda_conv.py`
+- Implement `conv1d` and `conv2d` on CUDA for efficient GPU processing
+- Critical for large-scale image recognition performance
+- Show output on Google Colab
+
+### Task 4.5: Training an Image Classifier
+**Files**: `project/run_sentiment.py` and `project/run_mnist_multiclass.py`
+- Implement Conv1D, Conv2D, and Network for both sentiment and image classification
+- Train models on SST2 sentiment data and MNIST digit classification
+- Use Streamlit visualization to view hidden states of your model
-* Tests:
+**Training Requirements:**
+- Train a model on Sentiment (SST2), add training logs as `sentiment.txt` showing train loss, train accuracy and validation accuracy (should achieve >70% best validation accuracy)
+- Train a model on Digit classification (MNIST), add logs as `mnist.txt` showing train loss and validation accuracy
+- Implement Conv1D, Conv2D, and Network classes for both training files
+## Testing
+
+**Module 4 Tasks:**
+```bash
+pytest -m task4_1 # 1D convolution
+pytest -m task4_2 # 2D convolution
+pytest -m task4_3 # Pooling operations
+pytest -m task4_4 # Advanced NN functions
```
-python run_tests.py
+
+**CNN Training:**
+```bash
+python project/run_mnist_multiclass.py # MNIST digit classification
+python project/run_sentiment.py # SST2 sentiment analysis
+```
+
+**Style Checks:**
+```bash
+pre-commit run --all-files
+ruff check . && pyright
```
-This assignment requires the following files from the previous assignments. You can get these by running
+## CNN Applications
+
+**MNIST digit classification with CNNs:**
+- Convolutional feature extraction
+- Spatial pooling for dimension reduction
+- Performance comparison with fully-connected networks
+
+**Expected improvements over fully-connected:**
+- Fewer parameters through weight sharing
+- Translation invariance for image recognition
+- Hierarchical feature learning
+
+## Module Requirements
+
+This module requires files from previous assignments, so make sure to pull them over to your new repo. We recommend getting familiar with `tensor.py`, since you might find some of those functions useful for implementing this module.
+
+Get the required files by running:
```bash
-python sync_previous_module.py previous-module-dir current-module-dir
+python sync_previous_module.py <previous-module-dir> <current-module-dir>
```
-The files that will be synced are:
+**Required files from Module 3:**
+- All tensor system files and fast operations
+- Module framework and autodifferentiation
+- Training scripts and datasets
+
+## Resources
- minitorch/tensor_data.py minitorch/tensor_functions.py minitorch/tensor_ops.py minitorch/operators.py minitorch/scalar.py minitorch/scalar_functions.py minitorch/module.py minitorch/autodiff.py minitorch/module.py project/run_manual.py project/run_scalar.py project/run_tensor.py minitorch/operators.py minitorch/module.py minitorch/autodiff.py minitorch/tensor.py minitorch/datasets.py minitorch/testing.py minitorch/optim.py minitorch/tensor_ops.py minitorch/fast_ops.py minitorch/cuda_ops.py project/parallel_check.py tests/test_tensor_general.py
\ No newline at end of file
+- **[Installation Guide](installation.md)**: Setup instructions with MNIST
+- **[Testing Guide](testing.md)**: CNN testing strategies
+- **MiniTorch Docs**: https://minitorch.github.io/
+- **Module 4 Overview**: https://minitorch.github.io/module4.html
\ No newline at end of file
diff --git a/installation.md b/installation.md
new file mode 100644
index 0000000..b7058ad
--- /dev/null
+++ b/installation.md
@@ -0,0 +1,108 @@
+# MiniTorch Module 4 Installation
+
+MiniTorch requires Python 3.8 or higher. To check your version of Python, run:
+
+```bash
+>>> python --version
+```
+
+We recommend creating a global MiniTorch workspace directory that you will use
+for all modules:
+
+```bash
+>>> mkdir workspace; cd workspace
+```
+
+## Environment Setup
+
+We highly recommend setting up a *virtual environment*. The virtual environment lets you install packages that are only used for your assignments and do not impact the rest of the system.
+
+**Option 1: Anaconda (Recommended)**
+```bash
+>>> conda create --name minitorch python # Run only once
+>>> conda activate minitorch
+>>> conda install llvmlite # For optimization
+```
+
+**Option 2: Venv**
+```bash
+>>> python -m venv venv # Run only once
+>>> source venv/bin/activate
+```
+
+The first line should be run only once, whereas the second needs to be run whenever you open a new terminal to get started for the class. You can tell if it works by checking if your terminal starts with `(minitorch)` or `(venv)`.
+
+## Getting the Code
+
+Each assignment is distributed through a Git repo. Once you accept the assignment from GitHub Classroom, a personal repository under Cornell-Tech-ML will be created for you. You can then clone this repository to start working on your assignment.
+
+```bash
+>>> git clone {{ASSIGNMENT}}
+>>> cd {{ASSIGNMENT}}
+```
+
+## Installation
+
+Install all packages in your virtual environment:
+
+```bash
+>>> python -m pip install -e ".[dev,extra]"
+```
+
+## Syncing Previous Module Files
+
+Module 4 requires files from Module 0, Module 1, Module 2, and Module 3. Sync them using:
+
+```bash
+>>> python sync_previous_module.py <source_dir> <dest_dir>
+```
+
+Example:
+```bash
+>>> python sync_previous_module.py ../Module-3 .
+```
+
+Replace `<source_dir>` with the path to your Module 3 directory and `<dest_dir>` with `.` for the current directory.
+
+This will copy the following required files:
+- `minitorch/tensor_data.py`
+- `minitorch/tensor_functions.py`
+- `minitorch/tensor_ops.py`
+- `minitorch/operators.py`
+- `minitorch/scalar.py`
+- `minitorch/scalar_functions.py`
+- `minitorch/module.py`
+- `minitorch/autodiff.py`
+- `minitorch/tensor.py`
+- `minitorch/datasets.py`
+- `minitorch/testing.py`
+- `minitorch/optim.py`
+- `minitorch/fast_ops.py`
+- `minitorch/cuda_ops.py`
+- `project/run_manual.py`
+- `project/run_scalar.py`
+- `project/run_tensor.py`
+- `project/parallel_check.py`
+- `tests/test_tensor_general.py`
+
+## MNIST Dataset Setup
+
+Module 4 requires the MNIST handwritten digit dataset for CNN training. Install and download the dataset:
+
+```bash
+>>> pip install python-mnist
+>>> mnist_get_data.sh
+```
+
+On Mac, this may require installing the `wget` command:
+```bash
+>>> brew install wget
+```
+
+## CUDA Support (Optional - for Task 4.4b Extra Credit)
+
+Task 4.4b requires CUDA and should be completed on **Google Colab** with GPU runtime:
+
+1. **Upload your Module-4 files to Google Colab**
+2. **Enable GPU runtime**: Runtime → Change runtime type → Hardware accelerator: GPU
+3. **Install in Colab**: Follow the [Module-3 Colab guide](https://colab.research.google.com/drive/1gyUFUrCXdlIBz9DYItH9YN3gQ2DvUMsI?usp=sharing) for CUDA setup, then install Module-4 with CUDA support:
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 442ba84..8782b20 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,17 +5,53 @@ build-backend = "hatchling.build"
[project]
name = "minitorch"
version = "0.5"
+description = "A minimal deep learning library for educational purposes"
+requires-python = ">=3.8"
+dependencies = [
+ "colorama==0.4.6",
+ "hypothesis==6.138.2",
+ "numba>=0.61.2",
+ "numpy<2.0",
+ "pytest==8.4.1",
+ "pytest-env==1.1.5",
+ "typing_extensions",
+]
+
+[project.optional-dependencies]
+dev = [
+ "pre-commit==4.3.0",
+ "ruff>=0.8.0",
+ "pyright>=1.1.390",
+]
+cuda = [
+ "numba-cuda[cu12]>=0.4.0",
+]
+extra = [
+ "python-mnist",
+ "streamlit==1.48.1",
+ "streamlit-ace",
+ "plotly==5.24.1",
+ "torch>=2.9.0",
+ "datasets==2.4.0",
+ "embeddings==0.0.8",
+ "networkx==3.5",
+ "pydot==1.4.1",
+ "watchdog==1.0.2",
+ "altair==4.2.2",
+]
[tool.pyright]
include = ["**/minitorch"]
-ignore = [
+exclude = [
"**/docs",
- "**/docs/module1/**",
+ "**/docs/module4/**",
"**/assignments",
"**/project",
"**/mt_diagrams",
"**/.*",
"*chainrule.py*",
+ "**/minitorch/autodiff.py",
+ "sync_previous_module.py",
]
venvPath = "."
venv = ".venv"
@@ -30,6 +66,7 @@ reportUnknownLambdaType = "none"
reportIncompatibleMethodOverride = "none"
reportPrivateUsage = "none"
reportMissingParameterType = "error"
+reportMissingImports = "none"
[tool.pytest.ini_options]
@@ -61,7 +98,6 @@ markers = [
"task4_4",
]
[tool.ruff]
-
exclude = [
".git",
"__pycache__",
@@ -72,10 +108,22 @@ exclude = [
"**/mt_diagrams/*",
"**/minitorch/testing.py",
"**/docs/**/*",
+ "minitorch/optim.py",
+ "minitorch/datasets.py",
+ "minitorch/scalar.py",
+ "minitorch/autodiff.py",
+ "minitorch/module.py",
+ "minitorch/tensor.py",
+ "minitorch/tensor_data.py",
+ "minitorch/tensor_functions.py",
+ "minitorch/tensor_ops.py",
+ "minitorch/fast_ops.py",
+ "minitorch/cuda_ops.py",
+ "sync_previous_module.py",
]
+[tool.ruff.lint]
ignore = [
- "ANN101",
"ANN401",
"N801",
"E203",
@@ -96,7 +144,7 @@ ignore = [
"D107",
"D213",
"ANN204",
- "ANN102",
+ "D203"
]
select = ["D", "E", "F", "N", "ANN"]
fixable = [
@@ -147,5 +195,7 @@ fixable = [
]
unfixable = []
-[tool.ruff.extend-per-file-ignores]
+[tool.ruff.lint.extend-per-file-ignores]
"tests/**/*.py" = ["D"]
+"minitorch/scalar_functions.py" = ["ANN001", "ANN201"]
+"minitorch/tensor_functions.py" = ["ANN001", "ANN201"]
diff --git a/requirements.extra.txt b/requirements.extra.txt
deleted file mode 100644
index 070fa1d..0000000
--- a/requirements.extra.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-datasets==2.4.0
-embeddings==0.0.8
-plotly==4.14.3
-pydot==1.4.1
-python-mnist
-streamlit==1.12.0
-streamlit-ace
-torch
-watchdog==1.0.2
-altair==4.2.2
-networkx==3.3
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index c9cd8a0..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-colorama==0.4.3
-hypothesis == 6.54
-numba == 0.60
-numpy == 2.0.0
-pre-commit == 2.20.0
-pytest == 8.3.2
-pytest-env
-pytest-runner == 5.2
-typing_extensions
diff --git a/setup.py b/setup.py
deleted file mode 100644
index ff4cfa9..0000000
--- a/setup.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from setuptools import setup
-
-setup(py_modules=[])
diff --git a/sync_previous_module.py b/sync_previous_module.py
index 9110bf9..ef67b0f 100644
--- a/sync_previous_module.py
+++ b/sync_previous_module.py
@@ -1,50 +1,72 @@
"""
-Description:
-Note: Make sure that both the new and old module files are in same directory!
+Sync Previous Module Files
-This script helps you sync your previous module works with current modules.
-It takes 2 arguments, source_dir_name and destination_dir_name.
-All the files which will be moved are specified in files_to_sync.txt as newline separated strings
+This script helps you sync files from your previous module to the current module.
+It copies files specified in 'files_to_sync.txt' from the source directory to the destination directory.
-Usage: python sync_previous_module.py
+Usage: python sync_previous_module.py <source_dir> <dest_dir>
-Ex: python sync_previous_module.py mle-module-0-sauravpanda24 mle-module-1-sauravpanda24
+Examples:
+ python sync_previous_module.py ./my-awesome-module-3 ./my-awesome-module-4
+ python sync_previous_module.py ~/assignments/Module-3-unicorn_ninja ~/assignments/Module-4-unicorn_ninja
"""
import os
import shutil
import sys
-if len(sys.argv) != 3:
- print(
- "Invalid argument count! Please pass source directory and destination directory after the file name"
- )
- sys.exit()
+def print_usage():
+ """Print usage information and examples."""
+ print(__doc__)
-# Get the users path to evaluate the username and root directory
-current_path = os.getcwd()
-grandparent_path = "/".join(current_path.split("/")[:-1])
+def read_files_to_sync():
+ """Read the list of files to sync from files_to_sync.txt"""
+ try:
+ with open("files_to_sync.txt", "r") as f:
+ return f.read().splitlines()
+ except FileNotFoundError:
+ print("Error: files_to_sync.txt not found!")
+ sys.exit(1)
-print("Looking for modules in : ", grandparent_path)
+def sync_files(source, dest, files_to_move):
+ """Copy files from source to destination directory."""
+ if not os.path.exists(source):
+ print(f"Error: Source directory '{source}' does not exist!")
+ sys.exit(1)
-# List of files which we want to move
-f = open("files_to_sync.txt", "r+")
-files_to_move = f.read().splitlines()
-f.close()
+ if not os.path.exists(dest):
+ print(f"Error: Destination directory '{dest}' does not exist!")
+ sys.exit(1)
-# get the source and destination from arguments
-source = sys.argv[1]
-dest = sys.argv[2]
-
-# copy the files from source to destination
-try:
+ copied_files = 0
for file in files_to_move:
- print(f"Moving file : ", file)
- shutil.copy(
- os.path.join(grandparent_path, source, file),
- os.path.join(grandparent_path, dest, file),
- )
- print(f"Finished moving {len(files_to_move)} files")
-except Exception as e:
- print(
- "Something went wrong! please check if the source and destination folders are present in same folder"
- )
+ source_path = os.path.join(source, file)
+ dest_path = os.path.join(dest, file)
+
+ if not os.path.exists(source_path):
+ print(f"Warning: File '{file}' not found in source directory, skipping")
+ continue
+
+ try:
+ os.makedirs(os.path.dirname(dest_path), exist_ok=True)
+ shutil.copy(source_path, dest_path)
+ print(f"Copied: {file}")
+ copied_files += 1
+ except Exception as e:
+ print(f"Error copying '{file}': {e}")
+
+ print(f"Finished copying {copied_files} files")
+
+def main():
+ if len(sys.argv) != 3:
+ print("Error: Invalid number of arguments!")
+ print_usage()
+ sys.exit(1)
+
+ source = sys.argv[1]
+ dest = sys.argv[2]
+ files_to_move = read_files_to_sync()
+
+ sync_files(source, dest, files_to_move)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/testing.md b/testing.md
new file mode 100644
index 0000000..420b4eb
--- /dev/null
+++ b/testing.md
@@ -0,0 +1,66 @@
+## Testing Your Implementation
+
+### Running Tests
+
+This project uses pytest for testing. Tests are organized by task:
+
+```bash
+# Module 4 Tasks - Run locally
+pytest -m task4_1 # 1D convolution
+pytest -m task4_2 # 2D convolution
+pytest -m task4_3 # Pooling operations
+pytest -m task4_4 # Advanced NN functions
+
+# Run all tests
+pytest
+
+# Run tests with verbose output
+pytest -v
+
+# Run a specific test file
+pytest tests/test_conv.py # Convolution tests
+pytest tests/test_nn.py # Neural network tests
+
+# Run a specific test function
+pytest tests/test_conv.py::test_conv1d_simple
+pytest tests/test_nn.py::test_softmax
+```
+
+### MNIST Dataset Testing
+
+**Module 4 requires MNIST dataset for CNN training:**
+
+Before running CNN training tests, ensure MNIST is properly installed:
+```bash
+# Verify MNIST dataset
+python -c "import mnist; print('MNIST available')"
+
+# Test MNIST loading in MiniTorch
+python project/run_mnist_multiclass.py
+```
+
+### Style and Code Quality Checks
+
+This project enforces code style and quality using several tools:
+
+```bash
+# Run all pre-commit hooks (recommended)
+pre-commit run --all-files
+
+# Individual style checks:
+ruff check . # Linting (style, imports, docstrings)
+ruff format . # Code formatting
+pyright . # Type checking
+```
+
+### Pre-commit Hooks (Automatic Style Checking)
+
+The project uses pre-commit hooks that run automatically before each commit:
+
+```bash
+# Install pre-commit hooks (one-time setup)
+pre-commit install
+
+# Now style checks run automatically on every commit
+git commit -m "your message" # Will run style checks first
+```
\ No newline at end of file