Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 15 additions & 2 deletions minitorch/fast_conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,14 @@ def _tensor_conv1d(
`batch, out_channels, width`

`reverse` decides if weight is anchored left (False) or right.
(See diagrams)

Implementation hints:
- Use nested loops over batch, output channels, output width, input channels, and kernel width
- Use prange for appropriate outer loops to enable parallelization
- Calculate input position based on output position and kernel position
- Handle reverse flag to determine kernel indexing direction
- Apply bounds checking to avoid accessing invalid memory positions
- Use provided strides to compute correct memory offsets
Args:
----
out (Storage): storage for `out` tensor.
Expand Down Expand Up @@ -187,6 +193,13 @@ def _tensor_conv2d(
`Reverse` decides if weight is anchored top-left (False) or bottom-right.
(See diagrams)

Implementation hints:
- Use nested loops over batch, output channels, output height, output width, input channels, kernel height, and kernel width
- Use prange for appropriate outer loops to enable parallelization
- Calculate input positions based on output position and kernel positions
- Handle reverse flag to determine kernel indexing direction for both height and width
- Apply bounds checking to avoid accessing invalid memory positions for both dimensions
- Use provided strides to compute correct memory offsets for 4D tensors

Args:
----
Expand Down
96 changes: 91 additions & 5 deletions minitorch/nn.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
from typing import Tuple

from . import operators
from .autodiff import Context
from .fast_ops import FastOps
from .tensor import Tensor
from .tensor_functions import Function, rand, tensor
from .tensor_functions import rand
from typing import Optional
from .tensor_functions import Function, tensor

from .autodiff import Context


# List of functions in this file:
Expand Down Expand Up @@ -39,4 +40,89 @@ def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]:
raise NotImplementedError("Need to implement for Task 4.3")


# TODO: Implement for Task 4.3.
def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
    """Tiled average pooling 2D.

    Rearranges each ``kh x kw`` pooling window onto a trailing dimension via
    ``tile`` and averages over it.

    Args:
    ----
        input: batch x channel x height x width
        kernel: height x width of pooling

    Returns:
    -------
        Pooled tensor of shape batch x channel x new_height x new_width.
        Assumes height/width divide evenly by the kernel (enforced by ``tile``).
    """
    batch, channel, height, width = input.shape
    # tile reshapes to (batch, channel, new_h, new_w, kh * kw); averaging the
    # last dimension averages one pooling window per output position.
    tiled, new_height, new_width = tile(input, kernel)
    return tiled.mean(4).view(batch, channel, new_height, new_width)


class Max(Function):
    @staticmethod
    def forward(ctx: Context, a: Tensor, dim: Tensor) -> Tensor:
        """Forward of max should be max reduction.

        ``dim`` arrives as a 1-element tensor (autodiff requires tensor args);
        it is unwrapped to an int for the reduce call.
        """
        d = int(dim.item())
        # -1e9 is the reduce identity: effectively -inf for the value ranges
        # used in this project.
        out = FastOps.reduce(operators.max, -1e9)(a, d)
        # Save input and reduced output so backward can rebuild the argmax mask.
        ctx.save_for_backward(a, out)
        return out

    @staticmethod
    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]:
        """Backward of max should be argmax (see argmax function)."""
        a, out = ctx.saved_values
        # Gradient flows only to positions that achieved the maximum; `out`
        # broadcasts back over the reduced dimension. NOTE(review): on ties
        # the gradient is routed to every tied position — confirm this matches
        # the course's expected convention.
        # 0.0 is the (unused) gradient for the `dim` argument.
        return grad_output * (a == out), 0.0


def max(input: Tensor, dim: int) -> Tensor:
    """Apply max reduction over a dimension.

    Thin autodiff-aware wrapper: ``dim`` is boxed into a tensor because
    ``Function.apply`` only accepts tensor arguments.
    """
    return Max.apply(input, tensor([dim]))


def argmax(input: Tensor, dim: int) -> Tensor:
    """Compute the argmax as a 1-hot tensor.

    Positions equal to the per-slice maximum along ``dim`` are 1, all others 0.
    NOTE(review): on ties every maximal position gets a 1, so the result is
    "1-hot" only when the maximum is unique.
    """
    # Non-differentiable reduce is fine here: argmax itself carries no gradient.
    out = FastOps.reduce(operators.max, -1e9)(input, dim)
    return out == input


def softmax(input: Tensor, dim: int) -> Tensor:
    """Compute the softmax as a tensor.

    softmax(x)_i = exp(x_i) / sum_j exp(x_j) along ``dim``; the reduced
    dimension is kept with size 1, so the division broadcasts.
    NOTE(review): not max-shifted, so very large inputs can overflow exp —
    use ``logsoftmax`` where numerical stability matters.
    """
    e = input.exp()
    return e / e.sum(dim)


def logsoftmax(input: Tensor, dim: int) -> Tensor:
    """Compute the log of the softmax as a tensor.

    Uses the LogSumExp trick for stability:
    log softmax(x) = x - m - log(sum(exp(x - m))), with m = max(x) along ``dim``.
    See https://en.wikipedia.org/wiki/LogSumExp#log-sum-exp_trick_for_log-domain_calculations
    """
    # m is computed with a raw (non-differentiated) reduce; since the
    # expression's value is invariant in m, treating it as a constant still
    # yields the correct gradient.
    m = FastOps.reduce(operators.max, -1e9)(input, dim)
    return input - (input - m).exp().sum(dim).log() - m


def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
    """Tiled max pooling 2D.

    Rearranges each ``kh x kw`` pooling window onto a trailing dimension via
    ``tile`` and takes the (differentiable) max over it.

    Args:
    ----
        input: batch x channel x height x width
        kernel: height x width of pooling

    Returns:
    -------
        Pooled tensor of shape batch x channel x new_height x new_width.
        Assumes height/width divide evenly by the kernel (enforced by ``tile``).
    """
    batch, channel, height, width = input.shape
    tiled, new_height, new_width = tile(input, kernel)
    # dim 4 is the flattened kernel window produced by tile.
    return max(tiled, 4).view(batch, channel, new_height, new_width)


def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
    """Dropout positions based on random noise.

    Multiplies the input by a Bernoulli(1 - rate) mask: each position is
    independently zeroed with probability ``rate``. NOTE(review): surviving
    values are NOT rescaled by 1/(1-rate) (this is not "inverted dropout") —
    confirm against the course tests before adding scaling.

    Args:
    ----
        input: input tensor
        rate: probability [0, 1) of dropping out each position
        ignore: skip dropout, i.e. do nothing at all

    Returns:
    -------
        tensor with random positions dropped out
    """
    if ignore:
        # Evaluation mode: pass through untouched.
        return input
    # rand(shape) is uniform on [0, 1); comparing against rate keeps each
    # position with probability 1 - rate.
    return input * (rand(input.shape) > rate)
Loading