Skip to content

Commit a434de2

Browse files
committed
remove sandbox code
1 parent defbfb4 commit a434de2

File tree

1 file changed

+1
-33
lines changed

1 file changed

+1
-33
lines changed

questions/123_mixed_precision_training/solution.py

Lines changed: 1 addition & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -32,36 +32,4 @@ def backward(self, gradients):
 
         # Unscale gradients (ensure result is float32)
         unscaled_gradients = gradients_fp32 / self.loss_scale
-        return unscaled_gradients.astype(np.float32)
-
-
-tests = [
-    {
-        "input": "import numpy as np\nmp = MixedPrecision(loss_scale=1024.0)\nweights = np.array([0.5, -0.3], dtype=np.float32)\ninputs = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\ntargets = np.array([1.0, 0.0], dtype=np.float32)\nloss = mp.forward(weights, inputs, targets)\nprint(f\"Loss: {loss:.4f}\")\nprint(f\"Loss dtype: {type(loss).__name__}\")\ngrads = np.array([512.0, -256.0], dtype=np.float32)\nresult = mp.backward(grads)\nprint(f\"Gradients: {result}\")\nprint(f\"Grad dtype: {result.dtype}\")",
-        "output": "Loss: 1638.4000\nLoss dtype: float\nGradients: [0.5 -0.25]\nGrad dtype: float32"
-    },
-    {
-        "input": "import numpy as np\nmp = MixedPrecision(loss_scale=512.0)\nweights = np.array([1.0, 0.5], dtype=np.float64)\ninputs = np.array([[2.0, 1.0]], dtype=np.float64)\ntargets = np.array([3.0], dtype=np.float64)\nloss = mp.forward(weights, inputs, targets)\nprint(f\"Loss: {loss:.1f}\")\nprint(f\"Loss dtype: {type(loss).__name__}\")\ngrads = np.array([1024.0, 512.0], dtype=np.float16)\nresult = mp.backward(grads)\nprint(f\"Gradients: [{result[0]:.0f} {result[1]:.0f}]\")\nprint(f\"Grad dtype: {result.dtype}\")",
-        "output": "Loss: 256.0\nLoss dtype: float\nGradients: [2 1]\nGrad dtype: float32"
-    },
-    {
-        "input": "import numpy as np\nmp = MixedPrecision(loss_scale=100.0)\nweights = np.array([0.1, 0.2], dtype=np.float32)\ninputs = np.array([[1.0, 1.0]], dtype=np.float32)\ntargets = np.array([0.5], dtype=np.float32)\nloss = mp.forward(weights, inputs, targets)\nprint(f\"Loss: {loss:.1f}\")\nprint(f\"Loss dtype: {type(loss).__name__}\")\ngrads = np.array([200.0, 100.0], dtype=np.float64)\nresult = mp.backward(grads)\nprint(f\"Gradients: [{result[0]:.0f} {result[1]:.0f}]\")\nprint(f\"Grad dtype: {result.dtype}\")",
-        "output": "Loss: 4.0\nLoss dtype: float\nGradients: [2 1]\nGrad dtype: float32"
-    },
-    {
-        "input": "import numpy as np\nmp = MixedPrecision(loss_scale=2048.0)\nweights = np.array([0.25], dtype=np.float64)\ninputs = np.array([[4.0]], dtype=np.float64)\ntargets = np.array([2.0], dtype=np.float64)\nloss = mp.forward(weights, inputs, targets)\nprint(f\"Loss: {loss:.1f}\")\nprint(f\"Loss dtype: {type(loss).__name__}\")\ngrads = np.array([np.nan], dtype=np.float16)\nresult = mp.backward(grads)\nprint(f\"Gradients: [{result[0]:.0f}]\")\nprint(f\"Grad dtype: {result.dtype}\")",
-        "output": "Loss: 2048.0\nLoss dtype: float\nGradients: [0]\nGrad dtype: float32"
-    },
-    {
-        "input": "import numpy as np\nmp = MixedPrecision(loss_scale=256.0)\nweights = np.array([1.0], dtype=np.float16)\ninputs = np.array([[2.0]], dtype=np.float16)\ntargets = np.array([3.0], dtype=np.float16)\nloss = mp.forward(weights, inputs, targets)\nprint(f\"Loss: {loss:.1f}\")\nprint(f\"Loss dtype: {type(loss).__name__}\")\ngrads = np.array([np.inf], dtype=np.float64)\nresult = mp.backward(grads)\nprint(f\"Gradients: [{result[0]:.0f}]\")\nprint(f\"Grad dtype: {result.dtype}\")",
-        "output": "Loss: 256.0\nLoss dtype: float\nGradients: [0]\nGrad dtype: float32"
-    }
-]
-
-for i, test in enumerate(tests):
-    print(f"Test #{i+1}.")
-    print("Got:")
-    exec(test['input'])
-    print(f"\nExpected: {test['output']}")
-    print("--------" * 3)
-
+        return unscaled_gradients.astype(np.float32)

0 commit comments

Comments
 (0)