[
2- {
3- "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=1024.0)\n weights = np.array([0.5, -0.3], dtype=np.float32)\n inputs = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n targets = np.array([1.0, 0.0], dtype=np.float32)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.4f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )\n grads = np.array([512.0, -256.0], dtype=np.float32)\n result = mp.backward(grads)\n print(f\" Gradients: {result}\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
4- "expected_output" : " Loss: 665.0000\n Loss dtype: float\n Gradients: [0.5 -0.25]\n Grad dtype: float32"
5- },
6- {
7- "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=512.0)\n weights = np.array([1.0, 0.5], dtype=np.float64)\n inputs = np.array([[2.0, 1.0]], dtype=np.float64)\n targets = np.array([3.0], dtype=np.float64)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.1f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )\n grads = np.array([1024.0, 512.0], dtype=np.float16)\n result = mp.backward(grads)\n print(f\" Gradients: [{result[0]:.0f} {result[1]:.0f}]\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
8- "expected_output" : " Loss: 128.0\n Loss dtype: float\n Gradients: [2 1]\n Grad dtype: float32"
9- },
10- {
11- "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=100.0)\n weights = np.array([0.1, 0.2], dtype=np.float32)\n inputs = np.array([[1.0, 1.0]], dtype=np.float32)\n targets = np.array([0.5], dtype=np.float32)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.1f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )\n grads = np.array([200.0, 100.0], dtype=np.float64)\n result = mp.backward(grads)\n print(f\" Gradients: [{result[0]:.0f} {result[1]:.0f}]\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
12- "expected_output" : " Loss: 4.0\n Loss dtype: float\n Gradients: [2 1]\n Grad dtype: float32"
13- },
14- {
15- "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=2048.0)\n weights = np.array([0.25], dtype=np.float64)\n inputs = np.array([[4.0]], dtype=np.float64)\n targets = np.array([2.0], dtype=np.float64)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.1f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )\n grads = np.array([np.nan], dtype=np.float16)\n result = mp.backward(grads)\n print(f\" Gradients: [{result[0]:.0f}]\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
16- "expected_output" : " Loss: 2048.0\n Loss dtype: float\n Gradients: [0]\n Grad dtype: float32"
17- },
18- {
19- "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=256.0)\n weights = np.array([1.0], dtype=np.float16)\n inputs = np.array([[2.0]], dtype=np.float16)\n targets = np.array([3.0], dtype=np.float16)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.1f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )\n grads = np.array([np.inf], dtype=np.float64)\n result = mp.backward(grads)\n print(f\" Gradients: [{result[0]:.0f}]\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
20- "expected_output" : " Loss: 256.0\n Loss dtype: float\n Gradients: [0]\n Grad dtype: float32"
21- }
2+ {
3+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=1024.0)\n weights = np.array([0.5, -0.3], dtype=np.float32)\n inputs = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n targets = np.array([1.0, 0.0], dtype=np.float32)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.4f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )" ,
4+ "expected_output" : " Loss: 665.0000\n Loss dtype: float32"
5+ },
6+ {
7+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=1024.0)\n grads = np.array([512.0, -256.0], dtype=np.float32)\n result = mp.backward(grads)\n print(f\" Gradients: {result}\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
8+ "expected_output" : " Gradients: [ 0.5 -0.25]\n Grad dtype: float32"
9+ },
10+ {
11+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=512.0)\n weights = np.array([1.0, 0.5], dtype=np.float64)\n inputs = np.array([[2.0, 1.0]], dtype=np.float64)\n targets = np.array([3.0], dtype=np.float64)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.1f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )" ,
12+ "expected_output" : " Loss: 128.0\n Loss dtype: float32"
13+ },
14+ {
15+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=512.0)\n grads = np.array([1024.0, 512.0], dtype=np.float16)\n result = mp.backward(grads)\n print(f\" Gradients: [{result[0]:.0f} {result[1]:.0f}]\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
16+ "expected_output" : " Gradients: [2 1]\n Grad dtype: float32"
17+ },
18+ {
19+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=100.0)\n weights = np.array([0.1, 0.2], dtype=np.float32)\n inputs = np.array([[1.0, 1.0]], dtype=np.float32)\n targets = np.array([0.5], dtype=np.float32)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.1f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )" ,
20+ "expected_output" : " Loss: 4.0\n Loss dtype: float32"
21+ },
22+ {
23+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=100.0)\n grads = np.array([200.0, 100.0], dtype=np.float64)\n result = mp.backward(grads)\n print(f\" Gradients: [{result[0]:.0f} {result[1]:.0f}]\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
24+ "expected_output" : " Gradients: [2 1]\n Grad dtype: float32"
25+ },
26+ {
27+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=2048.0)\n weights = np.array([0.25], dtype=np.float64)\n inputs = np.array([[4.0]], dtype=np.float64)\n targets = np.array([2.0], dtype=np.float64)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.1f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )" ,
28+ "expected_output" : " Loss: 2048.0\n Loss dtype: float32"
29+ },
30+ {
31+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=2048.0)\n grads = np.array([np.nan], dtype=np.float16)\n result = mp.backward(grads)\n print(f\" Gradients: [{result[0]:.0f}]\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
32+ "expected_output" : " Gradients: [0]\n Grad dtype: float32"
33+ },
34+ {
35+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=256.0)\n weights = np.array([1.0], dtype=np.float16)\n inputs = np.array([[2.0]], dtype=np.float16)\n targets = np.array([3.0], dtype=np.float16)\n loss = mp.forward(weights, inputs, targets)\n print(f\" Loss: {loss:.1f}\" )\n print(f\" Loss dtype: {type(loss).__name__}\" )" ,
36+ "expected_output" : " Loss: 256.0\n Loss dtype: float32"
37+ },
38+ {
39+ "test" : " import numpy as np\n mp = MixedPrecision(loss_scale=256.0)\n grads = np.array([np.inf], dtype=np.float64)\n result = mp.backward(grads)\n print(f\" Gradients: [{result[0]:.0f}]\" )\n print(f\" Grad dtype: {result.dtype}\" )" ,
40+ "expected_output" : " Gradients: [0]\n Grad dtype: float32"
41+ }
]