hello #196

8 binary files changed (contents not shown).
8 changes: 5 additions & 3 deletions ML/Pytorch/Basics/pytorch_simple_fullynet.py
@@ -95,16 +95,18 @@ def forward(self, x):
for epoch in range(num_epochs):
    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
        # Get data to cuda if possible
        print(data.shape)
        print(targets.shape)
        data = data.to(device=device)
        targets = targets.to(device=device)

        # Get to correct shape
        data = data.reshape(data.shape[0], -1)

        # Forward
        scores = model(data)
        scores = model.forward(data)
        loss = criterion(scores, targets)

        print(f"Loss at epoch {epoch}, batch {batch_idx}: {loss.item()}")
        # Backward
        optimizer.zero_grad()
        loss.backward()
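        # Sketch (an assumption, not part of this diff): the training step
        # conventionally ends with the parameter update right after backward.
        optimizer.step()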
@@ -131,7 +133,7 @@ def check_accuracy(loader, model):

    num_correct = 0
    num_samples = 0
    model.eval()
    model.eval()  # evaluation mode; this disables dropout and similar layers

    # We don't need to keep track of gradients here so we wrap it in torch.no_grad()
    with torch.no_grad():
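The hunk above cuts off just inside the with torch.no_grad(): block. For context, here is a minimal sketch of how an accuracy loop typically continues under no_grad (an assumption matching the flattened-input setup of the training loop, not a verbatim quote of the hidden lines):

        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            x = x.reshape(x.shape[0], -1)  # flatten, matching the training loop

            scores = model(x)
            _, predictions = scores.max(dim=1)  # index of the highest score per row
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)

    model.train()  # switch back to training mode afterwards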
17 changes: 10 additions & 7 deletions ML/Pytorch/Basics/pytorch_tensorbasics.py
@@ -163,11 +163,12 @@
values, indices = torch.min(x, dim=0) # Can also do x.min(dim=0)
abs_x = torch.abs(x) # Returns x where abs function has been applied to every element
z = torch.argmax(x, dim=0) # Gets index of the maximum value
z = torch.argmin(x, dim=0) # Gets index of the minimum value
z = torch.argmin(x, dim=0)
print(z)  # Gets index of the minimum value
mean_x = torch.mean(x.float(), dim=0) # mean requires x to be float
z = torch.eq(x, y) # Element wise comparison, in this case z = [False, False, False]
sorted_y, indices = torch.sort(y, dim=0, descending=False)

print(indices)
z = torch.clamp(x, min=0)
# All values < 0 are set to 0 and values > 0 are unchanged (this is exactly the ReLU function)
# If you want values over max_val to be clamped too, do torch.clamp(x, min=min_val, max=max_val)
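A quick illustration of the clamp/ReLU equivalence described above (input values chosen for the example):

x = torch.tensor([-3.0, -1.0, 0.0, 2.0, 5.0])
print(torch.clamp(x, min=0))  # tensor([0., 0., 0., 2., 5.]), same as relu(x)
print(torch.clamp(x, min=0, max=3))  # tensor([0., 0., 0., 2., 3.])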
@@ -207,7 +208,7 @@
rows = torch.tensor([1, 0])
cols = torch.tensor([4, 0])
print(x[rows, cols]) # Gets second row fifth column and first row first column

# which is the same as doing: [x[1, 4], x[0, 0]] (advanced indexing)
# More advanced indexing
x = torch.arange(10)
print(x[(x < 2) | (x > 8)]) # will be [0, 1, 9]
@@ -216,7 +217,9 @@
# Useful operations for indexing
print(
    torch.where(x > 5, x, x * 2)
) # gives [0, 2, 4, 6, 8, 10, 6, 7, 8, 9], all values x > 5 yield x, else x*2
)
# where the condition (first argument) holds, take the second argument; otherwise the third
# gives [0, 2, 4, 6, 8, 10, 6, 7, 8, 9], all values x > 5 yield x, else x*2
x = torch.tensor([0, 0, 1, 2, 2, 3, 4]).unique() # x = [0, 1, 2, 3, 4]
print(
    x.ndimension()
@@ -231,7 +234,7 @@
# ============================================================= #

x = torch.arange(9)

print(x.shape) # Shape is [9]
# Let's say we want to reshape it to be 3x3
x_3x3 = x.view(3, 3)

@@ -256,7 +259,7 @@
# using pointers to construct these matrices). This is a bit complicated and I need to explore this more
# as well; at least you know it's a problem to be cautious of! A solution is to do the following
print(y.contiguous().view(9)) # Calling .contiguous() before view and it works

# the transposed view's elements jump around in memory (non-contiguous)
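# Illustrative check (assumption: y_t reconstructs the transposed 3x3 view from
# above). Transposing swaps strides instead of moving data, which is why .view() fails:
y_t = torch.arange(9).view(3, 3).t()
print(y_t.is_contiguous())  # False: only the strides were swapped
print(y_t.stride())  # (1, 3): consecutive row elements sit 3 apart in memory
print(y_t.contiguous().stride())  # (3, 1) after copying into contiguous memory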
# Moving on to another operation, let's say we want to add two tensors' dimensions together
x1 = torch.rand(2, 5)
x2 = torch.rand(2, 5)
@@ -284,7 +287,7 @@
z = torch.chunk(x, chunks=2, dim=1)
print(z[0].shape)
print(z[1].shape)

# chunk splits the tensor into several sub-tensors along the given dim
# Let's say we want to add an additional dimension
x = torch.arange(
    10
37 changes: 37 additions & 0 deletions Test/tensor.py
@@ -0,0 +1,37 @@
import torch
import torch.nn as nn

my_tensor = torch.tensor([[1, 2, 3], [4, 5, 6]])
x = torch.empty(size=(3, 3)).uniform_(0, 1)  # 3x3, sampled uniformly from [0, 1)
y = torch.diag(torch.ones(3))  # diagonal matrix of ones (3x3 identity)
z = torch.ones(3)
print(x)
print(y)
print(z)
import numpy as np

a = np.array([1, 2, 3])
b = torch.from_numpy(a)  # convert the NumPy array to a tensor (shares memory with a)
print(a)
print(b)
c = b.numpy()  # convert the tensor back to a NumPy array
print(c.dtype)
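# Demonstration: because from_numpy shares memory, an in-place NumPy update
# is visible from the tensor as well.
a += 1
print(b)  # tensor([2, 3, 4]): b reflects the change made through a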
x = torch.tensor([1, 2, 3])
print(torch.diag(x))  # for a 1-D input, diag builds a diagonal matrix
# Output:
# tensor([[1, 0, 0],
#         [0, 2, 0],
#         [0, 0, 3]])
A = torch.tensor([[1, 2], [3, 4]])
print(torch.diag(A))  # for a 2-D input, diag extracts the main diagonal
# Output: tensor([1, 4])
p = torch.rand(3, 4)  # uniform random values in [0, 1)
print(p)
q = torch.eye(4)  # 4x4 identity matrix
print(q)
z = torch.empty(3, 4).normal_(mean=0, std=1)  # standard normal samples
print(z)
j = torch.arange(1, 10, 2)  # tensor([1, 3, 5, 7, 9])
print(j)
k = torch.empty(3, 4)  # uninitialized memory, values are arbitrary
print(k)
1,178 changes: 1,178 additions & 0 deletions _downloads/c195adbae0504b6504c93e0fd18235ce/mario_rl_tutorial.ipynb

Large diffs are not rendered by default.