
Commit 94726b1

clean up scipy/numpy imports
1 parent 3a1d282

File tree: 3 files changed, +104 -120 lines

multivariate_regressor.py
Lines changed: 16 additions & 20 deletions

@@ -8,19 +8,15 @@
 Just simple linear regression with regularization - nothing new here
 """
 
-from scipy import randn
-from scipy import dot
-from scipy import shape
-from scipy import reshape
-from scipy import size
-from scipy.sparse import eye
-from scipy.linalg import pinv
+import numpy as np
+from scipy import sparse
+
 
 def ideal_data(num, dimX, dimY, _rank=None, noise=1):
     """Full rank data"""
-    X = randn(num, dimX)
-    W = randn(dimX, dimY)
-    Y = dot(X, W) + randn(num, dimY) * noise
+    X = np.random.randn(num, dimX)
+    W = np.random.randn(dimX, dimY)
+    Y = np.dot(X, W) + np.random.randn(num, dimY) * noise
     return X, Y
 
 
@@ -32,21 +28,21 @@ class MultivariateRegressor(object):
     - reg is a regularization parameter (optional).
     """
     def __init__(self, X, Y, reg=None):
-        if size(shape(X)) is 1:
-            X = reshape(X, (-1, 1))
-        if size(shape(Y)) is 1:
-            Y = reshape(Y, (-1, 1))
+        if np.size(np.shape(X)) == 1:
+            X = np.reshape(X, (-1, 1))
+        if np.size(np.shape(Y)) == 1:
+            Y = np.reshape(Y, (-1, 1))
         if reg is None:
             reg = 0
 
-        W1 = pinv(dot(X.T, X) + reg * eye(size(X, 1)))
-        W2 = dot(X, W1)
-        self.W = dot(Y.T, W2)
+        W1 = np.linalg.pinv(np.dot(X.T, X) + reg * sparse.eye(np.size(X, 1)))
+        W2 = np.dot(X, W1)
+        self.W = np.dot(Y.T, W2)
 
     def __str__(self):
         return 'Multivariate Linear Regression'
 
     def predict(self, X):
-        if size(shape(X)) == 1:
-            X = reshape(X, (-1, 1))
-        return dot(X, self.W.T)
+        if np.size(np.shape(X)) == 1:
+            X = np.reshape(X, (-1, 1))
+        return np.dot(X, self.W.T)
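
A quick smoke test of the cleaned-up module (a hypothetical sketch, not part of this commit; it assumes the file is importable as multivariate_regressor, with arbitrary dimensions):

import numpy as np
from multivariate_regressor import MultivariateRegressor, ideal_data

np.random.seed(0)                             # reproducible synthetic data
X, Y = ideal_data(num=200, dimX=5, dimY=3)    # full-rank random regression problem
model = MultivariateRegressor(X, Y, reg=0.1)  # regularized least-squares fit
Y_hat = model.predict(X)
print(np.mean(np.asarray(Y - Y_hat) ** 2))    # training mean squared error

The np.asarray call is defensive: adding reg * sparse.eye(...) to a dense array yields a np.matrix, so predict may return a matrix rather than an ndarray.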

reduced_rank_regressor.py
Lines changed: 16 additions & 22 deletions

@@ -7,21 +7,15 @@
 
 Optimal linear 'bottlenecking' or 'multitask learning'.
 """
+import numpy as np
+from scipy import sparse
 
-from scipy import randn
-from scipy import dot
-from scipy import shape
-from scipy import reshape
-from scipy import size
-from scipy.sparse import eye
-from scipy.linalg import pinv
-from scipy.linalg import svd
 
 def ideal_data(num, dimX, dimY, rrank, noise=1):
     """Low rank data"""
-    X = randn(num, dimX)
-    W = dot(randn(dimX, rrank), randn(rrank, dimY))
-    Y = dot(X, W) + randn(num, dimY) * noise
+    X = np.random.randn(num, dimX)
+    W = np.dot(np.random.randn(dimX, rrank), np.random.randn(rrank, dimY))
+    Y = np.dot(X, W) + np.random.randn(num, dimY) * noise
     return X, Y
 
 
@@ -34,25 +28,25 @@ class ReducedRankRegressor(object):
     - reg is a regularization parameter (optional).
     """
     def __init__(self, X, Y, rank, reg=None):
-        if size(shape(X)) == 1:
-            X = reshape(X, (-1, 1))
-        if size(shape(Y)) == 1:
-            Y = reshape(Y, (-1, 1))
+        if np.size(np.shape(X)) == 1:
+            X = np.reshape(X, (-1, 1))
+        if np.size(np.shape(Y)) == 1:
+            Y = np.reshape(Y, (-1, 1))
         if reg is None:
             reg = 0
         self.rank = rank
 
-        CXX = dot(X.T, X) + reg * eye(size(X, 1))
-        CXY = dot(X.T, Y)
-        _U, _S, V = svd(dot(CXY.T, dot(pinv(CXX), CXY)))
+        CXX = np.dot(X.T, X) + reg * sparse.eye(np.size(X, 1))
+        CXY = np.dot(X.T, Y)
+        _U, _S, V = np.linalg.svd(np.dot(CXY.T, np.dot(np.linalg.pinv(CXX), CXY)))
         self.W = V[0:rank, :].T
-        self.A = dot(pinv(CXX), dot(CXY, self.W)).T
+        self.A = np.dot(np.linalg.pinv(CXX), np.dot(CXY, self.W)).T
 
     def __str__(self):
         return 'Reduced Rank Regressor (rank = {})'.format(self.rank)
 
     def predict(self, X):
         """Predict Y from X."""
-        if size(shape(X)) == 1:
-            X = reshape(X, (-1, 1))
-        return dot(X, dot(self.A.T, self.W.T))
+        if np.size(np.shape(X)) == 1:
+            X = np.reshape(X, (-1, 1))
+        return np.dot(X, np.dot(self.A.T, self.W.T))
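
Again a hypothetical usage sketch, not part of the commit (assuming the file is importable as reduced_rank_regressor; dimensions and rank chosen arbitrarily):

import numpy as np
from reduced_rank_regressor import ReducedRankRegressor, ideal_data

np.random.seed(0)
X, Y = ideal_data(num=500, dimX=10, dimY=6, rrank=3)  # ground truth has rank 3
model = ReducedRankRegressor(X, Y, rank=3, reg=1e-3)  # constrain the learned map to rank 3
print(model)                                          # Reduced Rank Regressor (rank = 3)
Y_hat = model.predict(X)
print(np.mean(np.asarray(Y - Y_hat) ** 2))            # training mean squared error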

system_identifier.py
Lines changed: 72 additions & 78 deletions

@@ -14,57 +14,49 @@
 x_(i+1) = dot(A, x_i) + dot(B, u_i)
 This is a linear dynamical system.
 """
+import numpy as np
+import scipy as sp
+from scipy import sparse
+from scipy.sparse import linalg as sparse_linalg
 
-from numpy.lib import diag
-from scipy import randn
-from scipy import dot
-from scipy import shape
-from scipy import reshape
-from scipy import size
-from scipy import zeros
-from scipy import array
-from scipy.sparse import bmat
-from scipy import ravel
-from scipy import concatenate
-from scipy.sparse import eye
-from scipy.linalg import pinv
-from scipy.sparse.linalg import eigs
-from scipy.linalg import svd
 
 def ideal_data(num, dimU, dimY, dimX, noise=1):
     """Linear system data"""
     # generate randomized linear system matrices
-    A = randn(dimX, dimX)
-    B = randn(dimX, dimU)
-    C = randn(dimY, dimX)
-    D = randn(dimY, dimU)
+    A = np.random.randn(dimX, dimX)
+    B = np.random.randn(dimX, dimU)
+    C = np.random.randn(dimY, dimX)
+    D = np.random.randn(dimY, dimU)
 
     # make sure state evolution is stable
-    U, S, V = svd(A)
-    A = dot(U, dot(diag(S / max(S)), V))
-    U, S, V = svd(B)
-    S2 = zeros((size(U,1), size(V,0)))
-    S2[:,:size(U,1)] = diag(S / max(S))
-    B = dot(U, dot(S2, V))
+    U, S, V = np.linalg.svd(A)
+    A = np.dot(U, np.dot(np.lib.diag(S / max(S)), V))
+    U, S, V = np.linalg.svd(B)
+    S2 = np.zeros((np.size(U, 1), np.size(V, 0)))
+    S2[:, :np.size(U, 1)] = np.lib.diag(S / max(S))
+    B = np.dot(U, np.dot(S2, V))
 
     # random input
-    U = randn(num, dimU)
+    U = np.random.randn(num, dimU)
 
     # initial state
-    X = reshape(randn(dimX), (1,-1))
+    X = np.reshape(np.random.randn(dimX), (1, -1))
 
     # initial output
-    Y = reshape(dot(C, X[-1]) + dot(D, U[0]), (1,-1))
+    Y = np.reshape(np.dot(C, X[-1]) + np.dot(D, U[0]), (1, -1))
 
     # generate next state
-    X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, U[0]), (1,-1))))
+    X = np.concatenate(
+        (X, np.reshape(np.dot(A, X[-1]) + np.dot(B, U[0]), (1, -1))))
 
     # and so forth
     for u in U[1:]:
-        Y = concatenate((Y, reshape(dot(C, X[-1]) + dot(D, u), (1,-1))))
-        X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, u), (1,-1))))
+        Y = np.concatenate(
+            (Y, np.reshape(np.dot(C, X[-1]) + np.dot(D, u), (1, -1))))
+        X = np.concatenate(
+            (X, np.reshape(np.dot(A, X[-1]) + np.dot(B, u), (1, -1))))
 
-    return U, Y + randn(num, dimY) * noise
+    return U, Y + np.random.randn(num, dimY) * noise
 
 
 class SystemIdentifier(object):
@@ -76,87 +68,89 @@ class SystemIdentifier(object):
     - reg is a regularization parameter (optional).
     """
     def __init__(self, U, Y, statedim, reg=None):
-        if size(shape(U)) == 1:
-            U = reshape(U, (-1,1))
-        if size(shape(Y)) == 1:
-            Y = reshape(Y, (-1,1))
+        if np.size(np.shape(U)) == 1:
+            U = np.reshape(U, (-1, 1))
+        if np.size(np.shape(Y)) == 1:
+            Y = np.reshape(Y, (-1, 1))
         if reg is None:
             reg = 0
 
-        yDim = size(Y,1)
-        uDim = size(U,1)
+        yDim = np.size(Y, 1)
+        uDim = np.size(U, 1)
 
-        self.output_size = size(Y,1) # placeholder
+        self.output_size = np.size(Y, 1) # placeholder
 
         # number of samples of past/future we'll mash together into a 'state'
        width = 1
         # total number of past/future pairings we get as a result
-        K = size(U,0) - 2 * width + 1
+        K = np.size(U, 0) - 2 * width + 1
 
         # build hankel matrices containing pasts and futures
-        U_p = array([ravel(U[t : t + width]) for t in range(K)]).T
-        U_f = array([ravel(U[t + width : t + 2 * width]) for t in range(K)]).T
-        Y_p = array([ravel(Y[t : t + width]) for t in range(K)]).T
-        Y_f = array([ravel(Y[t + width : t + 2 * width]) for t in range(K)]).T
+        U_p = np.array([np.ravel(U[t:t + width]) for t in range(K)]).T
+        U_f = np.array([np.ravel(U[t + width:t + 2 * width]) for t in range(K)]).T
+        Y_p = np.array([np.ravel(Y[t:t + width]) for t in range(K)]).T
+        Y_f = np.array([np.ravel(Y[t + width:t + 2 * width]) for t in range(K)]).T
 
         # solve the eigenvalue problem
-        YfUfT = dot(Y_f, U_f.T)
-        YfUpT = dot(Y_f, U_p.T)
-        YfYpT = dot(Y_f, Y_p.T)
-        UfUpT = dot(U_f, U_p.T)
-        UfYpT = dot(U_f, Y_p.T)
-        UpYpT = dot(U_p, Y_p.T)
-        F = bmat([[None, YfUfT, YfUpT, YfYpT],
-                  [YfUfT.T, None, UfUpT, UfYpT],
-                  [YfUpT.T, UfUpT.T, None, UpYpT],
-                  [YfYpT.T, UfYpT.T, UpYpT.T, None]])
-        Ginv = bmat([[pinv(dot(Y_f,Y_f.T)), None, None, None],
-                     [None, pinv(dot(U_f,U_f.T)), None, None],
-                     [None, None, pinv(dot(U_p,U_p.T)), None],
-                     [None, None, None, pinv(dot(Y_p,Y_p.T))]])
-        F = F - eye(size(F, 0)) * reg
+        YfUfT = np.dot(Y_f, U_f.T)
+        YfUpT = np.dot(Y_f, U_p.T)
+        YfYpT = np.dot(Y_f, Y_p.T)
+        UfUpT = np.dot(U_f, U_p.T)
+        UfYpT = np.dot(U_f, Y_p.T)
+        UpYpT = np.dot(U_p, Y_p.T)
+        F = sparse.bmat([
+            [None, YfUfT, YfUpT, YfYpT],
+            [YfUfT.T, None, UfUpT, UfYpT],
+            [YfUpT.T, UfUpT.T, None, UpYpT],
+            [YfYpT.T, UfYpT.T, UpYpT.T, None],
+        ])
+        Ginv = sparse.bmat([
+            [np.linalg.pinv(np.dot(Y_f, Y_f.T)), None, None, None],
+            [None, np.linalg.pinv(np.dot(U_f, U_f.T)), None, None],
+            [None, None, np.linalg.pinv(np.dot(U_p, U_p.T)), None],
+            [None, None, None, np.linalg.pinv(np.dot(Y_p, Y_p.T))],
+        ])
+        F = F - sparse.eye(sp.size(F, 0)) * reg
 
         # Take smallest eigenvalues
-        _, W = eigs(Ginv.dot(F), k=statedim, which='SR')
+        _, W = sparse_linalg.eigs(Ginv.dot(F), k=statedim, which='SR')
 
         # State sequence is a weighted combination of the past
-        W_U_p = W[ width * (yDim + uDim) : width * (yDim + uDim + uDim), :]
-        W_Y_p = W[ width * (yDim + uDim + uDim):, :]
-        X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)
+        W_U_p = W[width * (yDim + uDim):width * (yDim + uDim + uDim), :]
+        W_Y_p = W[width * (yDim + uDim + uDim):, :]
+        X_hist = np.dot(W_U_p.T, U_p) + np.dot(W_Y_p.T, Y_p)
 
         # Regress; trim inputs to match the states we retrieved
-        R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
-        L = concatenate((X_hist[:, 1: ], Y[width:-width].T), 0)
-        RRi = pinv(dot(R, R.T))
-        RL = dot(R, L.T)
-        Sys = dot(RRi, RL).T
+        R = np.concatenate((X_hist[:, :-1], U[width:-width].T), 0)
+        L = np.concatenate((X_hist[:, 1:], Y[width:-width].T), 0)
+        RRi = np.linalg.pinv(np.dot(R, R.T))
+        RL = np.dot(R, L.T)
+        Sys = np.dot(RRi, RL).T
         self.A = Sys[:statedim, :statedim]
         self.B = Sys[:statedim, statedim:]
         self.C = Sys[statedim:, :statedim]
         self.D = Sys[statedim:, statedim:]
 
-
-    def __str__ (self):
+    def __str__(self):
         return "Linear Dynamical System"
 
-
     def predict(self, U):
         # If U is a vector, reshape it
-        if size(shape(U)) == 1:
-            U = reshape(U, (-1, 1))
+        if np.size(np.shape(U)) == 1:
+            U = np.reshape(U, (-1, 1))
 
         # assume some random initial state
-        X = reshape(randn(size(self.A, 1)), (1, -1))
+        X = np.reshape(np.random.randn(np.size(self.A, 1)), (1, -1))
 
         # intitial output
-        Y = reshape(dot(self.C, X[-1]) + dot(self.D, U[0]), (1, -1))
+        Y = np.reshape(np.dot(self.C, X[-1]) + np.dot(self.D, U[0]), (1, -1))
 
         # generate next state
-        X = concatenate((X, reshape(dot(self.A, X[-1]) + dot(self.B, U[0]), (1, -1))))
+        X = np.concatenate((X, np.reshape(np.dot(self.A, X[-1]) + np.dot(self.B, U[0]), (1, -1))))
 
         # and so forth
         for u in U[1:]:
-            Y = concatenate((Y, reshape(dot(self.C, X[-1]) + dot(self.D, u), (1, -1))))
-            X = concatenate((X, reshape(dot(self.A, X[-1]) + dot(self.B, u), (1, -1))))
+            Y = np.concatenate((Y, np.reshape(np.dot(self.C, X[-1]) + np.dot(self.D, u), (1, -1))))
+            X = np.concatenate((X, np.reshape(np.dot(self.A, X[-1]) + np.dot(self.B, u), (1, -1))))
 
         return Y
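
And a hypothetical smoke test for the subspace identifier, not part of the commit (assuming the file is importable as system_identifier; dimU >= dimX here so the stabilizing SVD step's array shapes line up):

import numpy as np
from system_identifier import SystemIdentifier, ideal_data

np.random.seed(0)
U, Y = ideal_data(num=300, dimU=3, dimY=2, dimX=2)  # simulate a random stable system
model = SystemIdentifier(U, Y, statedim=2, reg=1e-4)
Y_hat = model.predict(U)  # re-simulate outputs from a random initial state
print(Y_hat.shape)        # (300, 2); entries may be complex, since eigs returns complex eigenvectors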
