14
14
x_(i+1) = dot(A, x_i) + dot(B, u_i)
15
15
This is a linear dynamical system.
16
16
"""
17
+ import numpy as np
18
+ import scipy as sp
19
+ from scipy import sparse
20
+ from scipy .sparse import linalg as sparse_linalg
17
21
18
- from numpy .lib import diag
19
- from scipy import randn
20
- from scipy import dot
21
- from scipy import shape
22
- from scipy import reshape
23
- from scipy import size
24
- from scipy import zeros
25
- from scipy import array
26
- from scipy .sparse import bmat
27
- from scipy import ravel
28
- from scipy import concatenate
29
- from scipy .sparse import eye
30
- from scipy .linalg import pinv
31
- from scipy .sparse .linalg import eigs
32
- from scipy .linalg import svd
33
22
34
23
def ideal_data(num, dimU, dimY, dimX, noise=1):
    """Generate input/output data from a random stable linear system.

    The system is
        x_(i+1) = dot(A, x_i) + dot(B, u_i)
        y_i     = dot(C, x_i) + dot(D, u_i)
    with randomly drawn matrices, rescaled so the state evolution is stable.

    Parameters
    ----------
    num : int
        Number of samples to generate.
    dimU, dimY, dimX : int
        Dimensions of the input, output, and hidden state.
    noise : float, optional
        Scale of the Gaussian noise added to the outputs (default 1).

    Returns
    -------
    (U, Y) : tuple of ndarray
        Inputs of shape (num, dimU) and noisy outputs of shape (num, dimY).
    """
    # generate randomized linear system matrices
    A = np.random.randn(dimX, dimX)
    B = np.random.randn(dimX, dimU)
    C = np.random.randn(dimY, dimX)
    D = np.random.randn(dimY, dimU)

    # make sure state evolution is stable: rescale so the largest singular
    # value of A (and of B) is 1.
    # NOTE: np.lib.diag was removed from NumPy's public namespace; np.diag
    # is the supported spelling.
    U, S, V = np.linalg.svd(A)
    A = np.dot(U, np.dot(np.diag(S / S.max()), V))
    U, S, V = np.linalg.svd(B)
    S2 = np.zeros((np.size(U, 1), np.size(V, 0)))
    # BUG FIX: place the singular values on the leading k-by-k diagonal of
    # the (possibly rectangular) S2.  The previous slice used U's column
    # count, which raised a shape-mismatch error whenever dimX > dimU.
    k = S.size
    S2[:k, :k] = np.diag(S / S.max())
    B = np.dot(U, np.dot(S2, V))

    # random input
    U = np.random.randn(num, dimU)

    # initial state
    X = np.reshape(np.random.randn(dimX), (1, -1))

    # initial output
    Y = np.reshape(np.dot(C, X[-1]) + np.dot(D, U[0]), (1, -1))

    # generate next state
    X = np.concatenate(
        (X, np.reshape(np.dot(A, X[-1]) + np.dot(B, U[0]), (1, -1))))

    # and so forth
    for u in U[1:]:
        Y = np.concatenate(
            (Y, np.reshape(np.dot(C, X[-1]) + np.dot(D, u), (1, -1))))
        X = np.concatenate(
            (X, np.reshape(np.dot(A, X[-1]) + np.dot(B, u), (1, -1))))

    return U, Y + np.random.randn(num, dimY) * noise
68
60
69
61
70
62
class SystemIdentifier(object):
    """Identify a linear dynamical system from input/output data.

    Given inputs U and outputs Y (each a (num_samples, dim) array; 1-D
    vectors are treated as single-channel series), estimates matrices
    A, B, C, D of the system

        x_(i+1) = dot(A, x_i) + dot(B, u_i)
        y_i     = dot(C, x_i) + dot(D, u_i)

    using a subspace-style method: Hankel matrices of past/future samples
    are built, a generalized eigenvalue problem recovers a hidden state
    sequence, and the system matrices come from a least-squares regression.

    statedim is the dimension of the hidden state;
    reg is a regularization parameter (optional).
    """
    def __init__(self, U, Y, statedim, reg=None):
        # If inputs/outputs are vectors, reshape them to column matrices.
        if np.size(np.shape(U)) == 1:
            U = np.reshape(U, (-1, 1))
        if np.size(np.shape(Y)) == 1:
            Y = np.reshape(Y, (-1, 1))
        if reg is None:
            reg = 0

        yDim = np.size(Y, 1)
        uDim = np.size(U, 1)

        self.output_size = np.size(Y, 1)  # placeholder

        # number of samples of past/future we'll mash together into a 'state'
        width = 1
        # total number of past/future pairings we get as a result
        K = np.size(U, 0) - 2 * width + 1

        # build hankel matrices containing pasts and futures
        U_p = np.array([np.ravel(U[t: t + width]) for t in range(K)]).T
        U_f = np.array([np.ravel(U[t + width: t + 2 * width]) for t in range(K)]).T
        Y_p = np.array([np.ravel(Y[t: t + width]) for t in range(K)]).T
        Y_f = np.array([np.ravel(Y[t + width: t + 2 * width]) for t in range(K)]).T

        # solve the eigenvalue problem
        YfUfT = np.dot(Y_f, U_f.T)
        YfUpT = np.dot(Y_f, U_p.T)
        YfYpT = np.dot(Y_f, Y_p.T)
        UfUpT = np.dot(U_f, U_p.T)
        UfYpT = np.dot(U_f, Y_p.T)
        UpYpT = np.dot(U_p, Y_p.T)
        # F holds all cross-covariances; the block-diagonal is left empty.
        F = sparse.bmat([
            [None, YfUfT, YfUpT, YfYpT],
            [YfUfT.T, None, UfUpT, UfYpT],
            [YfUpT.T, UfUpT.T, None, UpYpT],
            [YfYpT.T, UfYpT.T, UpYpT.T, None],
        ])
        # Ginv whitens each block by its (pseudo-inverted) auto-covariance.
        Ginv = sparse.bmat([
            [np.linalg.pinv(np.dot(Y_f, Y_f.T)), None, None, None],
            [None, np.linalg.pinv(np.dot(U_f, U_f.T)), None, None],
            [None, None, np.linalg.pinv(np.dot(U_p, U_p.T)), None],
            [None, None, None, np.linalg.pinv(np.dot(Y_p, Y_p.T))],
        ])
        # BUG FIX: sp.size() (SciPy's NumPy alias) is deprecated/removed;
        # use the sparse matrix's own shape for the identity size.
        F = F - sparse.eye(F.shape[0]) * reg

        # Take smallest eigenvalues
        _, W = sparse_linalg.eigs(Ginv.dot(F), k=statedim, which='SR')

        # State sequence is a weighted combination of the past
        W_U_p = W[width * (yDim + uDim): width * (yDim + uDim + uDim), :]
        W_Y_p = W[width * (yDim + uDim + uDim):, :]
        X_hist = np.dot(W_U_p.T, U_p) + np.dot(W_Y_p.T, Y_p)

        # Regress; trim inputs to match the states we retrieved
        R = np.concatenate((X_hist[:, :-1], U[width:-width].T), 0)
        L = np.concatenate((X_hist[:, 1:], Y[width:-width].T), 0)
        RRi = np.linalg.pinv(np.dot(R, R.T))
        RL = np.dot(R, L.T)
        Sys = np.dot(RRi, RL).T
        # Partition the regression solution into the four system matrices.
        self.A = Sys[:statedim, :statedim]
        self.B = Sys[:statedim, statedim:]
        self.C = Sys[statedim:, :statedim]
        self.D = Sys[statedim:, statedim:]

    def __str__(self):
        return "Linear Dynamical System"

    def predict(self, U):
        """Simulate the identified system on inputs U from a random
        initial state; returns the (num, dimY) sequence of outputs."""
        # If U is a vector, reshape it
        if np.size(np.shape(U)) == 1:
            U = np.reshape(U, (-1, 1))

        # assume some random initial state
        X = np.reshape(np.random.randn(np.size(self.A, 1)), (1, -1))

        # intitial output
        Y = np.reshape(np.dot(self.C, X[-1]) + np.dot(self.D, U[0]), (1, -1))

        # generate next state
        X = np.concatenate((X, np.reshape(np.dot(self.A, X[-1]) + np.dot(self.B, U[0]), (1, -1))))

        # and so forth
        for u in U[1:]:
            Y = np.concatenate((Y, np.reshape(np.dot(self.C, X[-1]) + np.dot(self.D, u), (1, -1))))
            X = np.concatenate((X, np.reshape(np.dot(self.A, X[-1]) + np.dot(self.B, u), (1, -1))))

        return Y
0 commit comments