import numpy as np

def ext_kf(u,P,F,Q=0,d=None,H=None,R=None):
    """
    One step of the extended Kalman filter.
    If there is no data, only advance in time.
    :param u: the state vector, shape (n)
    :param P: the state covariance, shape (n,n)
    :param F: the model function; takes the vector u, returns F(u) and the Jacobian J(u)
    :param Q: the process model noise covariance, shape (n,n)
    :param d: data vector, shape (m). If None, only advance in time
    :param H: observation matrix, shape (m,n)
    :param R: data error covariance, shape (m,m)
    :return ua: the analysis state vector, shape (n)
    :return Pa: the analysis covariance matrix, shape (n,n)
    """
    def d2(a):
        return np.atleast_2d(a)  # convert to at least a 2d array

    def d1(a):
        return np.atleast_1d(a)  # convert to at least a 1d array
    # forecast step
    uf, J = F(u)    # advance the model state in time and get the Jacobian
    uf = d1(uf)     # if scalar, make the state a 1D array
    J = d2(J)       # if scalar, make the Jacobian a 2D array
    P = d2(P)       # if scalar, make the covariance a 2D array
    Pf = d2(J.T @ P) @ J + Q   # advance the state covariance, Pf = J' * P * J + Q
    # analysis step
    if d is None or not d.size:  # no data, no analysis, return the forecast
        return uf, Pf
    # K = Pf H' * inverse(H * Pf * H' + R) = (inverse(H * Pf * H' + R) * (H Pf))'
    # note: the gain is computed from the forecast covariance Pf, not the prior P
    H = d2(H)
    HPf = d2(H @ Pf)    # precompute a part used twice
    K = d2(np.linalg.solve(d2(HPf @ H.T) + R, HPf)).T  # Kalman gain
    res = d1(H @ d1(uf) - d)    # innovation res = H*uf - d
    ua = uf - K @ res           # analysis mean ua = uf - K*res
    Pa = Pf - K @ HPf           # analysis covariance
    return ua, d2(Pa)
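
# A quick check of ext_kf (an added illustration, not from the original notebook):
# a scalar linear model F(u) = 0.5*u with constant Jacobian [[0.5]] and one
# observation of the state. The analysis should land between the forecast 0.5
# and the datum 0.4. All values here are arbitrary demo inputs.
u_demo, P_demo = ext_kf(np.array([1.0]), np.array([[1.0]]),
                        lambda u: (0.5*u, np.array([[0.5]])),
                        Q=np.array([[0.01]]), d=np.array([0.4]),
                        H=np.array([[1.0]]), R=np.array([[0.1]]))
print('demo analysis state', u_demo, 'covariance', P_demo)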
### Define model function with drying, wetting, and rain equilibria

# Parameters
r0 = 0.05   # threshold rainfall [mm/h]
rs = 8.0    # saturation rain intensity [mm/h]
Tr = 14.0   # time constant for the rain wetting model [h]
S = 250     # saturation moisture contents reached in rain [dimensionless]
T = 10.0    # time constant for wetting/drying [h]
52 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def model_moisture(m0,Eqd,Eqw,r,t,partials=0,T=10.0,tlen=1.0):
    # arguments:
    # m0     starting fuel moisture (%)
    # Eqd    drying equilibrium (%)
    # Eqw    wetting equilibrium (%)
    # r      rain intensity (mm/h)
    # t      time step index, used only in diagnostics
    # partials = 0, 1, or 2
    # returns: same as model_decay
    #   if partials==0: m1 = fuel moisture contents after time tlen hours
    #              ==1: m1, dm1/dm0
    #              ==2: m1, dm1/dm0, dm1/dE
    # select the equilibrium E and the time constant T1 by regime
    if r > r0:          # raining: relax toward saturation S
        E = S
        T1 = (1.0 - np.exp(- (r - r0) / rs)) / Tr
    elif m0 <= Eqw:     # wetting: relax toward the wetting equilibrium
        E = Eqw
        T1 = 1.0/T
    elif m0 >= Eqd:     # drying: relax toward the drying equilibrium
        E = Eqd
        T1 = 1.0/T
    else:               # between the equilibria: no change
        E = m0
        T1 = 0.0
    exp_t = np.exp(-tlen*T1)
    m1 = E + (m0 - E)*exp_t    # exponential relaxation toward E
    dm1_dm0 = exp_t            # partial derivative of m1 with respect to m0
    dm1_dE = 1.0 - exp_t       # partial derivative of m1 with respect to E
    #if t>=933 and t < 940:
    #  print('t,Eqw,Eqd,r,T1,E,m0,m1,dm1_dm0,dm1_dE',
    #        t,Eqw,Eqd,r,T1,E,m0,m1,dm1_dm0,dm1_dE)
    if partials==0:
        return m1
    if partials==1:
        return m1, dm1_dm0
    return m1, dm1_dm0, dm1_dE
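
# Spot checks of the three regimes (an added illustration; the numbers are
# arbitrary demo values chosen so that Eqw < Eqd). With no rain the moisture
# relaxes toward the nearer equilibrium; in rain (r > r0) it relaxes toward S.
print('drying :', model_moisture(20.0, 15.0, 10.0, 0.0, 0))  # m0 > Eqd, relaxes toward Eqd
print('wetting:', model_moisture( 5.0, 15.0, 10.0, 0.0, 0))  # m0 < Eqw, relaxes toward Eqw
print('rain   :', model_moisture( 5.0, 15.0, 10.0, 2.0, 0))  # r > r0, relaxes toward S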
97 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def model_augmented(u0,Ed,Ew,r,t):
    # state u0 is the vector [m, Ec], with Ec a correction to the equilibria Ed and Ew at time t
    m0, Ec = u0    # decompose the state u0
    # reuse model_moisture with the corrected equilibria Eqd = Ed + Ec, Eqw = Ew + Ec:
    # m0     starting fuel moisture (%)
    # Ed     drying equilibrium (%)
    # Ew     wetting equilibrium (%)
    # r      rain intensity (mm/h)
    # partials==2 returns m1, dm1/dm0, dm1/dE
    m1, dm1_dm0, dm1_dE = model_moisture(m0, Ed + Ec, Ew + Ec, r, t, partials=2)
    u1 = np.array([m1, Ec])    # Ec is just copied over
    J = np.array([[dm1_dm0, dm1_dE],   # both equilibria shift with Ec, so dm1/dEc = dm1/dE
                  [0.,      1.]])      # Ec evolves as the identity
    return u1, J
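
# Consistency check of the analytic Jacobian against finite differences
# (an added illustration; u_test and the equilibria are arbitrary demo values).
u_test = np.array([20.0, 0.5])
u1_t, J_t = model_augmented(u_test, 15.0, 10.0, 0.0, 0)
eps = 1e-6
for k in range(2):
    du = np.zeros(2); du[k] = eps
    u1_p, _ = model_augmented(u_test + du, 15.0, 10.0, 0.0, 0)
    print('column', k, ': analytic', J_t[:,k], 'finite difference', (u1_p - u1_t)/eps)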
120 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def run_augmented_kf(d,Ed,Ew,rain,h2,hours):
    u = np.zeros((2,hours))
    u[:,0] = [0.1, 0.0]    # initialize the background state
    P = np.zeros((2,2,hours))
    P[:,:,0] = np.array([[1e-3, 0.],
                         [0., 1e-3]])   # background state covariance
    Q = np.array([[1e-3, 0.],
                  [0., 1e-3]])          # process noise covariance
    H = np.array([[1., 0.]])            # only the first component (moisture) is observed
    R = np.array([[1e-3]])              # data variance, shape (1,1)
    # ext_kf(u,P,F,Q=0,d=None,H=None,R=None) returns ua, Pa
    # print('initial u=',u,'P=',P)
    # print('Q=',Q,'H=',H,'R=',R)
    # analysis cycle: assimilate the data d[t] while t < h2
    for t in range(1,h2):
        # use a lambda to pass the additional arguments to the model
        u[:,t],P[:,:,t] = ext_kf(u[:,t-1],P[:,:,t-1],
                                 lambda uu: model_augmented(uu,Ed[t],Ew[t],rain[t],t),
                                 Q, d=d[t], H=H, R=R)
        # print('time',t,'data',d[t],'filtered',u[0,t],'Ec',u[1,t])
    # forecast cycle: no data assimilated for t >= h2
    for t in range(h2,hours):
        u[:,t],P[:,:,t] = ext_kf(u[:,t-1],P[:,:,t-1],
                                 lambda uu: model_augmented(uu,Ed[t],Ew[t],rain[t],t),
                                 Q)    # no data passed, so ext_kf only advances in time
        # print('time',t,'data',d[t],'forecast',u[0,t],'Ec',u[1,t])
    return u
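
# Smoke test of the augmented filter (an added illustration; the *_demo names
# and values are synthetic inputs, not real data): constant equilibria, no
# rain, observations held above the equilibria so the bias term Ec must absorb
# the difference while data are assimilated (t < h2).
hours_demo, h2_demo = 48, 24
Ed_demo = np.full(hours_demo, 12.0)
Ew_demo = np.full(hours_demo, 8.0)
rain_demo = np.zeros(hours_demo)
d_demo = np.full(hours_demo, 14.0)
u_demo2 = run_augmented_kf(d_demo, Ed_demo, Ew_demo, rain_demo, h2_demo, hours_demo)
print('filtered m, Ec at end of analysis:', u_demo2[:, h2_demo-1])
print('forecast m, Ec at the last hour: ', u_demo2[:, -1])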
151 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, SimpleRNN
from keras.utils.vis_utils import plot_model
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import keras.backend as K
def staircase(x,y,timesteps,trainsteps,return_sequences=False):
    # x [trainsteps+forecaststeps,features]  all inputs
    # y [trainsteps,outputs]
    # timesteps: split x and y into samples of length timesteps, shifted by 1
    # trainsteps: number of timesteps to use for training, no more than y.shape[0]
    print('shape x = ',x.shape)
    print('shape y = ',y.shape)
    print('timesteps=',timesteps)
    print('trainsteps=',trainsteps)
    outputs = y.shape[1]
    features = x.shape[1]
    forecaststeps = x.shape[0] - trainsteps
    samples = trainsteps - timesteps + 1
    print('staircase: samples=',samples,'timesteps=',timesteps,'features=',features)
    x_train = np.empty([samples, timesteps, features])
    print('return_sequences=',return_sequences)
    if return_sequences:
        print('returning all timesteps in a sample')
        y_train = np.empty([samples, timesteps, outputs])  # one output per timestep
        for i in range(samples):
            for k in range(timesteps):
                for j in range(features):
                    x_train[i,k,j] = x[i+k,j]
                for j in range(outputs):
                    y_train[i,k,j] = y[i+k,j]
    else:
        print('returning only the last timestep in a sample')
        y_train = np.empty([samples, outputs])
        for i in range(samples):
            for j in range(features):
                for k in range(timesteps):
                    x_train[i,k,j] = x[i+k,j]
            for j in range(outputs):
                y_train[i,j] = y[i+timesteps-1,j]
    return x_train, y_train
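
# Shape check of staircase (an added illustration with tiny synthetic arrays):
# 10 steps of 2 features become 10-5+1 = 6 overlapping samples of length 5.
x_demo = np.arange(20.0).reshape(10, 2)
y_demo = np.arange(10.0).reshape(10, 1)
x_tr, y_tr = staircase(x_demo, y_demo, timesteps=5, trainsteps=10)
print('x_train', x_tr.shape, 'y_train', y_tr.shape)  # (6, 5, 2) and (6, 1)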
202 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def seq2batches(x,y,timesteps,trainsteps):
    # x [trainsteps+forecaststeps,features]  all inputs
    # y [trainsteps,outputs]
    # timesteps: split x and y into samples of length timesteps, shifted by 1
    # trainsteps: number of timesteps to use for training, no more than y.shape[0]
    print('shape x = ',x.shape)
    print('shape y = ',y.shape)
    print('timesteps=',timesteps)
    print('trainsteps=',trainsteps)
    outputs = y.shape[1]
    features = x.shape[1]
    samples = trainsteps - timesteps + 1
    print('samples=',samples)
    x_train = np.empty([samples, timesteps, features])
    y_train = np.empty([samples, timesteps, outputs])  # full target sequences
    print('samples=',samples,' timesteps=',timesteps,
          ' features=',features,' outputs=',outputs)
    for i in range(samples):
        for k in range(timesteps):
            for j in range(features):
                x_train[i,k,j] = x[i+k,j]
            for j in range(outputs):
                y_train[i,k,j] = y[i+k,j]  # return sequences: target at every timestep
    return x_train, y_train
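
# The same shape check for seq2batches (an added illustration, reusing x_demo
# and y_demo from the staircase example above). Unlike staircase with
# return_sequences=False, the targets keep the full sequence dimension.
x_tr2, y_tr2 = seq2batches(x_demo, y_demo, timesteps=5, trainsteps=10)
print('x_train', x_tr2.shape, 'y_train', y_tr2.shape)  # (6, 5, 2) and (6, 5, 1)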
229 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def create_RNN_2(hidden_units, dense_units, activation, stateful=False,
                 batch_shape=None, input_shape=None, dense_layers=1,
                 rnn_layers=1, return_sequences=False,
                 initial_state=None):
    if stateful:
        inputs = tf.keras.Input(batch_shape=batch_shape)
    else:
        inputs = tf.keras.Input(shape=input_shape)
    # https://stackoverflow.com/questions/43448029/how-can-i-print-the-values-of-keras-tensors
    # inputs2 = K.print_tensor(inputs, message='inputs = ')  # also change inputs to inputs2 below; must be used to print
    x = inputs
    for i in range(rnn_layers):
        # note: with rnn_layers > 1, the intermediate layers need return_sequences=True
        x = tf.keras.layers.SimpleRNN(hidden_units, activation=activation[0],
                                      stateful=stateful,
                                      return_sequences=return_sequences)(x)
        # ,initial_state=initial_state could be passed to the layer call above
    # x = tf.keras.layers.Dense(hidden_units, activation=activation[1])(x)
    for i in range(dense_layers):
        x = tf.keras.layers.Dense(dense_units, activation=activation[1])(x)
    model = tf.keras.Model(inputs=inputs, outputs=x)
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model