# File used to store hyperparameters.

# Stateless RNN, batch_size declared at fit step
rnn_layers: 1 # Number of hidden recurrent layers
rnn_units: 20 # Number of units per hidden recurrent layer
dense_layers: 1 # hidden dense layers AFTER recurrent layers and BEFORE final output cell
dense_units: 5 # number of units for hidden dense layers
activation: ['linear', 'linear'] # Activation type for hidden layers, dense layers respectively
dropout: [0.2, 0.2] # NOTE: length must match total number of layers, default is 1 hidden recurrent layer and 1 dense output layer
recurrent_dropout: 0.2 # Length must match number of recurrent layers
reset_states: true # reset hidden states after training epoch, triggers reset_states() via callbacks
# batch_reset: 10 # reset states after given number of batches
batch_schedule_type: 'constant' # Schedule to Reset Hidden State
bmin: 20 # Minimum number of batches for batch reset schedule
clipvalue: 10.0 # gradient clipping param, gradient can't exceed this value
phys_initialize: false # physics initialization
verbose_weights: true # Prints out hashes of weights for tracking reproducibility
verbose_fit: false # Prints out all training epochs, makes computation much slower
# features_list: ['Ed', 'Ew', 'solar', 'wind', 'rain']
features_list: ['Ed', 'Ew', 'rain']
scaler: 'standard' # One of methods in scalers dictionary in moisture_rnn.py
train_frac: 0.5 # fraction of input data to be used in training set
val_frac: 0.1 # fraction of input data to be used in validation set. Test set size determined from train_frac and val_frac
early_stopping_patience: 5 # Number of epochs with no improvement after which training will be stopped.
activation: ['linear', 'linear']
recurrent_activation: 'sigmoid'
dropout: [0.2, 0.2] # NOTE: length must match total number of layers, default is 1 hidden recurrent layer and 1 dense output layer
recurrent_dropout: 0.2 # Length must match number of recurrent layers
reset_states: true # reset hidden states after training epoch, triggers reset_states() via callbacks
# batch_reset: 150 # reset states after given number of batches
clipvalue: 1.0 # gradient clipping param, gradient can't exceed this value
phys_initialize: false # physics initialization
verbose_weights: true # Prints out hashes of weights for tracking reproducibility
verbose_fit: false # Prints out all training epochs, makes computation much slower
features_list: ['Ed', 'Ew', 'rain']
scaler: 'minmax' # One of methods in scalers dictionary in moisture_rnn.py
early_stopping_patience: 5 # Number of epochs with no improvement after which training will be stopped.
# Param sets for reproducibility
activation: ['linear', 'linear']
recurrent_dropout: 0.2
phys_initialize: false
features_list: ['Ed', 'Ew', 'solar', 'wind', 'rain']
early_stopping_patience: 9999 # early stopping not used in repro case, so setting to a huge value to ignore
# dense_layers: 0 # hidden dense layers AFTER recurrent layers and BEFORE final output cell
# activation: ['linear', 'linear']
# centering: [0.0, 0.0]
# dropout: [0.0, 0.0] # NOTE: length must match total number of layers, default is 1 hidden recurrent layer and 1 dense output layer
# recurrent_dropout: 0.0 # Length must match number of recurrent layers
# reset_states: true # reset hidden states after training epoch, triggers reset_states() via callbacks
# learning_rate: 0.001
# phys_initialize: false # physics initialization
# verbose_weights: true # Prints out hashes of weights for tracking reproducibility
# verbose_fit: false # Prints out all training epochs, makes computation much slower
# features_list: ['Ed', 'Ew', 'rain']
# scaler: 'reproducibility'