1 # File used to store hyperparameters.
3 # Stateless RNN, batch_size declared at fit step
8 rnn_layers: 1 # Number of hidden recurrent layers
9 rnn_units: 20 # Number of units per hidden recurrent layer
10 dense_layers: 1 # hidden dense layers AFTER recurrent layers and BEFORE final output cell
11 dense_units: 5 # number of units for hidden dense layers
12 activation: ['relu', 'relu'] # Activation for hidden recurrent layers and dense layers, respectively
13 dropout: [0.2, 0.2] # NOTE: length must match total number of layers, default is 1 hidden recurrent layer and 1 dense output layer
14 recurrent_dropout: 0.2 # If given as a list, length must match number of recurrent layers
15 reset_states: true # reset hidden states after training epoch, triggers reset_states() via callbacks
16 # batch_reset: 10 # reset states after given number of batches
17 batch_schedule_type: 'exp' # Schedule to Reset Hidden State
18 bmin: 20 # Minimum number of batches for batch reset schedule
22 clipvalue: 10.0 # gradient clipping param, gradient can't exceed this value
23 phys_initialize: false # physics initialization
25 verbose_weights: true # Prints out hashes of weights for tracking reproducibility
26 verbose_fit: false # Prints out all training epochs, makes computation much slower
27 # features_list: ['Ed', 'Ew', 'solar', 'wind', 'rain']
28 features_list: ['Ed', 'Ew', 'rain']
30 scaler: 'standard' # One of methods in scalers dictionary in moisture_rnn.py
31 time_fracs: [0.9, 0.05, 0.05] # Fractions of data (by time span) for train/val/test splits
32 early_stopping_patience: 5 # Number of epochs with no improvement after which training will be stopped.
33 predict_spinup_hours: 5 # Number of hours to run through the model before prediction errors evaluated. Used to stabilize hidden state
43 activation: ['relu', 'relu']
44 recurrent_activation: 'sigmoid'
45 dropout: [0.2, 0.2] # NOTE: length must match total number of layers, default is 1 hidden recurrent layer and 1 dense output layer
46 recurrent_dropout: 0.2 # If given as a list, length must match number of recurrent layers
47 reset_states: true # reset hidden states after training epoch, triggers reset_states() via callbacks
48 batch_schedule_type: 'constant' # Schedule to Reset Hidden State
49 bmin: 20 # Minimum number of batches for batch reset schedule
53 clipvalue: 1.0 # gradient clipping param, gradient can't exceed this value
54 phys_initialize: false # physics initialization
56 verbose_weights: true # Prints out hashes of weights for tracking reproducibility
57 verbose_fit: false # Prints out all training epochs, makes computation much slower
58 features_list: ['Ed', 'Ew', 'rain']
60 scaler: 'minmax' # One of methods in scalers dictionary in moisture_rnn.py
61 time_fracs: [0.9, 0.05, 0.05] # Fractions of data (by time span) for train/val/test splits
62 early_stopping_patience: 5 # Number of epochs with no improvement after which training will be stopped.
63 predict_spinup_hours: 5 # Number of hours to run through the model before prediction errors evaluated. Used to stabilize hidden state
65 # Param sets for reproducibility
76 activation: ['linear', 'linear']
78 recurrent_dropout: 0.2
83 phys_initialize: false
87 features_list: ['Ed', 'Ew', 'solar', 'wind', 'rain']
90 time_fracs: [0.5, 0.2, 0.3] # Fractions of data (by time span) for train/val/test splits
91 early_stopping_patience: 9999 # Early stopping unused in the reproducibility case; set huge so it never triggers
92 predict_spinup_hours: null # Number of hours to run through the model before prediction errors evaluated. Used to stabilize hidden state
102 # dense_layers: 0 # hidden dense layers AFTER recurrent layers and BEFORE final output cell
104 # activation: ['linear', 'linear']
105 # centering: [0.0,0.0]
106 # dropout: [0.0, 0.0] # NOTE: length must match total number of layers, default is 1 hidden recurrent layer and 1 dense output layer
107 # recurrent_dropout: 0.0 # Length must match number of recurrent layers
108 # reset_states: True # reset hidden states after training epoch, triggers reset_states() via callbacks
110 # learning_rate: 0.001
111 # phys_initialize: False # physics initialization
113 # verbose_weights: True # Prints out hashes of weights for tracking reproducibility
114 # verbose_fit: False # Prints out all training epochs, makes computation much slower
115 # features_list: ['Ed', 'Ew', 'rain']
117 # scaler: 'reproducibility'