# File used to store hyperparameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Stateless RNN, batch_size declared at fit step
rnn_layers: 1 # Number of hidden recurrent layers
rnn_units: 20 # Number of units per hidden recurrent layer
dense_layers: 1 # hidden dense layers AFTER recurrent layers and BEFORE final output cell
dense_units: 5 # number of units for hidden dense layers
activation: ['relu', 'relu'] # Activation type for hidden layers, dense layers respectively
dropout: [0.2, 0.2] # NOTE: length must match total number of layers, default is 1 hidden recurrent layer and 1 dense output layer
recurrent_dropout: 0.2 # Length must match number of recurrent layers
reset_states: true # reset hidden states after training epoch, triggers reset_states() via callbacks
# batch_reset: 10 # reset states after given number of batches
batch_schedule_type: 'exp' # Schedule to Reset Hidden State
bmin: 20 # Minimum number of batches for batch reset schedule
clipvalue: 10.0 # gradient clipping param, gradient can't exceed this value
phys_initialize: false # physics initialization
verbose_weights: true # Prints out hashes of weights for tracking reproducibility
verbose_fit: false # Prints out all training epochs, makes computation much slower
# features_list: ['Ed', 'Ew', 'solar', 'wind', 'rain']
features_list: ['Ed', 'Ew', 'rain']
scaler: 'standard' # One of methods in scalers dictionary in moisture_rnn.py
time_fracs: [0.9, 0.05, 0.05] # Percentage of data based on time span for train/val/test
early_stopping_patience: 5 # Number of epochs with no improvement after which training will be stopped.
predict_spinup_hours: 5 # Number of hours to run through the model before prediction errors evaluated. Used to stabilize hidden state
activation: ['relu', 'relu'] # Activation type for hidden layers, dense layers respectively
recurrent_activation: 'sigmoid' # Activation used on the recurrent step
dropout: [0.2, 0.2] # NOTE: length must match total number of layers, default is 1 hidden recurrent layer and 1 dense output layer
recurrent_dropout: 0.2 # Length must match number of recurrent layers
reset_states: true # reset hidden states after training epoch, triggers reset_states() via callbacks
batch_schedule_type: 'constant' # Schedule to Reset Hidden State
bmin: 20 # Minimum number of batches for batch reset schedule
clipvalue: 1.0 # gradient clipping param, gradient can't exceed this value
phys_initialize: false # physics initialization
verbose_weights: true # Prints out hashes of weights for tracking reproducibility
verbose_fit: false # Prints out all training epochs, makes computation much slower
features_list: ['Ed', 'Ew', 'rain']
scaler: 'minmax' # One of methods in scalers dictionary in moisture_rnn.py
time_fracs: [0.9, 0.05, 0.05] # Percentage of data based on time span for train/val/test
early_stopping_patience: 25 # Number of epochs with no improvement after which training will be stopped.
predict_spinup_hours: 5 # Number of hours to run through the model before prediction errors evaluated. Used to stabilize hidden state
# Param sets for reproducibility
activation: ['linear', 'linear'] # Activation type for hidden layers, dense layers respectively
recurrent_dropout: 0.2 # Length must match number of recurrent layers
phys_initialize: false # physics initialization
features_list: ['Ed', 'Ew', 'solar', 'wind', 'rain']
time_fracs: [0.5, 0.2, 0.3] # Percentage of data based on time span for train/val/test
early_stopping_patience: 9999 # early stopping not used in repro case, so setting to a huge value to ignore
predict_spinup_hours: null # Number of hours to run through the model before prediction errors evaluated. Used to stabilize hidden state
# dense_layers: 0 # hidden dense layers AFTER recurrent layers and BEFORE final output cell
# activation: ['linear', 'linear']
# centering: [0.0,0.0]
# dropout: [0.0, 0.0] # NOTE: length must match total number of layers, default is 1 hidden recurrent layer and 1 dense output layer
# recurrent_dropout: 0.0 # Length must match number of recurrent layers
# reset_states: True # reset hidden states after training epoch, triggers reset_states() via callbacks
# learning_rate: 0.001
# phys_initialize: False # physics initialization
# verbose_weights: True # Prints out hashes of weights for tracking reproducibility
# verbose_fit: False # Prints out all training epochs, makes computation much slower
# features_list: ['Ed', 'Ew', 'rain']
# scaler: 'reproducibility'
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
colsample_bytree: 0.9 # Subsample ratio of columns when constructing each tree
### Params sent by Schreck, slow and less accurate for this dataset
# objective: "reg:squarederror"
# colsample_bytree: 0.8995496645826047
# gamma: 0.6148001693726943
# learning_rate: 0.07773680788294579
# subsample: 0.7898672617361431
# metric: "valid_rmse"
n_estimators: 25 # Number of trees in the forest
criterion: "squared_error" # Function to measure the quality of a split (previously "mse")
max_depth: 5 # Maximum depth of the tree
min_samples_split: 2 # Minimum number of samples required to split an internal node
min_samples_leaf: 1 # Minimum number of samples required to be at a leaf node
max_features: 0.8 # Number of features to consider when looking for the best split
bootstrap: true # Whether bootstrap samples are used when building trees
max_samples: null # If bootstrap is True, the number of samples to draw from X to train each base estimator
random_state: null # Controls both the randomness of the bootstrapping of the samples and the sampling of the features
verbose: 0 # Controls the verbosity when fitting and predicting
warm_start: false # When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble