4 "cell_type": "markdown",
5 "id": "9131b151-f29c-47da-8ef8-880f0017d559",
13 "execution_count": null,
18 "import reproducibility"
23 "execution_count": null,
24 "id": "c7291842-a72d-4c4e-9312-6c0c31df18e0",
28 "# both can change\n",
30 "import numpy as np\n",
31 "import pandas as pd\n",
32 "import tensorflow as tf\n",
35 "# Configuring the number of threads\n",
37 "#tf.config.threading.set_intra_op_parallelism_threads(NUM_THREADS)\n",
38 "#tf.config.threading.set_inter_op_parallelism_threads(NUM_THREADS)\n",
40 "from keras.models import Sequential\n",
41 "from keras.layers import Dense, SimpleRNN\n",
42 "# from keras.utils.vis_utils import plot_model\n",
43 "from keras.utils import plot_model\n",
45 "from sklearn.preprocessing import MinMaxScaler\n",
46 "from sklearn.metrics import mean_squared_error\n",
49 "import matplotlib.pyplot as plt\n",
50 "import tensorflow as tf\n",
51 "import keras.backend as K\n",
52 "# from keras.utils.vis_utils import plot_model\n",
53 "from scipy.interpolate import LinearNDInterpolator, interpn\n",
54 "from scipy.optimize import root\n",
55 "import pickle, os\n",
56 "from utils import hash2\n",
62 "execution_count": null,
63 "id": "75cbad66-7e26-486b-b6e3-91d3cc7a4dbd",
67 "from utils import logging_setup\n",
73 "execution_count": null,
74 "id": "bb5b3dcf-e0b1-4939-ac18-866060715ae5",
78 "# Local modules for handling data and running moisture models\n",
79 "import data_funcs as datf\n",
80 "from data_funcs import format_raws, retrieve_raws, format_precip, fixnan, load_and_fix_data\n",
81 "from data_funcs import raws_data, synthetic_data, plot_data, check_data, rmse_data, to_json, from_json\n",
82 "import moisture_models as mod\n",
83 "from moisture_rnn import run_case, run_rnn, create_RNN_2, staircase, train_rnn, rnn_predict"
87 "cell_type": "markdown",
88 "id": "eebb5f2a-d5d9-47db-bc01-87a6560c19e5",
96 "execution_count": null,
97 "id": "1de795c3-3cad-454c-9f0b-94a9e937b21a",
101 "# Change directory for data read/write\n",
103 "dict_file='data/testing_dict.pickle' # input path of FMDA dictionaries\n",
104 "output_path='outputs/outputs.json' # RNN output file\n",
105 "reproducibility_file='data/reproducibility_dict.pickle'"
110 "execution_count": null,
115 "from module_param_sets import param_sets"
120 "execution_count": null,
121 "id": "94c55b47-97d8-4cc9-956f-38efc5be74e2",
125 "param_sets_keys=['0']\n",
126 "# param_sets_keys = param_sets.keys()"
131 "execution_count": null,
132 "id": "a12aaf59-9276-484d-be48-c704a7c452da",
136 "# read test datasets\n",
137 "test_dict=load_and_fix_data(dict_file)\n",
138 "logging.info(\"testing datasets test_dict.keys():%s\",test_dict.keys())\n",
140 "repro_dict=load_and_fix_data(reproducibility_file)\n",
141 "logging.info(\"reproducibility dataset repro_dict.keys(): %s\",repro_dict.keys())"
146 "execution_count": null,
147 "id": "7ff87917-536b-4451-a472-90940d96a6cf",
152 "logging.info('param_sets_keys=%s',param_sets_keys)\n",
154 "for i in param_sets_keys:\n",
156 " params=param_sets[i]\n",
158 "    # Run reproducibility case\n",
159 " print('Running reproducibility')\n",
160 " assert param_sets[i]['purpose'] == 'reproducibility'\n",
161 " output[i]={'params':params,'cases':{}}\n",
162 " case = 'case11'\n",
163 " case_data=repro_dict[case]\n",
164 " case_data[\"h2\"] = round(case_data[\"hours\"] * params['train_frac'])\n",
165 " # To match output from moisture_rnn_pkl, uncomment\n",
166 " # print(\"Setting No prediction phase\")\n",
167 " # params['initialize'] = False\n",
168 " # case_data['h2'] = case_data[\"hours\"]\n",
169 " print(case_data['h2'])\n",
171 " output[i]['cases'][case]=run_case(case_data,params)\n",
172 " print('*** params',i,'reproducibility case','summary ***')\n",
173 " print('params=',params)\n",
174 " print('outputs=',json.dumps(output[i]['cases'][case],indent=4,sort_keys=True))\n",
175 " print('writing the results to file',output_path)\n",
176 " json.dump(output,open(output_path,'w'),indent=4,sort_keys=True)\n",
178 " #print('params=',params)\n",
179 " if params['cases'] == 'all':\n",
180 " params['cases'] = list(test_dict.keys())\n",
181 " print(\"expanding 'all' to\",params['cases'])\n",
182 " output[i]={'params':params,'cases':{}}\n",
183 " cases = params['cases']\n",
184 " print('cases=',cases)\n",
185 " for case in cases:\n",
186 " for initialize in [True,False]:\n",
187 " # print(json.dumps(params,indent=4,sort_keys=True))\n",
188 " case_data=test_dict[case]\n",
189 " print('case=',case,case_data['title'])\n",
190 " if not 'title' in case_data.keys():\n",
191 " case_data['title']=case\n",
192 " if not 'hours' in case_data.keys():\n",
193 " case_data['hours']=len(case_data['fm'])\n",
194 " # case_data['h2']=int(20*24) # length of training period\n",
195 " if params['synthetic'] or 'Synth' not in case_data['title']: \n",
196 " params['initialize']=initialize \n",
197 " output[i]['cases'][case]={'initialize':{initialize:run_case(case_data,params)}} # add to results\n",
198 " print('*** params',i,'case',case,'summary ***')\n",
199 " print('params=',params)\n",
200 " print('outputs=',json.dumps(output[i]['cases'][case],indent=4,sort_keys=True))\n",
201 " print('writing the results to file',output_path)\n",
202 " json.dump(output,open(output_path,'w'),indent=4,sort_keys=True)\n",
204 " print('skipping synthetic case',case,case_data['title'])\n",
205 " print('cases=',cases)\n",
206 "print(json.dumps(output,indent=4,sort_keys=True))\n",
212 "execution_count": null,
213 "id": "d64157c2-24e6-4e42-9c8d-0be45ce0c529",
217 "logging.info('fmda_rnn_rain.ipynb done')"
222 "execution_count": null,
230 "execution_count": null,
239 "display_name": "Python 3 (ipykernel)",
240 "language": "python",
248 "file_extension": ".py",
249 "mimetype": "text/x-python",
251 "nbconvert_exporter": "python",
252 "pygments_lexer": "ipython3",