from datetime import datetime
import logging
import xarray as xr
import cftime
import numpy as np
from xmip.preprocessing import combined_preprocessing
import CMIP6_model
import CMIP6_config
import CMIP6_regrid
import xesmf as xe
import os
import texttable
import pandas as pd
from google.cloud import storage
import gcsfs
import hashlib
import base64


class CMIP6_IO:

    def __init__(self):
        self.models = []
        self.storage_client = storage.Client()
        self.bucket_name = "actea-shared"
        self.bucket = self.storage_client.bucket(self.bucket_name)
        self.fs = gcsfs.GCSFileSystem(project="downscale")
        self.logger = logging.getLogger("CMIP6-log")
        self.logger.setLevel(logging.INFO)

    def format_netcdf_filename(self, dir, model_name, member_id, current_experiment_id, key):
        if member_id is None:
            return "{}/{}/{}/CMIP6_{}_{}_{}_weighted.nc".format(dir, current_experiment_id,
                                                                model_name,
                                                                model_name, current_experiment_id, key)
        return "{}/{}/{}/CMIP6_{}_{}_{}_{}.nc".format(dir, current_experiment_id,
                                                      model_name,
                                                      model_name, member_id, current_experiment_id, key)

    def calculate_md5_sha(self, file_name: str) -> str:
        """
        Calculate the MD5 hash of a file so that GCS can verify the upload.
        https://stackoverflow.com/questions/52686848/does-google-cloud-storage-client-in-python-check-crc-or-md5-automatically

        Parameters
        ----------
        file_name: name of the file to hash

        Returns
        -------
        The base64-encoded MD5 digest of the file.
        """
        hash_md5 = hashlib.md5()
        with open(file_name, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return base64.b64encode(hash_md5.digest()).decode()

    def upload_to_gcs(self, fname: str, fname_gcs: str = None):
        """Upload a file to GCS.

        Calculates the MD5 hash before uploading; GCS uses the hash to verify
        that the upload completed correctly. If fname_gcs is given, the file is
        stored under that blob name, otherwise under fname.
        """
        md5 = self.calculate_md5_sha(fname)
        if fname_gcs:
            blob = self.bucket.blob(fname_gcs)
        else:
            blob = self.bucket.blob(fname)
        blob.md5_hash = md5
        blob.upload_from_filename(fname)
        self.logger.info(f"[CMIP6_IO] Finished uploading {fname} to blob {blob.name}")

    def print_table_of_models_and_members(self):
        table = texttable.Texttable()
        table.set_cols_align(["c", "c", "c"])
        table.set_cols_valign(["t", "m", "b"])
        table.header(["Model", "Member", "Variable"])
        for model in self.models:
            for member_id in model.member_ids:
                var_info = ""
                for var in model.ocean_vars[member_id]:
                    var_info += "{} ".format(var)
                table.add_row([model.name, member_id, var_info])
        table.set_cols_width([30, 30, 50])
        print(table.draw() + "\n")

    def list_dataset_on_gs(self, prefix: str) -> list:
        """
        List all files on GCS under a given prefix.
        """
        files_on_gcs = []
        for blob in self.storage_client.list_blobs(self.bucket, prefix=prefix):
            files_on_gcs.append(blob.name)
        self.logger.info(f"[CMIP6_IO] Found {len(files_on_gcs)} files on gs at {prefix}")
        return files_on_gcs

    def open_dataset_on_gs(self, file_name: str, decode_times: bool = True) -> xr.Dataset:
        # Returns None if the blob does not exist in the bucket
        if storage.Blob(bucket=self.bucket, name=str(file_name)).exists(self.storage_client):
            file_name = f"{self.bucket_name}/{file_name}"
            self.logger.debug(f"[CMIP6_IO] Opening file {file_name}")
            file_obj = self.fs.open(file_name)
            return xr.open_dataset(file_obj, engine="h5netcdf", decode_times=decode_times)

    def organize_cmip6_netcdf_files_into_datasets(self, config: CMIP6_config.Config_albedo, current_experiment_id):
        source_id = config.source_id
        member_id = config.member_id
        if source_id in config.models.keys():
            model_object = config.models[source_id]
        else:
            model_object = CMIP6_model.CMIP6_MODEL(name=source_id)
        logging.info(f"[CMIP6_IO] Organizing NetCDF CMIP6 model object {model_object.name} and member {member_id}")

        for variable_id in config.variable_ids:
            netcdf_filename = self.format_netcdf_filename(config.cmip6_netcdf_dir,
                                                          model_object.name,
                                                          member_id,
                                                          current_experiment_id,
                                                          variable_id)
            if storage.Blob(bucket=self.bucket, name=netcdf_filename).exists(self.storage_client):
                ds = self.open_dataset_on_gs(netcdf_filename, decode_times=True)

                # Extract the time period and latitude band of interest
                ds = ds.sel(time=slice(config.start_date, config.end_date),
                            y=slice(config.min_lat, config.max_lat))
                logging.info("[CMIP6_IO] {} => NetCDF: Extracted {} range from {} to {} for {}".format(
                    source_id, variable_id,
                    ds["time"].values[0], ds["time"].values[-1],
                    current_experiment_id))

                # Save the info to the model object
                if member_id not in model_object.member_ids:
                    model_object.member_ids.append(member_id)
                if member_id not in model_object.ocean_vars.keys():
                    model_object.ocean_vars[member_id] = []
                if variable_id not in model_object.ocean_vars[member_id]:
                    model_object.ocean_vars[member_id].append(variable_id)

                self.dataset_into_model_dictionary(member_id, variable_id, ds, model_object)

        self.models.append(model_object)
        logging.info(f"[CMIP6_IO] Stored {len(model_object.ocean_vars)} variables for model {model_object.name}")

    def to_360day_monthly(self, ds: xr.Dataset):
        """Change the calendar to datetime and the precision to monthly (mid-month dates)."""
        # https://github.com/pydata/xarray/issues/3320
        time1 = ds.time.copy()
        for itime in range(ds.sizes["time"]):
            bb = ds.time.values[itime].timetuple()
            time1.values[itime] = datetime(bb[0], bb[1], 16)
        logging.info("[CMIP6_IO] Fixed time units: start at {} and end at {}".format(time1.values[0], time1.values[-1]))
        ds = ds.assign_coords({"time": time1})
        return ds

    # Loop over all models and scenarios listed in CMIP6_light.config
    # and store each CMIP6 variable and scenario in a CMIP6 model object
    def organize_cmip6_datasets(self, config: CMIP6_config.Config_albedo, current_experiment_id):
        for grid_label in config.grid_labels:
            if config.source_id in config.models.keys():
                model_object = config.models[config.source_id]
            else:
                model_object = CMIP6_model.CMIP6_MODEL(name=config.source_id)
            logging.info("[CMIP6_IO] Organizing CMIP6 model object {}".format(model_object.name))

            collection_of_variables = []
            for variable_id, table_id in zip(config.variable_ids, config.table_ids):
                # Historical query string
                query_string = ("source_id=='{}' and table_id=='{}' and member_id=='{}' "
                                "and grid_label=='{}' and experiment_id=='historical' "
                                "and variable_id=='{}'").format(config.source_id,
                                                                table_id,
                                                                config.member_id,
                                                                grid_label,
                                                                variable_id)
                ds_hist = self.perform_cmip6_query(config, query_string)

                # Future projection depending on the choice of experiment_id
                query_string = ("source_id=='{}' and table_id=='{}' and member_id=='{}' "
                                "and grid_label=='{}' and experiment_id=='{}' "
                                "and variable_id=='{}'").format(config.source_id,
                                                                table_id,
                                                                config.member_id,
                                                                grid_label,
                                                                current_experiment_id,
                                                                variable_id)
                ds_proj = self.perform_cmip6_query(config, query_string)

                if isinstance(ds_proj, xr.Dataset) and isinstance(ds_hist, xr.Dataset):
                    # Concatenate the historical and projection datasets
                    ds = xr.concat([ds_hist, ds_proj], dim="time")

                    if ds.indexes["time"].dtype not in ["datetime64[ns]"]:
                        start_date = datetime.fromisoformat(config.start_date)
                        end_date = datetime.fromisoformat(config.end_date)
                        ds = self.to_360day_monthly(ds)
                    else:
                        start_date = config.start_date
                        end_date = config.end_date
                    ds = xr.decode_cf(ds)
                    logging.info("[CMIP6_IO] Variable: {} with units {}".format(variable_id, ds[variable_id].units))

                    if variable_id in ["prw"]:
                        # 1 kg of rain water spread over 1 square meter of surface is 1 mm thick.
                        # The pvlib functions take cm, so convert the values from mm to cm.
                        ds[variable_id].values = ds[variable_id].values / 10.0
                        ds.attrs["units"] = "cm"
                        logging.info("[CMIP6_IO] Minimum {} and maximum {} values after converting to {} units".format(
                            np.nanmin(ds[variable_id].values),
                            np.nanmax(ds[variable_id].values),
                            ds[variable_id].units))

                    if variable_id in ["tas"]:
                        if ds[variable_id].units in ["K", "Kelvin", "kelvin"]:
                            ds[variable_id].values = ds[variable_id].values - 273.15
                            ds.attrs["units"] = "C"
                            logging.info("[CMIP6_IO] Minimum {} and maximum {} values after converting to {} units".format(
                                np.nanmin(ds[variable_id].values),
                                np.nanmax(ds[variable_id].values),
                                ds[variable_id].units))

                    # Remove the duplicate overlapping times (e.g. 2001-2014)
                    _, index = np.unique(ds["time"], return_index=True)
                    ds = ds.isel(time=index)

                    # Extract the time period of interest and convert the index to datetime64
                    ds = ds.sel(time=slice(start_date, end_date))
                    ds["time"] = pd.to_datetime(ds.indexes["time"])

                    logging.info("[CMIP6_IO] {} => Extracted {} range from {} to {} for member {}".format(
                        config.source_id,
                        variable_id,
                        ds["time"].values[0],
                        ds["time"].values[-1],
                        config.member_id))

                    # Run the xmip pre-processing directly on the dataset
                    dset_processed = combined_preprocessing(ds)
                    if variable_id in ["chl"]:
                        if config.source_id in ["CESM2", "CESM2-FV2", "CESM2-WACCM-FV2"]:
                            dset_processed = dset_processed.isel(lev_partial=config.selected_depth)
                        else:
                            dset_processed = dset_processed.isel(lev=config.selected_depth)
                    if variable_id in ["thetao"]:
                        dset_processed = dset_processed.isel(lev=config.selected_depth)
                    if variable_id in ["ph"]:
                        logging.info("[CMIP6_IO] => Extracting only depth level {}".format(config.selected_depth))
                        dset_processed = dset_processed.isel(lev=config.selected_depth)
                    collection_of_variables.append(variable_id)

                    # Save the info to the model object
                    if config.member_id not in model_object.member_ids:
                        model_object.member_ids.append(config.member_id)
                    if config.member_id not in model_object.ocean_vars.keys():
                        model_object.ocean_vars[config.member_id] = []
                    if variable_id not in model_object.ocean_vars[config.member_id]:
                        model_object.ocean_vars[config.member_id].append(variable_id)

                    self.dataset_into_model_dictionary(config.member_id, variable_id,
                                                       dset_processed,
                                                       model_object)

            # Report any requested variables that could not be found
            missing = [x for x in config.variable_ids if x not in collection_of_variables]
            if missing:
                logging.error(f"[CMIP6_IO] Error - unable to find variables: {missing}")

            self.models.append(model_object)
            logging.info("[CMIP6_IO] Stored {} variables for model {}".format(len(model_object.ocean_vars),
                                                                              model_object.name))

    def dataset_into_model_dictionary(self,
                                      member_id: str,
                                      variable_id: str,
                                      dset: xr.Dataset,
                                      model_object: CMIP6_model.CMIP6_MODEL):
        # Store each dataset for each variable in a dictionary of variables per member_id
        try:
            existing_ds = model_object.ds_sets[member_id]
        except KeyError:
            existing_ds = {}
        existing_ds[variable_id] = dset
        model_object.ds_sets[member_id] = existing_ds

    def perform_cmip6_query(self, config, query_string: str) -> xr.Dataset:
        df_sub = config.df.query(query_string)
        if df_sub.zstore.values.size == 0:
            return df_sub

        mapper = config.fs.get_mapper(df_sub.zstore.values[-1])
        logging.debug("[CMIP6_IO] df_sub: {}".format(df_sub))
        ds = xr.open_zarr(mapper, consolidated=True, mask_and_scale=True)

        if ds.indexes["time"].dtype not in ["datetime64[ns]", "object"]:
            time_object = ds.indexes["time"].to_datetimeindex()

            # Some models start their calendar at year 1; shift those times forward by 2000 years
            if time_object[0].year == 1:
                times = ds.indexes["time"].to_datetimeindex()
                times_plus_2000 = []
                for t in times:
                    times_plus_2000.append(
                        cftime.DatetimeNoLeap(t.year + 2000, t.month, t.day, t.hour)
                    )
                ds["time"].values = times_plus_2000
                ds = xr.decode_cf(ds)
        return ds

    def write_netcdf(self, ds: xr.Dataset, out_file: str) -> None:
        # Compress all multi-dimensional variables and split them into smaller chunks
        enc = {}
        for k in ds.data_vars:
            if ds[k].ndim < 2:
                continue
            enc[k] = {
                "zlib": True,
                "complevel": 3,
                "fletcher32": True,
                "chunksizes": tuple(map(lambda x: x // 2, ds[k].shape)),
            }
        ds.to_netcdf(out_file, format="NETCDF4", engine="netcdf4", encoding=enc)
"""
Regrid to cartesian grid and save to NetCDF:
For any Amon related variables (wind, clouds), the resolution from CMIP6 models is less than
1 degree longitude x latitude. To interpolate to a 1x1 degree grid we therefore first interpolate to a
2x2 degrees grid and then subsequently to a 1x1 degree grid.
"""
def extract_dataset_and_save_to_netcdf(self, model_obj, config: CMIP6_config.Config_albedo, current_experiment_id):
if os.path.exists(config.cmip6_outdir) is False:
os.mkdir(config.cmip6_outdir)
# ds_out_amon = xe.util.grid_global(2, 2)
ds_out_amon = xe.util.grid_2d(config.min_lon,
config.max_lon, 2,
config.min_lat,
config.max_lat, 2)
# ds_out = xe.util.grid_global(1, 1)
ds_out = xe.util.grid_2d(config.min_lon,
config.max_lon, 1,
config.min_lat,
config.max_lat, 1)
re = CMIP6_regrid.CMIP6_regrid()
for key in model_obj.ds_sets[model_obj.current_member_id].keys():
current_ds = model_obj.ds_sets[model_obj.current_member_id][key] # .sel(
# y=slice(int(config.min_lat), int(config.max_lat)),
# x=slice(int(config.min_lon), int(config.max_lon)))
if all(item in current_ds.dims for item in ['time', 'y', 'x', 'vertex', 'bnds']):
# ds_trans = current_ds.chunk({'time': -1}).transpose('bnds', 'time', 'vertex', 'y', 'x')
ds_trans = current_ds.chunk({'time': -1}).transpose('time', 'y', 'x', 'bnds', 'vertex')
elif all(item in current_ds.dims for item in ['time', 'y', 'x', 'vertices', 'bnds']):
ds_trans = current_ds.chunk({'time': -1}).transpose('bnds', 'time', 'vertices', 'y', 'x')
elif all(item in current_ds.dims for item in ['time', 'y', 'x', 'bnds']):
ds_trans = current_ds.chunk({'time': -1}).transpose('bnds', 'time', 'y', 'x')
elif all(item in current_ds.dims for item in ['time', 'y', 'x', 'nvertices']):
ds_trans = current_ds.chunk({'time': -1}).transpose('time', 'y', 'x','nvertices')
else:
ds_trans = current_ds.chunk({'time': -1}).transpose('bnds', 'time', 'y', 'x')
if key in ["uas", "vas", "clt", "tas"]:
out_amon = re.regrid_variable(key,
ds_trans,
ds_out_amon,
interpolation_method=config.interp) #.to_dataset()
out = re.regrid_variable(key, out_amon, ds_out,
interpolation_method=config.interp)
else:
ds_out = self.create_grid(ds_trans, key, 1).sel(lat=slice(config.min_lat, config.max_lat),
lon=slice(config.min_lon, config.max_lon))
out = re.regrid_variable(key, ds_trans,
ds_out,
interpolation_method=config.interp)
if config.write_CMIP6_to_file:
out_dir = "{}/{}/{}".format(config.cmip6_outdir, current_experiment_id, model_obj.name)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
outfile = "{}/{}/{}/CMIP6_{}_{}_{}_{}.nc".format(config.cmip6_outdir,
current_experiment_id,
model_obj.name,
model_obj.name,
model_obj.current_member_id,
current_experiment_id,
key)
if os.path.exists(outfile): os.remove(outfile)
# Convert to dataset before writing to netcdf file. Writing to file downloads and concatenates all
# of the data and we therefore re-chunk to split the process into several using dask
# ds = ds_trans.to_dataset()
self.write_netcdf(out.chunk({'time': -1}), out_file=outfile)
self.upload_to_gcs(outfile)
logging.info(f"[CMIP6_light] wrote variable {key} to file")

    def create_grid(self, ds, var_name, step):
        """
        Create a grid for the CMIP6 data that is a subset of the GLORYS grid resolution and an
        approximation to the actual CMIP6 resolution. This makes the two grids compatible,
        which is a requirement from ISIMIP3BASD.

        We want the number of grid points to be a subset of the full resolution in the GLORYS
        grid, so we use the total count of values along each axis.
        """
        lats = ds.lat.values
        counts_lat = int(len(lats) / step)
        lats_new = np.linspace(
            int(round(np.ceil(np.min(lats)))),
            int(round(np.floor(np.max(lats)))),
            counts_lat,
            endpoint=True,
        )

        # Resolution from the GLORYS file
        lons = ds.lon.values
        counts_lon = int(len(lons) / step)

        # Spatial range from the CMIP6 file
        lons_new = np.linspace(
            int(round(np.ceil(np.min(lons)))),
            int(round(np.floor(np.max(lons)))),
            counts_lon,
            endpoint=True,
        )

        # Create a DataArray void of data
        return xr.DataArray(
            name=var_name,
            coords={
                "time": (["time"], ds.time.values),
                "lat": (["lat"], lats_new),
                "lon": (["lon"], lons_new),
            },
            dims=["time", "lat", "lon"],
        ).to_dataset()
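

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The zero-argument Config_albedo()
# constructor, the pre-populated catalogue behind config.df/config.fs, and the
# "ssp585" experiment id are assumptions for this example and are not defined
# by this module; adapt to the actual CMIP6_config setup.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    config = CMIP6_config.Config_albedo()  # assumed to provide df, fs, variable_ids, dates, etc.
    io = CMIP6_IO()
    experiment_id = "ssp585"  # example scenario id

    # Query the CMIP6 cloud catalogue and collect every requested variable
    io.organize_cmip6_datasets(config, experiment_id)
    io.print_table_of_models_and_members()

    # Regrid each collected variable and write/upload the NetCDF results
    for model in io.models:
        model.current_member_id = config.member_id  # assumed attribute used by the regridding step
        io.extract_dataset_and_save_to_netcdf(model, config, experiment_id)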