
Commit 812584d

Make zips strict unless doing so causes errors
In those cases, set them to an explicit strict=False
1 parent 13d64b3 commit 812584d
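
For context: zip(..., strict=True), available since Python 3.10, raises ValueError when its iterables differ in length, whereas plain zip() silently truncates to the shortest. A minimal sketch of the difference (hypothetical values, not part of this commit):

# Hypothetical illustration of the strict flag (Python >= 3.10).
dims = ("x", "y")
shape = (10, 20, 30)  # one element too many

dict(zip(dims, shape))               # truncates silently: {"x": 10, "y": 20}
dict(zip(dims, shape, strict=True))  # raises ValueError on the length mismatch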

55 files changed: +317 / −209 lines

asv_bench/benchmarks/groupby.py

Lines changed: 1 addition & 1 deletion

@@ -174,7 +174,7 @@ def setup(self, use_cftime, use_flox):
         # GH9426 - deep-copying CFTime object arrays is weirdly slow
         asda = xr.DataArray(time)
         labeled_time = []
-        for year, month in zip(asda.dt.year, asda.dt.month):
+        for year, month in zip(asda.dt.year, asda.dt.month, strict=True):
             labeled_time.append(cftime.datetime(year, month, 1))

         self.da = xr.DataArray(

properties/test_pandas_roundtrip.py

Lines changed: 1 addition & 1 deletion

@@ -80,7 +80,7 @@ def test_roundtrip_dataarray(data, arr) -> None:
             tuple
         )
     )
-    coords = {name: np.arange(n) for (name, n) in zip(names, arr.shape)}
+    coords = {name: np.arange(n) for (name, n) in zip(names, arr.shape, strict=True)}
     original = xr.DataArray(arr, dims=names, coords=coords)
     roundtripped = xr.DataArray(original.to_pandas())
     xr.testing.assert_identical(original, roundtripped)

xarray/backends/api.py

Lines changed: 7 additions & 3 deletions

@@ -1585,8 +1585,9 @@ def save_mfdataset(
                 multifile=True,
                 **kwargs,
             )
-            for ds, path, group in zip(datasets, paths, groups)
-        ]
+            for ds, path, group in zip(datasets, paths, groups, strict=True)
+        ],
+        strict=True,
     )

     try:
@@ -1600,7 +1601,10 @@ def save_mfdataset(
     import dask

     return dask.delayed(
-        [dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)]
+        [
+            dask.delayed(_finalize_store)(w, s)
+            for w, s in zip(writes, stores, strict=True)
+        ]
     )

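Note on the save_mfdataset hunks above: the outer zip(*[...]) appears to transpose the results of the to_netcdf calls into the writes and stores used by the second hunk, so the added strict=True also guards that transpose. A small sketch of the pattern (made-up values, not from the commit):

# Hypothetical illustration of unzipping with zip(*rows, strict=True).
rows = [("w1", "s1"), ("w2", "s2")]
writers, stores = zip(*rows, strict=True)  # ("w1", "w2"), ("s1", "s2")
# A ragged row such as ("w3",) would raise ValueError instead of being truncated.
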
xarray/backends/common.py

Lines changed: 1 addition & 1 deletion

@@ -431,7 +431,7 @@ def set_dimensions(self, variables, unlimited_dims=None):
         for v in unlimited_dims:  # put unlimited_dims first
             dims[v] = None
         for v in variables.values():
-            dims.update(dict(zip(v.dims, v.shape)))
+            dims.update(dict(zip(v.dims, v.shape, strict=True)))

         for dim, length in dims.items():
             if dim in existing_dims and length != existing_dims[dim]:

xarray/backends/h5netcdf_.py

Lines changed: 3 additions & 1 deletion

@@ -208,7 +208,9 @@ def open_store_variable(self, name, var):
             "shuffle": var.shuffle,
         }
         if var.chunks:
-            encoding["preferred_chunks"] = dict(zip(var.dimensions, var.chunks))
+            encoding["preferred_chunks"] = dict(
+                zip(var.dimensions, var.chunks, strict=True)
+            )
         # Convert h5py-style compression options to NetCDF4-Python
         # style, if possible
         if var.compression == "gzip":

xarray/backends/netCDF4_.py

Lines changed: 4 additions & 2 deletions

@@ -278,7 +278,7 @@ def _extract_nc4_variable_encoding(
         chunksizes = encoding["chunksizes"]
         chunks_too_big = any(
             c > d and dim not in unlimited_dims
-            for c, d, dim in zip(chunksizes, variable.shape, variable.dims)
+            for c, d, dim in zip(chunksizes, variable.shape, variable.dims, strict=True)
         )
         has_original_shape = "original_shape" in encoding
         changed_shape = (
@@ -446,7 +446,9 @@ def open_store_variable(self, name: str, var):
         else:
             encoding["contiguous"] = False
             encoding["chunksizes"] = tuple(chunking)
-            encoding["preferred_chunks"] = dict(zip(var.dimensions, chunking))
+            encoding["preferred_chunks"] = dict(
+                zip(var.dimensions, chunking, strict=True)
+            )
         # TODO: figure out how to round-trip "endian-ness" without raising
         # warnings from netCDF4
         # encoding['endian'] = var.endian()

xarray/backends/zarr.py

Lines changed: 3 additions & 3 deletions

@@ -186,7 +186,7 @@ def _determine_zarr_chunks(enc_chunks, var_chunks, ndim, name, safe_chunks):
     # TODO: incorporate synchronizer to allow writes from multiple dask
     # threads
     if var_chunks and enc_chunks_tuple:
-        for zchunk, dchunks in zip(enc_chunks_tuple, var_chunks):
+        for zchunk, dchunks in zip(enc_chunks_tuple, var_chunks, strict=True):
             for dchunk in dchunks[:-1]:
                 if dchunk % zchunk:
                     base_error = (
@@ -548,7 +548,7 @@ def open_store_variable(self, name, zarr_array=None):

         encoding = {
             "chunks": zarr_array.chunks,
-            "preferred_chunks": dict(zip(dimensions, zarr_array.chunks)),
+            "preferred_chunks": dict(zip(dimensions, zarr_array.chunks, strict=True)),
             "compressor": zarr_array.compressor,
             "filters": zarr_array.filters,
         }
@@ -576,7 +576,7 @@ def get_dimensions(self):
         dimensions = {}
         for k, v in self.zarr_group.arrays():
             dim_names, _ = _get_zarr_dims_and_attrs(v, DIMENSION_KEY, try_nczarr)
-            for d, s in zip(dim_names, v.shape):
+            for d, s in zip(dim_names, v.shape, strict=True):
                 if d in dimensions and dimensions[d] != s:
                     raise ValueError(
                         f"found conflicting lengths for dimension {d} "

xarray/coding/calendar_ops.py

Lines changed: 1 addition & 1 deletion

@@ -207,7 +207,7 @@ def convert_calendar(
                 _convert_to_new_calendar_with_new_day_of_year(
                     date, newdoy, calendar, use_cftime
                 )
-                for date, newdoy in zip(time.variable._data.array, new_doy)
+                for date, newdoy in zip(time.variable._data.array, new_doy, strict=True)
             ],
             dims=(dim,),
             name=dim,

xarray/core/alignment.py

Lines changed: 4 additions & 3 deletions

@@ -405,14 +405,15 @@ def align_indexes(self) -> None:
                     zip(
                         [joined_index] + matching_indexes,
                         [joined_index_vars] + matching_index_vars,
+                        strict=True,
                     )
                 )
                 need_reindex = self._need_reindex(dims, cmp_indexes)
             else:
                 if len(matching_indexes) > 1:
                     need_reindex = self._need_reindex(
                         dims,
-                        list(zip(matching_indexes, matching_index_vars)),
+                        list(zip(matching_indexes, matching_index_vars, strict=True)),
                     )
                 else:
                     need_reindex = False
@@ -557,7 +558,7 @@ def reindex_all(self) -> None:
         self.results = tuple(
             self._reindex_one(obj, matching_indexes)
             for obj, matching_indexes in zip(
-                self.objects, self.objects_matching_indexes
+                self.objects, self.objects_matching_indexes, strict=True
             )
         )

@@ -952,7 +953,7 @@ def is_alignable(obj):
             fill_value=fill_value,
         )

-    for position, key, aligned_obj in zip(positions, keys, aligned):
+    for position, key, aligned_obj in zip(positions, keys, aligned, strict=True):
         if key is no_key:
             out[position] = aligned_obj
         else:

xarray/core/combine.py

Lines changed: 4 additions & 3 deletions

@@ -139,7 +139,8 @@ def _infer_concat_order_from_coords(datasets):
             # Append positions along extra dimension to structure which
             # encodes the multi-dimensional concatenation order
             tile_ids = [
-                tile_id + (position,) for tile_id, position in zip(tile_ids, order)
+                tile_id + (position,)
+                for tile_id, position in zip(tile_ids, order, strict=True)
             ]

     if len(datasets) > 1 and not concat_dims:
@@ -148,7 +149,7 @@ def _infer_concat_order_from_coords(datasets):
             "order the datasets for concatenation"
         )

-    combined_ids = dict(zip(tile_ids, datasets))
+    combined_ids = dict(zip(tile_ids, datasets, strict=True))

     return combined_ids, concat_dims

@@ -349,7 +350,7 @@ def _nested_combine(
         combined_ids = _infer_concat_order_from_positions(datasets)
     else:
         # Already sorted so just use the ids already passed
-        combined_ids = dict(zip(ids, datasets))
+        combined_ids = dict(zip(ids, datasets, strict=True))

     # Check that the inferred shape is combinable
     _check_shape_tile_ids(combined_ids)
