
Commit c5b0fe4

Drop python 3.9 (#652)
1 parent 14d069c commit c5b0fe4

8 files changed (+106 additions, -97 deletions)
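Most of the churn in this commit is mechanical: with 3.9 gone, the minimum supported Python is 3.10, so typing.Optional and typing.Union annotations can be rewritten with PEP 604 union syntax throughout. A minimal sketch of the before/after pattern (the function below is illustrative, not taken from the diff):

    import typing

    # Python 3.9-compatible spelling:
    def load_old(path: typing.Optional[str] = None) -> typing.Union[dict, list]: ...

    # Python 3.10+ spelling adopted by this commit:
    def load_new(path: str | None = None) -> dict | list: ...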

.github/workflows/ci.yaml

Lines changed: 18 additions & 12 deletions
@@ -24,19 +24,22 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.10", "3.11", "3.12"]
     steps:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.

-      - name: Create conda environment
-        uses: mamba-org/provision-with-micromamba@main
+      - name: set up conda environment
+        uses: mamba-org/setup-micromamba@v1
         with:
-          cache-downloads: true
-          micromamba-version: "latest"
           environment-file: ci/environment.yml
-          extra-specs: |
+          init-shell: >-
+            bash
+          cache-environment: true
+          cache-downloads: true
+          post-cleanup: "all"
+          create-args: |
             python=${{ matrix.python-version }}

       - name: Install intake-esm
@@ -70,14 +73,17 @@ jobs:
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.

-      - name: Create conda environment
-        uses: mamba-org/provision-with-micromamba@main
+      - name: set up conda environment
+        uses: mamba-org/setup-micromamba@v1
         with:
-          cache-downloads: true
-          micromamba-version: "latest"
           environment-file: ci/environment-upstream-dev.yml
-          extra-specs: |
-            python=3.11
+          init-shell: >-
+            bash
+          cache-environment: true
+          cache-downloads: true
+          post-cleanup: "all"
+          create-args: |
+            python=3.12

       - name: Install intake-esm
         run: |
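Note that both CI jobs migrate from mamba-org/provision-with-micromamba (since deprecated in favor of its successor) to mamba-org/setup-micromamba@v1. The new action renames the extra-specs input to create-args and adds init-shell, cache-environment, and post-cleanup options, which is why the with: blocks change shape; the upstream-dev job also bumps its pinned Python from 3.11 to 3.12.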

intake_esm/_search.py

Lines changed: 3 additions & 3 deletions
@@ -40,7 +40,7 @@ def search(
     for column, values in query.items():
         local_mask = np.zeros(len(df), dtype=bool)
         column_is_stringtype = isinstance(
-            df[column].dtype, (object, pd.core.arrays.string_.StringDtype)
+            df[column].dtype, object | pd.core.arrays.string_.StringDtype
         )
         column_has_iterables = column in columns_with_iterables
         for value in values:
@@ -62,8 +62,8 @@ def search_apply_require_all_on(
     *,
     df: pd.DataFrame,
     query: dict[str, typing.Any],
-    require_all_on: typing.Union[str, list[typing.Any]],
-    columns_with_iterables: set = None,
+    require_all_on: str | list[typing.Any],
+    columns_with_iterables: set | None = None,
 ) -> pd.DataFrame:
     _query = query.copy()
     # Make sure to remove columns that were already
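The _search.py change leans on Python 3.10 behavior: since 3.10, isinstance accepts a PEP 604 union (a types.UnionType) in place of a tuple of types, which is what the column_is_stringtype rewrite relies on. A quick illustrative check (the series here is made up):

    import pandas as pd

    dtype = pd.Series(['a', 'b']).dtype
    # tuple form, works on any supported Python:
    isinstance(dtype, (object, pd.core.arrays.string_.StringDtype))
    # union form, Python 3.10+ only:
    isinstance(dtype, object | pd.core.arrays.string_.StringDtype)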

intake_esm/cat.py

Lines changed: 36 additions & 34 deletions
@@ -41,7 +41,7 @@ class AggregationType(str, enum.Enum):
     join_existing = 'join_existing'
     union = 'union'

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)


 class DataFormat(str, enum.Enum):
@@ -50,22 +50,22 @@ class DataFormat(str, enum.Enum):
     reference = 'reference'
     opendap = 'opendap'

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)


 class Attribute(pydantic.BaseModel):
     column_name: pydantic.StrictStr
     vocabulary: pydantic.StrictStr = ''

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)


 class Assets(pydantic.BaseModel):
     column_name: pydantic.StrictStr
-    format: typing.Optional[DataFormat] = None
-    format_column_name: typing.Optional[pydantic.StrictStr] = None
+    format: DataFormat | None = None
+    format_column_name: pydantic.StrictStr | None = None

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)

     @pydantic.model_validator(mode='after')
     def _validate_data_format(cls, model):
@@ -82,7 +82,7 @@ class Aggregation(pydantic.BaseModel):
     attribute_name: pydantic.StrictStr
     options: dict = {}

-    model_config = ConfigDict(validate_default=True, validate_assignment=True)
+    model_config = ConfigDict(validate_assignment=True)


 class AggregationControl(pydantic.BaseModel):
@@ -101,18 +101,16 @@ class ESMCatalogModel(pydantic.BaseModel):
     esmcat_version: pydantic.StrictStr
     attributes: list[Attribute]
     assets: Assets
-    aggregation_control: typing.Optional[AggregationControl] = None
+    aggregation_control: AggregationControl | None = None
     id: str = ''
-    catalog_dict: typing.Optional[list[dict]] = None
-    catalog_file: typing.Optional[pydantic.StrictStr] = None
-    description: typing.Optional[pydantic.StrictStr] = None
-    title: typing.Optional[pydantic.StrictStr] = None
-    last_updated: typing.Optional[typing.Union[datetime.datetime, datetime.date]] = None
+    catalog_dict: list[dict] | None = None
+    catalog_file: pydantic.StrictStr | None = None
+    description: pydantic.StrictStr | None = None
+    title: pydantic.StrictStr | None = None
+    last_updated: datetime.datetime | datetime.date | None = None
     _df: pd.DataFrame = pydantic.PrivateAttr()

-    model_config = ConfigDict(
-        arbitrary_types_allowed=True, validate_default=True, validate_assignment=True
-    )
+    model_config = ConfigDict(arbitrary_types_allowed=True, validate_assignment=True)

     @pydantic.model_validator(mode='after')
     def validate_catalog(cls, model):
@@ -136,11 +134,11 @@ def save(
         self,
         name: str,
         *,
-        directory: str = None,
+        directory: str | None = None,
         catalog_type: str = 'dict',
-        to_csv_kwargs: dict = None,
-        json_dump_kwargs: dict = None,
-        storage_options: dict[str, typing.Any] = None,
+        to_csv_kwargs: dict | None = None,
+        json_dump_kwargs: dict | None = None,
+        storage_options: dict[str, typing.Any] | None = None,
     ) -> None:
         """
         Save the catalog to a file.
@@ -193,7 +191,7 @@ def save(

         if catalog_type == 'file':
             csv_kwargs = {'index': False}
-            csv_kwargs.update(to_csv_kwargs or {})
+            csv_kwargs |= to_csv_kwargs or {}
             compression = csv_kwargs.get('compression')
             extensions = {'gzip': '.gz', 'bz2': '.bz2', 'zip': '.zip', 'xz': '.xz', None: ''}
             csv_file_name = f'{csv_file_name}{extensions[compression]}'
@@ -206,15 +204,15 @@ def save(

         with fs.open(json_file_name, 'w') as outfile:
             json_kwargs = {'indent': 2}
-            json_kwargs.update(json_dump_kwargs or {})
+            json_kwargs |= json_dump_kwargs or {}
             json.dump(data, outfile, **json_kwargs)

         print(f'Successfully wrote ESM catalog json file to: {json_file_name}')

     @classmethod
     def load(
         cls,
-        json_file: typing.Union[str, pydantic.FilePath, pydantic.AnyUrl],
+        json_file: str | pydantic.FilePath | pydantic.AnyUrl,
         storage_options: dict[str, typing.Any] = None,
         read_csv_kwargs: dict[str, typing.Any] = None,
     ) -> 'ESMCatalogModel':
@@ -287,16 +285,20 @@ def _cast_agg_columns_with_iterables(self) -> None:
         to avoid hashing issues (e.g. TypeError: unhashable type: 'list')
         """
         if self.aggregation_control:
-            columns = list(
+            if columns := list(
                 self.columns_with_iterables.intersection(
-                    set(map(lambda agg: agg.attribute_name, self.aggregation_control.aggregations))
+                    set(
+                        map(
+                            lambda agg: agg.attribute_name,
+                            self.aggregation_control.aggregations,
+                        )
+                    )
                 )
-            )
-            if columns:
+            ):
                 self._df[columns] = self._df[columns].apply(tuple)

     @property
-    def grouped(self) -> typing.Union[pd.core.groupby.DataFrameGroupBy, pd.DataFrame]:
+    def grouped(self) -> pd.core.groupby.DataFrameGroupBy | pd.DataFrame:
         if self.aggregation_control:
             if self.aggregation_control.groupby_attrs:
                 self.aggregation_control.groupby_attrs = list(
@@ -318,7 +320,7 @@ def grouped(self) -> typing.Union[pd.core.groupby.DataFrameGroupBy, pd.DataFrame
             )
         return self.df.groupby(cols)

-    def _construct_group_keys(self, sep: str = '.') -> dict[str, typing.Union[str, tuple[str]]]:
+    def _construct_group_keys(self, sep: str = '.') -> dict[str, str | tuple[str]]:
         internal_keys = self.grouped.groups.keys()
         public_keys = map(
             lambda key: key if isinstance(key, str) else sep.join(str(value) for value in key),
@@ -352,7 +354,7 @@ def search(
         self,
         *,
         query: typing.Union['QueryModel', dict[str, typing.Any]],
-        require_all_on: typing.Union[str, list[str]] = None,
+        require_all_on: str | list[str] | None = None,
     ) -> 'ESMCatalogModel':
         """
         Search for entries in the catalog.
@@ -398,13 +400,13 @@
 class QueryModel(pydantic.BaseModel):
     """A Pydantic model to represent a query to be executed against a catalog."""

-    query: dict[pydantic.StrictStr, typing.Union[typing.Any, list[typing.Any]]]
+    query: dict[pydantic.StrictStr, typing.Any | list[typing.Any]]
     columns: list[str]
-    require_all_on: typing.Optional[typing.Union[str, list[typing.Any]]] = None
+    require_all_on: str | list[typing.Any] | None = None

     # TODO: Seem to be unable to modify fields in model_validator with
     # validate_assignment=True since it leads to recursion
-    model_config = ConfigDict(validate_default=True, validate_assignment=False)
+    model_config = ConfigDict(validate_assignment=False)

     @pydantic.model_validator(mode='after')
     def validate_query(cls, model):
@@ -424,7 +426,7 @@ def validate_query(cls, model):
                 raise ValueError(f'Column {key} not in columns {columns}')
         _query = query.copy()
         for key, value in _query.items():
-            if isinstance(value, (str, int, float, bool)) or value is None or value is pd.NA:
+            if isinstance(value, str | int | float | bool) or value is None or value is pd.NA:
                 _query[key] = [value]

         model.query = _query
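Two smaller modernizations ride along in cat.py: dict.update(...) calls become the in-place merge operator |= (PEP 584, available since Python 3.9), and the _cast_agg_columns_with_iterables refactor folds the columns assignment into its if test with the walrus operator (PEP 572). A minimal sketch of both patterns, with illustrative values:

    json_kwargs = {'indent': 2}
    json_kwargs |= {'indent': 4, 'sort_keys': True}  # right-hand side wins on key conflicts
    assert json_kwargs == {'indent': 4, 'sort_keys': True}

    values = [0, 1, 2]
    if selected := [v for v in values if v > 0]:  # bind and test in one expression
        print(selected)  # [1, 2]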

intake_esm/core.py

Lines changed: 24 additions & 24 deletions
@@ -76,11 +76,11 @@ class esm_datastore(Catalog):

     def __init__(
         self,
-        obj: typing.Union[pydantic.FilePath, pydantic.AnyUrl, dict[str, typing.Any]],
+        obj: pydantic.FilePath | pydantic.AnyUrl | dict[str, typing.Any],
         *,
         progressbar: bool = True,
         sep: str = '.',
-        registry: typing.Optional[DerivedVariableRegistry] = None,
+        registry: DerivedVariableRegistry | None = None,
         read_csv_kwargs: dict[str, typing.Any] = None,
         columns_with_iterables: list[str] = None,
         storage_options: dict[str, typing.Any] = None,
@@ -209,7 +209,7 @@ def _get_entries(self) -> dict[str, ESMDataSource]:
             _ = self[key]
         return self._entries

-    @pydantic.validate_arguments
+    @pydantic.validate_call
     def __getitem__(self, key: str) -> ESMDataSource:
         """
         This method takes a key argument and return a data source
@@ -328,10 +328,10 @@ def __dir__(self) -> list[str]:
     def _ipython_key_completions_(self):
         return self.__dir__()

-    @pydantic.validate_arguments
+    @pydantic.validate_call
     def search(
         self,
-        require_all_on: typing.Optional[typing.Union[str, list[str]]] = None,
+        require_all_on: str | list[str] | None = None,
         **query: typing.Any,
     ):
         """Search for entries in the catalog.
@@ -443,15 +443,15 @@ def search(
         cat.derivedcat = self.derivedcat
         return cat

-    @pydantic.validate_arguments
+    @pydantic.validate_call
     def serialize(
         self,
         name: pydantic.StrictStr,
-        directory: typing.Optional[typing.Union[pydantic.DirectoryPath, pydantic.StrictStr]] = None,
+        directory: pydantic.DirectoryPath | pydantic.StrictStr | None = None,
         catalog_type: str = 'dict',
-        to_csv_kwargs: typing.Optional[dict[typing.Any, typing.Any]] = None,
-        json_dump_kwargs: typing.Optional[dict[typing.Any, typing.Any]] = None,
-        storage_options: typing.Optional[dict[str, typing.Any]] = None,
+        to_csv_kwargs: dict[typing.Any, typing.Any] | None = None,
+        json_dump_kwargs: dict[typing.Any, typing.Any] | None = None,
+        storage_options: dict[str, typing.Any] | None = None,
     ) -> None:
         """Serialize catalog to corresponding json and csv files.

@@ -537,15 +537,15 @@ def unique(self) -> pd.Series:
         )
         return unique

-    @pydantic.validate_arguments
+    @pydantic.validate_call
     def to_dataset_dict(
         self,
-        xarray_open_kwargs: typing.Optional[dict[str, typing.Any]] = None,
-        xarray_combine_by_coords_kwargs: typing.Optional[dict[str, typing.Any]] = None,
-        preprocess: typing.Optional[typing.Callable] = None,
-        storage_options: typing.Optional[dict[pydantic.StrictStr, typing.Any]] = None,
-        progressbar: typing.Optional[pydantic.StrictBool] = None,
-        aggregate: typing.Optional[pydantic.StrictBool] = None,
+        xarray_open_kwargs: dict[str, typing.Any] | None = None,
+        xarray_combine_by_coords_kwargs: dict[str, typing.Any] | None = None,
+        preprocess: typing.Callable | None = None,
+        storage_options: dict[pydantic.StrictStr, typing.Any] | None = None,
+        progressbar: pydantic.StrictBool | None = None,
+        aggregate: pydantic.StrictBool | None = None,
         skip_on_error: pydantic.StrictBool = False,
         **kwargs,
     ) -> dict[str, xr.Dataset]:
@@ -687,15 +687,15 @@ def to_dataset_dict(
         self.datasets = self._create_derived_variables(datasets, skip_on_error)
         return self.datasets

-    @pydantic.validate_arguments
+    @pydantic.validate_call
     def to_datatree(
         self,
-        xarray_open_kwargs: typing.Optional[dict[str, typing.Any]] = None,
-        xarray_combine_by_coords_kwargs: typing.Optional[dict[str, typing.Any]] = None,
-        preprocess: typing.Optional[typing.Callable] = None,
-        storage_options: typing.Optional[dict[pydantic.StrictStr, typing.Any]] = None,
-        progressbar: typing.Optional[pydantic.StrictBool] = None,
-        aggregate: typing.Optional[pydantic.StrictBool] = None,
+        xarray_open_kwargs: dict[str, typing.Any] | None = None,
+        xarray_combine_by_coords_kwargs: dict[str, typing.Any] | None = None,
+        preprocess: typing.Callable | None = None,
+        storage_options: dict[pydantic.StrictStr, typing.Any] | None = None,
+        progressbar: pydantic.StrictBool | None = None,
+        aggregate: pydantic.StrictBool | None = None,
         skip_on_error: pydantic.StrictBool = False,
         levels: list[str] = None,
         **kwargs,
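The decorator swap in core.py tracks the Pydantic v2 API: validate_arguments is the v1 name, renamed to validate_call in v2. Both validate (and, in lax mode, coerce) call arguments against the annotations. A small sketch of the behavior (the function below is illustrative, not from the codebase):

    import pydantic

    @pydantic.validate_call
    def repeat(word: str, times: int = 1) -> str:
        # arguments are validated against the annotations at call time
        return ' '.join([word] * times)

    repeat('hi', times='3')  # '3' is coerced to int 3 -> 'hi hi hi'
    repeat('hi', times='x')  # raises pydantic.ValidationError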
