Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 53 additions & 1 deletion mikeio/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@
"quantile",
"scale",
"sum",
"change_datatype",
]


Expand Down Expand Up @@ -94,14 +95,19 @@ def _clone(
start_time: datetime | None = None,
timestep: float | None = None,
items: Sequence[int | DfsDynamicItemInfo] | None = None,
datatype: int | None = None,
) -> DfsFile:
source = DfsFileFactory.DfsGenericOpen(str(infilename))
fi = source.FileInfo

builder = DfsBuilder.Create(fi.FileTitle, "mikeio", __dfs_version__)

# Set up the header
builder.SetDataType(fi.DataType)
if datatype is None:
builder.SetDataType(fi.DataType)
else:
builder.SetDataType(datatype)

builder.SetGeographicalProjection(fi.Projection)

# Copy time axis
Expand Down Expand Up @@ -959,3 +965,49 @@ def _get_repeated_items(
new_items.append(item)

return new_items


def change_datatype(
    infilename: str | pathlib.Path,
    outfilename: str | pathlib.Path,
    datatype: int,
) -> None:
    """Change datatype of a DFS file.

    The data type tag is used to classify the file within a specific modeling context,
    such as MIKE 21. There is no global standard for these tags—they are interpreted
    locally within a model setup.

    Application developers can use these tags to classify files such as
    bathymetries, input data, or result files according to their own conventions.

    Default data type values assigned by MikeIO when creating new files are:
    - dfs0: datatype=1
    - dfs1-3: datatype=0
    - dfsu: datatype=2001

    Parameters
    ----------
    infilename : str | pathlib.Path
        input filename
    outfilename : str | pathlib.Path
        output filename
    datatype: int
        DataType to be used for the output file

    Examples
    --------
    >>> change_datatype("in.dfsu", "out.dfsu", datatype=107)

    """
    # _clone copies header, items and time axis, overriding only the DataType tag
    dfs_out = _clone(infilename, outfilename, datatype=datatype)
    # str() so pathlib.Path inputs work — consistent with _clone's own handling
    dfs_in = DfsFileFactory.DfsGenericOpen(str(infilename))

    try:
        # Copy all dynamic item data unchanged, time step by time step
        sourceData = dfs_in.ReadItemTimeStepNext()
        while sourceData:
            dfs_out.WriteItemTimeStepNext(sourceData.Time, sourceData.Data)
            sourceData = dfs_in.ReadItemTimeStepNext()
    finally:
        # Close both handles even if the copy loop fails, so no file is left locked
        dfs_out.Close()
        dfs_in.Close()
82 changes: 61 additions & 21 deletions notebooks/Generic.ipynb

Large diffs are not rendered by default.

36 changes: 35 additions & 1 deletion tests/test_generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,18 @@
import pandas as pd
import mikeio
from mikeio import generic
from mikeio.generic import scale, diff, sum, extract, avg_time, fill_corrupt, add
from mikeio.generic import (
scale,
diff,
sum,
extract,
avg_time,
fill_corrupt,
add,
change_datatype,
)
import pytest
from mikecore.DfsFileFactory import DfsFileFactory


def test_add_constant(tmp_path: Path) -> None:
Expand Down Expand Up @@ -638,3 +648,27 @@ def test_fill_corrupt_data(tmp_path: Path) -> None:
orig = mikeio.read(infile)
extracted = mikeio.read(fp)
assert extracted.n_timesteps == orig.n_timesteps


def test_change_datatype_dfs0(tmp_path: Path) -> None:
    """change_datatype must rewrite the DataType tag while preserving time axis and data."""
    source = "tests/testdata/random.dfs0"
    target = str(tmp_path / "random_datatype107.dfs0")
    requested_type = 107

    change_datatype(source, target, datatype=requested_type)

    # Inspect both files via the generic DFS reader
    out_file = DfsFileFactory.DfsGenericOpen(target)
    in_file = DfsFileFactory.DfsGenericOpen(source)

    steps_in = in_file.FileInfo.TimeAxis.NumberOfTimeSteps
    steps_out = out_file.FileInfo.TimeAxis.NumberOfTimeSteps
    written_type = out_file.FileInfo.DataType

    out_file.Close()
    in_file.Close()

    assert written_type == requested_type
    assert steps_in == steps_out
    # Dynamic item values must be carried over unmodified
    original = mikeio.read(source).to_numpy()
    rewritten = mikeio.read(target).to_numpy()
    assert np.allclose(original, rewritten, rtol=1e-08, atol=1e-10, equal_nan=True)