Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions yt/frontends/ramses/data_structures.py
Original file line number Diff line number Diff line change
Expand Up @@ -429,6 +429,9 @@ def _fill_no_ghostzones(self, fd, fields, selector, file_handler):
# Initializing data container
for field in fields:
data[field] = np.zeros(cell_count, "float64")

# Determine the data precision: single if any field is an RT ("Photon") field, double otherwise
is_single = True if any(field.startswith("Photon") for field in fields) else False

# Do an early exit if the cell count is null
if cell_count == 0:
Expand All @@ -452,6 +455,7 @@ def _fill_no_ghostzones(self, fd, fields, selector, file_handler):
fields,
data,
oct_handler,
is_single,
)
return data

Expand All @@ -474,6 +478,9 @@ def _fill_with_ghostzones(
# Initializing data container
for field in fields:
tr[field] = np.zeros(cell_count, "float64")

# Determine the data precision: single if any field is an RT ("Photon") field, double otherwise
is_single = True if any(field.startswith("Photon") for field in fields) else False

# Do an early exit if the cell count is null
if cell_count == 0:
Expand Down Expand Up @@ -507,6 +514,7 @@ def _fill_with_ghostzones(
fields,
tr,
oct_handler,
is_single,
domain_inds=domain_inds,
)
return tr
Expand Down
33 changes: 27 additions & 6 deletions yt/frontends/ramses/field_handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,11 @@ def offset(self):
*structure* of your fluid file is non-canonical, change this.
"""
nvars = len(self._detected_field_list[self.ds.unique_identifier])

first_field = self.field_list[0][1]
# Determine the data precision: single if the first field is an RT ("Photon") field, double otherwise
is_single = True if first_field.startswith("Photon") else False

with FortranFile(self.fname) as fd:
# Skip headers
nskip = len(self.attrs)
Expand All @@ -269,13 +274,15 @@ def offset(self):
#
# So there are 8 * nvars records each with length (nocts, )
# at each (level, cpus)

offset, level_count = read_offset(
fd,
min_level,
self.domain.domain_id,
self.parameters[self.ds.unique_identifier]["nvar"],
self.domain.amr_header,
Nskip=nvars * 8,
single=is_single,
)

self._level_count = level_count
Expand Down Expand Up @@ -542,10 +549,13 @@ def detect_fields(cls, ds):

rheader = {}

def read_rhs(cast):
def read_rhs(cast, group=None):
line = f.readline()
p, v = line.split("=")
rheader[p.strip()] = cast(v)
if group is not None:
rheader[f"Group {group} {p.strip()}"] = cast(v)
else:
rheader[p.strip()] = cast(v)

with open(fname) as f:
# Read nRTvar, nions, ngroups, iions
Expand All @@ -567,7 +577,7 @@ def read_rhs(cast):
read_rhs(float)
f.readline()

# Reat unit_np, unit_pfd
# Read unit_np, unit_pf
for _ in range(2):
read_rhs(float)

Expand All @@ -580,9 +590,20 @@ def read_rhs(cast):
# Read n star, t2star, g_star
for _ in range(3):
read_rhs(float)

# Touchy part, we have to read the photon group properties
mylog.debug("Not reading photon group properties")
f.readline()
f.readline()

# Get global group properties (groupL0, groupL1, spec2group)
for _ in range(3):
read_rhs(lambda line: [float(e) for e in line.split()])

# Get the mean photon energy ("egy") for each group (used to compute proper energy densities)
# NOTE: this assumes there are 8 energy groups
for _ in range(8):
group = int(f.readline().split()[1])
read_rhs(lambda line: [float(e) for e in line.split()], group)
f.readline()
f.readline()

cls.rt_parameters[ds.unique_identifier] = rheader

Expand Down
39 changes: 28 additions & 11 deletions yt/frontends/ramses/io_utils.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,13 @@ ctypedef np.float64_t DOUBLE_t
cdef int INT32_SIZE = sizeof(np.int32_t)
cdef int INT64_SIZE = sizeof(np.int64_t)
cdef int DOUBLE_SIZE = sizeof(np.float64_t)
cdef int SINGLE_SIZE = sizeof(np.float32_t)


cdef inline int skip_len(int Nskip, int record_len) noexcept nogil:
cdef inline int skip_len(int Nskip, int record_len, np.npy_bool single) noexcept nogil:
# Single-precision data uses 4-byte elements instead of 8-byte doubles, so each record is shorter
if single:
return Nskip * (record_len * SINGLE_SIZE + INT64_SIZE)
return Nskip * (record_len * DOUBLE_SIZE + INT64_SIZE)


Expand Down Expand Up @@ -111,7 +115,7 @@ def read_amr(FortranFile f, dict headers,
@cython.wraparound(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t nvar, dict headers, int Nskip):
cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t nvar, dict headers, int Nskip, np.npy_bool single):

cdef np.ndarray[np.int64_t, ndim=2] offset, level_count
cdef INT64_t ndim, twotondim, nlevelmax, n_levels, nboundary, ncpu, ncpu_and_bound
Expand Down Expand Up @@ -153,7 +157,7 @@ cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t n
if ilevel >= min_level:
offset_view[icpu, ilevel - min_level] = f.tell()
level_count_view[icpu, ilevel - min_level] = <INT64_t> file_ncache
f.seek(skip_len(Nskip, file_ncache), SEEK_CUR)
f.seek(skip_len(Nskip, file_ncache, single), SEEK_CUR)

return offset, level_count

Expand All @@ -172,7 +176,9 @@ def fill_hydro(FortranFile f,
INT64_t ndim, list all_fields, list fields,
dict tr,
RAMSESOctreeContainer oct_handler,
np.ndarray[np.int32_t, ndim=1] domain_inds=np.array([], dtype='int32')):
np.npy_bool single,
np.ndarray[np.int32_t, ndim=1] domain_inds=np.array([], dtype='int32'),
):
cdef INT64_t offset
cdef dict tmp
cdef str field
Expand All @@ -197,7 +203,8 @@ def fill_hydro(FortranFile f,

# The ordering is very important here, as we'll write directly into the memory
# address the content of the files.
cdef np.float64_t[::1, :, :] buffer
cdef np.float32_t[::1, :, :] buffer_single
cdef np.float64_t[::1, :, :] buffer_double

jump_len = 0
j = 0
Expand All @@ -211,7 +218,11 @@ def fill_hydro(FortranFile f,
jumps[j] = jump_len
cdef int first_field_index = jumps[0]

buffer = np.empty((level_count.max(), twotondim, nfields_selected), dtype="float64", order='F')
# The buffer is different depending on the size of the data
if single:
buffer_single = np.empty((level_count.max(), twotondim, nfields_selected), dtype="float32", order='F')
else:
buffer_double = np.empty((level_count.max(), twotondim, nfields_selected), dtype="float64", order='F')

# Precompute which levels we need to read
Ncells = len(level_inds)
Expand All @@ -231,7 +242,7 @@ def fill_hydro(FortranFile f,
offset = offsets[icpu, ilevel]
if offset == -1:
continue
f.seek(offset + skip_len(first_field_index, nc), SEEK_SET)
f.seek(offset + skip_len(first_field_index, nc, single), SEEK_SET)

# We have already skipped the first fields (if any)
# so we "rewind" (this will cancel the first seek)
Expand All @@ -241,22 +252,28 @@ def fill_hydro(FortranFile f,
for j in range(nfields_selected):
jump_len += jumps[j]
if jump_len > 0:
f.seek(skip_len(jump_len, nc), SEEK_CUR)
f.seek(skip_len(jump_len, nc, single), SEEK_CUR)
jump_len = 0
f.read_vector_inplace('d', <void*> &buffer[0, i, j])
if single:
f.read_vector_inplace('f', <void*> &buffer_single[0, i, j])
else:
f.read_vector_inplace('d', <void*> &buffer_double[0, i, j])

jump_len += jumps[nfields_selected]

# In principle, we may be left with some fields to skip
# but since we're doing an absolute seek at the beginning of
# the loop on CPUs, we can spare one seek here
## if jump_len > 0:
## f.seek(skip_len(jump_len, nc), SEEK_CUR)
## f.seek(skip_len(jump_len, nc, single), SEEK_CUR)

# Alias buffer into dictionary
tmp = {}
for i, field in enumerate(fields):
tmp[field] = buffer[:, :, i]
if single:
tmp[field] = np.asarray(buffer_single[:, :, i], dtype="float64")
else:
tmp[field] = buffer_double[:, :, i]

if ncpu_selected > 1:
oct_handler.fill_level_with_domain(
Expand Down
Loading