Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

TXTExport refactoring #883

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion festim/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@

from .exports.derived_quantities.derived_quantities import DerivedQuantities

from .exports.txt_export import TXTExport, TXTExports
from .exports.txt_export import TXTExport


from .settings import Settings
Expand Down
3 changes: 1 addition & 2 deletions festim/exports/exports.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,8 +129,7 @@ def write(self, label_to_function, dx):
label_to_function[export.field], self.V_DG1
)
export.function = label_to_function[export.field]
steady = self.final_time == None
export.write(self.t, steady)
export.write(self.t, self.final_time)
self.nb_iterations += 1

def initialise_derived_quantities(self, dx, ds, materials):
Expand Down
174 changes: 104 additions & 70 deletions festim/exports/txt_export.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,22 +13,38 @@
field (str): the exported field ("solute", "1", "retention",
"T"...)
filename (str): the filename (must end with .txt).
write_at_last (bool): if True, the data will be exported at
the last export time. Otherwise, the data will be exported
at each export time. Defaults to False.
times (list, optional): if provided, the field will be
exported at these timesteps. Otherwise exports at all
timesteps. Defaults to None.
header_format (str, optional): the format of column headers.
Defaults to ".2e".

Attributes:
data (np.array): the data array of the exported field. The first column
is the mesh vertices. Each next column is the field profile at the specific
export time.
header (str): the header of the exported file.
"""

def __init__(self, field, filename, times=None, header_format=".2e") -> None:
def __init__(
self, field, filename, times=None, write_at_last=False, header_format=".2e"
) -> None:
super().__init__(field=field)
if times:
self.times = sorted(times)
else:
self.times = times
self.filename = filename
self.write_at_last = write_at_last
self.header_format = header_format
self._first_time = True

self.data = None
self.header = None
self._unique_indices = None
self._V = None

@property
def filename(self):
Expand All @@ -51,83 +67,101 @@
return True
return False

def when_is_next_time(self, current_time):
if self.times is None:
return None
for time in self.times:
if current_time < time:
return time
return None
def is_last(self, current_time, final_time):
if final_time is None:

Check warning on line 71 in festim/exports/txt_export.py

View check run for this annotation

Codecov / codecov/patch

festim/exports/txt_export.py#L71

Added line #L71 was not covered by tests
# write if steady
return True
elif self.times is None:
if np.isclose(current_time, final_time, atol=0):

Check warning on line 75 in festim/exports/txt_export.py

View check run for this annotation

Codecov / codecov/patch

festim/exports/txt_export.py#L73-L75

Added lines #L73 - L75 were not covered by tests
# write at final time if exports at each timestep
return True

Check warning on line 77 in festim/exports/txt_export.py

View check run for this annotation

Codecov / codecov/patch

festim/exports/txt_export.py#L77

Added line #L77 was not covered by tests
else:
if np.isclose(current_time, self.times[-1], atol=0):

Check warning on line 79 in festim/exports/txt_export.py

View check run for this annotation

Codecov / codecov/patch

festim/exports/txt_export.py#L79

Added line #L79 was not covered by tests
# write at final time if exports at specific times
return True
return False

Check warning on line 82 in festim/exports/txt_export.py

View check run for this annotation

Codecov / codecov/patch

festim/exports/txt_export.py#L81-L82

Added lines #L81 - L82 were not covered by tests

def initialise_TXTExport(self, mesh, project_to_DG=False, materials=None):
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we should call this method something else. Maybe just initialise and then add docstrings to explain what it does


def write(self, current_time, steady):
# create a DG1 functionspace
V_DG1 = f.FunctionSpace(self.function.function_space().mesh(), "DG", 1)
if project_to_DG:
self._V = f.FunctionSpace(mesh, "DG", 1)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we need to document this new attribute

else:
self._V = f.FunctionSpace(mesh, "CG", 1)

x = f.interpolate(f.Expression("x[0]", degree=1), self._V)
x_column = np.transpose([x.vector()[:]])

# if chemical_pot is True or trap_element_type is DG, get indices of duplicates near interfaces
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
# if chemical_pot is True or trap_element_type is DG, get indices of duplicates near interfaces
# if project_to_DG is True, get indices of duplicates near interfaces

In the scope of this method, there is no such thing as chemical_pot or trap_element_type

# and indices of the first elements from a pair of duplicates otherwise
if project_to_DG:
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I thought a bit about filtering, maybe we should allow users to control whether the data will be filtered or not?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Control is good yeah, we could have a flag that defaults to true but can be deactivated if need be

# Collect all borders
borders = []
for material in materials:
if material.borders:
for border in material.borders:
borders.append(border)
RemDelaporteMathurin marked this conversation as resolved.
Show resolved Hide resolved
borders = np.unique(borders)

# Find indices of the closest duplicates to interfaces
border_indices = []
for border in borders:
closest_indx = np.abs(x_column - border).argmin()
closest_x = x_column[closest_indx]
for ind in np.where(x_column == closest_x)[0]:
border_indices.append(ind)

# Find indices of first elements in duplicated pairs and mesh borders
_, mesh_indices = np.unique(x_column, return_index=True)

# Get unique indices from both arrays preserving the order in unsorted x-array
unique_indices = []
for indx in np.argsort(x_column, axis=0)[:, 0]:
if (indx in mesh_indices) or (indx in border_indices):
unique_indices.append(indx)

self._unique_indices = np.array(unique_indices)

else:
# Get list of unique indices as integers
self._unique_indices = np.argsort(x_column, axis=0)[:, 0]

self.data = x_column[self._unique_indices]
self.header = "x"
KulaginVladimir marked this conversation as resolved.
Show resolved Hide resolved

def write(self, current_time, final_time):

solution = f.project(self.function, V_DG1)
solution_column = np.transpose(solution.vector()[:])
if self.is_it_time_to_export(current_time):
solution = f.project(self.function, self._V)
solution_column = np.transpose(solution.vector()[:])

# if the directory doesn't exist
# create it
dirname = os.path.dirname(self.filename)
if not os.path.exists(dirname):
os.makedirs(dirname, exist_ok=True)

# if steady or it is the first time to export
# write data
# else append new column to the existing file
if steady or self._first_time:
if steady:
header = "x,t=steady"
else:
header = f"x,t={format(current_time, self.header_format)}s"
x = f.interpolate(f.Expression("x[0]", degree=1), V_DG1)
x_column = np.transpose([x.vector()[:]])
data = np.column_stack([x_column, solution_column])
self._first_time = False
# if steady, add the corresponding label
# else append new export time to the header
steady = final_time is None
if steady:
self.header += ",t=steady"
else:
# Update the header
old_file = open(self.filename)
old_header = old_file.readline().split("\n")[0]
old_file.close()
header = old_header + f",t={format(current_time, self.header_format)}s"
# Append new column
old_columns = np.loadtxt(self.filename, delimiter=",", skiprows=1)
data = np.column_stack([old_columns, solution_column])

np.savetxt(self.filename, data, header=header, delimiter=",", comments="")
self.header += f",t={format(current_time, self.header_format)}s"


class TXTExports:
"""
Args:
fields (list): list of exported fields ("solute", "1", "retention",
"T"...)
filenames (list): list of the filenames for each field (must end with .txt).
times (list, optional): if provided, fields will be
exported at these timesteps. Otherwise exports at all
timesteps. Defaults to None.
header_format (str, optional): the format of column headers.
Defaults to ".2e".
"""

def __init__(
self, fields=[], filenames=[], times=None, header_format=".2e"
) -> None:
msg = "TXTExports class will be deprecated in future versions of FESTIM"
warnings.warn(msg, DeprecationWarning)

self.fields = fields
if len(self.fields) != len(filenames):
raise ValueError(
"Number of fields to be exported "
"doesn't match number of filenames in txt exports"
# Add new column of filtered and sorted data
self.data = np.column_stack(
[self.data, solution_column[self._unique_indices]]
)
if times:
self.times = sorted(times)
else:
self.times = times
self.filenames = filenames
self.header_format = header_format
self.exports = []
for function, filename in zip(self.fields, self.filenames):
self.exports.append(TXTExport(function, filename, times, header_format))

if (
self.write_at_last and self.is_last(current_time, final_time)
) or not self.write_at_last:

# Write data
np.savetxt(
self.filename,
self.data,
header=self.header,
delimiter=",",
comments="",
)
61 changes: 37 additions & 24 deletions festim/generic_simulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,11 +357,11 @@ def initialise(self):

self.h_transport_problem.initialise(self.mesh, self.materials, self.dt)

# raise warning if the derived quantities don't match the type of mesh
# eg. SurfaceFlux is used with cylindrical mesh
for export in self.exports:
if isinstance(export, festim.DerivedQuantities):
for q in export:
# raise warning if the derived quantities don't match the type of mesh
# eg. SurfaceFlux is used with cylindrical mesh
if self.mesh.type not in q.allowed_meshes:
warnings.warn(
f"{type(q)} may not work as intended for {self.mesh.type} meshes"
Expand All @@ -381,31 +381,41 @@ def initialise(self):
f"SurfaceKinetics boundary condition must be defined on surface {q.surface} to export data with festim.AdsorbedHydrogen"
)

self.exports.initialise_derived_quantities(
self.mesh.dx, self.mesh.ds, self.materials
)

# needed to ensure that data is actually exported at TXTExport.times
# see issue 675
for export in self.exports:
if isinstance(export, festim.TXTExport) and export.times:
if not self.dt.milestones:
self.dt.milestones = []
for time in export.times:
if time not in self.dt.milestones:
msg = "To ensure that TXTExport exports data at the desired times "
msg += "TXTExport.times are added to milestones"
warnings.warn(msg)
self.dt.milestones.append(time)
self.dt.milestones.sort()

# set Soret to True for SurfaceFlux quantities
if isinstance(export, festim.DerivedQuantities):
for q in export:
# set Soret to True for SurfaceFlux quantities
if isinstance(q, festim.SurfaceFlux):
q.soret = self.settings.soret
q.T = self.T.T

if isinstance(export, festim.TXTExport):
# pre-process data depending on the chemical potential flag, trap element type,
# and material borders
project_to_DG = (
self.settings.chemical_pot
or self.settings.traps_element_type == "DG"
)
export.initialise_TXTExport(
self.mesh.mesh,
project_to_DG,
self.materials,
)
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  1. I removed the repeating loop for export in self.exports below. I don't think there is a need to iterate through it twice during initialisation.
  2. I temporarly added new method initialise_TXTExport (former filter_duplicates) that's called once.
  3. Considering this task of FESTIM WS, I assume that we should project fields to DG if settings.traps_element_type = "DG". I added such a check with the project_to_DG variable, but it doesn't help and some information is lost due to filtering. For example, here is what the second figure from the task looks like:
    image
    since there are borders, the output data near the boundary looks like this:
Details

4.646464646464646964e-01,5.000000000000542899e-01
4.747474747474748069e-01,5.000000000000532907e-01
4.848484848484848619e-01,5.000000000000531797e-01
4.949494949494949725e-01,5.000000000000528466e-01
4.949494949494949725e-01,5.000000000000526246e-01

5.050505050505050830e-01,5.000000000000521805e-01
5.151515151515152491e-01,0.000000000000000000e+00
5.252525252525253041e-01,0.000000000000000000e+00

if there are no material.borders, then (fig. 4 from the task):
image
and there are no duplicates in the output file.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There's not much we can do about 3. I'm afraid. That is why I always recommend working with XDMF because there is no alteration of the data.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

However, what I showed is the result of filtering. We could filter data only for solute H or I don't know. If it's ok, then I'd maybe add a warning message


# needed to ensure that data is actually exported at TXTExport.times
# see issue 675
if export.times:
if not self.dt.milestones:
self.dt.milestones = []
for time in export.times:
if time not in self.dt.milestones:
msg = "To ensure that TXTExport exports data at the desired times "
msg += "TXTExport.times are added to milestones"
warnings.warn(msg)
self.dt.milestones.append(time)
self.dt.milestones.sort()

self.exports.initialise_derived_quantities(
self.mesh.dx, self.mesh.ds, self.materials
)

def run(self, completion_tone=False):
"""Runs the model.

Expand Down Expand Up @@ -503,7 +513,10 @@ def run_post_processing(self):
self.update_post_processing_solutions()

self.exports.t = self.t
self.exports.write(self.label_to_function, self.mesh.dx)
self.exports.write(
self.label_to_function,
self.mesh.dx,
)

def update_post_processing_solutions(self):
"""Creates the post-processing functions by splitting self.u. Projects
Expand Down
Loading
Loading