Document fit functions #54

Merged
merged 33 commits on Oct 22, 2024
Changes from all commits

Commits (33)
7491673
Merge pull request #20 from amepproject/release-1.0.x
hechtprojects Apr 22, 2024
3ac846e
Merge pull request #28 from amepproject/release-1.0.x
hechtprojects May 22, 2024
3bc6c3e
Added table of necessary changes.
kai-luca Jun 17, 2024
a484259
WIP: gotta get that bus.
kai-luca Jun 17, 2024
2c822ef
Fixed .gitignore to exclude test artifacts.
kai-luca Jun 18, 2024
65c9256
All tests in base are refactored, work with new data and are cleaned up.
kai-luca Jun 18, 2024
6eca206
All tests in continuum were bad. I removed them and will think about …
kai-luca Jun 18, 2024
b1ebca8
Notes were updated to fit state.
kai-luca Jun 18, 2024
7715ecf
Done up until test_functions. Think about tests there.
kai-luca Jun 18, 2024
703bab3
load and order tests do not clutter up directories anymore.
kai-luca Jun 19, 2024
01d5e91
load and particle methods are fine now.
kai-luca Jun 19, 2024
76fbed3
pbc tests run fine now.
kai-luca Jun 19, 2024
257b24e
reader tests work fine. They generate only the needed erroneous data.
kai-luca Jun 19, 2024
7c3ef5f
spatialcor tests work fine and are clean.
kai-luca Jun 19, 2024
a6ab31e
timecor tests work fine and are clean.
kai-luca Jun 19, 2024
61d3a30
timecor tests work fine and are clean
kai-luca Jun 19, 2024
e787a99
Added some evaluate tests. They rack up some time when testing. But t…
kai-luca Jun 28, 2024
f341a40
Fixed the zombie dependencies.
kai-luca Jul 8, 2024
a2e9197
Cleaned up test artifacts and added them to .gitignore.
kai-luca Jul 9, 2024
912fc41
Merge branch 'main' into refactor_tests
kai-luca Jul 9, 2024
57da1f3
Migrated numpy.trapz to numpy.trapezoid depending on numpy.__version__.
kai-luca Jul 9, 2024
e35d138
Merge remote-tracking branch 'origin/refactor_tests' into refactor_tests
kai-luca Jul 9, 2024
cb78962
Added all we need to read out of the datasets to save in our format t…
kai-luca Jul 11, 2024
6b0c671
WIP: Shortened __getitem__ and documented it. Now do the sphinx part.
kai-luca Jul 24, 2024
583e981
WIP: special functions are weird. They do NOT want to be documented.
kai-luca Jul 24, 2024
416a95f
Trajectories reference what they contain now. I declared them to be s…
kai-luca Jul 25, 2024
bbf0ff5
Trajectory tests check if ptype is compatible with strings. They are.
kai-luca Jul 26, 2024
ce6d6b9
Cleaned up the base reader __read_data function.
kai-luca Jul 26, 2024
005d83e
Merge branch 'gsd_reader' into issues_49_46
Aug 15, 2024
d6d444f
WIP: GSDReader implementation started
Aug 16, 2024
41b79c0
WIP: Going home to fetch some documents.
kai-luca Aug 19, 2024
1a05c34
All functions and the Gaussian and Gaussian2d classes are documented.
kai-luca Aug 20, 2024
81c3b02
All fit classes are documented now. Also Maxwell-Boltzmann works now…
kai-luca Aug 20, 2024
8 changes: 8 additions & 0 deletions .gitignore
@@ -43,3 +43,11 @@ build/
test/data
test/.coverage
test/coverage.xml
examples/data/continuum/traj.h5amep
examples/data/continuum/#traj.h5amep
examples/data/continuum/##temp*
examples/data/trajs/
examples/data/lammps/traj.h5amep
examples/data/lammps/#traj.h5amep
examples/data/lammps/##temp*
examples/data/invalid
139 changes: 85 additions & 54 deletions amep/base.py
@@ -36,7 +36,7 @@
import inspect
import logging

from typing import Collection
from typing import Collection, Iterable, Sequence
from io import StringIO
from contextlib import redirect_stdout
from tqdm import TqdmExperimentalWarning
@@ -168,7 +168,8 @@ def check_path(path: str, extension: str) -> tuple[str, str]:
LOADMODES = [
'lammps',
'h5amep',
'field'
'field',
'gsd'
]
# maximum RAM usage in GB per CPU (used for parallelized methods)
MAXMEM = 1
@@ -899,7 +900,7 @@ def data(

def __read_data(
self, key: str, ptype: int | list | None = None,
pid: int | list | None = None) -> np.ndarray:
pid: int | Sequence | None = None) -> np.ndarray:
r'''
Reads a dataset from the HDF5 file either for all particles
or for all particles of a specific particle type.
@@ -929,12 +930,10 @@ def __read_data(

if pid:
data_ids = root['frames'][str(self.__step)]['id'][:]
if type(pid) != list:
if not isinstance(pid, Sequence):
pid = [pid]
id_list = []
for id in pid:
index_id = int(np.where(data_ids==id)[0])
id_list.append(index_id)
id_list = [int(np.where(data_ids == part_id)[0])
for part_id in pid]
if key in root['frames'][str(self.__step)].keys():
data = root['frames'][str(self.__step)][key][id_list]
return data
@@ -944,28 +943,28 @@
# check if a dataset with the given key exists and read the data
if key in root['frames'][str(self.__step)].keys():
data = root['frames'][str(self.__step)][key][:]

# Transform ptype so it is a list of all ptypes
# If no ptype is provided return all data
if not ptype:
return data
if type(ptype) == int:
# Transform ptype so it is a list of all ptypes
if not isinstance(ptype, Sequence):
ptype = [ptype]
mask = np.zeros(data.shape[0],)
# check particle type
for single_ptype in ptype:
if single_ptype in self.ptypes:
mask += types==single_ptype
else:
warnings.warn(
f"The specified particle type {single_ptype} "\
"does not exist. Returning data without type "\
f"{single_ptype}."
)
mask += sum(types == single_ptype for single_ptype
in ptype if single_ptype in self.ptypes)
if any(single_ptype not in self.ptypes for
single_ptype in ptype):
for single_ptype in ptype:
if single_ptype not in self.ptypes:
warnings.warn(
f"The specified particle type {single_ptype} "
"does not exist. Returning data without type "
f"{single_ptype}."
)
return data[mask.astype(bool)]

else:
raise KeyError(
f"The key {key} does not exist in the frame. "\
raise KeyError(
f"The key {key} does not exist in the frame. "
"Returning no data!"
)
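The refactored filters above use two small tricks: pids are mapped to row
indices via np.where, and the type filter builds an integer mask by summing
boolean comparisons, one per requested type. A minimal sketch of both with
hypothetical stand-in arrays (not the reader's actual attributes):

import numpy as np

ids = np.array([10, 11, 12, 13])       # per-particle ids ('id' dataset)
data = np.arange(4)                    # per-particle dataset
pid = [12, 10]                         # requested particle ids
rows = [int(np.where(ids == p)[0]) for p in pid]  # row index per id
selected = data[rows]                  # data for particles 12 and 10

types = np.array([1, 1, 2, 3])         # per-particle types in a frame
ptype = [1, 3]                         # requested particle types
mask = sum(types == t for t in ptype)  # summed booleans give an integer mask
filtered = data[mask.astype(bool)]     # data for particles of type 1 or 3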

@@ -1266,42 +1265,55 @@ def __init__(self, reader):

'''
self.__reader = reader
def __getitem__(self,item):

def __getitem__(self, item: int | slice | Iterable[int]
) -> BaseFrame | BaseField | list[BaseField | BaseFrame]:
"""Get an individual frame or field of a simulation.

Supports slicing as well as iterables of valid integer indices.
The return type depends on the type of the trajectory and on whether
a single frame or a collection of frames is requested; a collection
is returned as a list.

Parameters
----------
item : int | slice | Iterable[int]
    Index, slice, or iterable of integer indices selecting the
    requested frame(s) or field(s).

Returns
-------
BaseFrame | BaseField | list[BaseField | BaseFrame]
"""
if isinstance(item, slice):
sli=range(*item.indices(len(self.__reader.steps)))
if self.type=="field":
out = [BaseField(self.__reader,index) for index in sli]
elif self.type=="particle":
out = [BaseFrame(self.__reader,index) for index in sli]
else:
out = [BaseFrame(self.__reader,index) for index in sli]
return out
elif isinstance(item, list) or isinstance(item, np.ndarray):
if self.type=="field":
out = [BaseField(self.__reader,index) for index in item]
elif self.type=="particle":
out = [BaseFrame(self.__reader,index) for index in item]
else:
out = [BaseFrame(self.__reader,index) for index in item]
return out
elif isinstance(item, int) or isinstance(item, np.integer):
if self.type=="field":
sli = range(*item.indices(len(self.__reader.steps)))
if self.type == "field":
return [BaseField(self.__reader, index) for index in sli]
# Anything else defaults to particle frames.
# If we get more types of trajectories, we have to add them here
# with an if statement as above.
return [BaseFrame(self.__reader, index) for index in sli]
if isinstance(item, Iterable):
if self.type == "field":
return [BaseField(self.__reader, index) for index in item]
return [BaseFrame(self.__reader, index) for index in item]
if isinstance(item, (int, np.integer)):
if self.type == "field":
return BaseField(self.__reader, item)
elif self.type=="particle":
return BaseFrame(self.__reader, item)
return BaseFrame(self.__reader, item)
else:
raise KeyError(
'''BaseTrajectory: Invalid key. Only integer values, 1D lists
and arrays, and slices are allowed.'''
)
raise KeyError('''BaseTrajectory: Invalid key. Only integer values,
1D lists and arrays, and slices are allowed.'''
)
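
# Usage sketch (hypothetical trajectory object `traj`):
#   traj[3]      -> a single BaseFrame (or BaseField)
#   traj[1:9:2]  -> list of frames at indices 1, 3, 5, 7
#   traj[[0, 5]] -> list of frames at indices 0 and 5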

def __iter__(self):
for i in range(len(self.__reader.steps)):
"""Iterate over all frames of the trajectory."""
for i, _ in enumerate(self.__reader.steps):
yield self[i]

def __next__(self):
pass

def __len__(self):
return len(self.__reader.steps)

def add_author_info(
self, author: str, key: str, value: int | float | str) -> None:
'''
@@ -1329,6 +1341,7 @@ def add_author_info(
if author not in root['info']['authors'].keys():
root['info']['authors'].create_group(author)
root['info']['authors'][author].attrs[key] = value

def get_author_info(self, author: str) -> dict:
r'''
Returns all information for the given author.
@@ -1352,6 +1365,7 @@ def get_author_info(self, author: str) -> dict:
p = dict(a for a in root['info']['authors'][author].attrs.items())
return p
return {}

def delete_author_info(self, author: str, key: str | None = None) -> None:
r'''
Deletes all information (key=None) or specific information given by
@@ -1377,6 +1391,7 @@ def delete_author_info(self, author: str, key: str | None = None) -> None:
del root['info']['authors'][author]
elif type(key)==str:
root['info']['authors'][author].attrs.__delitem__(key)

@property
def authors(self) -> list[str]:
r'''
@@ -1396,6 +1411,7 @@ def authors(self) -> list[str]:
keys = list(root['info']['authors'].keys())
return keys
return []

def add_software_info(self, key: str, value: str | int | float) -> None:
r'''
Add software information to the hdf5 trajectory file.
@@ -1418,6 +1434,7 @@ def add_software_info(self, key: str, value: str | int | float) -> None:
if 'software' not in root['info'].keys():
root['info'].create_group('software')
root['info']['software'].attrs[key] = value

def delete_software_info(self, key: str | None = None) -> None:
r'''
Deletes all software information (key=None) or specific information
@@ -1442,6 +1459,7 @@ def delete_software_info(self, key: str | None = None) -> None:
root['info']['software'].attrs.__delitem__(key)
else:
root['info']['software'].attrs.__delitem__(key)

@property
def software(self) -> dict:
r'''
@@ -1731,7 +1749,16 @@ def dt(self, x):
if type(x) == float:
self.__reader.dt = x
@property
def dim(self):
def dim(self) -> int:
'''
Spatial dimension of the simulation.

Returns
-------
x : int
Spatial dimension.

'''
return self.__reader.d
@property
def savedir(self):
@@ -1783,8 +1810,12 @@ def __init__(self) -> None:
def __getitem__(self, key: str):
return getattr(self, key)

def keys(self):
return [name for (name, value) in inspect.getmembers(
def keys(self) -> list[str]:
"""The keys to the evaluation object.

Used so Evaluation-objects can be used as dictionaries.
"""
return [name for (name, _) in inspect.getmembers(
type(self), lambda x: isinstance(x, property)
)]
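
# Usage sketch: together with __getitem__, keys() makes subclasses
# mapping-like, so dict(ev) collects every property of a hypothetical
# Evaluation subclass instance `ev` into a plain dictionary.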

9 changes: 7 additions & 2 deletions amep/continuum.py
@@ -32,6 +32,7 @@
# =============================================================================
# IMPORT MODULES
# =============================================================================
from packaging.version import Version
import numpy as np
import scipy.fft as fft

@@ -782,8 +783,12 @@ def cluster_properties(
# set all grid points outside the cluster to zero
y[labels != i] = 0
# integral
N = np.trapz(y, x=X, axis=1)
N = np.trapz(N, x=Y[:, 0])
if Version(np.__version__)<Version("2.0.0"):
N = np.trapz(y, x=X, axis=1)
N = np.trapz(N, x=Y[:, 0])
else:
N = np.trapezoid(y, x=X, axis=1)
N = np.trapezoid(N, x=Y[:, 0])
sizes.append(N)

# calculate the radius of gyration and linear extension
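The same np.trapz/np.trapezoid version switch recurs in amep/evaluate.py
below. A small helper could centralize the dispatch; a sketch under the
assumption that one module-level alias suffices (the names _trapezoid and
integrate_2d are hypothetical, not part of this PR):

from packaging.version import Version
import numpy as np

# numpy 2.0 renamed trapz to trapezoid; pick the right one once.
if Version(np.__version__) < Version("2.0.0"):
    _trapezoid = np.trapz
else:
    _trapezoid = np.trapezoid

def integrate_2d(field, x_grid, y_grid):
    """Integrate a 2D field over both grid axes (hypothetical helper)."""
    return _trapezoid(_trapezoid(field, x=x_grid, axis=1), x=y_grid)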
46 changes: 32 additions & 14 deletions amep/evaluate.py
@@ -31,6 +31,7 @@
# =============================================================================
# IMPORT MODULES
# =============================================================================
from packaging.version import Version
import warnings
import numpy as np

@@ -1840,16 +1841,22 @@ def __compute(self):
(self.__k0[i][0]*np.cos(self.__theta) +\
self.__k0[i][1]*np.sin(self.__theta)))

ck += np.trapz(integrand, x=self.__theta, axis=1)

if Version(np.__version__) < Version("2.0.0"):
ck += np.trapz(integrand, x=self.__theta, axis=1)
else:
ck += np.trapezoid(integrand, x=self.__theta, axis=1)

ck = np.abs(ck/len(self.__k0))

# normalization
norm = np.trapz(self.__grt, x=self.__theta, axis=1)
if Version(np.__version__) < Version("2.0.0"):
norm = np.trapz(self.__grt, x=self.__theta, axis=1)
else:
norm = np.trapezoid(self.__grt, x=self.__theta, axis=1)

return ck/norm


def __getk(self):
r'''
Calculates the k vectors corresponding to the first peaks of
@@ -3191,15 +3198,26 @@ def __init__(
if self.__xmax is None:
if self.__use_density:
# integrated density
val_total_x = np.trapz(
self.__traj[-1].data(self.__ftype),
x = self.__traj[-1].grid[0],
axis=1
)
val_total = np.trapz(
val_total_x,
x = self.__traj[-1].grid[1][:, 0]
)
if Version(np.__version__) < Version("2.0.0"):
val_total_x = np.trapz(
self.__traj[-1].data(self.__ftype),
x=self.__traj[-1].grid[0],
axis=1
)
val_total = np.trapz(
val_total_x,
x=self.__traj[-1].grid[1][:, 0]
)
else:
val_total_x = np.trapezoid(
self.__traj[-1].data(self.__ftype),
x=self.__traj[-1].grid[0],
axis=1
)
val_total = np.trapezoid(
val_total_x,
x=self.__traj[-1].grid[1][:, 0]
)
self.__xmax = val_total
else:
# total number of grid points
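As a quick sanity check of the double-integration pattern above: integrating
a constant field of 1 over the grid should return the box area. A sketch
using the hypothetical integrate_2d helper from the note after
amep/continuum.py:

import numpy as np

x = np.linspace(0.0, 2.0, 51)
y = np.linspace(0.0, 1.0, 26)
X, Y = np.meshgrid(x, y)          # grids shaped like traj[-1].grid
field = np.ones_like(X)           # constant density of 1
area = integrate_2d(field, X, Y[:, 0])   # ~= 2.0 * 1.0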