Merge pull request #70 from amepproject/gromacs_reader
gromacs+gsd/hoomd readers, general dist function, issues fixed
hechtprojects authored Nov 13, 2024
2 parents b16a127 + ee2471a commit 96a8e9c
Showing 19 changed files with 1,397 additions and 33 deletions.
30 changes: 22 additions & 8 deletions amep/base.py
@@ -169,7 +169,8 @@ def check_path(path: str, extension: str) -> tuple[str, str]:
'lammps',
'h5amep',
'field',
'gsd'
'hoomd',
'gromacs'
]
# maximum RAM usage in GB per CPU (used for parallelized methods)
MAXMEM = 1
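
The names added above register the new file formats. A minimal loading sketch, assuming the readers are selected through the mode argument of amep.load.traj (as the existing 'lammps' mode is) and with placeholder directory paths:

import amep

# Assumption: the new reader names are passed via the `mode` keyword of
# amep.load.traj, like the existing readers; the paths are placeholders.
gmx_traj = amep.load.traj("./gromacs_run", mode="gromacs")   # GROMACS reader
hoomd_traj = amep.load.traj("./hoomd_run", mode="hoomd")     # GSD/HOOMD reader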
@@ -771,7 +772,7 @@ def keys(self) -> list:

return datakeys
def data(
self, *args, ptype: int | None = None, zerofill: bool = False,
self, *args: str | tuple[list[str], ...], ptype: int | None = None, zerofill: bool = False,
return_keys: bool = False) -> tuple[list, np.ndarray]:
r'''
Returns the entire data frame for all particles or for
@@ -784,12 +785,15 @@ def data(
that start with "name", "value[*]" returns all datasets with any
number of characters in between the square brackets i.e. "value[1]",
"value[2]", ..., "value[any text with any length]".
Duplicate keys are removed.
Parameters
----------
*args : str
*args : str | tuple[list[str], ...]
Parameter keys. One wildcard character asterisk can be used, see
note above.
            note above. Either multiple strings or multiple lists of
            strings may be passed, but not a mixture of both.
ptype : int | list, optional
Particle type. Is internally converted to a list and all matching
ptypes are returned. The default is None.
@@ -811,10 +815,21 @@
data = None
datakeys = []

        # allow lists of keys as input
        islist = False
        listresult = []
        for arg in args:
            if isinstance(arg, (list, np.ndarray)):
                islist = True
                listresult.append(self.data(
                    *arg, ptype=ptype, zerofill=zerofill,
                    return_keys=return_keys
                ))
        if islist:
            if len(args) == 1:
                return listresult[0]
            return listresult

# return all data if no arguments are given
if len(args)==0:
args = self.keys

else:
# Transform list of all given keys by allowing semi-wildcard matches
# One asterisk * is allowed.
@@ -836,11 +851,10 @@
found_key = True
if not found_key:
raise KeyError(
f"""The key {arg} does not exist in the frame,
returning no data!"""
f"The key \"{arg}\" does not exist in the frame, returning no data!"
)
# remove duplicates
args = np.unique(extended_keys)
args = np.array(extended_keys)[np.sort(np.unique(extended_keys, return_index=True)[1])]

# loop through given keys
for i,key in enumerate(args):
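With this change, data() accepts wildcard keys, several keys at once, or whole lists of keys. A minimal usage sketch, assuming a loaded trajectory traj and key names such as "x" or "v*" that match whatever the underlying dump files actually contain:

frame = traj[-1]                                  # any loaded frame
vel = frame.data("v*")                            # one wildcard key: vx, vy, vz, ...
pos = frame.data("x", "y", "z")                   # several string keys at once
pos, vel = frame.data(["x", "y", "z"], ["v*"])    # lists of keys: one result per list
keys, vel = frame.data("v*", return_keys=True)    # also return the matched key names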
219 changes: 212 additions & 7 deletions amep/evaluate.py
@@ -32,6 +32,7 @@
# IMPORT MODULES
# =============================================================================
from packaging.version import Version
from collections.abc import Callable
import warnings
import numpy as np

@@ -3072,6 +3073,210 @@ def indices(self):
return self.__indices



class Dist(BaseEvaluation):
"""General distribution.
"""

def __init__(
self, traj: ParticleTrajectory | FieldTrajectory,
            keys: str | list[str], func: Callable | None = None, skip: float = 0.0,
            nav: int = 10, nbins: int = 50, ptype: float | None = None,
ftype: str | None = None, logbins: bool = False,
xmin: float | None = None, xmax: float | None = None,
**kwargs):
r'''
        Calculate the distribution of one or more user-defined keys,
        optionally transformed by a user-supplied function ``func``
        before the histogram is computed. It also
        takes an average over several frames (= time average).
        Parameters
        ----------
        traj : ParticleTrajectory | FieldTrajectory
            Trajectory object.
        skip : float, optional
            Skip this fraction at the beginning of the trajectory.
            The default is 0.0.
        keys : str | list(str)
            Data key(s) whose values are histogrammed. One wildcard
            character asterisk can be used, e.g. "v*".
        func : Callable | None, optional
            Function applied to the selected data before the histogram
            is computed, e.g. np.linalg.norm. Additional keyword arguments
            are forwarded to this function. If None, the raw data is used.
            The default is None.
        xmin : float | None, optional
            Minimum value for the histogram. If None, the minimum value
            of the last frame is used. The default is None.
        xmax : float | None, optional
            Maximum value for the histogram. If None, the maximum value
            of the last frame is used. The default is None.
        nav : int, optional
            Number of frames to use for the average. The default is 10.
        nbins : int, optional
            Number of bins. The default is 50.
        ptype : float, optional
            Particle type (particle trajectories only). The default is None.
        ftype : str | None, optional
            Field type (field trajectories only). The default is None.
        logbins : bool, optional
            If True, logarithmically spaced bins are used.
            The default is False.
Returns
-------
None
Examples
--------
>>> import amep
>>> path="/home/dormann/Documents/git_amep/examples/data/lammps.h5amep"
>>> traj= amep.load.traj(path)
>>> # distribution of the absolute velocity
>>> dist=amep.evaluate.Dist(traj, "v*", func=np.linalg.norm, axis=1, skip=0.5, logbins=True)
>>> # save results in hdf5 format
>>> dist.save("./eval/dist-eval.h5", name="velocitydistribution")
>>> fig,axs=amep.plot.new()
>>> axs.plot(dist.x, dist.xdist)
>>> axs.set_xlabel("Velocity")
>>> axs.set_ylabel("P(Velocity)")
>>> axs.semilogx()
>>> fig.savefig("/home/dormann/Documents/git_amep/doc/source/_static/images/evaluate/evaluate-Dist.png")
>>> # more examples:
>>> # distribution of the x-position
>>> dist=amep.evaluate.Dist(traj, "x", skip=0.5, logbins=True)
>>> # distribution of the angular velocity
>>> dist=amep.evaluate.Dist(traj, "omega*", func=np.linalg.norm, axis=1, skip=0.5, logbins=True)
.. image:: /_static/images/evaluate/evaluate-Dist.png
:width: 400
:align: center
'''
super(Dist, self).__init__()

if func is None:
func = lambda x: x

self.name = "dist"

self.__traj = traj
self.__keys = keys
self.__skip = skip
self.__nav = nav
self.__nbins = nbins
        self.__ptype = ptype
        self.__ftype = ftype
self.__xmin = xmin
self.__xmax = xmax
self.__func = func
self.__logbins = logbins
self.__kwargs = kwargs

if self.__xmin is None or self.__xmax is None:
if isinstance(traj, FieldTrajectory):
minmaxdata=self.__traj[-1].data(self.__keys, ftype=self.__ftype)
else:
minmaxdata=self.__traj[-1].data(self.__keys, ptype=self.__ptype)
minmaxdata = func(minmaxdata, **kwargs)
if self.__xmin is None:
self.__xmin = np.min(minmaxdata)

if self.__xmax is None:
self.__xmax = np.max(minmaxdata)

self.__frames, res, self.__indices = average_func(
self.__compute, np.arange(self.__traj.nframes), skip=self.__skip,
nr=self.__nav, indices=True)

self.__times = self.__traj.times[self.__indices]
self.__xdist = res[0]
self.__x = res[1]

def __compute(self, ind):
r'''
        Calculation for a single frame.
Parameters
----------
ind : int
Frame index.
Returns
-------
hist : np.ndarray
Histogram.
bins : np.ndarray
Bin positions. Same shape as hist.
'''
data=self.__traj[ind].data(self.__keys, ptype=self.__ptype)
data=self.__func(data, **self.__kwargs)

keyhist, keybins = distribution(
data, nbins=self.__nbins, xmin=self.__xmin, xmax=self.__xmax, logbins=self.__logbins)

return keyhist, keybins

@property
def xdist(self):
r'''
        Time-averaged distribution of the selected (and transformed) data.
        Returns
        -------
        np.ndarray
            Time-averaged distribution.
'''
return self.__xdist

@property
def x(self):
r'''
        Bin positions of the distribution.
        Returns
        -------
        np.ndarray
            Bin positions of the distribution.
'''
return self.__x

@property
def frames(self):
r'''
        Distribution for each frame.
        Returns
        -------
        np.ndarray
            Distribution for each frame.
'''
return self.__frames

@property
def times(self):
r'''
        Times at which the distribution is evaluated.
        Returns
        -------
        np.ndarray
            Times at which the distribution is evaluated.
'''
return self.__times

@property
def indices(self):
r'''
        Indices of all frames for which the distribution has been
        evaluated.
Returns
-------
np.ndarray
Frame indices.
'''
return self.__indices
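
Conceptually, each __compute call above histograms func(frame.data(keys)) over bins fixed by xmin/xmax, and the results are averaged over frames. A rough numpy-only sketch of that procedure, assuming a particle trajectory and ignoring nav, ptype and logbins (the class itself relies on amep's distribution and average_func helpers):

import numpy as np

def dist_sketch(traj, keys, func=lambda x: x, nbins=50, skip=0.5, **kwargs):
    # shared bin edges from the last frame, as Dist does when xmin/xmax are None
    last = func(traj[-1].data(keys), **kwargs)
    edges = np.linspace(np.min(last), np.max(last), nbins + 1)
    start = int(skip * traj.nframes)
    hists = [
        np.histogram(func(traj[i].data(keys), **kwargs), bins=edges, density=True)[0]
        for i in range(start, traj.nframes)
    ]
    x = 0.5 * (edges[1:] + edges[:-1])      # bin centres
    return x, np.mean(hists, axis=0)        # time-averaged distribution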



# =============================================================================
# CLUSTER ANALYSIS
# =============================================================================
@@ -3517,9 +3722,9 @@ def __init__(
>>> axs.plot(clg.times, clg.frames, label="largest")
>>> clg = amep.evaluate.ClusterGrowth(traj, mode="mean")
>>> axs.plot(clg.times, clg.frames, label="mean")
>>> clg = amep.evaluate.ClusterGrowth(ptraj, mode="mean", min_size=20)
>>> clg = amep.evaluate.ClusterGrowth(traj, mode="mean", min_size=20)
>>> axs.plot(clg.times, clg.frames, label="mean min 20")
>>> clg = amep.evaluate.ClusterGrowth(ptraj, mode="weighted mean")
>>> clg = amep.evaluate.ClusterGrowth(traj, mode="weighted mean")
>>> axs.plot(clg.times, clg.frames, label="weighted mean")
>>> axs.loglog()
>>> axs.legend()
@@ -3564,7 +3769,7 @@ def __init__(

if nav is None:
nav = self.__traj.nframes

# check mode
if self.__mode not in ["largest", "mean", "weighted mean"]:
raise ValueError(
@@ -3633,8 +3838,8 @@ def __compute_particles(self, frame) -> int | float:

# if no clusters are detected
if len(values) == 0:
values = 0.0
weights = 1.0
values = [0.0]
weights = [1.0]
else:
weights = values

@@ -3692,8 +3897,8 @@ def __compute_fields(self, frame):

# case if there is no cluster
if len(values) == 0:
values = 0.0
weights = 1.0
values = [0.0]
weights = [1.0]
else:
weights = values

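The three ClusterGrowth modes reduce a frame's list of cluster sizes in different ways. A small numpy illustration with hypothetical sizes, including the list fallback introduced by the fix above for frames without clusters:

import numpy as np

sizes = [120, 40, 7, 3]              # hypothetical cluster sizes in one frame
if len(sizes) == 0:                  # the fix: fall back to lists, not bare floats
    sizes, weights = [0.0], [1.0]
else:
    weights = sizes                  # "weighted mean" weights each cluster by its size

largest = np.max(sizes)                          # mode="largest"       -> 120
mean = np.mean(sizes)                            # mode="mean"          -> 42.5
wmean = np.average(sizes, weights=weights)       # mode="weighted mean" -> ~94.5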