# coding: utf-8
"""
A Flow is a container for Works, and a Work consists of Tasks.
Flows are the final objects that can be dumped directly to a pickle file on disk.
Flows are executed using abirun (abipy).
"""
from __future__ import annotations
import os
import sys
import time
import collections
import warnings
import shutil
import tempfile
import json
import numpy as np
import pandas as pd
from io import StringIO, BytesIO
from pprint import pprint
from typing import Any, Union, Iterator, Generator
from tabulate import tabulate
from pydispatch import dispatcher
from monty.collections import dict2namedtuple
from monty.string import list_strings, is_string, make_banner
from monty.operator import operator_from_str
from monty.io import FileLock
from monty.pprint import draw_tree
from monty.termcolor import cprint, colored, cprint_map, get_terminal_size
from monty.inspect import find_top_pyfile
from monty.json import MSONable
from pymatgen.core.units import Memory, UnitError
from abipy.tools.iotools import AtomicFile
from abipy.tools.serialization import pmg_pickle_load, pmg_pickle_dump, pmg_serialize
from abipy.tools.typing import Figure, TYPE_CHECKING
from abipy.core.globals import get_workdir
from abipy.tools.plotting import add_fig_kwargs, get_ax_fig_plt
from abipy.tools.printing import print_dataframe
from abipy.tools.serialization import mjson_loads
from abipy.flowtk import wrappers
from .nodes import Status, Node, NodeError, NodeResults, Dependency, GarbageCollector, check_spectator
from .tasks import Task, ScfTask, TaskManager, FixQueueCriticalError
from .utils import File, Directory, Editor
from .works import NodeContainer, Work, BandStructureWork, PhononWork, BecWork, G0W0Work, QptdmWork, DteWork
from .events import EventsParser
if TYPE_CHECKING: # needed to avoid circular imports
from abipy.abio.inputs import AbinitInput
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Flow",
"G0W0WithQptdmFlow",
"bandstructure_flow",
"g0w0_flow",
]
def as_set(obj: Any) -> set:
"""
Convert obj into a set, returns None if obj is None.
>>> assert as_set(None) is None and as_set(1) == set([1]) and as_set(range(1,3)) == set([1, 2])
"""
if obj is None or isinstance(obj, collections.abc.Set):
return obj
if not isinstance(obj, collections.abc.Iterable):
return set((obj,))
else:
return set(obj)
class FlowResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
#JSON_SCHEMA["properties"] = {
# "queries": {"type": "string", "required": True},
#}
@classmethod
def from_node(cls, flow) -> FlowResults:
"""Initialize an instance from a Work instance."""
new = super().from_node(flow)
# Will put all files found in outdir in GridFs
d = {os.path.basename(f): f for f in flow.outdir.list_filepaths()}
# Add the pickle file.
d["pickle"] = flow.pickle_file if flow.pickle_protocol != 0 else (flow.pickle_file, "t")
new.add_gridfs_files(**d)
return new
class FlowError(NodeError):
"""Base Exception for :class:`Node` methods"""
class Flow(Node, NodeContainer, MSONable):
"""
This object is a container of works. Its main task is managing the
possible inter-dependencies among the works and the creation of
dynamic workflows that are generated by callbacks registered by the user.
Attributes:
creation_date: String with the creation date.
pickle_protocol: Protocol for Pickle database (default: -1 i.e. latest protocol)
Important methods for constructing flows:
register_work: register (add) a work to the flow
register_task: register a work that contains only this task and return the work
allocate: propagate the workdir and manager of the flow to all the registered tasks
build: create the directories and files of the flow
build_and_pickle_dump: build the flow and save its status in the pickle file
"""
VERSION = "0.1"
PICKLE_FNAME = "__AbinitFlow__.pickle"
Error = FlowError
Results = FlowResults
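# Example usage (a minimal sketch, not part of the class API): build a flow with a single
# GS task and save it to disk. `scf_input` is assumed to be an existing |AbinitInput|
# and "flow_example" is a hypothetical working directory.
#
#   flow = Flow(workdir="flow_example")
#   flow.register_task(scf_input, task_class=ScfTask)
#   flow.build_and_pickle_dump()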
@classmethod
def as_flow(cls, obj: Any) -> Flow:
"""Convert obj into a Flow. Accepts filepath, dict, or Flow object."""
if isinstance(obj, cls): return obj
if is_string(obj):
return cls.pickle_load(obj)
elif isinstance(obj, collections.abc.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s into a Flow" % type(obj))
def __init__(self, workdir, manager=None, pickle_protocol=-1, remove=False):
"""
Args:
workdir: String specifying the directory where the works will be produced.
if workdir is None, the initialization of the working directory
is performed by flow.allocate(workdir).
manager: |TaskManager| object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
remove: attempt to remove working directory `workdir` if directory already exists.
"""
super().__init__()
if workdir is not None:
if remove and os.path.exists(workdir): shutil.rmtree(workdir)
self.set_workdir(workdir)
self.creation_date = time.asctime()
if manager is None: manager = TaskManager.from_user_config()
self.manager = manager.deepcopy()
# List of works.
self._works = []
self._waited = 0
# List of callbacks that must be executed when the dependencies reach S_OK
self._callbacks = []
# Install default list of handlers at the flow level.
# Users can override the default list by calling flow.install_event_handlers in the script.
# Example:
#
# # flow level (common case)
# flow.install_event_handlers(handlers=my_handlers)
#
# # task level (advanced mode)
# flow[0][0].install_event_handlers(handlers=my_handlers)
#
self.install_event_handlers()
self.pickle_protocol = int(pickle_protocol)
# ID used to access mongodb
self._mongo_id = None
# Save the location of the script used to generate the flow.
# This trick won't work if we are running with nosetests, py.test etc
pyfile = find_top_pyfile()
if "python" in pyfile or "ipython" in pyfile: pyfile = "<" + pyfile + ">"
self.set_pyfile(pyfile)
# TODO
# Signal slots: a dictionary with the list
# of callbacks indexed by node_id and SIGNAL_TYPE.
# When the node changes its status, it broadcast a signal.
# The flow is listening to all the nodes of the calculation
# [node_id][SIGNAL] = list_of_signal_handlers
#self._sig_slots = slots = {}
#for work in self:
# slots[work] = {s: [] for s in work.S_ALL}
#for task in self.iflat_tasks():
# slots[task] = {s: [] for s in work.S_ALL}
self.on_all_ok_num_calls = 0
# The status of the flow is computed dynamically from the status of its nodes
# when _status is None, unless one explicitly sets this value with set_status.
self._status = None
@pmg_serialize
def as_dict(self, **kwargs) -> dict:
"""
JSON serialization. Note that we only need to save
a string with the working directory since the object will be
reconstructed from the pickle file located in workdir.
"""
return {"workdir": self.workdir}
# This is needed for fireworks.
to_dict = as_dict
@classmethod
def from_dict(cls, d: dict, **kwargs) -> Flow:
"""Reconstruct the flow from the pickle file."""
return cls.pickle_load(d["workdir"], **kwargs)
@classmethod
def temporary_flow(cls, workdir=None, manager=None) -> Flow:
"""Return a Flow in a temporary directory. Useful for unit tests."""
workdir = get_workdir(workdir)
return cls(workdir=workdir, manager=manager)
def set_status(self, status, msg: str) -> None:
"""
Set the status of the flow.
Args:
status: Status object or string representation of the status
msg: string with human-readable message used in the case of errors.
"""
status = Status.as_status(status)
self._status = status
self.history.info(f"Status set to `{status}` due to: `{msg}`)")
def set_workdir(self, workdir: str, chroot=False) -> None:
"""
Set the working directory. Cannot be set more than once unless chroot is True
"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
# Directories with (input|output|temporary) data.
self.workdir = os.path.abspath(workdir)
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
self.wdir = Directory(self.workdir)
def reload(self) -> None:
"""
Reload the flow from the pickle file. Used when we are monitoring the flow
executed by the scheduler. In this case, indeed, the flow might have been changed
by the scheduler and we have to reload the new flow in memory.
"""
new = self.__class__.pickle_load(self.workdir)
# NB: rebinding `self` would be a no-op outside this method: copy the state instead.
self.__dict__.update(new.__dict__)
@classmethod
def pickle_load(cls, filepath: str, spectator_mode=True, remove_lock=False) -> Flow:
"""
Loads the object from a pickle file and performs initial setup.
Args:
filepath: Filename or directory name. If filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
spectator_mode: If True, the nodes of the flow are not connected by signals.
This option is usually used when we want to read a flow
in read-only mode and we want to avoid callbacks that can change the flow.
remove_lock:
True to remove the file lock if any (use it carefully).
"""
filepath = os.path.expanduser(filepath)
if os.path.isdir(filepath):
# Walk through each directory inside path and find the pickle database.
for dirpath, dirnames, filenames in os.walk(filepath):
fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
if fnames:
if len(fnames) == 1:
filepath = os.path.join(dirpath, fnames[0])
break # Exit os.walk
else:
err_msg = "Found multiple databases:\n %s" % str(fnames)
raise RuntimeError(err_msg)
else:
err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
raise ValueError(err_msg)
if remove_lock and os.path.exists(filepath + ".lock"):
try:
os.remove(filepath + ".lock")
except Exception:
pass
with FileLock(filepath):
with open(filepath, "rb") as fh:
flow = pmg_pickle_load(fh)
# Check if versions match.
if flow.VERSION != cls.VERSION:
msg = ("File flow version %s != latest version %s\n."
"Regenerate the flow to solve the problem " % (flow.VERSION, cls.VERSION))
warnings.warn(msg)
flow.set_spectator_mode(spectator_mode)
# Recompute the status of each task since tasks that
# have been submitted previously might be completed.
flow.check_status()
return flow
# Handy alias
from_file = pickle_load
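# Example (sketch, assumes "flow_example" is an existing flow directory):
#
#   flow = Flow.pickle_load("flow_example")
#   flow.show_status()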
@classmethod
def pickle_loads(cls, s: bytes) -> Flow:
"""Reconstruct the flow from the (byte) string produced by pickle_dumps."""
# Pickled data are bytes, hence BytesIO instead of StringIO.
strio = BytesIO()
strio.write(s)
strio.seek(0)
flow = pmg_pickle_load(strio)
return flow
def change_manager(self, new_manager: Any) -> TaskManager:
"""Change the manager at runtime."""
for work in self:
work.set_manager(new_manager)
return new_manager
def get_panel(self, **kwargs):
"""Build panel with widgets to interact with the |Flow| either in a notebook or in panel app."""
from abipy.panels.flows import FlowPanel
return FlowPanel(self).get_panel(**kwargs)
def __len__(self) -> int:
return len(self.works)
def __iter__(self) -> Iterator[Work]:
return self.works.__iter__()
def __getitem__(self, item) -> Union[Work, list[Work]]:
return self.works[item]
def set_pyfile(self, pyfile: str) -> None:
"""
Set the path of the python script used to generate the flow.
Example:
flow.set_pyfile(__file__)
"""
self._pyfile = os.path.abspath(pyfile)
@property
def pyfile(self) -> Union[str, None]:
"""
Absolute path of the python script used to generate the flow.
None if unset. Set by `set_pyfile`
"""
try:
return self._pyfile
except AttributeError:
return None
@property
def pid_file(self) -> str:
"""The path of the pid file created by PyFlowScheduler."""
return os.path.join(self.workdir, "_PyFlowScheduler.pid")
@property
def has_scheduler(self) -> bool:
"""True if there's a scheduler running the flow."""
return os.path.exists(self.pid_file)
def check_pid_file(self) -> int:
"""
This function checks if we are already running the |Flow| with a :class:`PyFlowScheduler`.
Raises: Flow.Error if the pid file of the scheduler exists.
"""
if not os.path.exists(self.pid_file):
return 0
self.show_status()
raise self.Error("""\n\
pid_file
%s
already exists. There are two possibilities:
1) There's another instance of PyFlowScheduler running
2) The previous scheduler didn't exit in a clean way
To solve case 1:
Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
Then you can restart the new scheduler.
To solve case 2:
Remove the pid_file and restart the scheduler.
Exiting""" % self.pid_file)
@property
def pickle_file(self) -> str:
"""The path of the pickle file."""
return os.path.join(self.workdir, self.PICKLE_FNAME)
@property
def mongo_id(self):
return self._mongo_id
@mongo_id.setter
def mongo_id(self, value):
if self.mongo_id is not None:
raise RuntimeError("Cannot change mongo_id %s" % self.mongo_id)
self._mongo_id = value
def validate_json_schema(self):
"""Validate the JSON schema. Return list of errors."""
errors = []
for work in self:
for task in work:
if not task.get_results().validate_json_schema():
errors.append(task)
if not work.get_results().validate_json_schema():
errors.append(work)
if not self.get_results().validate_json_schema():
errors.append(self)
return errors
def get_mongo_info(self):
"""
Return a JSON dictionary with information on the flow.
Mainly used for constructing the info section in `FlowEntry`.
The default implementation is empty. Subclasses must implement it
"""
return {}
def mongo_assimilate(self):
"""
This function is called by client code when the flow is completed
Return a JSON dictionary with the most important results produced
by the flow. The default implementation is empty. Subclasses must implement it
"""
return {}
@property
def works(self) -> list[Work]:
"""List of |Work| objects contained in self.."""
return self._works
@property
def all_ok(self) -> bool:
"""True if all the works in the flow have reached `S_OK`."""
all_ok = all(work.all_ok for work in self)
if all_ok:
all_ok = self.on_all_ok()
return all_ok
@property
def num_tasks(self) -> int:
"""Total number of tasks"""
return len(list(self.iflat_tasks()))
@property
def errored_tasks(self) -> set[Task]:
"""Set errored tasks."""
tasks = []
for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]:
tasks.extend(list(self.iflat_tasks(status=status)))
return set(tasks)
@property
def num_errored_tasks(self) -> int:
"""The number of tasks whose status is `S_ERROR`."""
return len(self.errored_tasks)
@property
def unconverged_tasks(self) -> list[Task]:
"""List of unconverged tasks."""
return list(self.iflat_tasks(status=self.S_UNCONVERGED))
@property
def num_unconverged_tasks(self) -> int:
"""The number of tasks whose status is `S_UNCONVERGED`."""
return len(self.unconverged_tasks)
@property
def status_counter(self):
"""
Returns a :class:`Counter` object that counts the number of tasks with
given status (use the string representation of the status as key).
"""
# Count the number of tasks with given status in each work.
counter = self[0].status_counter
for work in self[1:]:
counter += work.status_counter
return counter
@property
def ncores_reserved(self) -> int:
"""
Returns the number of cores reserved at this moment.
A core is reserved if the task is not running but
we have submitted the task to the queue manager.
"""
return sum(work.ncores_reserved for work in self)
@property
def ncores_allocated(self) -> int:
"""
Returns the number of cores allocated at this moment.
A core is allocated if it's running a task or if we have
submitted a task to the queue manager but the job is still in pending state.
"""
return sum(work.ncores_allocated for work in self)
@property
def ncores_used(self) -> int:
"""
Returns the number of cores used at this moment.
A core is used if there's a job that is running on it.
"""
return sum(work.ncores_used for work in self)
@property
def has_chrooted(self) -> str:
"""
Returns a string that evaluates to True if we have changed
the workdir for visualization purposes, e.g. we are using sshfs
to mount the remote directory where the `Flow` is located.
The string gives the previous workdir of the flow.
"""
try:
return self._chrooted_from
except AttributeError:
return ""
def chroot(self, new_workdir: str) -> None:
"""
Change the workdir of the |Flow|. Mainly used for
allowing the user to open the GUI on the local host
and access the flow from remote via sshfs.
.. note::
Calling this method will make the flow go in read-only mode.
"""
self._chrooted_from = self.workdir
self.set_workdir(new_workdir, chroot=True)
for i, work in enumerate(self):
new_wdir = os.path.join(self.workdir, "w" + str(i))
work.chroot(new_wdir)
def groupby_status(self) -> dict:
"""
Returns dictionary mapping the task status to
the list of named tuples (task, work_index, task_index).
"""
Entry = collections.namedtuple("Entry", "task wi ti")
d = collections.defaultdict(list)
for task, wi, ti in self.iflat_tasks_wti():
d[task.status].append(Entry(task, wi, ti))
# Sort keys according to their status.
return {k: d[k] for k in sorted(list(d.keys()))}
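# Example (sketch): print the tasks grouped by status.
#
#   for status, entries in flow.groupby_status().items():
#       print(status, [entry.task.pos_str for entry in entries])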
def groupby_work_class(self) -> dict:
"""
Returns a dictionary mapping the work class to the list of works in the flow
"""
class2works = {}
for work in self:
cls = work.__class__
if cls not in class2works: class2works[cls] = []
class2works[cls].append(work)
return class2works
def groupby_task_class(self) -> dict:
"""
Returns a dictionary mapping the task class to the list of tasks in the flow
"""
# Find all Task classes
class2tasks = {}
for task in self.iflat_tasks():
cls = task.__class__
if cls not in class2tasks: class2tasks[cls] = []
class2tasks[cls].append(task)
return class2tasks
def iflat_nodes(self, status=None, op="==", nids=None) -> Generator[Node]:
"""
Generator that produces a flat sequence of nodes (flow, works, tasks).
If status is not None, only the nodes with the specified status are selected.
nids is an optional list of node identifiers used to filter the nodes.
"""
nids = as_set(nids)
if status is None:
if not (nids and self.node_id not in nids):
yield self
for work in self:
if nids and work.node_id not in nids: continue
yield work
for task in work:
if nids and task.node_id not in nids: continue
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
if not (nids and self.node_id not in nids):
if op(self.status, status): yield self
for wi, work in enumerate(self):
if nids and work.node_id not in nids: continue
if op(work.status, status): yield work
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status): yield task
def node_from_nid(self, nid: int) -> Node:
"""Return the node in the `Flow` with the given `nid` identifier"""
for node in self.iflat_nodes():
if node.node_id == nid: return node
raise ValueError("Cannot find node with node id: %s" % nid)
def iflat_tasks_wti(self, status=None, op="==",
nids=None) -> Generator[tuple[Task,int,int]]:
"""
Generator to iterate over all the tasks of the `Flow`.
Yields:
(task, work_index, task_index)
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the |Task| class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=True)
def iflat_tasks(self, status=None, op="==", nids=None) -> Generator[Task]:
"""
Generator to iterate over all the tasks of the |Flow|.
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the |Task| class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=False)
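# Example (sketch): loop over the tasks that have not reached S_OK yet.
#
#   for task in flow.iflat_tasks(status="S_OK", op="!="):
#       print(task)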
def _iflat_tasks_wti(self, status=None, op="==", nids=None, with_wti=True):
"""
Generator that produces a flat sequence of tasks.
If status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
(task, work_index, task_index) if with_wti is True else task
"""
nids = as_set(nids)
if status is None:
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if with_wti:
yield task, wi, ti
else:
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status):
if with_wti:
yield task, wi, ti
else:
yield task
def check_dependencies(self) -> None:
"""
Test the dependencies of the nodes for possible deadlocks and raise RuntimeError
"""
deadlocks = []
for task in self.iflat_tasks():
for dep in task.deps:
if dep.node.depends_on(task):
deadlocks.append((task, dep.node))
if deadlocks:
lines = ["Detect wrong list of dependecies that will lead to a deadlock:"]
lines.extend(["%s <--> %s" % nodes for nodes in deadlocks])
raise RuntimeError("\n".join(lines))
def find_deadlocks(self):
"""
This function detects deadlocks.
Return:
named tuple with the tasks grouped in: deadlocked, runnables, running
"""
# Find jobs that can be submitted and the jobs that are already in the queue.
runnables = []
for work in self:
runnables.extend(work.fetch_alltasks_to_run())
runnables.extend(list(self.iflat_tasks(status=self.S_SUB)))
# Running jobs.
running = list(self.iflat_tasks(status=self.S_RUN))
# Find deadlocks.
err_tasks = self.errored_tasks
deadlocked = []
if err_tasks:
for task in self.iflat_tasks():
if any(task.depends_on(err_task) for err_task in err_tasks):
deadlocked.append(task)
return dict2namedtuple(deadlocked=deadlocked, runnables=runnables, running=running)
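# Example (sketch): count blocked, runnable and running tasks.
#
#   g = flow.find_deadlocks()
#   print(len(g.deadlocked), len(g.runnables), len(g.running))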
def check_status(self, **kwargs) -> None:
"""
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status
"""
for work in self:
work.check_status()
if kwargs.pop("show", False):
self.show_status(**kwargs)
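# Example (sketch): recompute the status of all tasks and print the status table.
#
#   flow.check_status(show=True)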
@property
def status(self) -> Status:
"""Gives the status of the Flow."""
if self._status is None:
# Take the minimum of the status of its tasks and its works
return min(work.get_all_status(only_min=True) for work in self)
return self._status
def fix_abicritical(self) -> int:
"""
This function tries to fix critical events originating from ABINIT.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_ABICRITICAL):
count += task.fix_abicritical()
return count
def fix_queue_critical(self) -> int:
"""
This function tries to fix critical events originating from the queue submission system.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_QCRITICAL):
self.history.info("Will try to fix task %s" % str(task))
try:
print(task.fix_queue_critical())
count += 1
except FixQueueCriticalError:
self.history.info("Not able to fix task %s" % task)
return count
def show_info(self, **kwargs) -> None:
"""
Print info on the flow i.e. total number of tasks, works, tasks grouped by class.
Example:
Task Class Number
------------ --------
ScfTask 1
NscfTask 1
ScrTask 2
SigmaTask 6
"""
stream = kwargs.pop("stream", sys.stdout)
lines = [str(self)]
app = lines.append
app("Number of works: %d, total number of tasks: %s" % (len(self), self.num_tasks))
app("Number of tasks with a given class:\n")
# Build Table
data = [[cls.__name__, len(tasks)]
for cls, tasks in self.groupby_task_class().items()]
app(str(tabulate(data, headers=["Task Class", "Number"])))
stream.write("\n".join(lines))
def compare_abivars(self, varnames, nids=None, wslice=None,
printout=False, with_colors=False) -> pd.DataFrame:
"""
Build and return a pandas DataFrame comparing the given Abinit variables across the selected tasks.
Args:
varnames: List of Abinit variable names to be selected and printed.
nids: List of node identifiers. By defaults all nodes are shown
wslice: Slice object used to select works.
printout: True to print dataframe.
with_colors: True if task status should be colored.
"""
varnames = [s.strip() for s in list_strings(varnames)]
index, rows = [], []
for task in self.select_tasks(nids=nids, wslice=wslice):
index.append(task.pos_str)
dstruct = task.input.structure.as_dict(fmt="abivars")
od = {}
for vname in varnames:
value = task.input.get(vname, None)
if value is None: # maybe in structure?
value = dstruct.get(vname, None)
od[vname] = value
od["task_class"] = task.__class__.__name__
od["status"] = task.status.colored if with_colors else str(task.status)
rows.append(od)
df = pd.DataFrame(rows, index=index)
if printout:
print_dataframe(df, title="Input variables:")
return df
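# Example (sketch): compare ecut and nband across all tasks of the flow.
#
#   df = flow.compare_abivars(["ecut", "nband"], printout=True)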
def get_dims_dataframe(self, nids=None, printout=False, with_colors=False) -> pd.DataFrame:
"""
Analyze output files produced by the tasks. Print pandas DataFrame with dimensions.
Args:
nids: List of node identifiers. By defaults all nodes are shown
printout: True to print dataframe.
with_colors: True if task status should be colored.
"""
abo_paths, index, status, abo_relpaths, task_classes, task_nids = [], [], [], [], [], []
for task in self.iflat_tasks(nids=nids):
if task.status not in (self.S_OK, self.S_RUN): continue
if not task.is_abinit_task: continue
abo_paths.append(task.output_file.path)
index.append(task.pos_str)
status.append(task.status.colored if with_colors else str(task.status))
abo_relpaths.append(os.path.relpath(task.output_file.relpath))
task_classes.append(task.__class__.__name__)
task_nids.append(task.node_id)
if not abo_paths: return
# Get dimensions from output files as well as walltime/cputime
from abipy.abio.outputs import AboRobot
robot = AboRobot.from_files(abo_paths)
df = robot.get_dims_dataframe(with_time=True, index=index)
# Add columns to the dataframe.
status = [str(s) for s in status]
df["task_class"] = task_classes
df["relpath"] = abo_relpaths
df["node_id"] = task_nids
df["status"] = status
if printout:
print_dataframe(df, title="Table with Abinit dimensions:\n")
return df
def write_fix_flow_script(self) -> None:
"""
Write a python script in the flow workdir that can be used by expert
users to change the input variables of the tasks according to their status.
"""
path_py = os.path.join(self.workdir, "_fix_flow.py")
with open(path_py, "wt") as fh:
fh.write(_FIX_FLOW_SCRIPT)
import stat
st = os.stat(path_py)
os.chmod(path_py, st.st_mode | stat.S_IEXEC)
def compare_structures(self, nids=None, with_spglib=False, what="io", verbose=0,
precision=3, printout=False, with_colors=False):
"""
Analyze the structures of the tasks (input and output structures if it's a relaxation task).
Print pandas DataFrame.
Args:
nids: List of node identifiers. By defaults all nodes are shown
with_spglib: If True, spglib is invoked to get the spacegroup symbol and number
what (str): "i" for input structures, "o" for output structures.
precision: Floating point output precision (number of significant digits).
This is only a suggestion
printout: True to print dataframe.
with_colors: True if task status should be colored.
"""
from abipy.core.structure import dataframes_from_structures
structures, index, status, max_forces, pressures, task_classes = [], [], [], [], [], []
def push_data(post, task, structure, cart_forces, pressure):
"""Helper function to fill lists"""
index.append(task.pos_str + post)
structures.append(structure)
status.append(task.status.colored if with_colors else str(task.status))
if cart_forces is not None:
fmods = np.sqrt([np.dot(f, f) for f in cart_forces])
max_forces.append(fmods.max())
else:
max_forces.append(None)
pressures.append(pressure)
task_classes.append(task.__class__.__name__)
for task in self.iflat_tasks(nids=nids):
if "i" in what:
push_data("_in", task, task.input.structure, cart_forces=None, pressure=None)
if "o" not in what:
continue
# Add final structure, pressure and max force if relaxation task or GS task
if task.status in (task.S_RUN, task.S_OK):
if hasattr(task, "open_hist"):
# Structural relaxations produce HIST.nc and we can get
# the final structure or the structure of the last relaxation step.
try:
with task.open_hist() as hist:
final_structure = hist.final_structure
stress_cart_tensors, pressures_hist = hist.reader.read_cart_stress_tensors()
forces = hist.reader.read_cart_forces(unit="eV ang^-1")[-1]
push_data("_out", task, final_structure, forces, pressures_hist[-1])
except Exception as exc:
cprint("Exception while opening HIST.nc file of task: %s\n%s" % (task, str(exc)), "red")
elif hasattr(task, "open_gsr") and task.status == task.S_OK and task.input.get("iscf", 7) >= 0:
with task.open_gsr() as gsr:
forces = gsr.reader.read_cart_forces(unit="eV ang^-1")
push_data("_out", task, gsr.structure, forces, gsr.pressure)
dfs = dataframes_from_structures(structures, index=index, with_spglib=with_spglib, cart_coords=False)
if any(f is not None for f in max_forces):
# Add pressure and forces to the dataframe
dfs.lattice["P [GPa]"] = pressures
dfs.lattice["Max|F| eV/ang"] = max_forces
# Add columns to the dataframe.
status = [str(s) for s in status]
dfs.lattice["task_class"] = task_classes
dfs.lattice["status"] = dfs.coords["status"] = status
if printout:
print_dataframe(dfs.lattice, title="Lattice parameters:", precision=precision)
if verbose:
print_dataframe(dfs.coords, title="Atomic positions (columns give the site index):")
else:
print("Use `--verbose` to print atoms.")
return dfs
def compare_ebands(self, nids=None, with_path=True, with_ibz=True, with_spglib=False, verbose=0,
precision=3, printout=False, with_colors=False) -> tuple:
"""
Analyze electron bands produced by the tasks.
Return pandas DataFrame and |ElectronBandsPlotter|.
Args:
nids: List of node identifiers. By default, all nodes are shown
with_path: Select files with ebands along k-path.
with_ibz: Select files with ebands in the IBZ.
with_spglib: If True, spglib is invoked to get the spacegroup symbol and number
precision: Floating point output precision (number of significant digits).
This is only a suggestion
printout: True to print dataframe.
with_colors: True if task status should be colored.
Return: (df, ebands_plotter)
"""
ebands_list, index, status, ncfiles, task_classes, task_nids = [], [], [], [], [], []
# Cannot use robots because ElectronBands can be found in different filetypes
for task in self.iflat_tasks(nids=nids, status=self.S_OK):
# Read ebands from GSR or SIGRES files.
for ext in ("gsr", "sigres"):
task_open_ncfile = getattr(task, "open_%s" % ext, None)
if task_open_ncfile is not None: break
else:
continue
try:
with task_open_ncfile() as ncfile:
if not with_path and ncfile.ebands.kpoints.is_path: continue
if not with_ibz and ncfile.ebands.kpoints.is_ibz: continue
ebands_list.append(ncfile.ebands)
index.append(task.pos_str)
status.append(task.status.colored if with_colors else str(task.status))
ncfiles.append(os.path.relpath(ncfile.filepath))
task_classes.append(task.__class__.__name__)
task_nids.append(task.node_id)
except Exception as exc:
cprint("Exception while opening nc file of task: %s\n%s" % (task, str(exc)), "red")
if not ebands_list: return (None, None)
from abipy.electrons.ebands import dataframe_from_ebands
df = dataframe_from_ebands(ebands_list, index=index, with_spglib=with_spglib)
ncfiles = [os.path.relpath(p, self.workdir) for p in ncfiles]
# Add columns to the dataframe.
status = [str(s) for s in status]
df["task_class"] = task_classes
df["ncfile"] = ncfiles
df["node_id"] = task_nids
df["status"] = status
if printout:
from abipy.tools.printing import print_dataframe
print_dataframe(df, title="KS electronic bands:", precision=precision)
from abipy.electrons.ebands import ElectronBandsPlotter
ebands_plotter = ElectronBandsPlotter(key_ebands=zip(ncfiles, ebands_list))
return df, ebands_plotter
def compare_hist(self, nids=None, with_spglib=False, verbose=0,
precision=3, printout=False, with_colors=False) -> tuple:
"""
Analyze HIST nc files produced by the tasks. Print pandas DataFrame with final results.
Return: (df, hist_plotter)
Args:
nids: List of node identifiers. By defaults all nodes are shown
with_spglib: If True, spglib is invoked to get the spacegroup symbol and number
precision: Floating point output precision (number of significant digits).
This is only a suggestion
printout: True to print dataframe.
with_colors: True if task status should be colored.
"""
hist_paths, index, status, ncfiles, task_classes, task_nids = [], [], [], [], [], []
for task in self.iflat_tasks(nids=nids):
if task.status not in (self.S_OK, self.S_RUN): continue
hist_path = task.outdir.has_abiext("HIST")
if not hist_path: continue
hist_paths.append(hist_path)
index.append(task.pos_str)
status.append(task.status.colored if with_colors else str(task.status))
ncfiles.append(os.path.relpath(hist_path))
task_classes.append(task.__class__.__name__)
task_nids.append(task.node_id)
if not hist_paths: return (None, None)
from abipy.dynamics.hist import HistRobot
robot = HistRobot.from_files(hist_paths, labels=hist_paths)
df = robot.get_dataframe(index=index, with_spglib=with_spglib)
ncfiles = [os.path.relpath(p, self.workdir) for p in ncfiles]
# Add columns to the dataframe.
status = [str(s) for s in status]
df["task_class"] = task_classes
df["ncfile"] = ncfiles
df["node_id"] = task_nids
df["status"] = status
if printout:
title = "Table with final structures, pressures in GPa and force stats in eV/Ang:\n"
from abipy.tools.printing import print_dataframe
print_dataframe(df, title=title, precision=precision)
return df, robot
def show_summary(self, **kwargs) -> None:
"""
Print a short summary with the status of the flow and a counter task_status --> number_of_tasks
Args:
stream: File-like object, Default: sys.stdout
Example:
Status Count
--------- -------
Completed 10
<Flow, node_id=27163, workdir=flow_gwconv_ecuteps>, num_tasks=10, all_ok=True
"""
stream = kwargs.pop("stream", sys.stdout)
stream.write("\n")
table = list(self.status_counter.items())
s = tabulate(table, headers=["Status", "Count"])
stream.write(s + "\n")
stream.write("\n")
stream.write("%s, num_tasks=%s, all_ok=%s\n" % (str(self), self.num_tasks, self.all_ok))
stream.write("\n")
def get_dataframe(self, as_dict=False) -> pd.DataFrame:
"""
Return a pandas DataFrame with task info, or a list of dictionaries if as_dict is True.
This function should be called after flow.check_status to update the status of the tasks.
"""
rows = []
for work in self:
dict_list = work.get_dataframe(as_dict=True)
rows.extend(dict_list)
if as_dict: return rows
return pd.DataFrame(rows)
def show_status(self, return_df=False, **kwargs):
"""
Report the status of the works and the status of the different tasks on the specified stream.
Args:
stream: File-like object, Default: sys.stdout
nids: List of node identifiers. By defaults all nodes are shown
wslice: Slice object used to select works.
verbose: Verbosity level (default 0). Use verbose > 0 to show also the works that are finalized.
Return:
pandas DataFrame with per-task info if return_df is True, else None.
"""
stream = kwargs.pop("stream", sys.stdout)
nids = as_set(kwargs.pop("nids", None))
wslice = kwargs.pop("wslice", None)
verbose = kwargs.pop("verbose", 0)
#start_time = time.now()
wlist = None
if wslice is not None:
# Convert range to list of work indices.
wlist = list(range(*wslice.indices(len(self))))
#has_colours = stream_has_colours(stream)
has_colours = True
red = "red" if has_colours else None
data_task = {}
for i, work in enumerate(self):
if nids and work.node_id not in nids: continue
print("", file=stream)
name = ""
if hasattr(work, "_name"): name = ", work_name=%s" % work._name
cprint_map("Work #%d: %s, Finalized=%s %s" % (i, work, work.finalized, name), cmap={"True": "green"}, file=stream)
if wlist is not None and i not in wlist: continue
if verbose == 0 and work.finalized:
print(" Finalized works are not shown. Use verbose > 0 to force output.", file=stream)
continue
headers = ["Task", "Status", "Queue", "MPI|Omp|Gb",
"Warn|Com", "Class", "Sub|Rest|Corr", "Time",
"Node_ID"]
table = []
tot_num_errors = 0
for task in work:
if nids and task.node_id not in nids: continue
task_name = os.path.basename(task.name)
# FIXME: This should not be done here.
# get_event_report should be called only in check_status
# Parse the events in the main output.
report = task.get_event_report()
# Get time info (run-time or time in queue or None)
stime = None
timedelta = task.datetimes.get_runtime()
if timedelta is not None:
stime = str(timedelta) + "R"
else:
timedelta = task.datetimes.get_time_inqueue()
if timedelta is not None:
stime = str(timedelta) + "Q"
if return_df:
# Add new entry to data_task
data_task[task] = dict(report=report, stime=stime, timedelta=timedelta)
events = "|".join(2*["NA"])
if report is not None:
events = '{:>4}|{:>3}'.format(*map(str, (report.num_warnings, report.num_comments)))
try:
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("GB"))))
except (KeyError, UnitError):
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))
task_info = list(map(str, [task.__class__.__name__,
(task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))
qinfo = "None"
if task.queue_id is not None:
qname = str(task.qname)
if not verbose:
qname = qname[:min(5, len(qname))]
qinfo = str(task.queue_id) + "@" + qname
if task.status.is_critical:
tot_num_errors += 1
task_name = colored(task_name, red)
if has_colours:
table.append([task_name, task.status.colored, qinfo, para_info, events] + task_info)
else:
table.append([task_name, str(task.status), qinfo, para_info, events] + task_info)
# Print table and write colorized line with the total number of errors.
print(tabulate(table, headers=headers, tablefmt="grid"), file=stream)
if tot_num_errors:
cprint("Total number of errors: %d" % tot_num_errors, "red", file=stream)
print("", file=stream)
if self.all_ok:
cprint("\nall_ok reached\n", "green", file=stream)
if return_df:
rows = []
for task, data in data_task.items():
d = task.get_dataframe(as_dict=True)
# Add extra info from report
report = data["report"]
d["num_warnings"] = report.num_warnings if report is not None else -1
d["num_comments"] = report.num_comments if report is not None else -1
rows.append(d)
return pd.DataFrame(rows)
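# Example (sketch): print the status table and collect per-task info in a DataFrame.
#
#   df = flow.show_status(return_df=True, verbose=1)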
def show_events(self, status=None, nids=None, stream=sys.stdout):
"""
Print the Abinit events (ERRORS, WARNINGS, COMMENTS) to the given stream.
Args:
status: if not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object, Default: sys.stdout
Return:
data_task dictionary with mapping: task --> report
"""
nrows, ncols = get_terminal_size()
data_task = {}
for task in self.iflat_tasks(status=status, nids=nids):
report = task.get_event_report()
data_task[task] = report
if report:
print(make_banner(str(task), width=ncols, mark="="), file=stream)
print(report, file=stream)
return data_task
def show_corrections(self, status=None, nids=None, stream=sys.stdout) -> int:
"""
Show the corrections applied to the flow at run-time.
Args:
status: if not None, only the tasks with this status are selected.
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object, Default: sys.stdout
Return: The number of corrections found.
"""
nrows, ncols = get_terminal_size()
count = 0
for task in self.iflat_tasks(status=status, nids=nids):
if task.num_corrections == 0: continue
count += 1
print(make_banner(str(task), width=ncols, mark="="), file=stream)
for corr in task.corrections:
pprint(corr, stream=stream)
if not count: print("No correction found.", file=stream)
return count
def show_history(self, status=None, nids=None, full_history=False,
metadata=False, stream=sys.stdout) -> None:
"""
Print the history of the flow to stream
Args:
status: if not None, only the tasks with this status are selected
full_history: Print full info set, including nodes with an empty history.
nids: optional list of node identifiers used to filter the tasks.
metadata: print history metadata (experimental)
stream: File-like object, Default: sys.stdout
"""
nrows, ncols = get_terminal_size()
works_done = []
# Loop on the tasks and show the history of the work if not in works_done
for task in self.iflat_tasks(status=status, nids=nids):
work = task.work
if work not in works_done:
works_done.append(work)
if work.history or full_history:
cprint(make_banner(str(work), width=ncols, mark="="), file=stream, **work.status.color_opts)
print(work.history.to_string(metadata=metadata), file=stream)
if task.history or full_history:
cprint(make_banner(str(task), width=ncols, mark="="), file=stream, **task.status.color_opts)
print(task.history.to_string(metadata=metadata), file=stream)
# Print the history of the flow.
if self.history or full_history:
cprint(make_banner(str(self), width=ncols, mark="="), file=stream, **self.status.color_opts)
print(self.history.to_string(metadata=metadata), file=stream)
def listext(self, ext, stream=sys.stdout) -> None:
"""
Print to the given `stream` a table with the list of the output files
with the given `ext` produced by the flow.
"""
nodes_files = []
for node in self.iflat_nodes():
filepath = node.outdir.has_abiext(ext)
if filepath:
nodes_files.append((node, File(filepath)))
if nodes_files:
print("Found %s files with extension `%s` produced by the flow" % (len(nodes_files), ext), file=stream)
table = [[f.relpath, "%.2f" % (f.get_stat().st_size / 1024**2),
node.node_id, node.__class__.__name__]
for node, f in nodes_files]
print(tabulate(table, headers=["File", "Size [Mb]", "Node_ID", "Node Class"]), file=stream)
else:
print("No output file with extension %s has been produced by the flow" % ext, file=stream)
def select_tasks(self, nids=None, wslice=None, task_class=None) -> list[Task]:
"""
Return a list with a subset of tasks.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
.. note::
nids and wslice are mutually exclusive.
If no argument is provided, the full list of tasks is returned.
"""
if nids is not None:
assert wslice is None
tasks = self.tasks_from_nids(nids)
elif wslice is not None:
tasks = []
for work in self[wslice]:
tasks.extend([t for t in work])
else:
# All tasks selected if no option is provided.
tasks = list(self.iflat_tasks())
# Filter by task class
if task_class is not None:
tasks = [t for t in tasks if t.isinstance(task_class)]
return tasks
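# Example (sketch, hypothetical node ids): select tasks by node id, work slice or class.
#
#   tasks = flow.select_tasks(nids=[12, 13])
#   scf_tasks = flow.select_tasks(wslice=slice(0, 2), task_class="ScfTask")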
def get_task_scfcycles(self, nids=None, wslice=None, task_class=None,
exclude_ok_tasks=False) -> list[tuple]:
"""
Return a list of (task, scfcycle) tuples for all the tasks in the flow with an SCF algorithm
e.g. electronic GS-SCF iteration, DFPT-SCF iterations etc.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
exclude_ok_tasks: True if only running tasks should be considered.
Returns:
List of (task, cycle) tuples where cycle is a `ScfCycle` subclass instance.
"""
select_status = [self.S_RUN] if exclude_ok_tasks else [self.S_RUN, self.S_OK]
tasks_cycles = []
for task in self.select_tasks(nids=nids, wslice=wslice):
# Filter
if task.status not in select_status or task.cycle_class is None:
continue
if task_class is not None and not task.isinstance(task_class):
continue
try:
cycle = task.cycle_class.from_file(task.output_file.path)
if cycle is not None:
tasks_cycles.append((task, cycle))
except Exception:
# This is intentionally ignored because from_file can fail for several reasons.
pass
return tasks_cycles
def show_tricky_tasks(self, verbose=0, stream=sys.stdout) -> None:
"""
Print list of tricky tasks i.e. tasks that have been restarted or
launched more than once or tasks with corrections.
Args:
verbose: Verbosity level. If > 0, task history and corrections (if any) are printed.
stream: File-like object. Default: sys.stdout
"""
nids, tasks = [], []
for task in self.iflat_tasks():
if task.num_launches > 1 or any(n > 0 for n in (task.num_restarts, task.num_corrections)):
nids.append(task.node_id)
tasks.append(task)
if not nids:
cprint("Everything's fine, no tricky tasks found", color="green", file=stream)
else:
self.show_status(nids=nids, stream=stream)
if not verbose:
print("Use --verbose to print task history.", file=stream)
return
for nid, task in zip(nids, tasks):
cprint(repr(task), **task.status.color_opts, stream=stream)
self.show_history(nids=[nid], full_history=False, metadata=False, stream=stream)
#if task.num_restarts:
# self.show_restarts(nids=[nid])
if task.num_corrections:
self.show_corrections(nids=[nid], stream=stream)
def inspect(self, nids=None, wslice=None, **kwargs):
"""
Inspect the tasks (SCF iterations, Structural relaxation ...) and
produces matplotlib plots.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
kwargs: keyword arguments passed to `task.inspect` method.
.. note::
nids and wslice are mutually exclusive.
If nids and wslice are both None, all tasks in self are inspected.
Returns:
List of `matplotlib` figures.
"""
figs = []
for task in self.select_tasks(nids=nids, wslice=wslice):
if hasattr(task, "inspect"):
fig = task.inspect(**kwargs)
if fig is None:
cprint("Cannot inspect Task %s" % task, color="blue")
else:
figs.append(fig)
else:
cprint("Task %s does not provide an inspect method" % task, color="blue")
return figs
def get_results(self, **kwargs):
results = self.Results.from_node(self)
return results
def look_before_you_leap(self) -> str:
"""
This method should be called before running the calculation to make
sure that the most important requirements are satisfied.
Return: string with inconsistencies/errors.
"""
errors = []
try:
self.check_dependencies()
except self.Error as exc:
errors.append(str(exc))
return "\n".join(errors)
def tasks_from_nids(self, nids) -> list[Task]:
"""
Return the list of tasks associated to the given list of node identifiers (nids).
.. note::
Invalid ids are ignored
"""
if not isinstance(nids, collections.abc.Iterable): nids = [nids]
n2task = {task.node_id: task for task in self.iflat_tasks()}
return [n2task[n] for n in nids if n in n2task]
def wti_from_nids(self, nids) -> list[tuple]:
"""Return the list of (w, t) indices from the list of node identifiers nids."""
return [task.pos for task in self.tasks_from_nids(nids)]
def open_files(self, what="o", status=None, op="==", nids=None, editor=None):
"""
Open the files of the flow inside an editor (command line interface).
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
status: if not None, only the tasks with this status are selected
op: status operator. Requires status. A task is selected
if task.status op status evaluates to true.
nids: optional list of node identifiers used to filter the tasks.
editor: Select the editor. None to use the default editor ($EDITOR shell env var)
"""
# Build list of files to analyze.
files = []
for task in self.iflat_tasks(status=status, op=op, nids=nids):
lst = task.select_files(what)
if lst:
files.extend(lst)
return Editor(editor=editor).edit_files(files)
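# Example (sketch): open the log files of the errored tasks in the default $EDITOR.
#
#   flow.open_files(what="l", status="S_ERROR")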
def parse_timing(self, nids=None):
"""
Parse the timer data in the main output file(s) of Abinit.
Requires timopt /= 0 in the input file, usually timopt = -1.
Args:
nids: optional list of node identifiers used to filter the tasks.
Return: :class:`AbinitTimerParser` instance, None if error.
"""
# Get the list of output files according to nids.
paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)]
# Parse data.
from abipy.flowtk.abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(paths)
if read_ok:
return parser
return None
def show_abierrors(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
app = lines.append
for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
app(header)
report = task.get_event_report()
if report is not None:
app("num_errors: %s, num_warnings: %s, num_comments: %s" % (
report.num_errors, report.num_warnings, report.num_comments))
app("*** ERRORS ***")
app("\n".join(str(e) for e in report.errors))
app("*** BUGS ***")
app("\n".join(str(b) for b in report.bugs))
else:
app("get_envent_report returned None!")
app("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def show_qouts(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
lines.append(header)
if task.qout_file.exists:
with open(task.qout_file.path, "rt") as fh:
lines += fh.readlines()
else:
lines.append("File does not exist!")
lines.append("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def debug(self, status=None, nids=None, stream=sys.stdout):
"""
This method is usually used when the flow didn't complete successfully.
It analyzes the files produced by the tasks to facilitate debugging.
Info is printed to the given stream.
Args:
status: If not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
nrows, ncols = get_terminal_size()
# Test for scheduler exceptions first.
sched_excfile = os.path.join(self.workdir, "_exceptions")
if os.path.exists(sched_excfile):
with open(sched_excfile, "r") as fh:
cprint("Found exceptions raised by the scheduler", "red", file=stream)
cprint(fh.read(), color="red", file=stream)
return
if status is not None:
tasks = list(self.iflat_tasks(status=status, nids=nids))
else:
errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids))
qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids))
abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids))
tasks = errors + qcriticals + abicriticals
# For each task selected:
# 1) Check the error files of the task. If not empty, print the content to stdout and we are done.
# 2) If error files are empty, look at the master log file for possible errors
# 3) If this check also fails, scan all the process log files.
# TODO: This check is not needed if we introduce a new __abinit_error__ file
# that is created by the first MPI process that invokes MPI abort!
#
ntasks = 0
for task in tasks:
print(make_banner(str(task), width=ncols, mark="="), file=stream)
ntasks += 1
# Start with error files.
for efname in ["qerr_file", "stderr_file",]:
err_file = getattr(task, efname)
if err_file.exists:
s = err_file.read()
if not s: continue
print(make_banner(str(err_file), width=ncols, mark="="), file=stream)
cprint(s, color="red", file=stream)
#count += 1
# Check main log file.
try:
report = task.get_event_report()
if report and report.num_errors:
print(make_banner(os.path.basename(report.filename), width=ncols, mark="="), file=stream)
s = "\n".join(str(e) for e in report.errors)
else:
s = None
except Exception as exc:
s = str(exc)
count = 0 # count > 0 means we found some useful info that could explain the failures.
if s is not None:
cprint(s, color="red", file=stream)
count += 1
if not count:
# Inspect all log files produced by the other nodes.
log_files = task.tmpdir.list_filepaths(wildcard="*LOG_*")
if not log_files:
cprint("No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs",
color="magenta", file=stream)
for log_file in log_files:
try:
report = EventsParser().parse(log_file)
if report.errors:
print(report, file=stream)
count += 1
break
except Exception as exc:
cprint(str(exc), color="red", file=stream)
count += 1
break
if not count:
cprint("""
Houston, we could not find any error message that can explain the problem.
Use the `abirun.py FLOWDIR history` command to print the log files of the different nodes.
""",
color="magenta", file=stream)
print("Number of tasks analyzed: %d" % ntasks, file=stream)
def cancel(self, nids=None) -> int:
"""
Cancel all the tasks that are in the queue.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
Number of jobs cancelled, negative value if error
"""
if self.has_chrooted:
# TODO: Use paramiko to kill the job?
warnings.warn("Cannot cancel the flow via sshfs!")
return -1
# If we are running with the scheduler, we must send a SIGKILL signal.
if os.path.exists(self.pid_file):
cprint("Found scheduler attached to this flow.", "yellow")
cprint("Sending SIGKILL to the scheduler before cancelling the tasks!", "yellow")
with open(self.pid_file, "rt") as fh:
pid = int(fh.readline())
retcode = os.system("kill -9 %d" % pid)
self.history.info("Sent SIGKILL to the scheduler, retcode: %s" % retcode)
try:
os.remove(self.pid_file)
except IOError:
pass
num_cancelled = 0
for task in self.iflat_tasks(nids=nids):
num_cancelled += task.cancel()
return num_cancelled
def get_njobs_in_queue(self, username=None) -> int:
"""
Returns the number of jobs in the queue, None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
return self.manager.qadapter.get_njobs_in_queue(username=username)
def rmtree(self, ignore_errors=False, onerror=None) -> None:
"""Remove workdir (same API as shutil.rmtree)."""
if not os.path.exists(self.workdir): return
shutil.rmtree(self.workdir, ignore_errors=ignore_errors, onerror=onerror)
def rm_and_build(self) -> None:
"""Remove the workdir and rebuild the flow."""
self.rmtree()
self.build()
def build(self, *args, **kwargs) -> None:
"""Make directories and files of the `Flow`."""
# Allocate here if not done yet!
if not self.allocated: self.allocate()
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Check the nodeid file in workdir
nodeid_path = os.path.join(self.workdir, ".nodeid")
if os.path.exists(nodeid_path):
with open(nodeid_path, "rt") as fh:
node_id = int(fh.read())
if self.node_id != node_id:
msg = ("\nFound node_id %s in file:\n\n %s\n\nwhile the node_id of the present flow is %d.\n"
"This means that you are trying to build a new flow in a directory already used by another flow.\n"
"Possible solutions:\n"
" 1) Change the workdir of the new flow.\n"
" 2) remove the old directory either with `rm -r` or by calling the method flow.rmtree()\n"
% (node_id, nodeid_path, self.node_id))
#print(msg)
raise RuntimeError(msg)
else:
with open(nodeid_path, "wt") as fh:
fh.write(str(self.node_id))
if self.pyfile and os.path.isfile(self.pyfile):
shutil.copy(self.pyfile, self.workdir)
# Add README.md file if set
readme_md = getattr(self, "readme_md", None)
if readme_md is not None:
with open(os.path.join(self.workdir, "README.md"), "wt") as fh:
fh.write(readme_md)
# Add abipy_meta.json file if set
data = getattr(self, "abipy_meta_json", None)
if data is not None:
self.write_json_in_workdir("abipy_meta.json", data)
# Write fix_flow.py script for advanced users.
self.write_fix_flow_script()
for work in self:
work.build(*args, **kwargs)
def build_and_pickle_dump(self, abivalidate=False):
"""
Build dirs and files of the `Flow` and save the object in pickle format.
Returns 0 if success
Args:
abivalidate: If True, all the input files are validated by calling
the abinit parser. If the validation fails, ValueError is raised.
"""
self.build()
if not abivalidate: return self.pickle_dump()
# Validation with Abinit.
isok, errors = self.abivalidate_inputs()
if isok: return self.pickle_dump()
errlines = []
for i, e in enumerate(errors):
errlines.append("[%d] %s" % (i, e))
raise ValueError("\n".join(errlines))
@check_spectator
def pickle_dump(self):
"""
Save the status of the object in pickle format.
Returns 0 if success
"""
if self.has_chrooted:
warnings.warn("Cannot pickle_dump since we have chrooted from %s" % self.has_chrooted)
return -1
#if self.in_spectator_mode:
# warnings.warn("Cannot pickle_dump since flow is in_spectator_mode")
# return -2
protocol = self.pickle_protocol
# Atomic transaction with FileLock.
with FileLock(self.pickle_file):
with AtomicFile(self.pickle_file, mode="wb") as fh:
pmg_pickle_dump(self, fh, protocol=protocol)
return 0
def pickle_dumps(self, protocol=None):
"""
Return a string with the pickle representation.
`protocol` selects the pickle protocol. self.pickle_protocol is used if `protocol` is None
"""
# NB: pickle emits bytes, so we use an in-memory bytes buffer
# (a StringIO buffer would raise TypeError when pickle writes to it).
from io import BytesIO
strio = BytesIO()
pmg_pickle_dump(self, strio,
                protocol=self.pickle_protocol if protocol is None else protocol)
return strio.getvalue()
def register_task(self, input: AbinitInput,
deps=None, manager=None, task_class=None, append=False) -> Work:
"""
Utility function that generates a `Work` made of a single task
Args:
input: |AbinitInput|
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The |TaskManager| responsible for the submission of the task.
If manager is None, we use the |TaskManager| specified during the creation of the work.
task_class: Task subclass to instantiate. Default: |AbinitTask|
append: If true, the task is added to the last work (a new Work is created if flow is empty)
Returns:
The generated |Work| for the task, work[0] is the actual task.
"""
# append=True is much easier to use. In principle, it should be the default behaviour,
# but this would break the previous API so ...
if not append:
work = Work(manager=manager)
else:
if not self.works:
work = Work(manager=manager)
append = False
else:
work = self.works[-1]
task = work.register(input, deps=deps, task_class=task_class)
if not append: self.register_work(work)
return work
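# Example (illustrative sketch, not part of the API): two single-task works created with
# register_task. `scf_input` and `nscf_input` are assumed to be user-provided AbinitInput objects.
#
#   flow = Flow(workdir="flow_example")
#   scf_work = flow.register_task(scf_input)                     # new Work with a single task
#   nscf_work = flow.register_task(nscf_input,
#                                  deps={scf_work[0]: "DEN"})    # depends on the SCF density
#   nscf_task = nscf_work[0]                                     # work[0] is the actual task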
def new_work(self, deps=None, manager=None, workdir=None) -> Work:
"""
Helper function to add a new empty |Work| and add it to the internal list.
Client code is responsible for filling the new work.
Args:
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The |TaskManager| responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the work.
workdir: The name of the directory used for the |Work|.
Returns:
The registered |Work|.
"""
work = Work()
return self.register_work(work, deps=deps, manager=manager, workdir=workdir)
def register_work(self, work: Work, deps=None, manager=None, workdir=None) -> Work:
"""
Register a new |Work| and add it to the internal list, taking into account possible dependencies.
Args:
work: |Work| object.
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The |TaskManager| responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the work.
workdir: The name of the directory used for the |Work|.
Returns: The registered |Work|.
"""
if getattr(self, "workdir", None) is not None:
# The flow has a directory, build the name of the directory of the work.
if workdir is None:
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
else:
work_workdir = os.path.join(self.workdir, os.path.basename(workdir))
work.set_workdir(work_workdir)
if manager is not None:
work.set_manager(manager)
if any(work.node_id == w.node_id for w in self):
raise ValueError(f"{work=} is already registered in the flow.")
self.works.append(work)
if deps:
deps = [Dependency(node, exts) for node, exts in deps.items()]
work.add_deps(deps)
return work
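# Example (illustrative sketch): register a Work whose tasks need the WFK file produced
# by the first task of a previously registered work. `other_work` is hypothetical.
#
#   work = Work()
#   flow.register_work(work, deps={other_work[0]: "WFK"})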
def register_work_from_cbk(self, cbk_name, cbk_data, deps, work_class, manager=None) -> Work:
"""
Registers a callback function that will generate the :class:`Task` of the :class:`Work`.
Args:
cbk_name: Name of the callback function (must be a bound method of self)
cbk_data: Additional data passed to the callback function.
deps: List of :class:`Dependency` objects specifying the dependency of the work.
work_class: |Work| class to instantiate.
manager: The |TaskManager| responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the |Flow|.
Returns:
The |Work| that will be finalized by the callback.
"""
# TODO: Can be removed!
# TODO: pass a Work factory instead of a class
# Directory of the Work.
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
# Create an empty work and register the callback
work = work_class(workdir=work_workdir, manager=manager)
self._works.append(work)
deps = [Dependency(node, exts) for node, exts in deps.items()]
if not deps:
raise ValueError("A callback must have deps!")
work.add_deps(deps)
# Wrap the callable in a Callback object and save
# useful info such as the index of the work and the callback data.
cbk = FlowCallback(cbk_name, self, deps=deps, cbk_data=cbk_data)
self._callbacks.append(cbk)
return work
@property
def allocated(self) -> int:
"""Numer of allocations. Set by `allocate`."""
try:
return self._allocated
except AttributeError:
return 0
def allocate(self, workdir=None, use_smartio: bool=False, build: bool=False) -> Flow:
"""
Allocate the `Flow` i.e. assign the `workdir` and (optionally)
the |TaskManager| to the different tasks in the Flow.
Args:
workdir: Working directory of the flow. Must be specified here
if we haven't initialized the workdir in the __init__.
build: True to build the flow and save status to pickle file.
"""
if workdir is not None:
# We set the workdir of the flow here
self.set_workdir(workdir)
for i, work in enumerate(self):
work.set_workdir(os.path.join(self.workdir, "w" + str(i)))
if not hasattr(self, "workdir"):
raise RuntimeError("You must call flow.allocate(workdir) if the workdir is not passed to __init__")
for work in self:
# Each work has a reference to its flow.
work.allocate(manager=self.manager)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
self.check_dependencies()
if not hasattr(self, "_allocated"): self._allocated = 0
self._allocated += 1
if use_smartio:
self.use_smartio()
if build:
self.build_and_pickle_dump()
return self
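# Example (illustrative sketch, assuming the Flow was created without a workdir):
#
#   flow = Flow(workdir=None)
#   flow.register_work(Work())                      # ... register works/tasks here ...
#   flow.allocate(workdir="run_dir", build=True)    # assign workdir, build dirs, dump pickle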
def use_smartio(self):
"""
This function should be called when the entire `Flow` has been built.
It tries to reduce the pressure on the hard disk by using Abinit smart-io
capabilities for those files that are not needed by other nodes.
Smart-io means that big files (e.g. WFK) are written only if the calculation
is unconverged so that we can restart from it. No output is produced if
convergence is achieved.
Return: self
"""
if not self.allocated:
#raise RuntimeError("You must call flow.allocate() before invoking flow.use_smartio()")
self.allocate()
for task in self.iflat_tasks():
children = task.get_children()
if not children:
# Change the input so that output files are produced
# only if the calculation is not converged.
task.history.info("Will disable IO for task")
task.set_vars(prtwf=-1, prtden=0) # TODO: prt1wf=-1,
else:
must_produce_abiexts = []
for child in children:
# Get the list of dependencies. Find that task
for d in child.deps:
must_produce_abiexts.extend(d.exts)
must_produce_abiexts = set(must_produce_abiexts)
#print("must_produce_abiexts", must_produce_abiexts)
# Variables supporting smart-io.
smart_prtvars = {
"prtwf": "WFK",
}
# Set the variable to -1 to disable the output
for varname, abiext in smart_prtvars.items():
if abiext not in must_produce_abiexts:
print("%s: setting %s to -1" % (task, varname))
task.set_vars({varname: -1})
return self
def show_dependencies(self, stream=sys.stdout) -> None:
"""
Writes to the given stream the ASCII representation of the dependency tree.
"""
def child_iter(node):
return [d.node for d in node.deps]
def text_str(node):
return colored(str(node), color=node.status.color_opts["color"])
for task in self.iflat_tasks():
print(draw_tree(task, child_iter, text_str), file=stream)
def on_all_ok(self):
"""
This method is called when all the works in the flow have reached S_OK.
This method shall return True if the calculation is completed or
False if the execution should continue due to side-effects such as adding a new work to the flow.
This method allows subclasses to implement customized logic such as extending the flow by adding new works.
The flow has an internal counter: on_all_ok_num_calls
that shall be incremented by client code when subclassing this method.
This counter can be used to decide if further actions are needed or not.
An example of flow that adds a new work (only once) when all_ok is reached for the first time:
def on_all_ok(self):
if self.on_all_ok_num_calls > 0: return True
self.on_all_ok_num_calls += 1
#####################################
# implement_logic_to_create_new_work
#####################################
self.register_work(work)
self.allocate()
self.build_and_pickle_dump()
# The scheduler will keep on running the flow.
return False
"""
return True
def on_dep_ok(self, signal, sender):
# TODO
# Replace this callback with dynamic dispatch
# on_all_S_OK for work
# on_S_OK for task
self.history.info("on_dep_ok with sender %s, signal %s" % (str(sender), signal))
for i, cbk in enumerate(self._callbacks):
if not cbk.handle_sender(sender):
self.history.info("%s does not handle sender %s" % (cbk, sender))
continue
if not cbk.can_execute():
self.history.info("Cannot execute %s" % cbk)
continue
# Execute the callback and disable it
self.history.info("flow in on_dep_ok: about to execute callback %s" % str(cbk))
cbk()
cbk.disable()
# Update the database.
self.pickle_dump()
@check_spectator
def finalize(self):
"""
This method is called when the flow is completed. Return 0 if success
"""
self.history.info("Calling flow.finalize.")
if self.finalized:
self.history.warning("Calling finalize on an already finalized flow. Returning 1")
return 1
self.finalized = True
# Here we remove the big output files if we have the garbage collector
# and the policy is set to "flow."
if self.gc is not None and self.gc.policy == "flow":
self.history.info("gc.policy set to flow. Will clean task output files.")
for task in self.iflat_tasks():
task.clean_output_files()
return 0
def set_garbage_collector(self, exts=None, policy="task") -> None:
"""
Enable the garbage collector that will remove the big output files that are not needed.
Args:
exts: string or list with the Abinit file extensions to be removed. A default is
provided if exts is None
policy: Either `flow` or `task`. If policy is set to 'task', we remove the output
files as soon as the task reaches S_OK. If 'flow', the files are removed
only when the flow is finalized. This option should be used when we are dealing
with a dynamic flow with callbacks generating other tasks since a |Task|
might not be aware of its children when it reaches S_OK.
"""
assert policy in ("task", "flow")
exts = list_strings(exts) if exts is not None else ("WFK", "SUS", "SCR", "BSR", "BSC")
gc = GarbageCollector(exts=set(exts), policy=policy)
self.set_gc(gc)
for work in self:
#work.set_gc(gc) # TODO Add support for Works and flow policy
for task in work:
task.set_gc(gc)
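# Example (illustrative sketch): remove WFK and SCR files as soon as each task reaches S_OK.
#
#   flow.set_garbage_collector(exts=["WFK", "SCR"], policy="task")
#
# For dynamic flows with callbacks, policy="flow" is safer since the files are removed
# only when the whole flow is finalized.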
def connect_signals(self) -> None:
"""
Connect the signals within the `Flow`.
The `Flow` is responsible for catching the important signals raised from its works.
"""
# Connect the signals inside each Work.
for work in self:
work.connect_signals()
# Observe the nodes that must reach S_OK in order to call the callbacks.
for cbk in self._callbacks:
#cbk.enable()
for dep in cbk.deps:
self.history.info("Connecting %s \nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
dispatcher.connect(self.on_dep_ok, signal=dep.node.S_OK, sender=dep.node, weak=False)
# Associate to each signal the callback _on_signal
# (bound method of the node that will be called by `Flow`
# Each node will set its attribute _done_signal to True to tell
# the flow that this callback should be disabled.
# Register the callbacks for the Work.
#for work in self:
# slot = self._sig_slots[work]
# for signal in S_ALL:
# done_signal = getattr(work, "_done_ " + signal, False)
# if not done_sig:
# cbk_name = "_on_" + str(signal)
# cbk = getattr(work, cbk_name, None)
# if cbk is None: continue
# slot[work][signal].append(cbk)
# print("connecting %s\nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
# dispatcher.connect(self.on_dep_ok, signal=signal, sender=dep.node, weak=False)
# Register the callbacks for the Tasks.
#self.show_receivers()
def disconnect_signals(self) -> None:
"""Disable the signals within the `Flow`."""
for work in self:
work.disconnect_signals()
# Disable callbacks.
for cbk in self._callbacks:
cbk.disable()
def show_receivers(self, sender=None, signal=None) -> None:
sender = sender if sender is not None else dispatcher.Any
signal = signal if signal is not None else dispatcher.Any
print("*** live receivers ***")
for rec in dispatcher.liveReceivers(dispatcher.getReceivers(sender, signal)):
print("receiver -->", rec)
print("*** end live receivers ***")
def set_spectator_mode(self, mode=True) -> None:
"""
When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks.
A spectator can still operate on the flow but the new status of the flow won't be saved in
the pickle file. Usually the flow is in spectator mode when we are already running it via
the scheduler or other means and we should not interfere with its evolution.
This is the reason why signals and callbacks must be disabled.
Unfortunately preventing client-code from calling methods with side-effects when
the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the
queue and the flow used by the scheduler won't see this change!).
"""
# Set the flags of all the nodes in the flow.
mode = bool(mode)
self.in_spectator_mode = mode
for node in self.iflat_nodes():
node.in_spectator_mode = mode
# connect/disconnect signals depending on mode.
if not mode:
self.connect_signals()
else:
self.disconnect_signals()
#def get_results(self, **kwargs)
def rapidfire(self, check_status=True, max_nlaunch=-1, max_loops=1, sleep_time=5, **kwargs):
"""
Use :class:`PyLauncher` to submit tasks in rapidfire mode.
kwargs contains the options passed to the launcher.
Args:
check_status: True to call flow.check_status before submitting the tasks (default: True).
max_nlaunch: Maximum number of launches. default: no limit.
max_loops: Maximum number of loops
sleep_time: seconds to sleep between rapidfire loop iterations
Return: Number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).rapidfire(max_nlaunch=max_nlaunch, max_loops=max_loops, sleep_time=sleep_time)
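# Example (illustrative sketch): submit up to 10 tasks in rapidfire mode.
#
#   flow.build_and_pickle_dump()
#   nlaunch = flow.rapidfire(max_nlaunch=10)
#   print("Submitted %d tasks" % nlaunch)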
def single_shot(self, check_status=True, **kwargs):
"""
Use :class:`PyLauncher` to submit one task.
kwargs contains the options passed to the launcher.
Return: Number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).single_shot()
def make_scheduler(self, **kwargs):
"""
Build and return a :class:`PyFlowScheduler` to run the flow.
Args:
kwargs: if empty we use the user configuration file.
if `filepath` in kwargs we init the scheduler from filepath.
else pass kwargs to :class:`PyFlowScheduler` __init__ method.
"""
from .launcher import PyFlowScheduler
if not kwargs:
# User config if kwargs is empty
sched = PyFlowScheduler.from_user_config()
else:
# Use from_file if filepath is present, else call __init__
filepath = kwargs.pop("filepath", None)
if filepath is not None:
assert not kwargs
sched = PyFlowScheduler.from_file(filepath)
else:
sched = PyFlowScheduler(**kwargs)
sched.add_flow(self)
return sched
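# Example (illustrative sketch): run the flow with the scheduler defined in the
# user configuration file. The start() call is an assumption about the PyFlowScheduler API.
#
#   sched = flow.make_scheduler()
#   sched.start()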
def make_light_tarfile(self, name=None):
"""Lightweight tarball file. Mainly used for debugging. Return the name of the tarball file."""
name = os.path.basename(self.workdir) + "-light.tar.gz" if name is None else name
return self.make_tarfile(name=name, exclude_dirs=["outdata", "indata", "tmpdata"])
def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs):
"""
Create a tarball file.
Args:
name: Name of the tarball file. Set to os.path.basename(`flow.workdir`) + ".tar.gz" if name is None.
max_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize
Can be specified in bytes e.g. `max_filesize=1024` or with a string with unit e.g. `max_filesize="1 MB"`.
No check is done if max_filesize is None.
exclude_exts: List of file extensions to be excluded from the tar file.
exclude_dirs: List of directory basenames to be excluded.
verbose (int): Verbosity level.
kwargs: keyword arguments passed to the :class:`TarFile` constructor.
Returns: The name of the tarfile.
"""
def any2bytes(s):
"""Convert string or number to memory in bytes."""
if is_string(s):
try:
# latest pymatgen version (as of july 2024)
mem = int(Memory.from_str(s.upper()).to("B"))
except (KeyError, UnitError): # For backward compatibility with older pymatgen versions
try:
mem = int(Memory.from_str(s.replace("B", "b")).to("b"))
except AttributeError: # For even older pymatgen versions
mem = int(Memory.from_string(s.replace("B", "b")).to("b"))
return mem
else:
return int(s)
if max_filesize is not None:
max_filesize = any2bytes(max_filesize)
if exclude_exts:
# Add/remove ".nc" so that we can simply pass "GSR" instead of "GSR.nc"
# Moreover this trick allows one to treat WFK.nc and WFK file on the same footing.
exts = []
for e in list_strings(exclude_exts):
exts.append(e)
if e.endswith(".nc"):
exts.append(e.replace(".nc", ""))
else:
exts.append(e + ".nc")
exclude_exts = exts
def filter(tarinfo):
"""
Function that takes a TarInfo object argument and returns the changed TarInfo object.
If it instead returns None the TarInfo object will be excluded from the archive.
"""
# Skip links.
if tarinfo.issym() or tarinfo.islnk():
if verbose: print("Excluding link: %s" % tarinfo.name)
return None
# Check size in bytes
if max_filesize is not None and tarinfo.size > max_filesize:
if verbose: print("Excluding %s due to max_filesize" % tarinfo.name)
return None
# Filter filenames.
if exclude_exts and any(tarinfo.name.endswith(ext) for ext in exclude_exts):
if verbose: print("Excluding %s due to extension" % tarinfo.name)
return None
# Exclude directories (use dir basenames).
if exclude_dirs and any(dir_name in exclude_dirs for dir_name in tarinfo.name.split(os.path.sep)):
if verbose: print("Excluding %s due to exclude_dirs" % tarinfo.name)
return None
return tarinfo
back = os.getcwd()
os.chdir(os.path.join(self.workdir, ".."))
import tarfile
name = os.path.basename(self.workdir) + ".tar.gz" if name is None else name
with tarfile.open(name=name, mode='w:gz', **kwargs) as tar:
tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, filter=filter)
# Add the script used to generate the flow.
if self.pyfile is not None and os.path.exists(self.pyfile):
tar.add(self.pyfile)
os.chdir(back)
return name
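# Example (illustrative sketch): tarball without WFK files and without files larger than 10 MB.
#
#   tarname = flow.make_tarfile(max_filesize="10 MB", exclude_exts=["WFK"])
#   print("Tarball written to:", tarname)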
def explain(self, what="all", nids=None, verbose=0) -> str:
"""
Return string with the docstrings of the works/tasks in the Flow grouped by class.
Args:
what: "all" to print all nodes, "works" for Works only, "tasks" for tasks only.
nids: list of node identifiers used to filter works or tasks.
verbose: Verbosity level
"""
black_list = {
".. rubric:: Inheritance Diagram",
".. inheritance-diagram:",
}
def polish_doc(doc: str, node_type) -> str:
"""Remove all lines in black_list"""
new_lines = [l for l in doc.splitlines() if not any(bad in l for bad in black_list)]
s = "\n".join(new_lines)
color_node = dict(work="green", task="magenta")
s = colored(s, color=color_node[node_type])
return s
explain_works = what in {"all", "works"}
explain_tasks = what in {"all", "tasks"}
nids = as_set(nids)
lines = []; app = lines.append
if explain_works:
app("")
cls2works = self.groupby_work_class()
for work_cls, works in cls2works.items():
if nids and not any(work.node_id in nids for work in works): continue
s = f"work name: {work_cls.__name__}, declared in module: {work_cls.__module__}"
app(make_banner(s, mark="="))
app(polish_doc(work_cls.__doc__, "work"))
app("Works belonging to this class (%d):" % len(works))
for work in works:
app(4 * " " + str(work) + ", status: " + work.status.colored)
if explain_tasks:
app("")
cls2tasks = self.groupby_task_class()
for task_cls, tasks in cls2tasks.items():
if nids and not any(task.node_id in nids for task in tasks): continue
s = f"Task name: {task_cls.__name__}, declared in module: {task_cls.__module__}"
app(make_banner(s, mark="="))
app(polish_doc(task_cls.__doc__, "task"))
app("Tasks belonging to this class (%s):" % len(tasks))
for task in tasks:
app(4 * " " + str(task) + ", status: " + task.status.colored)
return "\n".join(lines)
def show_autoparal(self, nids=None, verbose=0) -> None:
"""
Print to terminal the autoparal configurations for each task in the Flow.
Args:
nids: list of node identifiers used to filter works or tasks.
verbose: Verbosity level
"""
print("")
for task in self.iflat_tasks():
if nids and task.node_id not in nids: continue
json_path = os.path.join(task.workdir, "autoparal.json")
if not os.path.exists(json_path):
if verbose: print("Cannot find:", json_path)
continue
with open(json_path, "rt") as fh:
d = json.load(fh)
optimal_conf = d.pop("optimal_conf")
paral_hints = mjson_loads(json.dumps(d))
df = paral_hints.get_dataframe()
print("Task:", task, ", status:", task.status.colored)
print(df)
print("info:", paral_hints.info)
print("optimal:", optimal_conf)
def get_graphviz(self, engine="automatic", graph_attr=None, node_attr=None, edge_attr=None):
"""
Generate flow graph in the DOT language.
Args:
engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
graph_attr: Mapping of (attribute, value) pairs for the graph.
node_attr: Mapping of (attribute, value) pairs set for all nodes.
edge_attr: Mapping of (attribute, value) pairs set for all edges.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
"""
self.allocate()
from graphviz import Digraph
fg = Digraph("flow", #filename="flow_%s.gv" % os.path.basename(self.relworkdir),
engine="fdp" if engine == "automatic" else engine)
# Set graph attributes. https://www.graphviz.org/doc/info/
fg.attr(label=repr(self))
fg.attr(rankdir="LR", pagedir="BL")
fg.node_attr.update(color='lightblue2', style='filled')
# Add input attributes.
if graph_attr is not None: fg.graph_attr.update(**graph_attr)
if node_attr is not None: fg.node_attr.update(**node_attr)
if edge_attr is not None: fg.edge_attr.update(**edge_attr)
def node_kwargs(node):
return dict(
#shape="circle",
color=node.color_hex,
fontsize="8.0",
label=(str(node) if not hasattr(node, "pos_str") else
node.pos_str + "\n" + node.__class__.__name__),
)
edge_kwargs = dict(arrowType="vee", style="solid")
cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")
for work in self:
# Build cluster with tasks.
cluster_name = "cluster%s" % work.name
with fg.subgraph(name=cluster_name) as wg:
wg.attr(**cluster_kwargs)
wg.attr(label="%s (%s)" % (work.__class__.__name__, work.name))
for task in work:
wg.node(task.name, **node_kwargs(task))
# Connect children to task.
for child in task.get_children():
# Find file extensions required by this task
i = [dep.node for dep in child.deps].index(task)
edge_label = "+".join(child.deps[i].exts)
fg.edge(task.name, child.name, label=edge_label, color=task.color_hex,
**edge_kwargs)
# Treat the case in which we have a work producing output for other tasks.
for work in self:
children = work.get_children()
if not children: continue
cluster_name = "cluster%s" % work.name
seen = set()
for child in children:
# This is not needed, too confusing.
#fg.edge(cluster_name, child.name, color=work.color_hex, **edge_kwargs)
# Find file extensions required by work
i = [dep.node for dep in child.deps].index(work)
for ext in child.deps[i].exts:
out = "%s (%s)" % (ext, work.name)
fg.node(out)
fg.edge(out, child.name, **edge_kwargs)
key = (cluster_name, out)
if key not in seen:
seen.add(key)
fg.edge(cluster_name, out, color=work.color_hex, **edge_kwargs)
# Treat the case in which we have a task that depends on external files.
seen = set()
for task in self.iflat_tasks():
#print(task.get_parents())
for node in (p for p in task.get_parents() if p.is_file):
#print("parent file node", node)
#infile = "%s (%s)" % (ext, work.name)
infile = node.filepath
if infile not in seen:
seen.add(infile)
fg.node(infile, **node_kwargs(node))
fg.edge(infile, task.name, color=node.color_hex, **edge_kwargs)
return fg
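# Example (illustrative sketch): render the flow graph to a PDF file
# (same graphviz calls used by graphviz_imshow below).
#
#   fg = flow.get_graphviz(engine="dot")
#   fg.format = "pdf"
#   fg.render("flow_graph", view=False, cleanup=True)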
@add_fig_kwargs
def graphviz_imshow(self, ax=None, figsize=None, dpi=300, fmt="png", **kwargs) -> Figure:
"""
Generate flow graph in the DOT language and plot it with matplotlib.
Args:
ax: |matplotlib-Axes| or None if a new figure should be created.
figsize: matplotlib figure size (None to use default)
dpi: DPI value.
fmt: Select format for output image
Return: |matplotlib-Figure|
"""
graph = self.get_graphviz(**kwargs)
graph.format = fmt
graph.attr(dpi=str(dpi))
_, tmpname = tempfile.mkstemp()
path = graph.render(tmpname, view=False, cleanup=True)
ax, fig, _ = get_ax_fig_plt(ax=ax, figsize=figsize, dpi=dpi)
import matplotlib.image as mpimg
ax.imshow(mpimg.imread(path, format="png")) #, interpolation="none")
ax.axis("off")
return fig
@add_fig_kwargs
def plot_networkx(self, mode="network", with_edge_labels=False, ax=None, arrows=False,
node_size="num_cores", node_label="name_class", layout_type="spring", **kwargs) -> Figure:
"""
Use networkx to draw the flow with the connections among the nodes and the status of the tasks.
Args:
mode: `network` to show connections, `status` to group tasks by status.
with_edge_labels: True to draw edge labels.
ax: |matplotlib-Axes| or None if a new figure should be created.
arrows: if True draw arrowheads.
node_size: By default, the size of the node is proportional to the number of cores used.
node_label: By default, the task class is used to label node.
layout_type: Get positions for all nodes using `layout_type`. e.g. pos = nx.spring_layout(g)
.. warning::
Requires networkx package.
"""
self.allocate()
# Build the graph
import networkx as nx
g = nx.Graph() if not arrows else nx.DiGraph()
edge_labels = {}
for task in self.iflat_tasks():
g.add_node(task, name=task.name)
for child in task.get_children():
g.add_edge(task, child)
# TODO: Add getters! What about locked nodes!
i = [dep.node for dep in child.deps].index(task)
edge_labels[(task, child)] = " ".join(child.deps[i].exts)
filedeps = [d for d in task.deps if d.node.is_file]
for d in filedeps:
#print(d.node, d.exts)
g.add_node(d.node, name="%s (%s)" % (d.node.basename, d.node.node_id))
g.add_edge(d.node, task)
edge_labels[(d.node, task)] = "+".join(d.exts)
# This part is needed if we have a work that produces output used by other nodes.
for work in self:
children = work.get_children()
if not children:
continue
g.add_node(work, name=work.name)
for task in work:
g.add_edge(task, work)
edge_labels[(task, work)] = "all_ok "
for child in children:
g.add_edge(work, child)
i = [dep.node for dep in child.deps].index(work)
edge_labels[(work, child)] = "+".join(child.deps[i].exts)
# Get positions for all nodes using layout_type e.g. pos = nx.spring_layout(g)
pos = getattr(nx, layout_type + "_layout")(g)
# Select function used to compute the size of the node
def make_node_size(node):
if node.is_task:
return 300 * node.manager.num_cores
else:
return 600
# Function used to build the label
def make_node_label(node):
if node_label == "name_class":
if node.is_file:
return "%s\n(%s)" % (node.basename, node.node_id)
else:
return (node.pos_str + "\n" + node.__class__.__name__
if hasattr(node, "pos_str") else str(node))
else:
raise NotImplementedError("node_label: %s" % str(node_label))
labels = {node: make_node_label(node) for node in g.nodes()}
ax, fig, plt = get_ax_fig_plt(ax=ax)
# Select plot type.
if mode == "network":
nx.draw_networkx(g, pos, labels=labels,
node_color=[node.color_rgb for node in g.nodes()],
node_size=[make_node_size(node) for node in g.nodes()],
width=1, style="dotted", with_labels=True, arrows=arrows, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
elif mode == "status":
# Group tasks by status (only tasks are shown here).
for status in self.ALL_STATUS:
tasks = list(self.iflat_tasks(status=status))
# Draw nodes (color is given by status)
node_color = status.color_opts["color"]
if node_color is None: node_color = "black"
#print("num nodes %s with node_color %s" % (len(tasks), node_color))
nx.draw_networkx_nodes(g, pos,
nodelist=tasks,
node_color=node_color,
node_size=[make_node_size(task) for task in tasks],
alpha=0.5, ax=ax
#label=str(status),
)
# Draw edges.
nx.draw_networkx_edges(g, pos, width=2.0, alpha=0.5, arrows=arrows, ax=ax) # edge_color='r')
# Draw labels
nx.draw_networkx_labels(g, pos, labels, font_size=12, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
#label_pos=0.5, font_size=10, font_color='k', font_family='sans-serif', font_weight='normal',
# alpha=1.0, bbox=None, ax=None, rotate=True, **kwds)
else:
raise ValueError("Unknown value for mode: %s" % str(mode))
ax.axis("off")
return fig
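# Example (illustrative sketch): group the tasks by status and show the edge labels.
#
#   flow.plot_networkx(mode="status", with_edge_labels=True)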
def write_open_notebook(self, foreground) -> int:
"""
Generate an ipython notebook and open it in the browser.
Return system exit code.
"""
import nbformat
nbf = nbformat.v4
nb = nbf.new_notebook()
nb.cells.extend([
#nbf.new_markdown_cell("This is an auto-generated notebook for %s" % os.path.basename(pseudopath)),
nbf.new_code_cell("""\
import sys, os
import numpy as np
%matplotlib notebook
from IPython.display import display
# This to render pandas DataFrames with https://github.com/quantopian/qgrid
#import qgrid
#qgrid.nbinstall(overwrite=True) # copies javascript dependencies to your /nbextensions folder
from abipy import abilab
# Tell AbiPy we are inside a notebook and use seaborn settings for plots.
# See https://seaborn.pydata.org/generated/seaborn.set.html#seaborn.set
abilab.enable_notebook(with_seaborn=True)
"""),
nbf.new_code_cell("flow = abilab.Flow.pickle_load('%s')" % self.workdir),
nbf.new_code_cell("if flow.num_errored_tasks: flow.debug()"),
nbf.new_code_cell("flow.check_status(show=True, verbose=0)"),
nbf.new_code_cell("flow.show_dependencies()"),
nbf.new_code_cell("flow.plot_networkx();"),
nbf.new_code_cell("#flow.get_graphviz();"),
nbf.new_code_cell("flow.show_inputs(nids=None, wslice=None)"),
nbf.new_code_cell("flow.show_history()"),
nbf.new_code_cell("flow.show_corrections()"),
nbf.new_code_cell("flow.show_event_handlers()"),
nbf.new_code_cell("flow.inspect(nids=None, wslice=None)"),
nbf.new_code_cell("flow.show_abierrors()"),
nbf.new_code_cell("flow.show_qouts()"),
])
import tempfile, io
_, nbpath = tempfile.mkstemp(suffix='.ipynb', text=True)
with io.open(nbpath, 'wt', encoding="utf8") as fh:
nbformat.write(nb, fh)
from shutil import which
has_jupyterlab = which("jupyter-lab") is not None
appname = "jupyter-lab" if has_jupyterlab else "jupyter notebook"
if not has_jupyterlab:
if which("jupyter") is None:
raise RuntimeError("Cannot find jupyter in $PATH. Install it with `pip install`")
appname = "jupyter-lab" if has_jupyterlab else "jupyter notebook"
if foreground:
return os.system("%s %s" % (appname, nbpath))
else:
fd, tmpname = tempfile.mkstemp(text=True)
print(tmpname)
cmd = "%s %s" % (appname, nbpath)
print("Executing:", cmd, "\nstdout and stderr redirected to %s" % tmpname)
import subprocess
process = subprocess.Popen(cmd.split(), shell=False, stdout=fd, stderr=fd)
cprint("pid: %s" % str(process.pid), "yellow")
return process.returncode
class G0W0WithQptdmFlow(Flow):
def __init__(self, workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None):
"""
Build a :class:`Flow` for one-shot G0W0 calculations.
The computation of the q-points for the screening is parallelized with qptdm
i.e. we run independent calculations for each q-point and then we merge the final results.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: Input(s) for the SIGMA run(s).
manager: |TaskManager| object used to submit the jobs. Initialized from manager.yml if manager is None.
"""
super().__init__(workdir, manager=manager)
# TODO: Remove this class as it's not compatible with the computation of the q-points
# that is not performed at the level of the Abinit parser.
warnings.warn("DeprecationWarning: G0W0WithQptdmFlow is deprecated and will be removed in abipy v0.9.2")
# Register the first work (GS + NSCF calculation)
bands_work = self.register_work(BandStructureWork(scf_input, nscf_input))
# Register the callback that will build the work for the SCR run with qptdm.
scr_work = self.register_work_from_cbk(cbk_name="cbk_qptdm_workflow", cbk_data={"input": scr_input},
deps={bands_work.nscf_task: "WFK"}, work_class=QptdmWork)
# The last work contains a list of SIGMA tasks
# that will use the data produced in the previous two works.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
sigma_work = Work()
for sigma_input in sigma_inputs:
sigma_work.register_sigma_task(sigma_input, deps={bands_work.nscf_task: "WFK", scr_work: "SCR"})
self.register_work(sigma_work)
self.allocate()
def cbk_qptdm_workflow(self, cbk):
"""
This callback is executed by the flow when bands_work.nscf_task reaches S_OK.
It computes the list of q-points for the W(q,G,G'), creates nqpt tasks
in the second work (QptdmWork), and connects the signals.
"""
scr_input = cbk.data["input"]
# Use the WFK file produced by the second
# Task in the first Work (NSCF step).
nscf_task = self[0][1]
wfk_file = nscf_task.outdir.has_abiext("WFK")
work = self[1]
work.set_manager(self.manager)
work.create_tasks(wfk_file, scr_input)
work.add_deps(cbk.deps)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
# Add the garbage collector.
if self.gc is not None: task.set_gc(self.gc)
work.connect_signals()
work.build()
return work
# TODO: Remove
class FlowCallbackError(Exception):
"""Exceptions raised by FlowCallback."""
class FlowCallback:
"""
This object implements the callbacks executed by the |Flow| when
particular conditions are fulfilled. See on_dep_ok method of |Flow|.
.. note::
I decided to implement callbacks via this object instead of a standard
approach based on bound methods because:
1) pickle (v<=3) does not support the pickling/unpickling of bound methods
2) There's some extra logic and extra data needed for the proper functioning
of a callback at the flow level and this object provides an easy-to-use interface.
"""
Error = FlowCallbackError
def __init__(self, func_name, flow, deps, cbk_data):
"""
Args:
func_name: String with the name of the callback to execute.
func_name must be a bound method of flow with signature:
func_name(self, cbk)
where self is the Flow instance and cbk is the callback
flow: Reference to the |Flow|.
deps: List of dependencies associated to the callback
The callback is executed when all dependencies reach S_OK.
cbk_data: Dictionary with additional data that will be passed to the callback via self.
"""
self.func_name = func_name
self.flow = flow
self.deps = deps
self.data = cbk_data or {}
self._disabled = False
def __str__(self):
return "%s: %s bound to %s" % (self.__class__.__name__, self.func_name, self.flow)
def __call__(self):
"""Execute the callback."""
if self.can_execute():
# Get the bound method of the flow from func_name.
# We use this trick because pickle (format <=3) does not support bound methods.
try:
func = getattr(self.flow, self.func_name)
except AttributeError as exc:
raise self.Error(str(exc)) from exc
return func(self)
else:
raise self.Error("You tried to __call_ a callback that cannot be executed!")
def can_execute(self):
"""True if we can execute the callback."""
return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps)
def disable(self):
"""
Disable the callback. This is usually done after the callback has been executed.
"""
self._disabled = True
def enable(self):
"""Enable the callback"""
self._disabled = False
def handle_sender(self, sender):
"""
True if the callback is associated to the sender
i.e. if the node who sent the signal appears in the
dependencies of the callback.
"""
return sender in [d.node for d in self.deps]
####################
# Factory functions
####################
def bandstructure_flow(workdir: str, scf_input: AbinitInput, nscf_input: AbinitInput,
dos_inputs=None, manager=None,
flow_class=Flow, allocate=True):
"""
Build a |Flow| for band structure calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
dos_inputs: Input(s) for the NSCF run (dos run).
manager: |TaskManager| object used to submit the jobs. Initialized from manager.yml if manager is None.
flow_class: Flow subclass
allocate: True if the flow should be allocated before returning.
Returns: |Flow| object
"""
flow = flow_class(workdir, manager=manager)
work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs)
flow.register_work(work)
# Handy aliases
flow.scf_task, flow.nscf_task, flow.dos_tasks = work.scf_task, work.nscf_task, work.dos_tasks
if allocate: flow.allocate()
return flow
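# Example (illustrative sketch): `scf_input` and `nscf_input` are assumed to be
# AbinitInput objects prepared by the user.
#
#   flow = bandstructure_flow("flow_bands", scf_input, nscf_input)
#   flow.build_and_pickle_dump()
#   # Run it with the scheduler: `abirun.py flow_bands scheduler`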
def g0w0_flow(workdir: str,
scf_input: AbinitInput, nscf_input: AbinitInput,
scr_input: AbinitInput, sigma_inputs,
manager=None, flow_class=Flow, allocate=True):
"""
Build a |Flow| for one-shot $G_0W_0$ calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: List of inputs for the SIGMA run.
flow_class: Flow class
manager: |TaskManager| object used to submit the jobs. Initialized from manager.yml if manager is None.
allocate: True if the flow should be allocated before returning.
Returns: |Flow| object
"""
flow = flow_class(workdir, manager=manager)
work = G0W0Work(scf_input, nscf_input, scr_input, sigma_inputs)
flow.register_work(work)
if allocate: flow.allocate()
return flow
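# Example (illustrative sketch): one-shot G0W0 flow with two SIGMA inputs.
# All the inputs are assumed to be user-provided AbinitInput objects.
#
#   flow = g0w0_flow("flow_g0w0", scf_input, nscf_input, scr_input,
#                    [sigma_input1, sigma_input2])
#   flow.build_and_pickle_dump()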
# TODO: Move it to dfpt_works
class PhononFlow(Flow):
"""
This Flow provides a high-level interface to compute phonons with DFPT.
The flow consists of:
1) One workflow for the GS part.
2) A second workflow to compute phonons on the ph_ngqpt q-mesh.
   Only the irreducible phonon perturbations are explicitly computed.
"""
@classmethod
def from_scf_input(cls, workdir: str, scf_input: AbinitInput, ph_ngqpt,
qptopt=1, with_becs=True, with_quad=False, with_flexoe=False,
manager=None, allocate=True, **kwargs) -> PhononFlow:
"""
Create a `PhononFlow` for phonon calculations from an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: |AbinitInput| object with the parameters for the GS-SCF run.
ph_ngqpt: q-mesh for phonons. Must be a sub-mesh of the k-mesh used for
electrons, e.g. if ngkpt = (8, 8, 8), ph_ngqpt = (4, 4, 4) is a valid choice
whereas ph_ngqpt = (3, 3, 3) is not!
qptopt: Option for the generation of q-points. Default: 1 i.e. use nsym spatial symmetries + TR.
with_becs: True if Born effective charges are wanted.
with_quad: Activate calculation of dynamical quadrupoles.
Note that only selected features are compatible with dynamical quadrupoles.
Please consult <https://docs.abinit.org/topics/longwave/>
with_flexoe: True to activate computation of flexoelectric tensor.
manager: |TaskManager| object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
kwargs: Extra optional arguments passed to PhononWork.from_scf_task.
See doc string for possible kwargs.
Return:
:class:`PhononFlow` object.
"""
flow = cls(workdir, manager=manager)
# Register the SCF task
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
# Make sure k-mesh and q-mesh are compatible.
scf_ngkpt, ph_ngqpt = np.array(scf_input["ngkpt"]), np.array(ph_ngqpt)
if any(scf_ngkpt % ph_ngqpt != 0):
raise ValueError("ph_ngqpt %s should be a sub-mesh of scf_ngkpt %s" % (ph_ngqpt, scf_ngkpt))
ph_work = PhononWork.from_scf_task(scf_task, ph_ngqpt, is_ngqpt=True, qptopt=qptopt, with_becs=with_becs,
with_quad=with_quad, with_flexoe=with_flexoe, **kwargs)
flow.register_work(ph_work)
if allocate: flow.allocate()
return flow
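# Example (illustrative sketch): phonons on a 4x4x4 q-mesh with Born effective charges,
# assuming `scf_input` uses ngkpt = (8, 8, 8) so that ph_ngqpt is a sub-mesh of the k-mesh.
#
#   flow = PhononFlow.from_scf_input("flow_phonons", scf_input,
#                                    ph_ngqpt=(4, 4, 4), with_becs=True)
#   flow.build_and_pickle_dump()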
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
:class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ph_work = self[1]
ddb_path = ph_work.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
self.history.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
self.history.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
class NonLinearCoeffFlow(Flow):
"""
Flow for the computation of non-linear optical susceptibilities with DFPT.
The flow consists of:
1) One workflow for the GS run.
2) nqpt works for electric field calculations. Each work contains
   nirred tasks where nirred is the number of irreducible perturbations
   for that particular q-point.
"""
@classmethod
def from_scf_input(cls, workdir: str, scf_input: AbinitInput,
manager=None, allocate=True) -> NonLinearCoeffFlow:
"""
Create a `NonLinearCoeffFlow` for second-order susceptibility calculations from
an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: |AbinitInput| object with the parameters for the GS-SCF run.
manager: |TaskManager| object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
"""
flow = cls(workdir, manager=manager)
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
nl_work = DteWork.from_scf_task(scf_task)
flow.register_work(nl_work)
if allocate: flow.allocate()
return flow
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
|DdbFile| object, None if file could not be found or file is not readable.
"""
ddb_path = self.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
self.history.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
self.history.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
def finalize(self) -> int:
"""This method is called when the flow is completed."""
# Merge all the out_DDB files found in work.outdir.
ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self]))
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
print("Final DDB file available at %s" % out_ddb)
# Call the method of the super class.
return super().finalize()
def phonon_conv_flow(workdir: str, scf_input: AbinitInput, qpoints, params,
manager=None, allocate=True):
"""
Create a |Flow| to perform convergence studies for phonon calculations.
Args:
workdir: Working directory of the flow.
scf_input: |AbinitInput| object defining a GS-SCF calculation.
qpoints: List with the reduced coordinates of the q-point(s).
params: Parameters used to generate the list of SCF inputs (passed to scf_input.product).
    To perform a convergence study wrt ecut: params=["ecut", [2, 4, 6]]
manager: |TaskManager| object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
allocate: True if the flow should be allocated before returning.
Return: |Flow| object.
"""
qpoints = np.reshape(qpoints, (-1, 3))
flow = Flow(workdir=workdir, manager=manager)
for qpt in qpoints:
for gs_inp in scf_input.product(*params):
# Register the SCF task
work = flow.register_scf_task(gs_inp)
# Add the PhononWork connected to this scf_task.
flow.register_work(PhononWork.from_scf_task(work[0], qpoints=qpt))
if allocate: flow.allocate()
return flow
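# Example (illustrative sketch): convergence of the phonons at Gamma wrt ecut.
#
#   flow = phonon_conv_flow("flow_phconv", scf_input, qpoints=[0, 0, 0],
#                           params=["ecut", [4, 8, 12]])
#   flow.build_and_pickle_dump()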
_FIX_FLOW_SCRIPT = """\
#!/usr/bin/env python
'''
WARNING WARNING WARNING
This script changes the input variables of particular tasks and resets certain tasks
so that it's possible to fix errors before starting a new scheduler.
This is an advanced script and you are supposed to be familiar with the Abipy API.
Very bad things will happen if you don't use this script properly.
DO NOT RUN THIS SCRIPT IF THE SCHEDULER IS STILL RUNNING YOUR FLOW.
YOU HAVE BEEN WARNED!
'''
import sys
import argparse
import abipy.flowtk as flowtk
def main():
# Build the main parser.
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('flowdir', help="File or directory containing the ABINIT flow")
parser.add_argument('--remove-lock', default=False, action="store_true",
help="Remove the lock on the pickle file used to save the status of the flow.")
parser.add_argument('--apply', default=False, action="store_true", help="Apply changes to the flow.")
options = parser.parse_args()
# Read the flow from the pickle file
flow = flowtk.Flow.pickle_load(options.flowdir, remove_lock=options.remove_lock)
# List with the id of the tasks that will be changed.
nids = []
for task in flow.iflat_tasks():
# Select tasks according to class and status
# Clearly, you will need to customize this part.
if task.__class__.__name__ == "PhononTask" and task.status in (task.S_UNCONVERGED, task.S_ERROR):
nids.append(task.node_id)
# Here we operate on the AbinitInput of the task.
# In this particular case, we want to use toldfe instead of tolvrs.
# Again, you will need to customize this part.
# Remove all the tolerance variables present in the input.
task.input.pop_tolerances()
# Set new stopping criterion
task.input["toldfe"] = 1e-8
task.input["nstep"] = 1000
task.input["prtwf"] = 0
if options.apply:
print(f"Resetting {len(nids)} tasks modified by the user.")
if nids:
for task in flow.iflat_tasks(nids=nids):
task.reset()
print("Writing new pickle file.")
flow.build_and_pickle_dump()
else:
print(f"Dry run mode. {len(nids)} tasks have been modified in memory.")
if nids:
for task in flow.iflat_tasks(nids=nids):
print(task)
print("Use --apply to reset these tasks and update the pickle file.")
print("Then restart the scheduler with `nohup abirun.py FLOWDIR scheduler ...`")
return 0
if __name__ == "__main__":
sys.exit(main())
"""