@conference {1453,
	title = {Asynchronous Receiver-Driven Replay for Local Rollback of MPI Applications},
	booktitle = {Fault Tolerance for HPC at eXtreme Scale (FTXS) Workshop at The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC{\textquoteright}19)},
	year = {2019},
	month = {2019-11},
	abstract = {With the increase in scale and architectural complexity of supercomputers, the management of failures has become integral to successfully executing a long-running high performance computing application. In many instances, failures have a localized scope, usually impacting only a subset of the resources being used, yet widely used failure recovery strategies (like checkpoint/restart) fail to take advantage of this locality and instead rely on global, synchronous recovery actions. Even with local rollback recovery, in which only the fault-impacted processes are restarted from a checkpoint, the consistency of further progress in the execution is achieved through the replay of communication from a message log. This theoretically sound approach encounters some practical limitations: the presence of collective operations forces a synchronous recovery that prevents survivor processes from continuing their execution, removing any possibility of overlapping further computation with the recovery; and the amount of resources required at recovering peers can be untenable. In this work, we solve both problems by implementing an asynchronous, receiver-driven replay of point-to-point and collective communications, and by exploiting remote-memory access capabilities to access the message logs. This new protocol is evaluated in an implementation of local rollback over the User Level Failure Mitigation (ULFM) fault-tolerant Message Passing Interface (MPI). It reduces the recovery times of the failed processes by an average of 59\%, while the time spent in recovery by the survivor processes is reduced by 95\% when compared to an equivalent global rollback protocol, thus living up to the promise of a truly localized impact of recovery actions.},
	keywords = {checkpoint/restart, Fault tolerance, Message logging, MPI, ULFM, User Level Failure Mitigation},
	url = {https://sc19.supercomputing.org/proceedings/workshops/workshop_files/ws_ftxs103s2-file1.pdf},
	author = {Nuria Losada and Aurelien Bouteiller and George Bosilca}
}
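Both entries build on the ULFM extension of MPI, whose basic repair cycle is easiest to see in a small example. Below is a minimal, illustrative C sketch of ULFM-style failure handling (an error handler that revokes and then shrinks the damaged communicator), assuming an Open MPI build with ULFM support. It uses only the standard MPIX_ entry points; it is not the receiver-driven replay protocol of the paper above, which instead respawns the failed processes and replays their logged communications.

/* ulfm_sketch.c: minimal ULFM failure handling (illustrative only). */
#include <mpi.h>
#include <mpi-ext.h>  /* Open MPI: MPIX_Comm_revoke/shrink, MPIX_ERR_* classes */

static MPI_Comm work_comm;  /* communicator the application computes on */

/* Called by MPI when an operation on work_comm fails. Revoking makes the
 * failure visible to every rank so all survivors abandon pending
 * communication; shrinking builds a replacement communicator without the
 * failed processes. */
static void failure_handler(MPI_Comm *comm, int *errcode, ...)
{
    int eclass;
    MPI_Error_class(*errcode, &eclass);
    if (eclass != MPIX_ERR_PROC_FAILED && eclass != MPIX_ERR_REVOKED)
        MPI_Abort(*comm, *errcode);  /* not a process failure: give up */

    MPIX_Comm_revoke(*comm);
    MPI_Comm shrunk;
    MPIX_Comm_shrink(*comm, &shrunk);
    work_comm = shrunk;  /* a local-rollback runtime would instead respawn
                            the failed ranks and drive replay from the logs */
}

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    MPI_Comm_dup(MPI_COMM_WORLD, &work_comm);

    MPI_Errhandler errh;
    MPI_Comm_create_errhandler(failure_handler, &errh);
    MPI_Comm_set_errhandler(work_comm, errh);  /* errors return instead of aborting */

    /* ... application communication on work_comm ... */

    MPI_Finalize();
    return 0;
}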
@article {1230,
	title = {Local Rollback for Resilient MPI Applications with Application-Level Checkpointing and Message Logging},
	journal = {Future Generation Computer Systems},
	volume = {91},
	year = {2019},
	month = {2019-02},
	pages = {450-464},
	abstract = {The resilience approach generally used in high-performance computing (HPC) relies on coordinated checkpoint/restart, a global rollback of all the processes that are running the application. However, in many instances, the failure has a more localized scope and its impact is usually restricted to a subset of the resources being used. Thus, a global rollback results in unnecessary overhead and energy consumption, since all processes, including those unaffected by the failure, discard their state and roll back to the last checkpoint to repeat computations that were already done. The User Level Failure Mitigation (ULFM) interface {\textendash} the latest proposal for the inclusion of resilience features in the Message Passing Interface (MPI) standard {\textendash} enables the deployment of more flexible recovery strategies, including localized recovery. This work proposes a local rollback approach that can be generally applied to Single Program, Multiple Data (SPMD) applications by combining ULFM, the ComPiler for Portable Checkpointing (CPPC) tool, and the Open MPI VProtocol system-level message logging component. Only failed processes are recovered from the last checkpoint, while consistency before further progress in the execution is achieved through a two-level message logging process. To further optimize this approach, point-to-point communications are logged by the Open MPI VProtocol component, while collective communications are optimally logged at the application level{\textemdash}thereby decoupling the logging protocol from the particular collective implementation. This spatially coordinated protocol, applied by CPPC, reduces the log size, the memory requirements of the log, and, overall, the impact of resilience support on the applications.},
	keywords = {Application-level checkpointing, Local rollback, Message logging, MPI, resilience},
	doi = {10.1016/j.future.2018.09.041},
	author = {Nuria Losada and George Bosilca and Aurelien Bouteiller and Patricia Gonz{\'a}lez and Mar{\'\i}a J. Mart{\'\i}n}
}
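To make the two-level logging idea in the abstract above concrete, here is a hedged C sketch of logging a collective at the application level: only the result of the collective is recorded, not its internal point-to-point traffic, so a recovering process can replay the result from a log no matter how the MPI library implements the operation. The replaying flag and the log_store/log_fetch helpers are hypothetical placeholders for this sketch, not part of CPPC, VProtocol, or MPI.

/* collective_log_sketch.c: application-level logging of a collective
 * result (illustrative; placeholder log API). */
#include <mpi.h>
#include <stddef.h>

extern int  replaying;                                        /* hypothetical: set during recovery */
extern void log_store(int id, const void *buf, size_t len);   /* hypothetical */
extern int  log_fetch(int id, void *buf, size_t len);         /* hypothetical: returns 1 if found */

/* Wraps MPI_Allreduce so its result is logged per call site (id). During
 * replay the recovering process consumes the logged result and skips the
 * collective entirely, which keeps the logging protocol independent of the
 * collective's internal implementation. */
static int logged_allreduce(int id, const double *in, double *out,
                            int count, MPI_Comm comm)
{
    if (replaying && log_fetch(id, out, count * sizeof(double)))
        return MPI_SUCCESS;                                   /* served from the log */

    int rc = MPI_Allreduce(in, out, count, MPI_DOUBLE, MPI_SUM, comm);
    if (rc == MPI_SUCCESS)
        log_store(id, out, count * sizeof(double));           /* record for future replay */
    return rc;
}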