@article{1459,
  title    = {Fault Tolerance of {MPI} Applications in Exascale Systems: The {ULFM} Solution},
  journal  = {Future Generation Computer Systems},
  volume   = {106},
  year     = {2020},
  month    = may,
  pages    = {467--481},
  abstract = {The growth in the number of computational resources used by high-performance computing (HPC) systems leads to an increase in failure rates. Fault-tolerant techniques will become essential for long-running applications executing in future exascale systems, not only to ensure the completion of their execution in these systems but also to improve their energy consumption. Although the Message Passing Interface (MPI) is the most popular programming model for distributed-memory HPC systems, as of now, it does not provide any fault-tolerant construct for users to handle failures. Thus, the recovery procedure is postponed until the application is aborted and re-spawned. The proposal of the User Level Failure Mitigation (ULFM) interface in the MPI forum provides new opportunities in this field, enabling the implementation of resilient MPI applications, system runtimes, and programming language constructs able to detect and react to failures without aborting their execution. This paper presents a global overview of the resilience interfaces provided by the ULFM specification, covers archetypal usage patterns and building blocks, and surveys the wide variety of application-driven solutions that have exploited them in recent years. The large and varied number of approaches in the literature proves that ULFM provides the necessary flexibility to implement efficient fault-tolerant MPI applications. All the proposed solutions are based on application-driven recovery mechanisms, which allows reducing the overhead and obtaining the required level of efficiency needed in the future exascale platforms.},
  keywords = {Application-level checkpointing, MPI, resilience, ULFM},
  issn     = {0167-739X},
  doi      = {10.1016/j.future.2020.01.026},
  url      = {https://www.sciencedirect.com/science/article/pii/S0167739X1930860X},
  author   = {Losada, Nuria and Gonz{\'a}lez, Patricia and Mart{\'\i}n, Mar{\'\i}a J. and Bosilca, George and Bouteiller, Aurelien and Teranishi, Keita},
}

@article{1230,
  title    = {Local Rollback for Resilient {MPI} Applications with Application-Level Checkpointing and Message Logging},
  journal  = {Future Generation Computer Systems},
  volume   = {91},
  year     = {2019},
  month    = feb,
  pages    = {450--464},
  abstract = {The resilience approach generally used in high-performance computing (HPC) relies on coordinated checkpoint/restart, a global rollback of all the processes that are running the application. However, in many instances, the failure has a more localized scope and its impact is usually restricted to a subset of the resources being used. Thus, a global rollback would result in unnecessary overhead and energy consumption, since all processes, including those unaffected by the failure, discard their state and roll back to the last checkpoint to repeat computations that were already done. The User Level Failure Mitigation (ULFM) interface {\textendash} the last proposal for the inclusion of resilience features in the Message Passing Interface (MPI) standard {\textendash} enables the deployment of more flexible recovery strategies, including localized recovery. This work proposes a local rollback approach that can be generally applied to Single Program, Multiple Data (SPMD) applications by combining ULFM, the ComPiler for Portable Checkpointing (CPPC) tool, and the Open MPI VProtocol system-level message logging component. Only failed processes are recovered from the last checkpoint, while consistency before further progress in the execution is achieved through a two-level message logging process. To further optimize this approach point-to-point communications are logged by the Open MPI VProtocol component, while collective communications are optimally logged at the application level{\textemdash}thereby decoupling the logging protocol from the particular collective implementation. This spatially coordinated protocol applied by CPPC reduces the log size, the log memory requirements and overall the resilience impact on the applications.},
  keywords = {Application-level checkpointing, Local rollback, Message logging, MPI, resilience},
  doi      = {10.1016/j.future.2018.09.041},
  author   = {Losada, Nuria and Bosilca, George and Bouteiller, Aurelien and Gonz{\'a}lez, Patricia and Mart{\'\i}n, Mar{\'\i}a J.},
}