@article{icl:719,
  title    = {A Block-Asynchronous Relaxation Method for Graphics Processing Units},
  journal  = {Journal of Parallel and Distributed Computing},
  volume   = {73},
  year     = {2013},
  month    = {December},
  pages    = {1613{\textendash}1626},
  abstract = {In this paper, we analyze the potential of asynchronous relaxation methods on Graphics Processing Units (GPUs). We develop asynchronous iteration algorithms in CUDA and compare them with parallel implementations of synchronous relaxation methods on CPU- and GPU-based systems. For a set of test matrices from the University of Florida Matrix Collection (UFMC), we investigate convergence behavior, performance, and tolerance to hardware failure. We observe that even our most basic asynchronous relaxation scheme can efficiently leverage the GPU's computing power and, despite its lower convergence rate compared to the Gauss{\textendash}Seidel relaxation, is still able to provide solution approximations of a certain accuracy in considerably less time than Gauss{\textendash}Seidel running on CPUs or Jacobi running on GPUs. Hence, it more than compensates for the slower convergence by exploiting the scalability of the asynchronous schemes and their good fit to the highly parallel GPU architecture. Further, by enhancing the most basic asynchronous approach with hybrid schemes that perform multiple iterations within the {\textquoteleft}{\textquoteleft}subdomain{\textquoteright}{\textquoteright} handled by a GPU thread block, we manage not only to recover the loss of global convergence but often to accelerate convergence by up to a factor of two, while keeping the execution time of a global iteration practically the same. Combined with the advantageous properties of asynchronous iteration methods with respect to hardware failure, these results indicate the high potential of asynchronous methods for Exascale computing.},
  doi      = {10.1016/j.jpdc.2013.05.008},
  author   = {Hartwig Anzt and Stanimire Tomov and Jack Dongarra and Vincent Heuveline}
}
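The abstract describes the core scheme: each GPU thread block performs several local Jacobi iterations on its own "subdomain" of unknowns while reading the other blocks' components from global memory without synchronization, so those values may be stale. A minimal CUDA sketch of one such sweep is given below; it is not the authors' implementation, and it assumes a dense row-major matrix A of size n x n with n divisible by BLOCK. All kernel, macro, and parameter names are illustrative.

```cuda
// Hypothetical sketch of one block-asynchronous relaxation sweep.
// Each thread block runs LOCAL_ITERS local Jacobi iterations on its
// BLOCK-sized subdomain; out-of-block unknowns are read from global
// memory without inter-block synchronization (values may be stale).
#include <cuda_runtime.h>

#define BLOCK 128        // unknowns per subdomain = threads per block
#define LOCAL_ITERS 4    // local Jacobi sweeps per global sweep

__global__ void block_async_sweep(const double *A, const double *b,
                                  double *x, int n)
{
    __shared__ double xloc[BLOCK];        // this block's subdomain iterate
    const int base = blockIdx.x * BLOCK;  // first unknown owned by this block
    const int i    = base + threadIdx.x;  // global index of my unknown

    xloc[threadIdx.x] = x[i];             // load my component
    __syncthreads();

    for (int it = 0; it < LOCAL_ITERS; ++it) {
        double sigma = 0.0;
        for (int j = 0; j < n; ++j) {
            if (j == i) continue;
            // In-block neighbors: fresh values from shared memory.
            // Out-of-block unknowns: possibly stale values from global
            // memory, since blocks never synchronize with each other.
            const double xj = (j >= base && j < base + BLOCK)
                                  ? xloc[j - base] : x[j];
            sigma += A[(size_t)i * n + j] * xj;
        }
        const double xnew = (b[i] - sigma) / A[(size_t)i * n + i];
        __syncthreads();                  // finish reads of xloc
        xloc[threadIdx.x] = xnew;         // local Jacobi update
        __syncthreads();                  // publish within the block
    }
    x[i] = xloc[threadIdx.x];             // write back; no global barrier
}
```

A host loop would launch block_async_sweep<<<n / BLOCK, BLOCK>>>(dA, db, dx, n) repeatedly until a residual criterion is met: the repeated uncoordinated launches realize the asynchronous global iteration, while the LOCAL_ITERS inner sweeps correspond to the hybrid component the abstract credits with recovering, and often accelerating, convergence.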