@article {1220, title = {Variable-Size Batched Gauss-Jordan Elimination for Block-Jacobi Preconditioning on Graphics Processors}, journal = {Parallel Computing}, volume = {81}, year = {2019}, month = {2019-01}, pages = {131-146}, abstract = {In this work, we address the efficient realization of block-Jacobi preconditioning on graphics processing units (GPUs). This task requires the solution of a collection of small and independent linear systems. To fully realize this implementation, we develop a variable-size batched matrix inversion kernel that uses Gauss-Jordan elimination (GJE) along with a variable-size batched matrix{\textendash}vector multiplication kernel that transforms the linear systems{\textquoteright} right-hand sides into the solution vectors. Our kernels make heavy use of the increased register count and the warp-local communication associated with newer GPU architectures. Moreover, in the matrix inversion, we employ an implicit pivoting strategy that migrates the workload (i.e., operations) to the place where the data resides instead of moving the data to the executing cores. We complement the matrix inversion with extraction and insertion strategies that allow the block-Jacobi preconditioner to be set up rapidly. The experiments on NVIDIA{\textquoteright}s K40 and P100 architectures reveal that our variable-size batched matrix inversion routine outperforms the CUDA basic linear algebra subroutine (cuBLAS) library functions that provide the same (or even less) functionality. We also show that the preconditioner setup and preconditioner application cost can be somewhat offset by the faster convergence of the iterative solver.}, keywords = {Batched algorithms, Block-Jacobi, Gauss{\textendash}Jordan elimination, Graphics processor, matrix inversion, sparse linear systems}, doi = {10.1016/j.parco.2017.12.006}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Enrique S. Quintana-Orti} }

@conference {1234, title = {Variable-Size Batched Condition Number Calculation on GPUs}, booktitle = {International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD)}, year = {2018}, month = {2018-09}, address = {Lyon, France}, url = {https://ieeexplore.ieee.org/document/8645907}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Thomas Gruetzmacher} }

@inproceedings {1088, title = {Variable-Size Batched Gauss-Huard for Block-Jacobi Preconditioning}, booktitle = {International Conference on Computational Science (ICCS 2017)}, volume = {108}, year = {2017}, month = {2017-06}, pages = {1783-1792}, publisher = {Procedia Computer Science}, address = {Zurich, Switzerland}, abstract = {In this work we present new kernels for the generation and application of block-Jacobi preconditioners that accelerate the iterative solution of sparse linear systems on graphics processing units (GPUs). Our approach departs from the conventional LU factorization and decomposes the diagonal blocks of the matrix using the Gauss-Huard method. When enhanced with column pivoting, this method is as stable as LU with partial/row pivoting. Due to extensive use of GPU registers and integration of implicit pivoting, our variable-size batched Gauss-Huard implementation outperforms the batched version of LU factorization. In addition, the application kernel combines the conventional two-stage triangular solve procedure, consisting of a backward solve followed by a forward solve, into a single stage that performs both operations simultaneously.}, doi = {10.1016/j.procs.2017.05.186}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Enrique S. Quintana-Orti and Andres E. Thomas} }
@conference {1160, title = {Variable-Size Batched LU for Small Matrices and Its Integration into Block-Jacobi Preconditioning}, booktitle = {46th International Conference on Parallel Processing (ICPP)}, year = {2017}, month = {2017-08}, publisher = {IEEE}, organization = {IEEE}, address = {Bristol, United Kingdom}, abstract = {We present a set of new batched CUDA kernels for the LU factorization of a large collection of independent problems of different size, and the subsequent triangular solves. All kernels heavily exploit the registers of the graphics processing unit (GPU) in order to deliver high performance for small problems. The development of these kernels is motivated by the need to tackle this embarrassingly parallel scenario in the context of block-Jacobi preconditioning, which is relevant for the iterative solution of sparse linear systems.}, keywords = {graphics processing units, Jacobian matrices, Kernel, linear systems, Parallel processing, Sparse matrices}, doi = {10.1109/ICPP.2017.18}, url = {http://ieeexplore.ieee.org/abstract/document/8025283/}, author = {Hartwig Anzt and Jack Dongarra and Goran Flegar and Enrique S. Quintana-Orti} }

@conference {892, title = {Visualizing Execution Traces with Task Dependencies}, booktitle = {2nd Workshop on Visual Performance Analysis (VPA {\textquoteright}15)}, year = {2015}, month = {2015-11}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX}, abstract = {Task-based scheduling has emerged as one method to reduce the complexity of parallel computing. When using task-based schedulers, developers must frame their computation as a series of tasks with various data dependencies. The scheduler can take these tasks, along with their input and output dependencies, and schedule them in parallel across a node or cluster. While these schedulers simplify the process of parallel software development, they can obfuscate the performance characteristics of the execution of an algorithm. The execution trace has been used for many years to give developers a visual representation of how their computations are performed. These methods can be employed to visualize when and where each of the tasks in a task-based algorithm is scheduled. In addition, the task dependencies can be used to create a directed acyclic graph (DAG) that can also be visualized to demonstrate the dependencies of the various tasks that make up a workload. The work presented here aims to combine these two data sets and extend execution trace visualization to better suit task-based workloads. This paper presents a brief description of task-based schedulers and the performance data they produce. It then describes an interactive extension to current trace visualization methods that combines the trace and DAG data sets. This new tool allows users to gain a greater understanding of how their tasks are scheduled. It also provides a simplified way for developers to evaluate and debug the performance of their scheduler.}, author = {Blake Haugen and Stephen Richmond and Jakub Kurzak and Chad A. Steed and Jack Dongarra} }
@conference {icl:692, title = {Virtual Systolic Array for QR Decomposition}, booktitle = {15th Workshop on Advances in Parallel and Distributed Computational Models, IEEE International Parallel \& Distributed Processing Symposium (IPDPS 2013)}, year = {2013}, month = {2013-05}, publisher = {IEEE}, organization = {IEEE}, address = {Boston, MA}, abstract = {Systolic arrays offer a very attractive, data-centric execution model as an alternative to the von Neumann architecture. Hardware implementations of systolic arrays turned out not to be viable solutions in the past. This article shows how the systolic design principles can be applied to a software solution to deliver an algorithm with unprecedented strong scaling capabilities. A systolic array for the QR decomposition is developed, and a virtualization layer is used to map the algorithm onto a large distributed-memory system. Strong scaling properties superior to those of existing solutions are demonstrated.}, keywords = {dataflow programming, message passing, multi-core, QR decomposition, roofline model, systolic array}, doi = {10.1109/IPDPS.2013.119}, author = {Jakub Kurzak and Piotr Luszczek and Mark Gates and Ichitaro Yamazaki and Jack Dongarra} }

@inproceedings {icl:519, title = {VGrADS: Enabling e-Science Workflows on Grids and Clouds with Fault Tolerance}, booktitle = {SC{\textquoteright}09: The International Conference for High Performance Computing, Networking, Storage and Analysis}, year = {2009}, month = {2009-11}, address = {Portland, OR}, keywords = {grads}, author = {Lavanya Ramakrishnan and Daniel Nurmi and Anirban Mandal and Charles Koelbel and Dennis Gannon and Mark Huang and Yang-Suk Kee and Graziano Obertelli and Kiran Thyagaraja and Rich Wolski and Asim YarKhan and Dmitrii Zagorodnov} }

@inproceedings {icl:460, title = {Visualizing the Program Execution Control Flow of OpenMP Applications}, booktitle = {Proc. 4th International Workshop on OpenMP (IWOMP 2008)}, year = {2008}, month = {2008-01}, pages = {181-190}, publisher = {Springer}, series = {Lecture Notes in Computer Science 5004}, address = {West Lafayette, Indiana}, author = {Karl F{\"u}rlinger and Shirley Moore} }

@article {icl:202, title = {The Virtual Instrument: Support for Grid-enabled Scientific Simulations}, journal = {International Journal of High Performance Computing Applications}, volume = {18}, number = {1}, year = {2004}, month = {2004-01}, pages = {3-17}, author = {Henri Casanova and Thomas Bartol and Francine Berman and Adam Birnbaum and Jack Dongarra and Mark Ellisman and Marcio Faerman and Erhan Gockay and Michelle Miller and Graziano Obertelli and Stuart Pomerantz and Terry Sejnowski and Joel Stiles and Rich Wolski} }

@article {icl:174, title = {VisPerf: Monitoring Tool for Grid Computing}, journal = {Lecture Notes in Computer Science}, volume = {2659}, year = {2003}, month = {2003-00}, pages = {233-243}, publisher = {Springer-Verlag, Heidelberg}, keywords = {netsolve}, author = {DongWoo Lee and Jack Dongarra}, editor = {R. S. Ramakrishna} }

@article {icl:95, title = {The Virtual Instrument: Support for Grid-enabled Scientific Simulations}, journal = {Journal of Parallel and Distributed Computing (submitted)}, year = {2002}, month = {2002-10}, author = {Henri Casanova and Thomas Bartol and Francine Berman and Adam Birnbaum and Jack Dongarra and Mark Ellisman and Marcio Faerman and Erhan Gockay and Michelle Miller and Graziano Obertelli and Stuart Pomerantz and Terry Sejnowski and Joel Stiles and Rich Wolski} }