@conference {,
  title = {FFT-Based Gradient Sparsification for the Distributed Training of Deep Neural Networks},
  booktitle = {29th International Symposium on High-Performance Parallel and Distributed Computing (HPDC '20)},
  year = {2020},
  month = {2020-06},
  publisher = {ACM},
  organization = {ACM},
  address = {Stockholm, Sweden},
  abstract = {The performance and efficiency of distributed training of Deep Neural Networks (DNNs) depend heavily on the performance of gradient averaging among participating processes, a step bound by communication costs. There are two major approaches to reducing communication overhead: overlapping communication with computation (lossless), or reducing the volume of communication (lossy). The lossless approach works well for linear neural architectures, e.g., VGG and AlexNet, but more recent networks such as ResNet and Inception offer limited opportunity for such overlapping. Therefore, approaches that reduce the amount of transferred data (lossy) become more suitable. In this paper, we present a novel, explainable lossy method that sparsifies gradients in the frequency domain, together with a new range-based floating-point representation to quantize and further compress gradients. These dynamic techniques strike a balance between compression ratio, accuracy, and computational overhead, and are optimized to maximize performance in heterogeneous environments. Unlike existing works that strive for a higher compression ratio, we stress the robustness of our methods and provide guidance on recovering accuracy from failures. To achieve this, we prove how FFT sparsification affects convergence and accuracy, and show that our method is guaranteed to converge when using a diminishing θ during training. Reducing θ can also be used to recover accuracy after a failure. Compared to state-of-the-art lossy methods, e.g., QSGD, TernGrad, and Top-k sparsification, our approach incurs less approximation error and therefore achieves better wall-clock time and accuracy. On an 8-GPU, InfiniBand-connected cluster, our techniques accelerate AlexNet training by up to 2.26x over the uncompressed baseline, 1.31x over QSGD, 1.25x over TernGrad, and 1.47x over Top-k sparsification.},
  keywords = {FFT, Gradient Compression, Lossy Gradients, Machine Learning, Neural Networks},
  doi = {https://doi.org/10.1145/3369583.3392681},
  author = {Linnan Wang and Wei Wu and Junyu Zhang and Hang Liu and George Bosilca and Maurice Herlihy and Rodrigo Fonseca}
}
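
The abstract describes sparsifying gradients in the frequency domain with a diminishing threshold θ. The snippet below is a minimal NumPy sketch of that general idea only, not the authors' implementation: the function name fft_sparsify, the keep-fraction parameter theta, and the top-k selection in frequency space are illustrative assumptions based solely on the abstract's description.

# Minimal sketch (assumed, not the paper's code): frequency-domain gradient
# sparsification. The flattened gradient is transformed with an FFT, only the
# largest-magnitude fraction `theta` of frequency components is kept (these
# (index, value) pairs are what would be communicated), and an approximate
# gradient is reconstructed with the inverse FFT before averaging.
import numpy as np

def fft_sparsify(grad: np.ndarray, theta: float = 0.1) -> np.ndarray:
    """Keep only the largest-magnitude `theta` fraction of frequency components."""
    flat = grad.ravel()
    spectrum = np.fft.rfft(flat)               # real-input FFT of the flattened gradient
    k = max(1, int(theta * spectrum.size))     # number of components to keep
    keep = np.argpartition(np.abs(spectrum), -k)[-k:]  # indices of the k largest magnitudes
    sparse_spectrum = np.zeros_like(spectrum)
    sparse_spectrum[keep] = spectrum[keep]
    approx = np.fft.irfft(sparse_spectrum, n=flat.size)  # reconstruct approximate gradient
    return approx.reshape(grad.shape)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    g = rng.standard_normal((256, 256))
    g_hat = fft_sparsify(g, theta=0.05)
    print("relative approximation error:", np.linalg.norm(g - g_hat) / np.linalg.norm(g))

In this sketch a smaller theta keeps fewer frequency components, which mirrors the abstract's point that reducing θ trades approximation error against compression.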