@conference {c, title = {Distributed training strategies for a computer vision deep learning algorithm on a distributed GPU cluster}, booktitle = {International Conference on Computational Science (ICCS)}, year = {2017}, month = {06/2017}, publisher = {Elsevier}, organization = {Elsevier}, address = {Zurich, Switzerland}, abstract = {

Deep learning algorithms base their success on building high-learning-capacity models with millions of parameters that are tuned in a data-driven fashion. These models are trained by processing millions of examples, so the development of more accurate algorithms is usually limited by the throughput of the computing devices on which they are trained. In this work, we explore how the training of a state-of-the-art neural network for computer vision can be parallelized on a distributed GPU cluster. The effect of distributing the training process is addressed from two different points of view. First, the scalability of the task and its performance in the distributed setting are analyzed. Second, the impact of distributed training methods on the final accuracy of the models is studied.


}, keywords = {distributed computing; parallel systems; deep learning; convolutional neural networks}, doi = {10.1016/j.procs.2017.05.074}, url = {http://www.sciencedirect.com/science/article/pii/S1877050917306129}, author = {Campos, V{\'\i}ctor and Sastre, Francesc and Yag{\"u}es, Maurici and Bellver, M{\'\i}riam and Gir{\'o}-i-Nieto, Xavier and Torres, Jordi} }