@conference {cGiro-i-Nieto, title = {One Perceptron to Rule Them All: Language, Vision, Audio and Speech (tutorial)}, booktitle = {ACM International Conference on Multimedia Retrieval (ICMR) 2020}, year = {2020}, month = {06/2020}, publisher = {ACM}, organization = {ACM}, address = {Dublin, Ireland}, abstract = {

Deep neural networks have boosted the convergence of multimedia data analytics into a unified framework shared by practitioners in natural language, vision and speech. Image captioning, lip reading and video sonorization are some of the first applications of a new and exciting field of research exploiting the generalization properties of deep neural representations. This tutorial will first review the basic neural architectures used to encode and decode vision, text and audio, and will then review those models that have successfully translated information across modalities.

Part II: Neural Encoders \& Decoders [GSlides] [Video]

Part III: Language \& Vision [GSlides] [Video]

Part IV: Audio \& Vision [GSlides] [Video]

Part V: Speech \& Vision [GSlides]
}, keywords = {deep learning, multimodal, cross-modal, joint embeddings}, doi = {10.1145/3372278.3390740}, author = {Xavier Gir{\'o}-i-Nieto} }