@conference {cBernal, title = {Transcription-Enriched Joint Embeddings for Spoken Descriptions of Images and Videos}, booktitle = {CVPR 2020 Workshop on Egocentric Perception, Interaction and Computing}, year = {2020}, month = {06/2020}, publisher = {arXiv}, organization = {arXiv}, address = {Seattle, WA, USA}, abstract = {

In this work, we propose an effective approach for training unique embedding representations by combining three simultaneous modalities: image, speech and textual narratives. The proposed methodology departs from a baseline system that spawns an embedding space trained with only spoken narratives and image cues. Our experiments on the EPIC-Kitchen and Places Audio Caption datasets show that introducing human-generated textual transcriptions of the spoken narratives helps the training procedure, yielding better embedding representations. The triad of speech, image and words allows for a better estimate of the point embedding and shows an improvement in performance on tasks such as image and speech retrieval, even when the third modality, text, is not present in the task.

}, author = {Oriol, Benet and Luque, J. and Diego, Ferran and Gir{\'o}-i-Nieto, Xavier} }