@mastersthesis{xRoldana,
  title    = {Speech-Conditioned Face Generation with Deep Adversarial Networks},
  author   = {Rold{\'a}n, Francisco},
  editor   = {Pascual-deLaPuente, Santiago and Salvador, Amaia and McGuinness, Kevin and Gir{\'o}-i-Nieto, Xavier},
  school   = {Universitat Polit{\`e}cnica de Catalunya},
  year     = {2018},
  abstract = {Image synthesis have been a trending task for the AI community in recent years. Many works have shown the potential of Generative Adversarial Networks (GANs) to deal with tasks such as text or audio to image synthesis. In particular, recent advances in deep learning using audio have inspired many works involving both visual and auditory information. In this work we propose a face synthesis method using audio and/or language representations as inputs. Furthermore, a dataset which relates speech utterances with a face and an identity has been built, fitting for other tasks apart from face synthesis such as speaker recognition or voice conversion.},
  internal-note = {Cleaned from auto-export: added required school field, normalised editor names to "Last, First", removed SlideShare caption from abstract.},
}