@inproceedings{cRamon,
  title     = {Hyperparameter-Free Losses for Model-Based Monocular Reconstruction},
  booktitle = {ICCV 2019 Workshop on Geometry Meets Deep Learning},
  year      = {2019},
  month     = nov,
  publisher = {IEEE / Computer Vision Foundation},
  address   = {Seoul, South Korea},
  abstract  = {This work proposes novel hyperparameter-free losses for single view 3D reconstruction with morphable models (3DMM). We dispense with the hyperparameters used in other works by exploiting geometry, so that the shape of the object and the camera pose are jointly optimized in a sole term expression. This simplification reduces the optimization time and its complexity. Moreover, we propose a novel implicit regularization technique based on random virtual projections that does not require additional 2D or 3D annotations. Our experiments suggest that minimizing a shape reprojection error together with the proposed implicit regularization is especially suitable for applications that require precise alignment between geometry and image spaces, such as augmented reality. We evaluate our losses on a large scale dataset with 3D ground truth and publish our implementations to facilitate reproducibility and public benchmarking in this field.},
  author    = {Ramon, Eduard and Ruiz, Guillermo and Batard, Thomas and Gir{\'o}-i-Nieto, Xavier},
}

@unpublished{cRamon19,
  title    = {Plug-and-Train Loss for Model-Based Single View 3D Reconstruction},
  year     = {2019},
  month    = feb,
  note     = {Presented at the BMVA Technical Meeting: 3D Vision with Deep Learning, London, UK},
  abstract = {Obtaining 3D geometry from images is a well studied problem by the computer vision community. In the concrete case of a single image, a considerable amount of prior knowledge is often required to obtain plausible reconstructions. Recently, deep neural networks in combination with 3D morphable models (3DMM) have been used in order to address the lack of scene information, leading to more accurate results. Nevertheless, the losses employed during the training process are usually a linear combination of terms where the coefficients, also called hyperparameters, must be carefully tuned for each dataset to obtain satisfactory results. In this work we propose a hyperparameters-free loss that exploits the geometry of the problem for learning 3D reconstruction from a single image. The proposed formulation is not dataset dependent, is robust against very large camera poses and jointly optimizes the shape of the object and the camera pose.},
  author   = {Ramon, Eduard and Villar, Jordi and Ruiz, Guillermo and Batard, Thomas and Gir{\'o}-i-Nieto, Xavier},
}