@inproceedings{cTarres,
  title     = {{GAN}-based Image Colourisation with Feature Reconstruction Loss},
  booktitle = {CVPR 2021 Women in Computer Vision Workshop},
  year      = {2021},
  month     = jun,
  address   = {Virtual},
  abstract  = {Image colourisation is the task of adding plausible colour to grayscale images. This transformation requires obtaining a three dimensional colour-valued mapping from a real-valued grayscale image, which leads to an undetermined problem because the grayscale semantics and texture provide cues for multiple possible colour mappings. The goal of image colourisation is not to recover the ground truth colour, but to produce a colourisation that is perceived as natural by a human observer. Our work takes as a baseline a scheme based on an end-to-end trainable convolutional neural network (CNN) trained with a smooth L1 loss to predict the $ab$ channels of a colour image given the $L$ channel. We introduce an extra perceptual reconstruction loss during training to improve the capabilities of an adversarial model, that we adopt as a baseline.},
  author    = {Tarr{\'e}s, Laia and G{\'o}rriz, Marc and Gir{\'o}-i-Nieto, Xavier and Mrak, Marta},
}

@inproceedings{cGorrizc,
  title        = {Assessing Knee {OA} Severity with {CNN} Attention-based End-to-end Architectures},
  booktitle    = {International Conference on Medical Imaging with Deep Learning (MIDL) 2019},
  year         = {2019},
  month        = feb,
  publisher    = {JMLR},
  organization = {JMLR},
  address      = {London, United Kingdom},
  abstract     = {This work proposes a novel end-to-end convolutional neural network (CNN) architecture to automatically quantify the severity of knee osteoarthritis (OA) using X-Ray images, which incorporates trainable attention modules acting as unsupervised fine-grained detectors of the region of interest (ROI). The proposed attention modules can be applied at different levels and scales across any CNN pipeline helping the network to learn relevant attention patterns over the most informative parts of the image at different resolutions. We test the proposed attention mechanism on existing state-of-the-art CNN architectures as our base models, achieving promising results on the benchmark knee OA datasets from the osteoarthritis initiative (OAI) and multicenter osteoarthritis study (MOST). All the codes from our experiments will be publicly available on the github repository: https://github.com/marc-gorriz/KneeOA-CNNAttention},
  url          = {http://proceedings.mlr.press/v102/gorriz19a.html},
  author       = {G{\'o}rriz, Marc and Antony, Joseph and McGuinness, Kevin and Gir{\'o}-i-Nieto, Xavier and O'Connor, N.},
}

@incollection{bGorriz18,
  title     = {Leishmaniasis Parasite Segmentation and Classification Using Deep Learning},
  booktitle = {Articulated Motion and Deformable Objects},
  series    = {Lecture Notes in Computer Science},
  volume    = {10945},
  year      = {2018},
  pages     = {53--62},
  publisher = {Springer International Publishing},
  abstract  = {Leishmaniasis is considered a neglected disease that causes thousands of deaths annually in some tropical and subtropical countries. There are various techniques to diagnose leishmaniasis of which manual microscopy is considered to be the gold standard. There is a need for the development of automatic techniques that are able to detect parasites in a robust and unsupervised manner. In this paper we present a procedure for automatizing the detection process based on a deep learning approach. We train a U-net model that successfully segments leishmania parasites and classifies them into promastigotes, amastigotes and adhered parasites.},
  isbn      = {978-3-319-94544-6},
  doi       = {10.1007/978-3-319-94544-6},
  author    = {G{\'o}rriz, Marc and Aparicio, Albert and Ravent{\'o}s, Berta and Vilaplana, Ver{\'o}nica and Sayrol, Elisa and L{\'o}pez-Codina, Daniel},
}

@inproceedings{cGorrizb,
  title     = {Leishmaniasis Parasite Segmentation and Classification Using Deep Learning},
  booktitle = {International Conference on Articulated Motion and Deformable Objects},
  year      = {2018},
  address   = {Palma, Spain},
  abstract  = {Leishmaniasis is considered a neglected disease that causes thousands of deaths annually in some tropical and subtropical countries. There are various techniques to diagnose leishmaniasis of which manual microscopy is considered to be the gold standard. There is a need for the development of automatic techniques that are able to detect parasites in a robust and unsupervised manner. In this paper we present a procedure for automatizing the detection process based on a deep learning approach. We train a U-net model that successfully segments leishmania parasites and classifies them into promastigotes, amastigotes and adhered parasites.},
  author    = {G{\'o}rriz, Marc and Aparicio, Albert and Ravent{\'o}s, Berta and L{\'o}pez-Codina, Daniel and Vilaplana, Ver{\'o}nica and Sayrol, Elisa},
}

@mastersthesis{xGorriz,
  title    = {Active Deep Learning for Medical Imaging Segmentation},
  school   = {Universitat Polit{\`e}cnica de Catalunya},
  year     = {2017},
  note     = {Grade: A (9.7/10)},
  abstract = {This thesis proposes a novel active learning framework capable to train effectively a convolutional neural network for semantic segmentation of medical imaging, with a limited amount of training labeled data. Our approach tries to apply in segmentation existing active learning techniques, which is becoming an important topic today because of the many problems caused by the lack of large amounts of data. We explore different strategies to study the image information and introduce a previously used cost-effective active learning method based on the selection of high confidence predictions to assign automatically pseudo-labels with the aim of reducing the manual annotations. First, we made a simple application for handwritten digit classification to get started to the methodology and then we test the system with a medical image database for the treatment of melanoma skin cancer. Finally, we compared the traditional training methods with our active learning proposals, specifying the conditions and parameters required for it to be optimal.},
  url      = {http://hdl.handle.net/2117/109304},
  author   = {G{\'o}rriz, Marc},
  editor   = {Gir{\'o}-i-Nieto, Xavier and Carlier, Axel and Faure, Emmanuel},
}

@inproceedings{cGorriz,
  title     = {Active Deep Learning for Medical Imaging Segmentation},
  booktitle = {Medical Image meets NIPS 2017 Workshop},
  year      = {2017},
  month     = nov,
  abstract  = {We propose a novel Active Learning framework capable to train effectively a convolutional neural network for semantic segmentation of medical imaging, with a limited amount of training labeled data. Our contribution is a practical Cost-Effective Active Learning approach using Dropout at test time as Monte Carlo sampling to model the pixel-wise uncertainty and to analyze the image information to improve the training performance.},
  author    = {G{\'o}rriz, Marc and Gir{\'o}-i-Nieto, Xavier and Carlier, Axel and Faure, Emmanuel},
}

@inproceedings{cGorriza,
  title     = {Cost-Effective Active Learning for Melanoma Segmentation},
  booktitle = {ML4H: Machine Learning for Health Workshop at NIPS 2017},
  year      = {2017},
  month     = nov,
  address   = {Long Beach, CA, USA},
  abstract  = {We propose a novel Active Learning framework capable to train effectively a convolutional neural network for semantic segmentation of medical imaging, with a limited amount of training labeled data. Our contribution is a practical Cost-Effective Active Learning approach using Dropout at test time as Monte Carlo sampling to model the pixel-wise uncertainty and to analyze the image information to improve the training performance.},
  url       = {https://arxiv.org/abs/1711.09168},
  author    = {G{\'o}rriz, Marc and Gir{\'o}-i-Nieto, Xavier and Carlier, Axel and Faure, Emmanuel},
}
}, url = {https://arxiv.org/abs/1711.09168}, author = {G{\'o}rriz, Marc and Xavier Gir{\'o}-i-Nieto and Carlier, Axel and Faure, Emmanuel} }