@inproceedings{cGorriz,
  title     = {Active Deep Learning for Medical Imaging Segmentation},
  booktitle = {Medical Image meets NIPS 2017 Workshop},
  year      = {2017},
  month     = nov,
  abstract  = {We propose a novel Active Learning framework capable to train effectively a convolutional neural network for semantic segmentation of medical imaging, with a limited amount of training labeled data. Our contribution is a practical Cost-Effective Active Learning approach using Dropout at test time as Monte Carlo sampling to model the pixel-wise uncertainty and to analyze the image information to improve the training performance.},
  author    = {G{\'o}rriz, Marc and Gir{\'o}-i-Nieto, Xavier and Carlier, Axel and Faure, Emmanuel}
}

@mastersthesis{xGorriz,
  title         = {Active Deep Learning for Medical Imaging Segmentation},
  year          = {2017},
  school        = {Universitat Polit{\`e}cnica de Catalunya},
  internal-note = {school inferred from the UPCommons handle (hdl 2117) -- confirm},
  note          = {Grade: A (9.7/10)},
  abstract      = {This thesis proposes a novel active learning framework capable to train effectively a convolutional neural network for semantic segmentation of medical imaging, with a limited amount of training labeled data. Our approach tries to apply in segmentation existing active learning techniques, which is becoming an important topic today because of the many problems caused by the lack of large amounts of data. We explore different strategies to study the image information and introduce a previously used cost-effective active learning method based on the selection of high confidence predictions to assign automatically pseudo-labels with the aim of reducing the manual annotations. First, we made a simple application for handwritten digit classification to get started to the methodology and then we test the system with a medical image database for the treatment of melanoma skin cancer. Finally, we compared the traditional training methods with our active learning proposals, specifying the conditions and parameters required for it to be optimal.},
  url           = {http://hdl.handle.net/2117/109304},
  author        = {G{\'o}rriz, Marc},
  editor        = {Gir{\'o}-i-Nieto, Xavier and Carlier, Axel and Faure, Emmanuel}
}

@inproceedings{cGorriza,
  title     = {Cost-Effective Active Learning for Melanoma Segmentation},
  booktitle = {ML4H: Machine Learning for Health Workshop at NIPS 2017},
  year      = {2017},
  month     = nov,
  address   = {Long Beach, CA, USA},
  abstract  = {We propose a novel Active Learning framework capable to train effectively a convolutional neural network for semantic segmentation of medical imaging, with a limited amount of training labeled data. Our contribution is a practical Cost-Effective Active Learning approach using Dropout at test time as Monte Carlo sampling to model the pixel-wise uncertainty and to analyze the image information to improve the training performance.},
  url       = {https://arxiv.org/abs/1711.09168},
  author    = {G{\'o}rriz, Marc and Gir{\'o}-i-Nieto, Xavier and Carlier, Axel and Faure, Emmanuel}
}

@article{a,
  title         = {Assessment of Crowdsourcing and Gamification Loss in User-Assisted Object Segmentation},
  journal       = {Multimedia Tools and Applications},
  volume        = {75},
  number        = {23},
  pages         = {15901--15928},
  internal-note = {original export had volume={23} and the page range in "chapter"; vol. 75, no. 23 reconstructed from the DOI metadata -- verify},
  year          = {2016},
  month         = nov,
  abstract      = {There has been a growing interest in applying human computation -- particularly crowdsourcing techniques -- to assist in the solution of multimedia, image processing, and computer vision problems which are still too difficult to solve using fully automatic algorithms, and yet relatively easy for humans.

In this paper we focus on a specific problem -- object segmentation within color images -- and compare different solutions which combine color image segmentation algorithms with human efforts, either in the form of an explicit interactive segmentation task or through an implicit collection of valuable human traces with a game. We use Click{\textquoteright}n{\textquoteright}Cut, a friendly, web-based, interactive segmentation tool that allows segmentation tasks to be assigned to many users, and Ask{\textquoteright}nSeek, a game with a purpose designed for object detection and segmentation.

The two main contributions of this paper are: (i) We use the results of Click{\textquoteright}n{\textquoteright}Cut campaigns with different groups of users to examine and quantify the crowdsourcing loss incurred when an interactive segmentation task is assigned to paid crowd-workers, comparing their results to the ones obtained when computer vision experts are asked to perform the same tasks. (ii) Since interactive segmentation tasks are inherently tedious and prone to fatigue, we compare the quality of the results obtained with Click{\textquoteright}n{\textquoteright}Cut with the ones obtained using a (fun, interactive, and potentially less tedious) game designed for the same purpose. We call this contribution the assessment of the gamification loss, since it refers to how much quality of segmentation results may be lost when we switch to a game-based approach to the same task.

We demonstrate that the crowdsourcing loss is significant when using all the data points from workers, but decreases substantially (and becomes comparable to the quality of expert users performing similar tasks) after performing a modest amount of data analysis and filtering out of users whose data are clearly not useful. We also show that -- on the other hand -- the gamification loss is significantly more severe: the quality of the results drops roughly by half when switching from a focused (yet tedious) task to a more fun and relaxed game environment.},
  keywords      = {Crowdsourcing, GWAP, Object detection, Object segmentation, Serious games},
  issn          = {1573-7721},
  doi           = {10.1007/s11042-015-2897-6},
  author        = {Carlier, Axel and Salvador, Amaia and Cabezas, Ferran and Gir{\'o}-i-Nieto, Xavier and Charvillat, Vincent and Marques, Oge}
}

@mastersthesis{xCabezas,
  title    = {Co-filtering human interaction and object segmentation},
  year     = {2015},
  school   = {Universitat Polit{\`e}cnica de Catalunya},
  note     = {Advisors: Axel Carlier and Vincent Charvillat (ENSEEIHT-Universit{\'e} de Toulouse) / Amaia Salvador and Xavier Gir{\'o}-i-Nieto (UPC). Degree: Audiovisual Engineering (4 years) at Telecom BCN-ETSETB (UPC). Grade: A with honors (9.6/10)},
  abstract = {For so many years the problem of object segmentation has been present in image processing field. Click{\textquoteright}n{\textquoteright}Cut, an already existing web tool for interactive object segmentation, helps us to obtain segmentations of the objects by clicking in green (foreground clicks) inside the object to segment, and in red (background clicks) outside the object to segment. However, the behaviour of all humans in front of this web tool is not equal. For this reason, it can be possible that these human interactions do not help us to obtain a good object segmentation as would be the result of a bad human interaction. The main aim of this project is to implement some techniques that allow us to treat with these bad human interactions in order to obtain the best object segmentation possible.},
  url      = {http://hdl.handle.net/2099.1/25821},
  author   = {Cabezas, Ferran},
  editor   = {Carlier, Axel and Salvador, Amaia and Gir{\'o}-i-Nieto, Xavier and Charvillat, Vincent}
}

@inproceedings{cCabezas,
  title     = {Quality Control in Crowdsourced Object Segmentation},
  booktitle = {IEEE International Conference on Image Processing (ICIP), 2015},
  year      = {2015},
  month     = sep,
  note      = {Selected among Top 10\% papers in ICIP 2015 based on the reviewer scores and recommendations},
  abstract  = {This paper explores processing techniques to deal with noisy data in crowdsourced object segmentation tasks. We use the data collected with "Click{\textquoteright}n{\textquoteright}Cut", an online interactive segmentation tool, and we perform several experiments towards improving the segmentation results. First, we introduce different superpixel-based techniques to filter users{\textquoteright} traces, and assess their impact on the segmentation result. Second, we present different criteria to detect and discard the traces from potential bad users, resulting in a remarkable increase in performance. Finally, we show a novel superpixel-based segmentation algorithm which does not require any prior filtering and is based on weighting each user{\textquoteright}s contribution according to his/her level of expertise.},
  url       = {http://arxiv.org/abs/1505.00145},
  author    = {Cabezas, Ferran and Carlier, Axel and Salvador, Amaia and Gir{\'o}-i-Nieto, Xavier and Charvillat, Vincent}
}

@inproceedings{cCarlier,
  title     = {Click{\textquoteright}n{\textquoteright}Cut: Crowdsourced Interactive Segmentation with Object Candidates},
  booktitle = {3rd International ACM Workshop on Crowdsourcing for Multimedia (CrowdMM)},
  year      = {2014},
  month     = nov,
  address   = {Orlando, Florida (USA)},
  abstract  = {This paper introduces Click{\textquoteright}n{\textquoteright}Cut, a novel web tool for interactive object segmentation addressed to crowdsourcing tasks. Click{\textquoteright}n{\textquoteright}Cut combines bounding boxes and clicks generated by workers to obtain accurate object segmentations. These segmentations are created by combining precomputed object candidates in a light computational fashion that allows an immediate response from the interface. Click{\textquoteright}n{\textquoteright}Cut has been tested with a crowdsourcing campaign to annotate a subset of the Berkeley Segmentation Dataset (BSDS). Results show competitive results with state of the art, especially in time to converge to a high quality segmentation. The data collection campaign included golden standard tests to detect cheaters.},
  keywords  = {Crowdsourcing, figure-ground segmentation, human computing, object candidates},
  doi       = {10.1145/2660114.2660125},
  author    = {Carlier, Axel and Salvador, Amaia and Gir{\'o}-i-Nieto, Xavier and Marques, Oge and Charvillat, Vincent}
}

@mastersthesis{xSalvador13,
  title         = {Crowdsourced Object Segmentation with a Game},
  year          = {2013},
  school        = {Universitat Polit{\`e}cnica de Catalunya},
  internal-note = {school inferred from the advisor affiliations -- confirm},
  note          = {Co-advised with Axel Carlier (INP Toulouse), Vincent Charvillat (INP Toulouse) and Oge Marques (Florida Atlantic University)},
  author        = {Salvador, Amaia},
  editor        = {Gir{\'o}-i-Nieto, Xavier and Carlier, Axel and Charvillat, Vincent and Marques, Oge}
}

@inproceedings{cSalvador13,
  title     = {Crowdsourced Object Segmentation with a Game},
  booktitle = {ACM Workshop on Crowdsourcing for Multimedia (CrowdMM)},
  year      = {2013},
  month     = oct,
  address   = {Barcelona},
  abstract  = {We introduce a new algorithm for image segmentation based on crowdsourcing through a game: Ask{\textquoteright}nSeek. The game provides information on the objects of an image, under the form of clicks that are either on the object, or on the background. These logs are then used in order to determine the best segmentation for an object among a set of candidates generated by the state-of-the-art CPMC algorithm. We also introduce a simulator that allows the generation of game logs and therefore gives insight about the number of games needed on an image to perform acceptable segmentation.},
  isbn      = {978-1-4503-2396-3},
  doi       = {10.1145/2506364.2506367},
  author    = {Salvador, Amaia and Carlier, Axel and Gir{\'o}-i-Nieto, Xavier and Marques, Oge and Charvillat, Vincent}
}