@article {carlier2016assessment, title = {Assessment of Crowdsourcing and Gamification Loss in User-Assisted Object Segmentation}, journal = {Multimedia Tools and Applications}, volume = {75}, number = {23}, year = {2016}, month = nov, pages = {15901-15928}, abstract = {

There has been a growing interest in applying human computation -- particularly crowdsourcing techniques -- to assist in the solution of multimedia, image processing, and computer vision problems that are still too difficult to solve with fully automatic algorithms, yet relatively easy for humans.

In this paper, we focus on a specific problem -- object segmentation within color images -- and compare solutions that combine color image segmentation algorithms with human effort, either in the form of an explicit interactive segmentation task or through the implicit collection of valuable human traces with a game. We use Click{\textquoteright}n{\textquoteright}Cut, a friendly, web-based, interactive segmentation tool that allows segmentation tasks to be assigned to many users, and Ask{\textquoteright}nSeek, a game with a purpose designed for object detection and segmentation.

The two main contributions of this paper are: (i) we use the results of Click{\textquoteright}n{\textquoteright}Cut campaigns with different groups of users to examine and quantify the crowdsourcing loss incurred when an interactive segmentation task is assigned to paid crowd-workers, comparing their results to those obtained when computer vision experts perform the same tasks; (ii) since interactive segmentation tasks are inherently tedious and prone to fatigue, we compare the quality of the results obtained with Click{\textquoteright}n{\textquoteright}Cut to those obtained using a (fun, interactive, and potentially less tedious) game designed for the same purpose. We call this contribution the assessment of the gamification loss, since it refers to how much segmentation quality may be lost when switching to a game-based approach to the same task.

We demonstrate that the crowdsourcing loss is significant when all the data points from workers are used, but decreases substantially (becoming comparable to the quality achieved by expert users performing similar tasks) after a modest amount of data analysis and the filtering out of users whose data are clearly not useful. We also show that, on the other hand, the gamification loss is significantly more severe: the quality of the results drops by roughly half when switching from a focused (yet tedious) task to a more fun and relaxed game environment.

}, keywords = {Crowdsourcing, GWAP, Object detection, Object segmentation, Serious games}, issn = {1573-7721}, doi = {10.1007/s11042-015-2897-6}, url = {https://doi.org/10.1007/s11042-015-2897-6}, author = {Carlier, Axel and Salvador, Amaia and Cabezas, Ferran and Gir{\'o}-i-Nieto, Xavier and Charvillat, Vincent and Marques, Oge} }