% Bibliography: publications by L. Salgueiro and collaborators.
% Conventions: one field per line; bare DOIs (no https://doi.org/ prefix);
% author names in "Last, First" form; month macros (jan..dec) unquoted;
% acronyms/proper nouns brace-protected in titles; @inproceedings preferred
% over its alias @conference.

@article{aSalgueirob,
  author   = {Salgueiro, Luis and Marcello, Javier and Vilaplana, Ver{\'o}nica},
  title    = {{SEG-ESRGAN}: A Multi-Task Network for Super-Resolution and Semantic Segmentation of Remote Sensing Images},
  journal  = {Remote Sensing},
  volume   = {14},
  number   = {22},
  pages    = {5862},
  year     = {2022},
  issn     = {2072-4292},
  doi      = {10.3390/rs14225862},
  abstract = {The production of highly accurate land cover maps is one of the primary challenges in remote sensing, which depends on the spatial resolution of the input images. Sometimes, high-resolution imagery is not available or is too expensive to cover large areas or to perform multitemporal analysis. In this context, we propose a multi-task network to take advantage of the freely available Sentinel-2 imagery to produce a super-resolution image, with a scaling factor of 5, and the corresponding high-resolution land cover map. Our proposal, named SEG-ESRGAN, consists of two branches: the super-resolution branch, that produces Sentinel-2 multispectral images at 2 m resolution, and an encoder{\textendash}decoder architecture for the semantic segmentation branch, that generates the enhanced land cover map. From the super-resolution branch, several skip connections are retrieved and concatenated with features from the different stages of the encoder part of the segmentation branch, promoting the flow of meaningful information to boost the accuracy in the segmentation task. Our model is trained with a multi-loss approach using a novel dataset to train and test the super-resolution stage, which is developed from Sentinel-2 and WorldView-2 image pairs. In addition, we generated a dataset with ground-truth labels for the segmentation task. To assess the super-resolution improvement, the PSNR, SSIM, ERGAS, and SAM metrics were considered, while to measure the classification performance, we used the IoU, confusion matrix and the F1-score. Experimental results demonstrate that the SEG-ESRGAN model outperforms different full segmentation and dual network models (U-Net, DeepLabV3+, HRNet and Dual_DeepLab), allowing the generation of high-resolution land cover maps in challenging scenarios using Sentinel-2 10 m bands.},
}

% NOTE(review): @phdthesis requires a "school" field; the institution below is
% inferred from the advisors' affiliation -- verify before publishing.
% The advisors are preserved in the (style-ignored) editor field.
@phdthesis{dSalgueiro22,
  author   = {Salgueiro, Luis},
  title    = {Super-Resolution and Semantic Segmentation of Remote Sensing Images Using Deep Learning Techniques},
  school   = {Universitat Polit{\`e}cnica de Catalunya},
  year     = {2022},
  month    = oct,
  editor   = {Vilaplana, Ver{\'o}nica and Marcello, Javier},
  abstract = {Remote sensing for Earth observation is a growing scientific field essential for many human activities. Among the different applications in the Remote Sensing domain, the production of thematic maps, such as Land Cover and Land Use, are among the most relevant, as this information plays a critical role in management, planning and monitoring activities at different levels. In this context, the Sentinel-2 satellites are of great importance since they provide open data on land and coastal areas at different spatial resolutions (10, 20, and 60 m), democratizing usability, and creating a high potential for the generation of valuable information, useful in many scenarios, such as agriculture, forestry, land cover and urban planning, among others.

In this thesis, we aim to exploit the usability of Sentinel-2 data by applying deep learning techniques, which are revolutionizing the world of computer vision and, recently, remote sensing. First, we propose super-resolution models to improve the spatial details of the different Sentinel-2 bands, and second, we propose the conjunction of semantic segmentation with super-resolution to generate improved land cover maps that benefit from the enhanced spatial details of the bands.

We first address super-resolution by proposing two different models, one for the 10 m/pixel bands to reach 2 m/pixel and another for the 20 and 60 m/pixel bands to achieve 10 m/pixel. Then, we propose two different multitasking models to derive land cover maps. The first one extending a semantic segmentation model to produce an additional super-resolution image and the second, improving our first super-resolution approach, to provide a semantic segmentation map, as well. We combine features of the different tasks within a single model to improve performance and to generate a high-resolution image with the corresponding high-quality land cover map. All models developed were evaluated, quantitatively and qualitatively, using different datasets, showing excellent performance in diverse complex scenarios.},
}

@article{aAbadal,
  author   = {Abadal, Sa{\"u}c and Salgueiro, Luis and Marcello, Javier and Vilaplana, Ver{\'o}nica},
  title    = {A Dual Network for Super-Resolution and Semantic Segmentation of {Sentinel-2} Imagery},
  journal  = {Remote Sensing},
  volume   = {13},
  pages    = {4547},
  year     = {2021},
  abstract = {There is a growing interest in the development of automated data processing workflows that provide reliable, high spatial resolution land cover maps. However, high-resolution remote sensing images are not always affordable. Taking into account the free availability of Sentinel-2 satellite data, in this work we propose a deep learning model to generate high-resolution segmentation maps from low-resolution inputs in a multi-task approach. Our proposal is a dual-network model with two branches: the Single Image Super-Resolution branch, that reconstructs a high-resolution version of the input image, and the Semantic Segmentation Super-Resolution branch, that predicts a high-resolution segmentation map with a scaling factor of 2. We performed several experiments to find the best architecture, training and testing on a subset of the S2GLC 2017 dataset. We based our model on the DeepLabV3+ architecture, enhancing the model and achieving an improvement of 5\% on IoU and almost 10\% on the recall score. Furthermore, our qualitative results demonstrate the effectiveness and usefulness of the proposed approach.},
}

@article{aSalgueiroa,
  author   = {Salgueiro, Luis and Marcello, Javier and Vilaplana, Ver{\'o}nica},
  title    = {Single-Image Super-Resolution of {Sentinel-2} Low Resolution Bands with Residual Dense Convolutional Neural Networks},
  journal  = {Remote Sensing},
  volume   = {13},
  pages    = {5007},
  year     = {2021},
  abstract = {Sentinel-2 satellites have become one of the main resources for Earth observation images because they are free of charge, have a great spatial coverage and high temporal revisit. Sentinel-2 senses the same location providing different spatial resolutions as well as generating a multi-spectral image with 13 bands of 10, 20, and 60 m/pixel. In this work, we propose a single-image super-resolution model based on convolutional neural networks that enhances the low-resolution bands (20 m and 60 m) to reach the maximal resolution sensed (10 m) at the same time, whereas other approaches provide two independent models for each group of LR bands. Our proposed model, named Sen2-RDSR, is made up of Residual in Residual blocks that produce two final outputs at maximal resolution, one for 20 m/pixel bands and the other for 60 m/pixel bands. The training is done in two stages, first focusing on 20 m bands and then on the 60 m bands. Experimental results using 6 quality metrics (RMSE, SRE, SAM, PSNR, SSIM, ERGAS) show that our model has superior performance compared to other state-of-the-art approaches, and it is very effective and suitable as a preliminary step for land and coastal applications, as studies involving pixel-based classification for Land-Use-Land-Cover or the generation of vegetation indices.},
}

@article{aSalgueiro,
  author   = {Salgueiro, Luis and Marcello, Javier and Vilaplana, Ver{\'o}nica},
  title    = {Super-Resolution of {Sentinel-2} Imagery Using Generative Adversarial Networks},
  journal  = {Remote Sensing},
  volume   = {12},
  year     = {2020},
  month    = jan,
  abstract = {Sentinel-2 satellites provide multi-spectral optical remote sensing images with four bands at 10 m of spatial resolution. These images, due to the open data distribution policy, are becoming an important resource for several applications. However, for small scale studies, the spatial detail of these images might not be sufficient. On the other hand, WorldView commercial satellites offer multi-spectral images with a very high spatial resolution, typically less than 2 m, but their use can be impractical for large areas or multi-temporal analysis due to their high cost. To exploit the free availability of Sentinel imagery, it is worth considering deep learning techniques for single-image super-resolution tasks, allowing the spatial enhancement of low-resolution (LR) images by recovering high-frequency details to produce high-resolution (HR) super-resolved images. In this work, we implement and train a model based on the Enhanced Super-Resolution Generative Adversarial Network (ESRGAN) with pairs of WorldView-Sentinel images to generate a super-resolved multispectral Sentinel-2 output with a scaling factor of 5. Our model, named RS-ESRGAN, removes the upsampling layers of the network to make it feasible to train with co-registered remote sensing images. Results obtained outperform state-of-the-art models using standard metrics like PSNR, SSIM, ERGAS, SAM and CC. Moreover, qualitative visual analysis shows spatial improvements as well as the preservation of the spectral information, allowing the super-resolved Sentinel-2 imagery to be used in studies requiring very high spatial resolution.},
}

@inproceedings{cMoliner,
  author    = {Moliner, Eloi and Salgueiro, Luis and Vilaplana, Ver{\'o}nica},
  title     = {Weakly Supervised Semantic Segmentation for Remote Sensing Hyperspectral Imaging},
  booktitle = {International Conference on Acoustics, Speech, and Signal Processing ({ICASSP} 2020)},
  year      = {2020},
  month     = may,
  abstract  = {This paper studies the problem of training a semantic segmentation neural network with weak annotations, in order to be applied in aerial vegetation images from Teide National Park. It proposes a Deep Seeded Region Growing system which consists on training a semantic segmentation network from a set of seeds generated by a Support Vector Machine. A region growing algorithm module is applied to the seeds to progressively increase the pixel-level supervision. The proposed method performs better than an SVM, which is one of the most popular segmentation tools in remote sensing image applications.},
}

@inproceedings{cSalgueiro,
  author    = {Salgueiro, Luis and Marcello, Javier and Vilaplana, Ver{\'o}nica},
  title     = {Comparative Study of Upsampling Methods for Super-Resolution in Remote Sensing},
  booktitle = {International Conference on Machine Vision},
  year      = {2019},
  month     = nov,
  keywords  = {deep learning, remote sensing, super-resolution, WorldView-2},
  abstract  = {Many remote sensing applications require high spatial resolution images, but the elevated cost of these images makes some studies unfeasible. Single-image super-resolution algorithms can improve the spatial resolution of a low-resolution image by recovering feature details learned from pairs of low-high resolution images. In this work, several configurations of ESRGAN, a state-of-the-art algorithm for image super-resolution are tested. We make a comparison between several scenarios, with different modes of upsampling and channels involved. The best results are obtained training a model with RGB-IR channels and using progressive upsampling.},
}

% NOTE(review): @mastersthesis requires a "school" field; the institution below
% is inferred from the advisors' affiliation -- verify before publishing.
@mastersthesis{xBalibrea19,
  author   = {Balibrea, Mar},
  title    = {Deep Learning for Semantic Segmentation of Airplane Hyperspectral Imaging},
  school   = {Universitat Polit{\`e}cnica de Catalunya},
  year     = {2019},
  editor   = {Salgueiro, Luis and Vilaplana, Ver{\'o}nica},
  abstract = {Given their success, both qualitative and quantitative, Deep Neural Networks have been used to approach classification and segmentation problems for images, especially during these last few years where it has been possible to design computers with sufficient capacity to make quick and efficient experiments.

In this work, we will study the use of two Convolutional Neural Networks (CNNs) to segment the ground of a land section of Maspalomas{\textquoteright} Park using an image taken by the flight of an airplane.

The comparison will be made in terms of computational cost, complexity and results that will be obtained while testing different algorithms, loss functions or optimizers and also while tuning some other parameters. The results will also be compared with a past work done with the same dataset but another methodology (SVM).},
}