@article{aPina,
  title   = {Feature propagation as self-supervision signals on graphs},
  journal = {Knowledge-Based Systems},
  note    = {Submitted},
  author  = {Pina, Oscar and Vilaplana, Ver{\'o}nica}
}

@inproceedings{cPinab,
  title     = {Layer-wise self-supervised learning on graphs},
  booktitle = {{KDD} 2023 Workshop on Deep Learning on Graphs: Methods and Applications ({DLG-KDD} 2023)},
  year      = {2023},
  month     = aug,
  address   = {Long Beach, USA},
  abstract  = {End-to-end training of graph neural networks (GNN) on large graphs presents several memory and computational challenges, and limits the application to shallow architectures as depth exponentially increases the memory and space complexities. In this manuscript, we propose Layer-wise Regularized Graph Infomax, an algorithm to train GNNs layer by layer in a self-supervised manner. We decouple the feature propagation and feature transformation carried out by GNNs to learn node representations in order to derive a loss function based on the prediction of future inputs. We evaluate the algorithm in inductive large graphs and show similar performance to other end to end methods and a substantially increased efficiency, which enables the training of more sophisticated models in one single device. We also show that our algorithm avoids the oversmoothing of the representations, another common challenge of deep GNNs.},
  author    = {Pina, Oscar and Vilaplana, Ver{\'o}nica}
}

@inproceedings{cPinaa,
  title     = {Self-supervised graph representations of {WSIs}},
  booktitle = {Geometric Deep Learning in Medical Image Analysis},
  year      = {2022},
  abstract  = {In this manuscript we propose a framework for the analysis of whole slide images (WSI) on the cell entity space with self-supervised deep learning on graphs and explore its representation quality at different levels of application. It consists of a two step process in which the cell level analysis is performed locally, by clusters of nearby cells that can be seen as small regions of the image, in order to learn representations that capture the cell environment and distribution. In a second stage, a WSI graph is generated with these regions as nodes and the representations learned as initial node embeddings. The graph is leveraged for a downstream task, region of interest (ROI) detection addressed as a graph clustering. The representations outperform the evaluation baselines at both levels of application, which has been carried out predicting whether a cell, or region, is tumor or not based on its learned representations with a logistic regressor.},
  url       = {https://proceedings.mlr.press/v194/pina22a/pina22a.pdf},
  author    = {Pina, Oscar and Vilaplana, Ver{\'o}nica}
}

@inproceedings{cPina,
  title     = {Structural Networks for Brain Age Prediction},
  booktitle = {Medical Imaging with Deep Learning ({MIDL} 2022)},
  year      = {2022},
  month     = aug,
  abstract  = {Biological networks have gained considerable attention within the Deep Learning community because of the promising framework of Graph Neural Networks (GNN), neural models that operate in complex networks. In the context of neuroimaging, GNNs have successfully been employed for functional MRI processing but their application to ROI-level structural MRI (sMRI) remains mostly unexplored. In this work we analyze the implementation of these geometric models with sMRI by building graphs of ROIs (ROI graphs) using tools from Graph Signal Processing literature and evaluate their performance in a downstream supervised task, age prediction. We first make a qualitative and quantitative comparison of the resulting networks obtained with common graph topology learning strategies. In a second stage, we train GNN-based models for brain age prediction. Since the order of every ROI graph is exactly the same and each vertex is an entity by itself (a ROI), we evaluate whether including ROI information during message-passing or global pooling operations is beneficial and compare the performance of GNNs against a Fully-Connected Neural Network baseline. The results show that ROI-level information is needed during the global pooling operation in order to achieve competitive results. However, no relevant improvement has been detected when it is incorporated during the message passing. These models achieve a MAE of 4.27 in hold-out test data, which is a performance very similar to the baseline, suggesting that the inductive bias included with the obtained graph connectivity is relevant and useful to reduce the dimensionality of the problem},
  author    = {Pina, Oscar and Cumplido-Mayoral, Irene and Cacciaglia, Raffaele and Gonz{\'a}lez-de-Ech{\'a}varri, Jos{\'e} Mar{\'\i}a and Gispert, Juan D. and Vilaplana, Ver{\'o}nica}
}