@article{aDominguez,
  author        = {Dominguez, Albert and Mas-Montserrat, Daniel and Bustamante, Carlos and Gir{\'o}-i-Nieto, Xavier and Ioannidis, Alexander G.},
  title         = {Neural {ADMIXTURE}: Rapid Population Clustering with Autoencoders},
  journal       = {Nature Computational Science},
  year          = {2023},
  month         = jul,
  abstract      = {Characterizing the genetic substructure of large cohorts has become increasingly important as genetic association and prediction studies are extended to massive, increasingly diverse, biobanks. ADMIXTURE and STRUCTURE are widely used unsupervised clustering algorithms for characterizing such ancestral genetic structure. These methods decompose individual genomes into fractional cluster assignments with each cluster representing a vector of DNA marker frequencies. The assignments, and clusters, provide an interpretable representation for geneticists to describe population substructure at the sample level. However, with the rapidly increasing size of population biobanks and the growing numbers of variants genotyped (or sequenced) per sample, such traditional methods become computationally intractable. Furthermore, multiple runs with different hyperparameters are required to properly depict the population clustering using these traditional methods, increasing the computational burden. This can lead to days of compute. In this work we present Neural ADMIXTURE, a neural network autoencoder that follows the same modeling assumptions as ADMIXTURE, providing similar (or better) clustering, while reducing the compute time by orders of magnitude. Indeed, the equivalent of one month of continuous compute can be reduced to hours. In addition, Neural ADMIXTURE can include multiple outputs, providing the equivalent results as running the original ADMIXTURE algorithm many times with different numbers of clusters. Our models can also be stored, allowing later cluster assignment to be performed with a linear computational time.},
  internal-note = {NOTE(review): volume, pages, and doi missing from the export -- confirm against the Nature Computational Science record and add},
}

@inproceedings{cGeleta,
  author       = {Geleta, Margarita and Mas-Montserrat, Daniel and Bustamante, Carlos and Gir{\'o}-i-Nieto, Xavier and Ioannidis, Alexander G.},
  title        = {Deep Variational Autoencoders for Population Genetics: Applications in Classification, Imputation, Dimensionality Reduction, and Novel Lossless Data Compression},
  booktitle    = {American Society of Human Genetics ({ASHG})},
  year         = {2021},
  month        = oct,
  publisher    = {ASHG},
  organization = {ASHG},
  address      = {Virtual},
  abstract     = {In this study we show the power of variational autoencoders (VAEs) for a variety of tasks relating to the interpretation and compression of genomic data. The unsupervised setting allows for detecting and learning of granular population structure and inferring of new informative latent factors, opening up an avenue for applications in dimensionality reduction, data simulation, population classification, imputation, and lossless genomic data compression. The latent spaces of VAEs are able to capture and represent clearly differentiated Gaussian-like clusters of similar genetic composition on a fine-scale with a relatively small number of SNPs as input. Furthermore, sequences can be decomposed into latent representations and reconstruction errors (residuals) providing a sparse representation that provides a means for efficient lossless compression.

Identifying genetic clusters can be important when performing genome-wide association studies and provides an alternative to self-reported ethnic labels, which are culturally constructed and vary according to the location and individual. A variety of unsupervised dimensionality reduction methods have been explored in the past for such applications, including PCA, MDS, t-SNE, and UMAP. Our proposed VAE can represent the population structure as a Gaussian-distributed continuous multi-dimensional representation and as classification probabilities providing flexible and interpretable population descriptors.

We train our VAE method with several worldwide whole genome datasets from both humans and canids and evaluate the performance of the different proposed applications with networks with and without ancestry conditioning. Our experiments show that different population groups have significantly differentiated compression ratios and classification accuracies. Additionally, we analyze the entropy of the SNP data, noting its effect on compression across populations and connect these patterns to historical migrations and ancestral relationships.},
}

@inproceedings{cDominguez,
  author    = {Dominguez, Albert and Mas-Montserrat, Daniel and Bustamante, Carlos and Gir{\'o}-i-Nieto, Xavier and Ioannidis, Alexander G.},
  title     = {Neural {ADMIXTURE}: Rapid Population Clustering with Autoencoders},
  booktitle = {2021 Society for Molecular Biology \& Evolution Meeting ({SMBEv2021})},
  year      = {2021},
  month     = jun,
  address   = {Virtual},
  abstract  = {Characterizing the genetic substructure of large cohorts has become increasingly important as genetic association and prediction studies are extended to massive, increasingly diverse, biobanks. ADMIXTURE and STRUCTURE are widely used unsupervised clustering algorithms for characterizing such ancestral genetic structure. These methods decompose individual genomes into fractional cluster assignments with each cluster representing a vector of DNA marker frequencies. The assignments, and clusters, provide an interpretable representation for geneticists to describe population substructure at the sample level. However, with the rapidly increasing size of population biobanks and the growing numbers of variants genotyped (or sequenced) per sample, such traditional methods become computationally intractable. Furthermore, multiple runs with different hyperparameters are required to properly depict the population clustering using these traditional methods, increasing the computational burden. This can lead to days of compute. In this work we present Neural ADMIXTURE, a neural network autoencoder that follows the same modeling assumptions as ADMIXTURE, providing similar (or better) clustering, while reducing the compute time by orders of magnitude. In addition, this network can include multiple outputs, providing the equivalent results as running the original ADMIXTURE algorithm many times with different numbers of clusters. These models can also be stored, allowing later cluster assignment to be performed with a linear computational time.},
}