Papers | Parallel Computing
2024
Chi Hong, Robert Birke, Pin-Yu Chen, Lydia Chen
On Dark Knowledge for Distilling Generators Proceedings Article
In: Proceedings of the 28th Pacific-Asia Conference on Knowledge Discovery and Data Mining, Taipei, Taiwan, 2024.
@inproceedings{24:chen:llm,
title = {On Dark Knowledge for Distilling Generators},
author = {Chi Hong and Robert Birke and Pin-Yu Chen and Lydia Chen},
year = {2024},
date = {2024-05-01},
booktitle = {Proceedings of the 28th Pacific-Asia Conference on Knowledge Discovery and Data Mining},
address = {Taipei, Taiwan},
abstract = {Knowledge distillation has been applied to generative models, such as Variational Autoencoders (VAEs) and Generative Adversarial Networks (GANs). To distill the knowledge, the synthetic outputs of a teacher generator are used to train a student model. While the dark knowledge, i.e., the probabilistic output, is well explored in distilling classifiers, little is known about the existence of an equivalent dark knowledge for generative models and its extractability. In this paper, we derive the first empirical risk bound for distilling generative models from a Bayesian perspective. Through our analysis, we show the existence of the dark knowledge for generative models, i.e., the Bayes probability distribution of a synthetic output given an input, which achieves a lower empirical risk bound than merely using the synthetic output of the generators. Furthermore, we propose a Dark Knowledge based Distillation, DKtill, which trains the student generator based on the (approximate) dark knowledge. Our extensive evaluation of distilling VAEs, conditional GANs, and translation GANs on the Facades and CelebA datasets shows that the FID of student generators trained by DKtill with dark knowledge is lower than that of student generators trained only on synthetic outputs by up to 42.66% and 78.99%, respectively.},
keywords = {ai, epi, icsc},
pubstate = {published},
tppubtype = {inproceedings}
}
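For intuition, the sketch below contrasts plain output matching with a dark-knowledge-style target in generator distillation. It is a minimal illustration under simplifying assumptions, not the paper's DKtill algorithm: the Gaussian spread around the teacher output stands in for the (approximate) Bayes distribution, and teacher, student, and sigma are names invented for the sketch.

# Minimal sketch of distilling a teacher generator into a student.
# The Gaussian dark-knowledge approximation below is a stand-in for
# illustration, not the paper's exact DKtill estimator.
import torch
import torch.nn.functional as F

def distill_step(teacher, student, z, optimizer, sigma=0.1):
    """One distillation step on a batch of latent codes z."""
    with torch.no_grad():
        mu = teacher(z)                      # teacher's synthetic output
    x_hat = student(z)
    # Plain output matching (the baseline) would be:
    #   loss = F.mse_loss(x_hat, mu)
    # Dark-knowledge variant: treat the teacher output as the mean of a
    # distribution over plausible outputs and train against samples from it.
    target = mu + sigma * torch.randn_like(mu)
    loss = F.mse_loss(x_hat, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()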
Bruno Casella, Iacopo Colonnelli, Gianluca Mittone, Robert Birke, Walter Riviera, Antonio Sciarappa, Carlo Cavazzoni, Marco Aldinucci
A Performance Model for Confidential Federated Learning Proceedings Article
In: Proceedings of the 2024 Deep Learning Security and Privacy Workshop, IEEE Symposium on Security and Privacy 2024, San Francisco, CA, 2024.
@inproceedings{24:casella:sgx,
title = {A Performance Model for Confidential Federated Learning},
author = {Bruno Casella and Iacopo Colonnelli and Gianluca Mittone and Robert Birke and Walter Riviera and Antonio Sciarappa and Carlo Cavazzoni and Marco Aldinucci},
url = {https://iris.unito.it/retrieve/b5877a97-2d8d-4e95-8791-0aa4a1b953b3/DLSP___CONFIDENTIAL_FL.pdf},
year = {2024},
date = {2024-05-01},
booktitle = {Proceedings of the 2024 Deep Learning Security and Privacy Workshop, IEEE Symposium on Security and Privacy 2024},
address = {San Francisco, CA},
abstract = {Federated Learning (FL) has emerged as a solution to preserve data privacy by keeping the data locally on each participant's device. However, FL alone is still vulnerable to attacks that can cause privacy leaks. Therefore, it becomes necessary to take additional security measures at the cost of increasing runtimes. The Trusted Execution Environment (TEE) approach promises to offer the highest degree of security during execution. However, TEEs suffer from memory limits which prevent safe end-to-end FL training of modern deep models. State-of-the-art approaches either limit secure training to selected layers, failing to avert the full spectrum of attacks, or adopt layer-wise training, which affects model performance. We benchmark the usage of a library OS (LibOS) to run the full, unmodified end-to-end FL training inside the TEE. We extensively evaluate and model the overhead of the different security mechanisms needed to protect the data and model during computation (TEE), communication (TLS), and storage (disk encryption). The obtained results across three datasets and two models demonstrate that LibOSes are a viable way to seamlessly inject security into FL with limited overhead (at most 2x), offering valuable guidance for researchers and developers aiming to apply FL in data-security-focused contexts.},
keywords = {confidential, epi, icsc},
pubstate = {published},
tppubtype = {inproceedings}
}
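As a rough illustration of the kind of cost model the paper fits, the sketch below composes the three measured security overheads (TEE compute, TLS communication, disk-encryption storage). The multiplicative factors are placeholders chosen only to stay within the reported 2x envelope; they are not the paper's measured coefficients.

# Back-of-the-envelope version of the overhead model; all factors are
# illustrative placeholders, not measured values from the paper.
def fl_round_runtime(t_compute, t_comm, t_storage,
                     tee_factor=1.8, tls_factor=1.2, enc_factor=1.3):
    """Estimated wall-clock time of one FL round with security enabled.

    t_compute: plain local-training time, inflated by running inside the TEE
    t_comm:    plain model-exchange time, inflated by TLS
    t_storage: plain checkpoint I/O time, inflated by disk encryption
    """
    return (t_compute * tee_factor
            + t_comm * tls_factor
            + t_storage * enc_factor)

# Example: a round costing 100s compute, 10s comm, 5s I/O in the clear.
secure = fl_round_runtime(100.0, 10.0, 5.0)
plain = 100.0 + 10.0 + 5.0
print(f"overhead: {secure / plain:.2f}x")   # stays within the ~2x envelope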
Bruno Casella, Walter Riviera, Marco Aldinucci, Gloria Menegaz
Protocol for training MERGE: A federated multi-input neural network for COVID-19 prognosis Journal Article
In: STAR Protocols, 2024, (https://prod-shared-star-protocols.s3.amazonaws.com/protocols/3225.pdf).
@article{24:casella:starprotocol,
title = {Protocol for training MERGE: A federated multi-input neural network for COVID-19 prognosis},
author = {Bruno Casella and Walter Riviera and Marco Aldinucci and Gloria Menegaz},
url = {https://prod-shared-star-protocols.s3.amazonaws.com/protocols/3225.pdf},
doi = {10.1016/j.xpro.2023.102812},
year = {2024},
date = {2024-01-01},
journal = {STAR Protocols},
abstract = {Federated learning is a cooperative learning approach that has emerged as an effective way to address privacy concerns. Here, we present a protocol for training MERGE: a federated multi-input neural network (NN) for COVID-19 prognosis. We describe steps for collecting and preprocessing datasets. We then detail the process of training a multi-input NN. This protocol can be adapted for use with datasets containing both image- and table-based input sources.},
note = {https://prod-shared-star-protocols.s3.amazonaws.com/protocols/3225.pdf},
keywords = {confidential, epi, icsc},
pubstate = {published},
tppubtype = {article}
}
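A hedged sketch of the training loop such a protocol describes: one FedAvg round over clients whose data loaders yield both image and tabular inputs. The function name, optimizer settings, and two-input batch format are assumptions made for the sketch, not the protocol's actual code.

# Hypothetical FedAvg round for a multi-input model; `clients` is assumed
# to be a list of DataLoaders yielding (images, tabular, labels) batches.
import copy
import torch

def fedavg_round(global_model, clients):
    """One aggregation round: local training, then weight averaging."""
    states = []
    for loader in clients:
        local = copy.deepcopy(global_model)
        opt = torch.optim.SGD(local.parameters(), lr=1e-3)
        for images, tabular, labels in loader:          # two input sources
            opt.zero_grad()
            loss = torch.nn.functional.cross_entropy(
                local(images, tabular), labels)
            loss.backward()
            opt.step()
        states.append(local.state_dict())
    # Average the client weights parameter by parameter.
    avg = {k: torch.stack([s[k].float() for s in states])
                 .mean(0).to(states[0][k].dtype)
           for k in states[0]}
    global_model.load_state_dict(avg)
    return global_model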
2023
Bruno Casella, Lorenzo Paletto
Predicting Cryptocurrencies Market Phases through On-Chain Data Long-Term Forecasting Proceedings Article
In: Proceedings of the 2023 IEEE International Conference on Blockchain and Cryptocurrency (ICBC), 1-5 May 2023, Dubai, 2023, (https://ieeexplore.ieee.org/document/10174989).
@inproceedings{23:casella:onchain,
title = {Predicting Cryptocurrencies Market Phases through On-Chain Data Long-Term Forecasting},
author = {Bruno Casella and Lorenzo Paletto},
url = {https://iris.unito.it/bitstream/2318/1902652/1/6.%20ICBC23%20-%20PREDICTING%20BTC.pdf},
doi = {10.1109/ICBC56567.2023.10174989},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the 2023 IEEE International Conference on Blockchain and Cryptocurrency (ICBC), 1-5 May 2023, Dubai},
abstract = {Blockchain, the underlying technology of Bitcoin and several other cryptocurrencies, like Ethereum, produces a massive amount of open-access data that can be analyzed, providing important information about the network's activity and its respective token. On-chain data have been used extensively as input to Machine Learning algorithms for predicting cryptocurrencies' future prices; however, predicting the future behaviour of the on-chain data themselves remains understudied. This study aims to show how on-chain data can be used to detect cryptocurrency market regimes, such as minima and maxima and bear and bull market phases, and how forecasting these data can support optimal asset allocation for long-term investors.},
note = {https://ieeexplore.ieee.org/document/10174989},
keywords = {epi, icsc},
pubstate = {published},
tppubtype = {inproceedings}
}
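To make the idea concrete, here is a toy sketch of regime labeling and naive long-term forecasting on a single on-chain series. The window size and the drift forecaster are illustrative assumptions; the paper's actual forecasting models are not reproduced here.

# Toy illustration only: flag bull/bear regimes on an on-chain metric and
# extend it with a naive drift forecast. Parameters are assumptions.
import numpy as np

def regime(series, window=200):
    """Label each step bull/bear against a long moving average."""
    ma = np.convolve(series, np.ones(window) / window, mode="same")
    return np.where(series > ma, "bull", "bear")

def naive_forecast(series, horizon=30):
    """Drift forecast: extrapolate the recent average daily change."""
    drift = (series[-1] - series[-horizon]) / horizon
    return series[-1] + drift * np.arange(1, horizon + 1)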
Bruno Casella, Walter Riviera, Marco Aldinucci, Gloria Menegaz
MERGE: A model for multi-input biomedical federated learning Journal Article
In: Patterns, pp. 100856, 2023, ISSN: 2666-3899.
@article{23:fl:patterns,
title = {MERGE: A model for multi-input biomedical federated learning},
author = {Bruno Casella and Walter Riviera and Marco Aldinucci and Gloria Menegaz},
url = {https://www.sciencedirect.com/science/article/pii/S2666389923002404},
doi = {10.1016/j.patter.2023.100856},
issn = {2666-3899},
year = {2023},
date = {2023-01-01},
journal = {Patterns},
pages = {100856},
abstract = {Driven by the deep learning (DL) revolution, artificial intelligence (AI) has become a fundamental tool for many biomedical tasks, including analyzing and classifying diagnostic images. Imaging, however, is not the only source of information. Tabular data, such as personal and genomic data and blood test results, are routinely collected but rarely considered in DL pipelines. Nevertheless, DL requires large datasets that often must be pooled from different institutions, raising non-trivial privacy concerns. Federated learning (FL) is a cooperative learning paradigm that aims to address these issues by moving models instead of data across different institutions. Here, we present a federated multi-input architecture using images and tabular data as a methodology to enhance model performance while preserving data privacy. We evaluated it on two showcases: the prognosis of COVID-19 and patient stratification in Alzheimer's disease, providing evidence of higher accuracy and F1 scores than single-input models and improved generalizability over non-federated models.},
keywords = {ai, confidential, epi, icsc},
pubstate = {published},
tppubtype = {article}
}
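The sketch below shows the general shape of such a multi-input network: an image branch and a tabular branch fused before a shared classification head. Layer sizes and the fusion-by-concatenation choice are illustrative assumptions, not the published MERGE architecture.

# Minimal multi-input network in the spirit of the paper: two branches,
# one per data source, concatenated before the classifier. Sizes are
# illustrative assumptions.
import torch
import torch.nn as nn

class MultiInputNet(nn.Module):
    def __init__(self, n_tabular, n_classes):
        super().__init__()
        self.image_branch = nn.Sequential(
            nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(4), nn.Flatten(),
            nn.Linear(16 * 4 * 4, 64))
        self.tabular_branch = nn.Sequential(
            nn.Linear(n_tabular, 32), nn.ReLU(), nn.Linear(32, 64))
        self.head = nn.Linear(128, n_classes)   # fused representation

    def forward(self, image, tabular):
        z = torch.cat([self.image_branch(image),
                       self.tabular_branch(tabular)], dim=1)
        return self.head(z)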
Bruno Casella, Roberto Esposito, Antonio Sciarappa, Carlo Cavazzoni, Marco Aldinucci
Experimenting with Normalization Layers in Federated Learning on non-IID scenarios Technical Report
Computer Science Department, University of Torino 2023.
@techreport{23:casella:normalization,
title = {Experimenting with Normalization Layers in Federated Learning on non-IID scenarios},
author = {Bruno Casella and Roberto Esposito and Antonio Sciarappa and Carlo Cavazzoni and Marco Aldinucci},
url = {https://arxiv.org/pdf/2303.10630.pdf},
year = {2023},
date = {2023-01-01},
institution = {Computer Science Department, University of Torino},
abstract = {Training Deep Learning (DL) models requires large, high-quality datasets, often assembled with data from different institutions. Federated Learning (FL) has emerged as a method for privacy-preserving pooling of datasets, employing collaborative training from different institutions by iteratively aggregating locally trained models into a global one. One critical performance challenge of FL is operating on datasets not independently and identically distributed (non-IID) among the federation participants. Even though this fragility cannot be eliminated, it can be mitigated by a suitable optimization of two hyperparameters: the layer normalization method and the collaboration frequency. In this work, we benchmark five different normalization layers for training Neural Networks (NNs), two families of non-IID data skew, and two datasets. Results show that Batch Normalization, widely employed for centralized DL, is not the best choice for FL, whereas Group and Layer Normalization consistently outperform it. Similarly, frequent model aggregation decreases convergence speed and model quality.},
keywords = {confidential, epi, icsc},
pubstate = {published},
tppubtype = {techreport}
}
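The core experimental knob is easy to picture in code: the sketch below swaps the normalization layer of a convolutional block. GroupNorm and LayerNorm (expressible as GroupNorm with one group) compute statistics per sample, which is why they are less sensitive than BatchNorm to non-IID client batches; the block itself is an illustrative assumption, not the benchmark's code.

# Illustrative convolutional block parameterized by normalization choice.
# Assumes out_ch is divisible by 8 when norm="group".
import torch.nn as nn

def conv_block(in_ch, out_ch, norm="group"):
    norms = {
        "batch": nn.BatchNorm2d(out_ch),     # depends on batch statistics
        "group": nn.GroupNorm(8, out_ch),    # per-sample, batch-independent
        "layer": nn.GroupNorm(1, out_ch),    # LayerNorm over C, H, W
    }
    return nn.Sequential(nn.Conv2d(in_ch, out_ch, 3, padding=1),
                         norms[norm], nn.ReLU())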