This is the source code for the paper "Enhancing Privacy in Federated Learning via Early Exit".
Please cite it as:
@inproceedings{10.1145/3584684.3597274,
author = {Wu, Yashuo and Chiasserini, Carla Fabiana and Malandrino, Francesco and Levorato, Marco},
title = {Invited Paper: Enhancing Privacy in Federated Learning via Early Exit},
year = {2023},
isbn = {9798400701283},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3584684.3597274},
doi = {10.1145/3584684.3597274},
abstract = {In this paper, we investigate the interplay between early exit mechanisms in deep neural networks and privacy preservation in the context of federated learning. Our primary objective is to assess how early exits impact privacy during the learning and inference phases. Through experiments, we demonstrate that models equipped with early exits perceivably boost privacy against membership inference attacks. Our findings suggest that the inclusion of early exits in neural models can serve as a valuable tool in mitigating privacy risks while, at the same time, retaining their original advantages of fast inference.},
booktitle = {Proceedings of the 5th Workshop on Advanced Tools, Programming Languages, and PLatforms for Implementing and Evaluating Algorithms for Distributed Systems},
articleno = {10},
numpages = {5},
keywords = {federated learning, early exit, neural networks, membership inference attacks, deep learning},
location = {Orlando, FL, USA},
series = {ApPLIED 2023}
}
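
The paper studies deep neural networks equipped with early exits, i.e., auxiliary classifiers attached to intermediate layers that let "easy" inputs terminate inference early. Below is a minimal sketch (not the authors' implementation) of such a model, assuming a PyTorch setup with CIFAR-10-sized inputs (3x32x32, 10 classes); the class name `EarlyExitCNN`, the `exit_threshold` parameter, and the equal-weight joint loss are illustrative assumptions only.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class EarlyExitCNN(nn.Module):
    """Small CNN with one early exit attached after the first stage."""

    def __init__(self, num_classes: int = 10, exit_threshold: float = 0.9):
        super().__init__()
        self.exit_threshold = exit_threshold
        # Backbone, first stage
        self.stage1 = nn.Sequential(
            nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),  # -> 32 x 16 x 16
        )
        # Early-exit classifier branching off after stage 1
        self.exit1 = nn.Sequential(
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(32, num_classes),
        )
        # Backbone, second stage and final classifier
        self.stage2 = nn.Sequential(
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),  # -> 64 x 8 x 8
        )
        self.final = nn.Sequential(
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(64, num_classes),
        )

    def forward(self, x):
        # Training: return logits from every exit so a joint loss can be applied.
        h = self.stage1(x)
        return self.exit1(h), self.final(self.stage2(h))

    @torch.no_grad()
    def predict(self, x):
        # Inference: per sample, take the early exit's prediction when its
        # softmax confidence exceeds the threshold, otherwise the final exit's.
        # (For simplicity this sketch always computes both paths; a real
        # deployment would skip the deeper stages when all samples exit early.)
        h = self.stage1(x)
        conf, pred_early = F.softmax(self.exit1(h), dim=1).max(dim=1)
        pred_final = self.final(self.stage2(h)).argmax(dim=1)
        return torch.where(conf >= self.exit_threshold, pred_early, pred_final)


def early_exit_loss(outputs, target):
    # Joint training loss over both exits (equal weights assumed for illustration).
    out_early, out_final = outputs
    return F.cross_entropy(out_early, target) + F.cross_entropy(out_final, target)
```

Usage would follow the standard PyTorch loop: call `model(x)` during training and feed the tuple of logits to `early_exit_loss`, then call `model.predict(x)` at inference time so confident samples are answered by the early branch.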