@inproceedings{9363ad0ab8664fcf9074996d61676dee,
  title     = {Parasite: Mitigating Physical Side-Channel Attacks Against {Neural Networks}},
  abstract  = {Neural Networks (NNs) are now the target of various side-channel attacks whose aim is to recover the model{\textquoteright}s parameters and/or architecture. We focus our work on EM side-channel attacks for parameter extraction. We propose a novel approach to countering such side-channel attacks, based on the method introduced by Chabanne et al. in 2021, where parasitic convolutional models are dynamically applied to the input of the victim model. We validate this new idea in the side-channel field by simulation.},
  keywords  = {Model confidentiality, Neural networks, Physical side-channel attacks, Reverse engineering},
  author    = {Chabanne, Herv{\'e} and Danger, {Jean Luc} and Guiga, Linda and K{\"u}hne, Ulrich},
  note      = {Publisher Copyright: {\textcopyright} 2022, Springer Nature Switzerland AG.; 11th International Conference on Security, Privacy, and Applied Cryptography Engineering, SPACE 2021 ; Conference date: 10-12-2021 Through 13-12-2021},
  year      = {2022},
  month     = jan,
  day       = {1},
  doi       = {10.1007/978-3-030-95085-9_8},
  language  = {English},
  isbn      = {9783030950842},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  pages     = {148--167},
  editor    = {Lejla Batina and Stjepan Picek and Mainack Mondal},
  booktitle = {Security, Privacy, and Applied Cryptography Engineering - 11th International Conference, SPACE 2021, Proceedings},
}