@inproceedings{6572af3ffbc8491b994548d740959a45,
title = "BERTweetFR: Domain Adaptation of Pre-Trained Language Models for French Tweets",
abstract = "We introduce BERTweetFR, the first large-scale pre-trained language model for French tweets. Our model is initialized from the general-domain French language model CamemBERT (Martin et al., 2020), which follows the base architecture of BERT. Experiments show that BERTweetFR outperforms all previous general-domain French language models on two downstream Twitter NLP tasks: offensiveness identification and named entity recognition. The dataset used in the offensiveness identification task was created and annotated by our team, addressing the lack of such datasets in French. We make our model publicly available in the transformers library with the aim of promoting future research in analytic tasks for French tweets.",
author = "Yanzhu Guo and Virgile Rennard and Christos Xypolopoulos and Michalis Vazirgiannis",
note = "Publisher Copyright: {\textcopyright} 2021 Association for Computational Linguistics. 7th Workshop on Noisy User-Generated Text, W-NUT 2021; Conference date: 11-11-2021",
year = "2021",
month = nov,
day = "11",
language = "English",
publisher = "Association for Computational Linguistics (ACL)",
pages = "445--450",
editor = "Wei Xu and Alan Ritter and Tim Baldwin and Afshin Rahimi",
booktitle = "Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)",
}
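
The abstract states that the model is released through the transformers library. The following is a minimal sketch of loading it for masked-language-model inference; the Hub identifier "Yanzhu/bertweetfr-base" is an assumption (it does not appear in the entry itself) and should be checked against the authors' actual release.

```python
# Minimal sketch: loading BERTweetFR via the Hugging Face transformers library.
# The model identifier "Yanzhu/bertweetfr-base" is an assumption, not confirmed
# by the BibTeX entry; verify the canonical name on the Hugging Face Hub.
from transformers import AutoTokenizer, AutoModelForMaskedLM

model_id = "Yanzhu/bertweetfr-base"  # hypothetical identifier
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id)

# Encode a French tweet with a masked token (CamemBERT-style models use <mask>)
# and run a forward pass through the masked-language-model head.
inputs = tokenizer("Salut tout le monde <mask> !", return_tensors="pt")
outputs = model(**inputs)
print(outputs.logits.shape)  # (batch_size, sequence_length, vocab_size)
```

For the downstream tasks reported in the paper (offensiveness identification, named entity recognition), the same checkpoint would typically be loaded with a task head instead, e.g. AutoModelForSequenceClassification or AutoModelForTokenClassification, and fine-tuned on labeled data.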