@inproceedings{bock-etal-2021-ait,
title = "{AIT}{\_}{FHSTP} at {G}erm{E}val 2021: Automatic Fact Claiming Detection with Multilingual Transformer Models",
author = {B{\"o}ck, Jaqueline and
Liakhovets, Daria and
Sch{\"u}tz, Mina and
Kirchknopf, Armin and
Slijep{\v{c}}evi{\'c}, Djordje and
Zeppelzauer, Matthias and
Schindler, Alexander},
editor = "Risch, Julian and
Stoll, Anke and
Wilms, Lena and
Wiegand, Michael",
booktitle = "Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments",
month = sep,
year = "2021",
address = "Duesseldorf, Germany",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.germeval-1.11/",
pages = "76--82",
abstract = "Spreading ones opinion on the internet is becoming more and more important. A problem is that in many discussions people often argue with supposed facts. This year`s GermEval 2021 focuses on this topic by incorporating a shared task on the identification of fact-claiming comments. This paper presents the contribution of the AIT FHSTP team at the GermEval 2021 benchmark for task 3: {\textquotedblleft}identifying fact-claiming comments in social media texts{\textquotedblright}. Our methodological approaches are based on transformers and incorporate 3 different models: multilingual BERT, GottBERT and XML-RoBERTa. To solve the fact claiming task, we fine-tuned these transformers with external data and the data provided by the GermEval task organizers. Our multilingual BERT model achieved a precision-score of 72.71{\%}, a recall of 72.96{\%} and an F1-Score of 72.84{\%} on the GermEval test set. Our fine-tuned XML-RoBERTa model achieved a precision-score of 68.45{\%}, a recall of 70.11{\%} and a F1-Score of 69.27{\%}. Our best model is GottBERT (i.e., a BERT transformer pre-trained on German texts) fine-tuned on the GermEval 2021 data. This transformer achieved a precision of 74.13{\%}, a recall of 75.11{\%} and an F1-Score of 74.62{\%} on the test set."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bock-etal-2021-ait">
<titleInfo>
<title>AIT_FHSTP at GermEval 2021: Automatic Fact Claiming Detection with Multilingual Transformer Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jaqueline</namePart>
<namePart type="family">Böck</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daria</namePart>
<namePart type="family">Liakhovets</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mina</namePart>
<namePart type="family">Schütz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Armin</namePart>
<namePart type="family">Kirchknopf</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Djordje</namePart>
<namePart type="family">Slijepčević</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Matthias</namePart>
<namePart type="family">Zeppelzauer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexander</namePart>
<namePart type="family">Schindler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments</title>
</titleInfo>
<name type="personal">
<namePart type="given">Julian</namePart>
<namePart type="family">Risch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anke</namePart>
<namePart type="family">Stoll</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lena</namePart>
<namePart type="family">Wilms</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Wiegand</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Duesseldorf, Germany</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Spreading one's opinion on the internet is becoming more and more important. A problem is that in many discussions people often argue with supposed facts. This year's GermEval 2021 focuses on this topic by incorporating a shared task on the identification of fact-claiming comments. This paper presents the contribution of the AIT FHSTP team at the GermEval 2021 benchmark for task 3: "identifying fact-claiming comments in social media texts". Our methodological approaches are based on transformers and incorporate three different models: multilingual BERT, GottBERT and XLM-RoBERTa. To solve the fact-claiming task, we fine-tuned these transformers with external data and the data provided by the GermEval task organizers. Our multilingual BERT model achieved a precision of 72.71%, a recall of 72.96% and an F1-Score of 72.84% on the GermEval test set. Our fine-tuned XLM-RoBERTa model achieved a precision of 68.45%, a recall of 70.11% and an F1-Score of 69.27%. Our best model is GottBERT (i.e., a BERT transformer pre-trained on German texts) fine-tuned on the GermEval 2021 data. This transformer achieved a precision of 74.13%, a recall of 75.11% and an F1-Score of 74.62% on the test set.</abstract>
<identifier type="citekey">bock-etal-2021-ait</identifier>
<location>
<url>https://aclanthology.org/2021.germeval-1.11/</url>
</location>
<part>
<date>2021-09</date>
<extent unit="page">
<start>76</start>
<end>82</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T AIT_FHSTP at GermEval 2021: Automatic Fact Claiming Detection with Multilingual Transformer Models
%A Böck, Jaqueline
%A Liakhovets, Daria
%A Schütz, Mina
%A Kirchknopf, Armin
%A Slijepčević, Djordje
%A Zeppelzauer, Matthias
%A Schindler, Alexander
%Y Risch, Julian
%Y Stoll, Anke
%Y Wilms, Lena
%Y Wiegand, Michael
%S Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments
%D 2021
%8 September
%I Association for Computational Linguistics
%C Duesseldorf, Germany
%F bock-etal-2021-ait
%X Spreading one's opinion on the internet is becoming more and more important. A problem is that in many discussions people often argue with supposed facts. This year's GermEval 2021 focuses on this topic by incorporating a shared task on the identification of fact-claiming comments. This paper presents the contribution of the AIT FHSTP team at the GermEval 2021 benchmark for task 3: "identifying fact-claiming comments in social media texts". Our methodological approaches are based on transformers and incorporate three different models: multilingual BERT, GottBERT and XLM-RoBERTa. To solve the fact-claiming task, we fine-tuned these transformers with external data and the data provided by the GermEval task organizers. Our multilingual BERT model achieved a precision of 72.71%, a recall of 72.96% and an F1-Score of 72.84% on the GermEval test set. Our fine-tuned XLM-RoBERTa model achieved a precision of 68.45%, a recall of 70.11% and an F1-Score of 69.27%. Our best model is GottBERT (i.e., a BERT transformer pre-trained on German texts) fine-tuned on the GermEval 2021 data. This transformer achieved a precision of 74.13%, a recall of 75.11% and an F1-Score of 74.62% on the test set.
%U https://aclanthology.org/2021.germeval-1.11/
%P 76-82
Markdown (Informal)
[AIT_FHSTP at GermEval 2021: Automatic Fact Claiming Detection with Multilingual Transformer Models](https://aclanthology.org/2021.germeval-1.11/) (Böck et al., GermEval 2021)
ACL
Jaqueline Böck, Daria Liakhovets, Mina Schütz, Armin Kirchknopf, Djordje Slijepčević, Matthias Zeppelzauer, and Alexander Schindler. 2021. AIT_FHSTP at GermEval 2021: Automatic Fact Claiming Detection with Multilingual Transformer Models. In Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments, pages 76–82, Duesseldorf, Germany. Association for Computational Linguistics.
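
The abstract describes fine-tuning German and multilingual BERT-style encoders as binary classifiers for fact-claiming detection. The snippet below is a minimal, hypothetical sketch of that kind of setup using the Hugging Face transformers and datasets libraries; the checkpoint name, file names, column names, and hyperparameters are illustrative assumptions and are not taken from the paper.

```python
# Hypothetical sketch: fine-tune a German BERT-style encoder for binary
# fact-claiming classification, in the spirit of the approach the abstract
# describes. Checkpoint, data files, column names, and hyperparameters are
# placeholders, not the authors' actual configuration.
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainingArguments,
)

MODEL_NAME = "uklfr/gottbert-base"  # assumed GottBERT checkpoint on the Hub

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)

# Hypothetical CSV files with a "text" column and a 0/1 "label" column;
# this is not the shared task's official data schema.
data = load_dataset("csv", data_files={"train": "train.csv", "validation": "dev.csv"})

def tokenize(batch):
    # Truncate long comments; 256 subword tokens is an assumed limit.
    return tokenizer(batch["text"], truncation=True, max_length=256)

data = data.map(tokenize, batched=True)
data = data.rename_column("label", "labels")

args = TrainingArguments(
    output_dir="fact-claiming-gottbert",
    num_train_epochs=3,                 # common fine-tuning defaults,
    per_device_train_batch_size=16,     # not the paper's settings
    learning_rate=2e-5,
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=data["train"],
    eval_dataset=data["validation"],
    data_collator=DataCollatorWithPadding(tokenizer),  # dynamic per-batch padding
)
trainer.train()
print(trainer.evaluate())  # reports eval loss on the validation split
```

Reproducing the precision, recall, and F1 scores quoted in the abstract would additionally require a compute_metrics callback and evaluation on the official GermEval 2021 test set.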