@inproceedings{jain-etal-2022-extending,
title = "Extending Logic Explained Networks to Text Classification",
author = "Jain, Rishabh and
Ciravegna, Gabriele and
Barbiero, Pietro and
Giannini, Francesco and
Buffelli, Davide and
Lio, Pietro",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.emnlp-main.604/",
doi = "10.18653/v1/2022.emnlp-main.604",
pages = "8838--8857",
abstract = "Recently, Logic Explained Networks (LENs) have been proposed as explainable-by-design neural models providing logic explanations for their predictions.However, these models have only been applied to vision and tabular data, and they mostly favour the generation of global explanations, while local ones tend to be noisy and verbose.For these reasons, we propose LEN{\ensuremath{<}}sup{\ensuremath{>}}p{\ensuremath{<}}/sup{\ensuremath{>}}, improving local explanations by perturbing input words, and we test it on text classification. Our results show that (i) LEN{\ensuremath{<}}sup{\ensuremath{>}}p{\ensuremath{<}}/sup{\ensuremath{>}} provides better local explanations than LIME in terms of sensitivity and faithfulness, and (ii) its logic explanations are more useful and user-friendly than the feature scoring provided by LIME as attested by a human survey."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jain-etal-2022-extending">
<titleInfo>
<title>Extending Logic Explained Networks to Text Classification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Rishabh</namePart>
<namePart type="family">Jain</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gabriele</namePart>
<namePart type="family">Ciravegna</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pietro</namePart>
<namePart type="family">Barbiero</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Francesco</namePart>
<namePart type="family">Giannini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Davide</namePart>
<namePart type="family">Buffelli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pietro</namePart>
<namePart type="family">Lio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yoav</namePart>
<namePart type="family">Goldberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zornitsa</namePart>
<namePart type="family">Kozareva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yue</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recently, Logic Explained Networks (LENs) have been proposed as explainable-by-design neural models providing logic explanations for their predictions. However, these models have only been applied to vision and tabular data, and they mostly favour the generation of global explanations, while local ones tend to be noisy and verbose. For these reasons, we propose LEN^p, improving local explanations by perturbing input words, and we test it on text classification. Our results show that (i) LEN^p provides better local explanations than LIME in terms of sensitivity and faithfulness, and (ii) its logic explanations are more useful and user-friendly than the feature scoring provided by LIME, as attested by a human survey.</abstract>
<identifier type="citekey">jain-etal-2022-extending</identifier>
<identifier type="doi">10.18653/v1/2022.emnlp-main.604</identifier>
<location>
<url>https://aclanthology.org/2022.emnlp-main.604/</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>8838</start>
<end>8857</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Extending Logic Explained Networks to Text Classification
%A Jain, Rishabh
%A Ciravegna, Gabriele
%A Barbiero, Pietro
%A Giannini, Francesco
%A Buffelli, Davide
%A Lio, Pietro
%Y Goldberg, Yoav
%Y Kozareva, Zornitsa
%Y Zhang, Yue
%S Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F jain-etal-2022-extending
%X Recently, Logic Explained Networks (LENs) have been proposed as explainable-by-design neural models providing logic explanations for their predictions. However, these models have only been applied to vision and tabular data, and they mostly favour the generation of global explanations, while local ones tend to be noisy and verbose. For these reasons, we propose LEN^p, improving local explanations by perturbing input words, and we test it on text classification. Our results show that (i) LEN^p provides better local explanations than LIME in terms of sensitivity and faithfulness, and (ii) its logic explanations are more useful and user-friendly than the feature scoring provided by LIME, as attested by a human survey.
%R 10.18653/v1/2022.emnlp-main.604
%U https://aclanthology.org/2022.emnlp-main.604/
%U https://doi.org/10.18653/v1/2022.emnlp-main.604
%P 8838-8857
Markdown (Informal)
[Extending Logic Explained Networks to Text Classification](https://aclanthology.org/2022.emnlp-main.604/) (Jain et al., EMNLP 2022)
ACL
Rishabh Jain, Gabriele Ciravegna, Pietro Barbiero, Francesco Giannini, Davide Buffelli, and Pietro Lio. 2022. Extending Logic Explained Networks to Text Classification. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 8838–8857, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.