BibTeX
@inproceedings{belinkov-etal-2019-adversarial,
title = "On Adversarial Removal of Hypothesis-only Bias in Natural Language Inference",
author = "Belinkov, Yonatan and
Poliak, Adam and
Shieber, Stuart and
Van Durme, Benjamin and
Rush, Alexander",
editor = "Mihalcea, Rada and
Shutova, Ekaterina and
Ku, Lun-Wei and
Evang, Kilian and
Poria, Soujanya",
booktitle = "Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (*{SEM} 2019)",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/S19-1028/",
doi = "10.18653/v1/S19-1028",
pages = "256--262",
abstract = "Popular Natural Language Inference (NLI) datasets have been shown to be tainted by hypothesis-only biases. Adversarial learning may help models ignore sensitive biases and spurious correlations in data. We evaluate whether adversarial learning can be used in NLI to encourage models to learn representations free of hypothesis-only biases. Our analyses indicate that the representations learned via adversarial learning may be less biased, with only small drops in NLI accuracy."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="belinkov-etal-2019-adversarial">
<titleInfo>
<title>On Adversarial Removal of Hypothesis-only Bias in Natural Language Inference</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yonatan</namePart>
<namePart type="family">Belinkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Adam</namePart>
<namePart type="family">Poliak</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stuart</namePart>
<namePart type="family">Shieber</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Benjamin</namePart>
<namePart type="family">Van Durme</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexander</namePart>
<namePart type="family">Rush</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (*SEM 2019)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Rada</namePart>
<namePart type="family">Mihalcea</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lun-Wei</namePart>
<namePart type="family">Ku</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kilian</namePart>
<namePart type="family">Evang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Soujanya</namePart>
<namePart type="family">Poria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Minneapolis, Minnesota</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Popular Natural Language Inference (NLI) datasets have been shown to be tainted by hypothesis-only biases. Adversarial learning may help models ignore sensitive biases and spurious correlations in data. We evaluate whether adversarial learning can be used in NLI to encourage models to learn representations free of hypothesis-only biases. Our analyses indicate that the representations learned via adversarial learning may be less biased, with only small drops in NLI accuracy.</abstract>
<identifier type="citekey">belinkov-etal-2019-adversarial</identifier>
<identifier type="doi">10.18653/v1/S19-1028</identifier>
<location>
<url>https://aclanthology.org/S19-1028/</url>
</location>
<part>
<date>2019-06</date>
<extent unit="page">
<start>256</start>
<end>262</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T On Adversarial Removal of Hypothesis-only Bias in Natural Language Inference
%A Belinkov, Yonatan
%A Poliak, Adam
%A Shieber, Stuart
%A Van Durme, Benjamin
%A Rush, Alexander
%Y Mihalcea, Rada
%Y Shutova, Ekaterina
%Y Ku, Lun-Wei
%Y Evang, Kilian
%Y Poria, Soujanya
%S Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (*SEM 2019)
%D 2019
%8 June
%I Association for Computational Linguistics
%C Minneapolis, Minnesota
%F belinkov-etal-2019-adversarial
%X Popular Natural Language Inference (NLI) datasets have been shown to be tainted by hypothesis-only biases. Adversarial learning may help models ignore sensitive biases and spurious correlations in data. We evaluate whether adversarial learning can be used in NLI to encourage models to learn representations free of hypothesis-only biases. Our analyses indicate that the representations learned via adversarial learning may be less biased, with only small drops in NLI accuracy.
%R 10.18653/v1/S19-1028
%U https://aclanthology.org/S19-1028/
%U https://doi.org/10.18653/v1/S19-1028
%P 256-262
Markdown (Informal)
[On Adversarial Removal of Hypothesis-only Bias in Natural Language Inference](https://aclanthology.org/S19-1028/) (Belinkov et al., *SEM 2019)
ACL
Yonatan Belinkov, Adam Poliak, Stuart Shieber, Benjamin Van Durme, and Alexander Rush. 2019. On Adversarial Removal of Hypothesis-only Bias in Natural Language Inference. In Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (*SEM 2019), pages 256–262, Minneapolis, Minnesota. Association for Computational Linguistics.
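
The abstract above describes the approach only at a high level. As a rough illustration of the kind of setup it alludes to, here is a minimal PyTorch sketch of adversarial removal of hypothesis-only signal via a gradient-reversal layer. The bag-of-words encoder, layer sizes, `lambda_adv` weight, and all names are assumptions made for this sketch, not the authors' actual architecture or training configuration.

```python
# Illustrative sketch (not the paper's exact model): an NLI classifier trained
# jointly with a hypothesis-only adversary connected through a gradient-reversal
# layer, so the hypothesis representation is pushed to carry less label signal.
import torch
import torch.nn as nn


class GradReverse(torch.autograd.Function):
    """Identity on the forward pass; reverses and scales gradients on backward."""

    @staticmethod
    def forward(ctx, x, lambd):
        ctx.lambd = lambd
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -ctx.lambd * grad_output, None


class AdversarialNLI(nn.Module):
    def __init__(self, vocab_size=10000, dim=300, num_labels=3, lambda_adv=1.0):
        super().__init__()
        self.lambda_adv = lambda_adv
        self.embed = nn.EmbeddingBag(vocab_size, dim)       # toy bag-of-words encoder
        self.nli_clf = nn.Linear(2 * dim, num_labels)       # main premise+hypothesis classifier
        self.hyp_only_clf = nn.Linear(dim, num_labels)      # adversary: label from hypothesis alone

    def forward(self, premise_ids, hypothesis_ids):
        p = self.embed(premise_ids)
        h = self.embed(hypothesis_ids)
        nli_logits = self.nli_clf(torch.cat([p, h], dim=-1))
        # The adversary sees the hypothesis representation through gradient reversal,
        # so minimizing its loss removes (rather than encourages) hypothesis-only cues.
        adv_logits = self.hyp_only_clf(GradReverse.apply(h, self.lambda_adv))
        return nli_logits, adv_logits


if __name__ == "__main__":
    model = AdversarialNLI()
    loss_fn = nn.CrossEntropyLoss()
    premise = torch.randint(0, 10000, (8, 20))      # fake token ids for a batch of 8
    hypothesis = torch.randint(0, 10000, (8, 12))
    labels = torch.randint(0, 3, (8,))
    nli_logits, adv_logits = model(premise, hypothesis)
    # Both losses are minimized jointly; the reversed gradients implement the adversary.
    loss = loss_fn(nli_logits, labels) + loss_fn(adv_logits, labels)
    loss.backward()
```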