@inproceedings{zhuang-etal-2024-visual,
title = "Visual Grounding Helps Learn Word Meanings in Low-Data Regimes",
author = "Zhuang, Chengxu and
Fedorenko, Evelina and
Andreas, Jacob",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.naacl-long.71/",
doi = "10.18653/v1/2024.naacl-long.71",
pages = "1311--1329",
abstract = "Modern neural language models (LMs) are powerful tools for modeling human sentence production and comprehension, and their internal representations are remarkably well-aligned with representations of language in the human brain. But to achieve these results, LMs must be trained in distinctly un-human-like ways {---} requiring orders of magnitude more language data than children receive during development, and without perceptual or social context. Do models trained more naturalistically {---} with grounded supervision {---} exhibit more humanlike language learning? We investigate this question in the context of word learning, a key sub-task in language acquisition. We train a diverse set of LM architectures, with and without auxiliary visual supervision, on datasets of varying scales. We then evaluate these models' learning of syntactic categories, lexical relations, semantic features, word similarity, and alignment with human neural representations. We find that visual supervision can indeed improve the efficiency of word learning. However, these improvements are limited: they are present almost exclusively in the low-dataregime, and sometimes canceled out by the inclusion of rich distributional signals from text. The information conveyed by text and images isnot redundant{---}models mainly driven by visual information yield qualitatively different from those mainly driven by word co-occurrences. However, our results suggest that current multimodal modeling approaches fail to effectively leverage visual information to build human-like word representations from human-scale data."
}
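For anyone importing this record programmatically, a minimal sketch follows; it assumes the BibTeX entry above has been saved to a hypothetical file named zhuang-etal-2024-visual.bib and that the bibtexparser Python package (v1 API) is installed.

```python
# Minimal sketch, assuming the bibtexparser v1 Python package and a hypothetical
# file zhuang-etal-2024-visual.bib holding the BibTeX entry above.
import bibtexparser
from bibtexparser.bparser import BibTexParser

parser = BibTexParser(common_strings=True)   # resolves month macros such as `jun`
with open("zhuang-etal-2024-visual.bib") as f:
    db = bibtexparser.load(f, parser=parser)

entry = db.entries[0]                        # each entry is a plain dict of fields
print(entry["ID"])                           # zhuang-etal-2024-visual
print(entry["title"])
print(entry["doi"], entry["pages"])          # 10.18653/v1/2024.naacl-long.71  1311--1329
```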
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhuang-etal-2024-visual">
<titleInfo>
<title>Visual Grounding Helps Learn Word Meanings in Low-Data Regimes</title>
</titleInfo>
<name type="personal">
<namePart type="given">Chengxu</namePart>
<namePart type="family">Zhuang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Evelina</namePart>
<namePart type="family">Fedorenko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jacob</namePart>
<namePart type="family">Andreas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Duh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Helena</namePart>
<namePart type="family">Gomez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Bethard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Modern neural language models (LMs) are powerful tools for modeling human sentence production and comprehension, and their internal representations are remarkably well-aligned with representations of language in the human brain. But to achieve these results, LMs must be trained in distinctly un-human-like ways — requiring orders of magnitude more language data than children receive during development, and without perceptual or social context. Do models trained more naturalistically — with grounded supervision — exhibit more humanlike language learning? We investigate this question in the context of word learning, a key sub-task in language acquisition. We train a diverse set of LM architectures, with and without auxiliary visual supervision, on datasets of varying scales. We then evaluate these models’ learning of syntactic categories, lexical relations, semantic features, word similarity, and alignment with human neural representations. We find that visual supervision can indeed improve the efficiency of word learning. However, these improvements are limited: they are present almost exclusively in the low-data regime, and sometimes canceled out by the inclusion of rich distributional signals from text. The information conveyed by text and images is not redundant—models mainly driven by visual information yield qualitatively different representations from those mainly driven by word co-occurrences. However, our results suggest that current multimodal modeling approaches fail to effectively leverage visual information to build human-like word representations from human-scale data.</abstract>
<identifier type="citekey">zhuang-etal-2024-visual</identifier>
<identifier type="doi">10.18653/v1/2024.naacl-long.71</identifier>
<location>
<url>https://aclanthology.org/2024.naacl-long.71/</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>1311</start>
<end>1329</end>
</extent>
</part>
</mods>
</modsCollection>
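The MODS record above can be read with Python's standard library alone; the sketch below assumes the XML has been saved to a hypothetical file record.xml, and it extracts the title and author names using the MODS v3 namespace declared in the record.

```python
# Minimal sketch: pull the title and author names out of the MODS record above.
# "record.xml" is a hypothetical file holding the XML as shown.
import xml.etree.ElementTree as ET

NS = {"m": "http://www.loc.gov/mods/v3"}      # namespace declared on <modsCollection>

root = ET.parse("record.xml").getroot()
mods = root.find("m:mods", NS)

title = mods.find("m:titleInfo/m:title", NS).text
authors = [
    " ".join(part.text for part in name.findall("m:namePart", NS))
    for name in mods.findall("m:name", NS)    # direct children only, so editors
    if name.find("m:role/m:roleTerm", NS).text == "author"
]
print(title)
print(authors)    # ['Chengxu Zhuang', 'Evelina Fedorenko', 'Jacob Andreas']
```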
%0 Conference Proceedings
%T Visual Grounding Helps Learn Word Meanings in Low-Data Regimes
%A Zhuang, Chengxu
%A Fedorenko, Evelina
%A Andreas, Jacob
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F zhuang-etal-2024-visual
%X Modern neural language models (LMs) are powerful tools for modeling human sentence production and comprehension, and their internal representations are remarkably well-aligned with representations of language in the human brain. But to achieve these results, LMs must be trained in distinctly un-human-like ways — requiring orders of magnitude more language data than children receive during development, and without perceptual or social context. Do models trained more naturalistically — with grounded supervision — exhibit more humanlike language learning? We investigate this question in the context of word learning, a key sub-task in language acquisition. We train a diverse set of LM architectures, with and without auxiliary visual supervision, on datasets of varying scales. We then evaluate these models’ learning of syntactic categories, lexical relations, semantic features, word similarity, and alignment with human neural representations. We find that visual supervision can indeed improve the efficiency of word learning. However, these improvements are limited: they are present almost exclusively in the low-data regime, and sometimes canceled out by the inclusion of rich distributional signals from text. The information conveyed by text and images is not redundant—models mainly driven by visual information yield qualitatively different representations from those mainly driven by word co-occurrences. However, our results suggest that current multimodal modeling approaches fail to effectively leverage visual information to build human-like word representations from human-scale data.
%R 10.18653/v1/2024.naacl-long.71
%U https://aclanthology.org/2024.naacl-long.71/
%U https://doi.org/10.18653/v1/2024.naacl-long.71
%P 1311-1329
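The Endnote/refer export above is a simple line-tagged format (a two-character %-tag, a space, then the value, with tags such as %A and %U repeating); the sketch below is a generic parser for it, assuming the record is stored in a hypothetical file record.enw.

```python
# Minimal sketch: parse the %-tagged refer/Endnote record above, collecting
# repeated tags (e.g. %A, %U) into lists. "record.enw" is a hypothetical file.
from collections import defaultdict

fields = defaultdict(list)
with open("record.enw") as f:
    for line in f:
        line = line.rstrip("\n")
        if line.startswith("%") and len(line) > 2:
            tag, value = line[:2], line[3:]   # "%T Title..." -> ("%T", "Title...")
            fields[tag].append(value)

print(fields["%T"][0])    # paper title
print(fields["%A"])       # ['Zhuang, Chengxu', 'Fedorenko, Evelina', 'Andreas, Jacob']
print(fields["%P"][0])    # 1311-1329
```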
Markdown (Informal)
[Visual Grounding Helps Learn Word Meanings in Low-Data Regimes](https://aclanthology.org/2024.naacl-long.71/) (Zhuang et al., NAACL 2024)

ACL
Chengxu Zhuang, Evelina Fedorenko, and Jacob Andreas. 2024. Visual Grounding Helps Learn Word Meanings in Low-Data Regimes. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 1311–1329, Mexico City, Mexico. Association for Computational Linguistics.