{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T04:15:58Z","timestamp":1740111358249,"version":"3.37.3"},"reference-count":40,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100003329","name":"Ministerio de Econom\u00eda y Competitividad","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003329","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002809","name":"Generalitat de Catalunya","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002848","name":"Comisi\u00f3n Nacional de Investigaci\u00f3n Cient\u00edfica y Tecnol\u00f3gica","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002848","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Pattern Recognition Letters"],"published-print":{"date-parts":[[2021,10]]},"DOI":"10.1016\/j.patrec.2021.05.015","type":"journal-article","created":{"date-parts":[[2021,7,8]],"date-time":"2021-07-08T12:16:17Z","timestamp":1625746577000},"page":"1-7","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":5,"special_numbering":"C","title":["Saliency for free: Saliency prediction as a side-effect of object recognition"],"prefix":"10.1016","volume":"150","author":[{"given":"Carola","family":"Figueroa-Flores","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7543-2770","authenticated-orcid":false,"given":"David","family":"Berga","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9656-9706","authenticated-orcid":false,"given":"Joost","family":"van de Weijer","sequence":"additional","affiliation":[]},{"given":"Bogdan","family":"Raducanu","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.patrec.2021.05.015_bib0001","doi-asserted-by":"crossref","first-page":"60","DOI":"10.1016\/j.visres.2018.10.006","article-title":"Psychophysical evaluation of individual low-level feature influences on visual attention","volume":"154","author":"Berga","year":"2019","journal-title":"Vis. 
Res."},{"key":"10.1016\/j.patrec.2021.05.015_bib0002","series-title":"Proc. of ICCV","first-page":"8789","article-title":"SID4VAM: a benchmark dataset with synthetic images for visual attention modeling","author":"Berga","year":"2019"},{"issue":"1","key":"10.1016\/j.patrec.2021.05.015_bib0003","doi-asserted-by":"crossref","first-page":"185","DOI":"10.1109\/TPAMI.2012.89","article-title":"State-of-the-art in visual attention modeling","volume":"35","author":"Borji","year":"2013","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.patrec.2021.05.015_sbref0004","series-title":"Proc. of CVPR 2015, Workshop on \u201cFuture of Datasets\u201d","article-title":"Cat2000: a large scale fixation dataset for boosting saliency research","author":"Borji","year":"2015"},{"issue":"5","key":"10.1016\/j.patrec.2021.05.015_bib0005","doi-asserted-by":"crossref","first-page":"523","DOI":"10.1109\/TSMC.2013.2279715","article-title":"What\/where to look next? Modeling top-down visual attention in complex interactive environments","volume":"44","author":"Borji","year":"2014","journal-title":"IEEE Trans. Syst. Man Cybern."},{"issue":"6","key":"10.1016\/j.patrec.2021.05.015_bib0006","doi-asserted-by":"crossref","first-page":"1214","DOI":"10.1109\/TNNLS.2015.2480683","article-title":"Reconciling saliency and object center-bias hypotheses in explaining free-viewing fixations","volume":"27","author":"Borji","year":"2016","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"10.1016\/j.patrec.2021.05.015_bib0007","series-title":"Proc. of ICCV","first-page":"921","article-title":"Analysis of scores, datasets, and models in visual saliency prediction","author":"Borji","year":"2013"},{"key":"10.1016\/j.patrec.2021.05.015_bib0008","doi-asserted-by":"crossref","first-page":"95","DOI":"10.1016\/j.visres.2015.01.010","article-title":"On computational modeling of visual saliency: examining what\u2019s right, and what\u2019s left","volume":"116","author":"Bruce","year":"2015","journal-title":"Vis. Res."},{"key":"10.1016\/j.patrec.2021.05.015_bib0009","series-title":"Proc. of NeurIPS","first-page":"155","article-title":"Saliency based on information maximization","author":"Bruce","year":"2005"},{"key":"10.1016\/j.patrec.2021.05.015_bib0010","unstructured":"Z. Bylinskii, T. Judd, A. Borji, L. Itti, F. Durand, A. Oliva, A. Torralba, Mit saliency benchmark, (http:\/\/saliency.mit.edu\/)."},{"issue":"3","key":"10.1016\/j.patrec.2021.05.015_bib0011","doi-asserted-by":"crossref","first-page":"740","DOI":"10.1109\/TPAMI.2018.2815601","article-title":"What do different evaluation metrics tell us about saliency models?","volume":"41","author":"Bylinskii","year":"2019","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.patrec.2021.05.015_bib0012","series-title":"Proc. of ICPR","first-page":"3488","article-title":"A deep multi-level network for saliency prediction","author":"Cornia","year":"2016"},{"issue":"10","key":"10.1016\/j.patrec.2021.05.015_bib0013","doi-asserted-by":"crossref","first-page":"5142","DOI":"10.1109\/TIP.2018.2851672","article-title":"Predicting human eye fixations via an LSTM-based saliency attentive model","volume":"27","author":"Cornia","year":"2018","journal-title":"IEEE Trans. 
Image Process."},{"key":"10.1016\/j.patrec.2021.05.015_bib0014","doi-asserted-by":"crossref","first-page":"62","DOI":"10.1016\/j.patcog.2019.05.002","article-title":"Saliency for fine-grained object recognition in domains with scarce training data","volume":"94","author":"Figueroa-Flores","year":"2019","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.patrec.2021.05.015_bib0015","series-title":"Proceedings of the 16th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - Volume 4 VISAPP","first-page":"163","article-title":"Hallucinating saliency maps for fine-grained image classification for limited data domains","author":"Figueroa-Flores.","year":"2021"},{"key":"10.1016\/j.patrec.2021.05.015_bib0016","first-page":"249","article-title":"Understanding the difficulty of training deep feedforward neural networks","volume":"9","author":"Glorot","year":"2010","journal-title":"J. Mach. Learn. Res. - Proc. Track"},{"key":"10.1016\/j.patrec.2021.05.015_bib0017","doi-asserted-by":"crossref","first-page":"2295","DOI":"10.1016\/j.visres.2010.05.034","article-title":"Biologically plausible saliency mechanisms improve feedforward object recognition","volume":"50","author":"Han","year":"2010","journal-title":"Vis. Res."},{"key":"10.1016\/j.patrec.2021.05.015_bib0018","series-title":"Proc. of CVPR","first-page":"770","article-title":"Deep residual learning for image recognition","author":"He","year":"2016"},{"key":"10.1016\/j.patrec.2021.05.015_bib0019","series-title":"Proc. of ICCV","first-page":"262","article-title":"Salicon: reducing the semantic gap in saliency prediction by adapting deep neural networks","author":"Huang","year":"2015"},{"issue":"11","key":"10.1016\/j.patrec.2021.05.015_bib0020","doi-asserted-by":"crossref","first-page":"1254","DOI":"10.1109\/34.730558","article-title":"A model of saliency-based visual attention for rapid scene analysis","volume":"20","author":"Itti","year":"1998","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.patrec.2021.05.015_bib0021","series-title":"Proc. of ICCV","first-page":"2106","article-title":"Learning to predict where humans look","author":"Judd","year":"2009"},{"issue":"1","key":"10.1016\/j.patrec.2021.05.015_bib0022","doi-asserted-by":"crossref","first-page":"223","DOI":"10.1007\/s12559-010-9089-5","article-title":"Predicting eye fixations on complex visual stimuli using local symmetry","volume":"3","author":"Kootstra","year":"2011","journal-title":"Cogn. Comput."},{"key":"10.1016\/j.patrec.2021.05.015_bib0023","series-title":"Proc. of NeurIPS","first-page":"1097","article-title":"Imagenet classification with deep convolutional neural networks","author":"Krizhevsky","year":"2012"},{"key":"10.1016\/j.patrec.2021.05.015_bib0024","doi-asserted-by":"crossref","unstructured":"M. K\u00fcmmerer, T.S.A. Wallis, M. Bethge, Deepgaze II: reading fixations from deep features trained on object recognition, arXiv preprint arXiv:1610.01563(2016).","DOI":"10.1167\/17.10.1147"},{"issue":"1","key":"10.1016\/j.patrec.2021.05.015_bib0025","doi-asserted-by":"crossref","first-page":"251","DOI":"10.3758\/s13428-012-0226-9","article-title":"Methods for comparing scanpaths and saliency maps: strengths and weaknesses","volume":"45","author":"LeMeur","year":"2012","journal-title":"Behav. Res. Methods"},{"key":"10.1016\/j.patrec.2021.05.015_bib0026","series-title":"Proc. 
of CVPR","first-page":"478","article-title":"Deep contrast learning for salient object detection","author":"Li","year":"2016"},{"key":"10.1016\/j.patrec.2021.05.015_bib0027","article-title":"Top-down saliency detection driven by visual classification","volume":"172","author":"Murabito","year":"2017","journal-title":"Comput. Vis. Image Underst."},{"key":"10.1016\/j.patrec.2021.05.015_bib0028","doi-asserted-by":"crossref","first-page":"59","DOI":"10.1016\/j.visres.2015.10.001","article-title":"Saliency-based gaze prediction based on head direction","volume":"117","author":"Nakashima","year":"2015","journal-title":"Vis. Res."},{"key":"10.1016\/j.patrec.2021.05.015_bib0029","unstructured":"J. Pan, C. Canton, K. McGuinness, N.E. O\u2019Connor, J. Torres, E. Sayrol, X.a. Giro-i Nieto, SaLGAN: visual saliency prediction with generative adversarial networks, in: arXiv, 2017."},{"key":"10.1016\/j.patrec.2021.05.015_bib0030","series-title":"Proc. of CVPR","first-page":"598","article-title":"Shallow and deep convolutional networks for saliency prediction","author":"Pan","year":"2016"},{"key":"10.1016\/j.patrec.2021.05.015_bib0031","series-title":"From Human Attention to Computational Attention","first-page":"141","article-title":"Bottom-up saliency models for still images: apractical review","author":"Riche","year":"2016"},{"key":"10.1016\/j.patrec.2021.05.015_bib0032","series-title":"Proc. of ICLR","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2015"},{"issue":"5","key":"10.1016\/j.patrec.2021.05.015_bib0033","doi-asserted-by":"crossref","first-page":"13","DOI":"10.1167\/11.5.13","article-title":"Peripheral vision and pattern recognition: a review","volume":"11","author":"Strasburger","year":"2011","journal-title":"J. Vis."},{"key":"10.1016\/j.patrec.2021.05.015_bib0034","series-title":"Proc. of ECCV","first-page":"30","article-title":"An eye fixation database for saliency detection in images","volume":"6314","author":"Subramanian","year":"2010"},{"issue":"14","key":"10.1016\/j.patrec.2021.05.015_bib0035","doi-asserted-by":"crossref","first-page":"4","DOI":"10.1167\/7.14.4","article-title":"The central fixation bias in scene viewing: selecting an optimal viewing position independently of motor biases and image feature distributions","volume":"7","author":"Tatler","year":"2007","journal-title":"J. Vis."},{"issue":"4","key":"10.1016\/j.patrec.2021.05.015_bib0036","doi-asserted-by":"crossref","first-page":"766","DOI":"10.1037\/0033-295X.113.4.766","article-title":"Contextual guidance of eye movements and attention in real-world scenes: the role of global features in object search.","volume":"113","author":"Torralba","year":"2006","journal-title":"Psychol. Rev."},{"key":"10.1016\/j.patrec.2021.05.015_bib0037","unstructured":"B.T. Vincent, B.W. Tatler, Systematic tendencies in scene viewing, 2008."},{"year":"2013","series-title":"Selective Visual Attention","author":"Zhang","key":"10.1016\/j.patrec.2021.05.015_bib0038"},{"key":"10.1016\/j.patrec.2021.05.015_bib0039","series-title":"Proc. of CVPR","first-page":"714","article-title":"Progressive attention guided recurrent network for salient object detection","author":"Zhang","year":"2018"},{"key":"10.1016\/j.patrec.2021.05.015_bib0040","series-title":"Proc. 
of ICCV","first-page":"8779","article-title":"Egnet: edge guidance network for salient object detection","author":"Zhao","year":"2019"}],"container-title":["Pattern Recognition Letters"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0167865521001987?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0167865521001987?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T17:17:59Z","timestamp":1673284679000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0167865521001987"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,10]]},"references-count":40,"alternative-id":["S0167865521001987"],"URL":"https:\/\/doi.org\/10.1016\/j.patrec.2021.05.015","relation":{},"ISSN":["0167-8655"],"issn-type":[{"type":"print","value":"0167-8655"}],"subject":[],"published":{"date-parts":[[2021,10]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Saliency for free: Saliency prediction as a side-effect of object recognition","name":"articletitle","label":"Article Title"},{"value":"Pattern Recognition Letters","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.patrec.2021.05.015","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2021 Elsevier B.V. All rights reserved.","name":"copyright","label":"Copyright"}]}}