{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,14]],"date-time":"2025-04-14T15:46:50Z","timestamp":1744645610828},"reference-count":59,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Information Fusion"],"published-print":{"date-parts":[[2021,9]]},"DOI":"10.1016\/j.inffus.2021.02.023","type":"journal-article","created":{"date-parts":[[2021,3,1]],"date-time":"2021-03-01T17:51:26Z","timestamp":1614621086000},"page":"72-86","update-policy":"http:\/\/dx.doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":650,"special_numbering":"C","title":["RFN-Nest: An end-to-end residual fusion network for infrared and visible images"],"prefix":"10.1016","volume":"73","author":[{"ORCID":"http:\/\/orcid.org\/0000-0003-4550-7879","authenticated-orcid":false,"given":"Hui","family":"Li","sequence":"first","affiliation":[]},{"given":"Xiao-Jun","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Josef","family":"Kittler","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.inffus.2021.02.023_b1","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2019.106977","article-title":"RGB-T object tracking: benchmark and baseline","volume":"96","author":"Li","year":"2019","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.inffus.2021.02.023_b2","article-title":"Learning local-global multi-graph descriptors for RGB-T object tracking","author":"Li","year":"2018","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"10.1016\/j.inffus.2021.02.023_b3","doi-asserted-by":"crossref","first-page":"265","DOI":"10.1016\/j.infrared.2019.04.017","article-title":"Thermal infrared and visible sequences fusion tracking based on a hybrid tracking framework with adaptive weighting scheme","volume":"99","author":"Luo","year":"2019","journal-title":"Infrared Phys. 
Technol."},{"key":"10.1016\/j.inffus.2021.02.023_b4","series-title":"2018 International Conference on Wireless Communications, Signal Processing and Networking","first-page":"1","article-title":"IR and visible video fusion for surveillance","author":"Shrinidhi","year":"2018"},{"key":"10.1016\/j.inffus.2021.02.023_b5","doi-asserted-by":"crossref","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","article-title":"Infrared and visible image fusion methods and applications: A survey","volume":"45","author":"Ma","year":"2019","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2021.02.023_b6","doi-asserted-by":"crossref","first-page":"100","DOI":"10.1016\/j.inffus.2016.05.004","article-title":"Pixel-level image fusion: A survey of the state of the art","volume":"33","author":"Li","year":"2017","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2021.02.023_b7","doi-asserted-by":"crossref","first-page":"158","DOI":"10.1016\/j.inffus.2017.10.007","article-title":"Deep learning for pixel-level image fusion: Recent advances and future prospects","volume":"42","author":"Liu","year":"2018","journal-title":"Inf. Fusion"},{"issue":"9","key":"10.1016\/j.inffus.2021.02.023_b8","doi-asserted-by":"crossref","first-page":"1855","DOI":"10.1016\/j.patcog.2004.03.010","article-title":"A wavelet-based image fusion tutorial","volume":"37","author":"Pajares","year":"2004","journal-title":"Pattern Recognit."},{"issue":"2","key":"10.1016\/j.inffus.2021.02.023_b9","doi-asserted-by":"crossref","first-page":"135","DOI":"10.3233\/ICA-2005-12201","article-title":"A multiscale approach to pixel-level image fusion","volume":"12","author":"Ben Hamza","year":"2005","journal-title":"Integr. Comput.-Aided Eng."},{"issue":"2","key":"10.1016\/j.inffus.2021.02.023_b10","doi-asserted-by":"crossref","first-page":"78","DOI":"10.1016\/j.inffus.2009.05.001","article-title":"Image fusion based on a new contourlet packet","volume":"11","author":"Yang","year":"2010","journal-title":"Inf. Fusion"},{"issue":"7","key":"10.1016\/j.inffus.2021.02.023_b11","doi-asserted-by":"crossref","first-page":"2864","DOI":"10.1109\/TIP.2013.2244222","article-title":"Image fusion with guided filtering","volume":"22","author":"Li","year":"2013","journal-title":"IEEE Trans. Image Process."},{"issue":"2","key":"10.1016\/j.inffus.2021.02.023_b12","doi-asserted-by":"crossref","first-page":"210","DOI":"10.1109\/TPAMI.2008.79","article-title":"Robust face recognition via sparse representation","volume":"31","author":"Wright","year":"2008","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.inffus.2021.02.023_b13","series-title":"ICML, Vol. 1","first-page":"8","article-title":"Robust subspace segmentation by low-rank representation","author":"Liu","year":"2010"},{"issue":"1","key":"10.1016\/j.inffus.2021.02.023_b14","doi-asserted-by":"crossref","first-page":"171","DOI":"10.1109\/TPAMI.2012.88","article-title":"Robust recovery of subspace structures by low-rank representation","volume":"35","author":"Liu","year":"2012","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"5","key":"10.1016\/j.inffus.2021.02.023_b15","doi-asserted-by":"crossref","DOI":"10.1117\/1.OE.52.5.057006","article-title":"Dictionary learning method for joint sparse representation-based image fusion","volume":"52","author":"Zhang","year":"2013","journal-title":"Opt. 
Eng."},{"key":"10.1016\/j.inffus.2021.02.023_b16","doi-asserted-by":"crossref","first-page":"94","DOI":"10.1016\/j.infrared.2017.04.018","article-title":"Infrared and visible image fusion method based on saliency detection in sparse domain","volume":"83","author":"Liu","year":"2017","journal-title":"Infrared Phys. Technol."},{"issue":"7","key":"10.1016\/j.inffus.2021.02.023_b17","doi-asserted-by":"crossref","first-page":"943","DOI":"10.1109\/LSP.2017.2696055","article-title":"Image fusion with cosparse analysis operator","volume":"24","author":"Gao","year":"2017","journal-title":"IEEE Signal Process. Lett."},{"key":"10.1016\/j.inffus.2021.02.023_b18","series-title":"International Conference on Image and Graphics","first-page":"675","article-title":"Multi-focus image fusion using dictionary learning and low-rank representation","author":"Li","year":"2017"},{"key":"10.1016\/j.inffus.2021.02.023_b19","doi-asserted-by":"crossref","first-page":"397","DOI":"10.1016\/j.infrared.2014.09.007","article-title":"The infrared and visible image fusion algorithm based on target separation and sparse representation","volume":"67","author":"Lu","year":"2014","journal-title":"Infrared Phys. Technol."},{"key":"10.1016\/j.inffus.2021.02.023_b20","doi-asserted-by":"crossref","first-page":"182","DOI":"10.1016\/j.neucom.2016.11.051","article-title":"A novel infrared and visible image fusion algorithm based on shift-invariant dual-tree complex shearlet transform and sparse representation","volume":"226","author":"Yin","year":"2017","journal-title":"Neurocomputing"},{"key":"10.1016\/j.inffus.2021.02.023_b21","doi-asserted-by":"crossref","first-page":"94","DOI":"10.1016\/j.infrared.2017.04.018","article-title":"Infrared and visible image fusion method based on saliency detection in sparse domain","volume":"83","author":"Liu","year":"2017","journal-title":"Infrared Phys. Technol."},{"key":"10.1016\/j.inffus.2021.02.023_b22","series-title":"2018 24th International Conference on Pattern Recognition","first-page":"2705","article-title":"Infrared and visible image fusion using a deep learning framework","author":"Li","year":"2018"},{"key":"10.1016\/j.inffus.2021.02.023_b23","doi-asserted-by":"crossref","DOI":"10.1016\/j.infrared.2019.103039","article-title":"Infrared and visible image fusion with resnet and zero-phase component analysis","volume":"102","author":"Li","year":"2019","journal-title":"Infrared Phys. Technol."},{"key":"10.1016\/j.inffus.2021.02.023_b24","series-title":"IAPR Workshop on Multimodal Pattern Recognition of Social Signals in Human-Computer Interaction","first-page":"1","article-title":"Multi-focus image fusion with PCA filters of PCANet","author":"Song","year":"2018"},{"issue":"5","key":"10.1016\/j.inffus.2021.02.023_b25","doi-asserted-by":"crossref","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","article-title":"Densefuse: A fusion approach to infrared and visible images","volume":"28","author":"Li","year":"2019","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.inffus.2021.02.023_b26","article-title":"MDLatLRR: A novel decomposition method for infrared and visible image fusion","author":"Li","year":"2020","journal-title":"IEEE Trans. Image Process."},{"issue":"12","key":"10.1016\/j.inffus.2021.02.023_b27","doi-asserted-by":"crossref","first-page":"1882","DOI":"10.1109\/LSP.2016.2618776","article-title":"Image fusion with convolutional sparse representation","volume":"23","author":"Liu","year":"2016","journal-title":"IEEE Signal Process. 
Lett."},{"key":"10.1016\/j.inffus.2021.02.023_b28","doi-asserted-by":"crossref","first-page":"191","DOI":"10.1016\/j.inffus.2016.12.001","article-title":"Multi-focus image fusion with a deep convolutional neural network","volume":"36","author":"Liu","year":"2017","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2021.02.023_b29","doi-asserted-by":"crossref","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","article-title":"FusionGAN: A generative adversarial network for infrared and visible image fusion","volume":"48","author":"Ma","year":"2019","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2021.02.023_b30","doi-asserted-by":"crossref","first-page":"85","DOI":"10.1016\/j.inffus.2019.07.005","article-title":"Infrared and visible image fusion via detail preserving adversarial learning","volume":"54","author":"Ma","year":"2020","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2021.02.023_b31","doi-asserted-by":"crossref","first-page":"4980","DOI":"10.1109\/TIP.2020.2977573","article-title":"DDcGAN: A dual-discriminator conditional generative adversarial network for multi-resolution image fusion","volume":"29","author":"Ma","year":"2020","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.inffus.2021.02.023_b32","doi-asserted-by":"crossref","DOI":"10.1109\/TIM.2020.3005230","article-title":"Nestfuse: An infrared and visible image fusion architecture based on nest connection and spatial\/channel attention models","author":"Li","year":"2020","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"10.1016\/j.inffus.2021.02.023_b33","series-title":"Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support, Granada, Spain","first-page":"3","article-title":"Unet++: A nested u-net architecture for medical image segmentation","author":"Zhou","year":"2018"},{"key":"10.1016\/j.inffus.2021.02.023_b34","doi-asserted-by":"crossref","unstructured":"K. Ram Prabhakar, V. Sai Srikar, R. Venkatesh Babu, Deepfuse: A deep unsupervised approach for exposure fusion with extreme exposure image pairs, in: Proceedings of the IEEE International Conference on Computer Vision, 2017, pp. 4714\u20134722.","DOI":"10.1109\/ICCV.2017.505"},{"year":"2014","series-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","key":"10.1016\/j.inffus.2021.02.023_b35"},{"key":"10.1016\/j.inffus.2021.02.023_b36","doi-asserted-by":"crossref","unstructured":"K. He, X. Zhang, S. Ren, J. Sun, Deep residual learning for image recognition, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 770\u2013778.","DOI":"10.1109\/CVPR.2016.90"},{"key":"10.1016\/j.inffus.2021.02.023_b37","doi-asserted-by":"crossref","unstructured":"G. Huang, Z. Liu, L. Van Der Maaten, K.Q. Weinberger, Densely connected convolutional networks, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017, pp. 4700\u20134708.","DOI":"10.1109\/CVPR.2017.243"},{"key":"10.1016\/j.inffus.2021.02.023_b38","doi-asserted-by":"crossref","first-page":"99","DOI":"10.1016\/j.inffus.2019.07.011","article-title":"IFCNN: A general image fusion framework based on convolutional neural network","volume":"54","author":"Zhang","year":"2020","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2021.02.023_b39","doi-asserted-by":"crossref","unstructured":"H. Zhang, H. Xu, Y. Xiao, X. Guo, J. 
Ma, Rethinking the image fusion: A fast unified image fusion network based on proportional maintenance of gradient and intensity, in: Proceedings of the AAAI Conference on Artificial Intelligence, 2020, pp. 12797\u201312804.","DOI":"10.1609\/aaai.v34i07.6975"},{"key":"10.1016\/j.inffus.2021.02.023_b40","article-title":"U2fusion: A unified unsupervised image fusion network","author":"Xu","year":"2020","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"4","key":"10.1016\/j.inffus.2021.02.023_b41","doi-asserted-by":"crossref","first-page":"600","DOI":"10.1109\/TIP.2003.819861","article-title":"Image quality assessment: from error visibility to structural similarity","volume":"13","author":"Wang","year":"2004","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.inffus.2021.02.023_b42","series-title":"European Conference on Computer Vision, Zurich, Switzerland","first-page":"740","article-title":"Microsoft coco: Common objects in context","author":"Lin","year":"2014"},{"key":"10.1016\/j.inffus.2021.02.023_b43","doi-asserted-by":"crossref","unstructured":"S. Hwang, J. Park, N. Kim, Y. Choi, I. So Kweon, Multispectral pedestrian detection: Benchmark dataset and baseline, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2015, pp. 1037\u20131045.","DOI":"10.1109\/CVPR.2015.7298706"},{"year":"2014","series-title":"TNO image fusion dataset","author":"Toet","key":"10.1016\/j.inffus.2021.02.023_b44"},{"key":"10.1016\/j.inffus.2021.02.023_b45","unstructured":"M. Kristan, J. Matas, A. Leonardis, M. Felsberg, et al. The eighth visual object tracking VOT2020 challenge results, in: Proc. 16th Eur. Conf. Comput. Vis. Workshop, 2020."},{"year":"2020","series-title":"Code of RFN-nest","author":"Li","key":"10.1016\/j.inffus.2021.02.023_b46"},{"issue":"1","key":"10.1016\/j.inffus.2021.02.023_b47","article-title":"Assessment of image fusion procedures using entropy, image quality, and multispectral classification","volume":"2","author":"Roberts","year":"2008","journal-title":"J. Appl. Remote Sens."},{"issue":"4","key":"10.1016\/j.inffus.2021.02.023_b48","doi-asserted-by":"crossref","first-page":"355","DOI":"10.1088\/0957-0233\/8\/4\/002","article-title":"In-fibre Bragg grating sensors","volume":"8","author":"Rao","year":"1997","journal-title":"Meas. Sci. Technol."},{"issue":"7","key":"10.1016\/j.inffus.2021.02.023_b49","doi-asserted-by":"crossref","first-page":"313","DOI":"10.1049\/el:20020212","article-title":"Information measure for performance of image fusion","volume":"38","author":"Qu","year":"2002","journal-title":"Electron. Lett."},{"issue":"6","key":"10.1016\/j.inffus.2021.02.023_b50","doi-asserted-by":"crossref","first-page":"1125","DOI":"10.1007\/s11760-012-0361-x","article-title":"Multifocus and multispectral image fusion based on pixel significance using discrete cosine harmonic wavelet transform","volume":"7","author":"Kumar","year":"2013","journal-title":"Signal Image Video Process."},{"issue":"12","key":"10.1016\/j.inffus.2021.02.023_b51","doi-asserted-by":"crossref","first-page":"1890","DOI":"10.1016\/j.aeue.2015.09.004","article-title":"A new image quality metric for image fusion: The sum of the correlations of differences","volume":"69","author":"Aslantas","year":"2015","journal-title":"AEU-Int. J. Electron. 
Commun."},{"issue":"11","key":"10.1016\/j.inffus.2021.02.023_b52","doi-asserted-by":"crossref","first-page":"3345","DOI":"10.1109\/TIP.2015.2442920","article-title":"Perceptual quality assessment for multi-exposure image fusion","volume":"24","author":"Ma","year":"2015","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.inffus.2021.02.023_b53","doi-asserted-by":"crossref","first-page":"100","DOI":"10.1016\/j.inffus.2016.02.001","article-title":"Infrared and visible image fusion via gradient transfer and total variation minimization","volume":"31","author":"Ma","year":"2016","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2021.02.023_b54","unstructured":"M. Kristan, J. Matas, A. Leonardis, M. Felsberg, R. Pflugfelder, J.-K. Kamarainen, L. Cehovin Zajc, O. Drbohlav, A. Lukezic, A. Berg, et al. The seventh visual object tracking vot2019 challenge results, in: Proceedings of the IEEE International Conference on Computer Vision Workshops, 2019, pp. 1\u201336."},{"year":"2020","series-title":"AFAT: Adaptive failure-aware tracker for robust visual object tracking","author":"Xu","key":"10.1016\/j.inffus.2021.02.023_b55"},{"issue":"12","key":"10.1016\/j.inffus.2021.02.023_b56","doi-asserted-by":"crossref","first-page":"5743","DOI":"10.1109\/TIP.2016.2614135","article-title":"Learning collaborative sparse representation for grayscale-thermal tracking","volume":"25","author":"Li","year":"2016","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.inffus.2021.02.023_b57","article-title":"RGBT salient object detection: Benchmark and A novel cooperative ranking approach","author":"Tang","year":"2019","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"1","key":"10.1016\/j.inffus.2021.02.023_b58","doi-asserted-by":"crossref","first-page":"160","DOI":"10.1109\/TMM.2019.2924578","article-title":"RGB-T image saliency detection via collaborative graph learning","volume":"22","author":"Tu","year":"2019","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.inffus.2021.02.023_b59","unstructured":"M. Kristan, J. Matas, A. Leonardis, M. Felsberg, L. Cehovin, G. Fernandez, T. Vojir, G. Hager, G. Nebehay, R. Pflugfelder, The visual object tracking vot2015 challenge results, in: Proceedings of the IEEE International Conference on Computer Vision Workshops, 2015, pp. 
1\u201323."}],"container-title":["Information Fusion"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1566253521000440?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1566253521000440?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2023,4,15]],"date-time":"2023-04-15T07:52:59Z","timestamp":1681545179000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1566253521000440"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,9]]},"references-count":59,"alternative-id":["S1566253521000440"],"URL":"https:\/\/doi.org\/10.1016\/j.inffus.2021.02.023","relation":{},"ISSN":["1566-2535"],"issn-type":[{"type":"print","value":"1566-2535"}],"subject":[],"published":{"date-parts":[[2021,9]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"RFN-Nest: An end-to-end residual fusion network for infrared and visible images","name":"articletitle","label":"Article Title"},{"value":"Information Fusion","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.inffus.2021.02.023","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2021 Published by Elsevier B.V.","name":"copyright","label":"Copyright"}]}}