{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,7,17]],"date-time":"2024-07-17T17:18:23Z","timestamp":1721236703026},"reference-count":36,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100005073","name":"Agency for Defense Development","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100005073","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Pattern Recognition Letters"],"published-print":{"date-parts":[[2023,10]]},"DOI":"10.1016\/j.patrec.2023.09.002","type":"journal-article","created":{"date-parts":[[2023,9,9]],"date-time":"2023-09-09T18:59:04Z","timestamp":1694285944000},"page":"99-105","update-policy":"http:\/\/dx.doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":1,"special_numbering":"C","title":["EasyFuse: Easy-to-learn visible and infrared image fusion framework based on unpaired set"],"prefix":"10.1016","volume":"174","author":[{"ORCID":"http:\/\/orcid.org\/0000-0001-8210-6324","authenticated-orcid":false,"given":"Seongyong","family":"Ahn","sequence":"first","affiliation":[]},{"given":"Inwook","family":"Shim","sequence":"additional","affiliation":[]},{"given":"Jihong","family":"Min","sequence":"additional","affiliation":[]},{"given":"Kuk-Jin","family":"Yoon","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.patrec.2023.09.002_b1","doi-asserted-by":"crossref","first-page":"18","DOI":"10.1016\/j.inffus.2020.08.012","article-title":"Deep unsupervised learning based on color un-referenced loss functions for multi-exposure image fusion","volume":"66","author":"Qi","year":"2021","journal-title":"Inf. 
Fusion"},{"key":"10.1016\/j.patrec.2023.09.002_b2","first-page":"7203","article-title":"MEF-GAN: Multi-exposure image fusion via generative adversarial networks","volume":"29","author":"Xu","year":"2020","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b3","first-page":"2808","article-title":"Deep guided learning for fast multi-exposure image fusion","volume":"29","author":"Ma","year":"2020","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b4","first-page":"4819","article-title":"Deep learning-based multi-focus image fusion: A survey and a comparative study","volume":"44","author":"Zhang","year":"2022","journal-title":"PAMI"},{"key":"10.1016\/j.patrec.2023.09.002_b5","series-title":"ICCV","first-page":"43","article-title":"DTMNet: A discrete tchebichef moments-based deep neural network for multi-focus image fusion","author":"Xiao","year":"2021"},{"key":"10.1016\/j.patrec.2023.09.002_b6","first-page":"4816","article-title":"DRPL: Deep regression pair learning for multi-focus image fusion","volume":"29","author":"Li","year":"2020","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b7","first-page":"163","article-title":"Global-feature encoding U-net (GEU-net) for multi-focus image fusion","volume":"30","author":"Xiao","year":"2020","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b8","doi-asserted-by":"crossref","first-page":"4","DOI":"10.1016\/j.inffus.2013.12.002","article-title":"Medical image fusion: A survey of the state of the art","volume":"19","author":"James","year":"2014","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.patrec.2023.09.002_b9","series-title":"FUSION","first-page":"1","article-title":"Zero-learning fast medical image fusion","author":"Xiao","year":"2019"},{"key":"10.1016\/j.patrec.2023.09.002_b10","first-page":"5134","article-title":"MATR: Multimodal medical image fusion via multiscale adaptive transformer","volume":"31","author":"Tang","year":"2022","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b11","doi-asserted-by":"crossref","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","article-title":"Infrared and visible image fusion methods and application:A survey","volume":"45","author":"Ma","year":"2019","journal-title":"Inf. Fusion"},{"issue":"5","key":"10.1016\/j.patrec.2023.09.002_b12","first-page":"2614","article-title":"DenseFuse: A fusion approach to infrared and visible images","volume":"28","author":"Li","year":"2019","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b13","doi-asserted-by":"crossref","first-page":"2761","DOI":"10.1007\/s11263-021-01501-8","article-title":"SDNet: A versatile squeeze-and-decomposition network for real-time image fusion","volume":"129","author":"Zhang","year":"2020","journal-title":"IJCV"},{"issue":"1","key":"10.1016\/j.patrec.2023.09.002_b14","doi-asserted-by":"crossref","first-page":"502","DOI":"10.1109\/TPAMI.2020.3012548","article-title":"U2Fusion: A unified unsupervised image fusion network","volume":"44","author":"Xu","year":"2022","journal-title":"PAMI"},{"key":"10.1016\/j.patrec.2023.09.002_b15","doi-asserted-by":"crossref","first-page":"323","DOI":"10.1016\/j.inffus.2021.06.008","article-title":"Image fusion meets deep learning: A survey and perspective","volume":"76","author":"Zhang","year":"2021","journal-title":"Inf. 
Fusion"},{"key":"10.1016\/j.patrec.2023.09.002_b16","doi-asserted-by":"crossref","first-page":"28","DOI":"10.1016\/j.infrared.2015.11.002","article-title":"An improved fusion algorithm for infrared and visible images based on multi-scale transform","volume":"74","author":"Li","year":"2016","journal-title":"Infrared Phys. Technol."},{"key":"10.1016\/j.patrec.2023.09.002_b17","first-page":"1882","article-title":"Image fusion with convolutional sparse representation","volume":"23","author":"Liu","year":"2016","journal-title":"SPL"},{"key":"10.1016\/j.patrec.2023.09.002_b18","doi-asserted-by":"crossref","first-page":"114","DOI":"10.1016\/j.infrared.2016.05.012","article-title":"Infrared and visible images fusion based on RPCA and NSCT","volume":"77","author":"Liu","year":"2016","journal-title":"Infrared Phys. Technol."},{"issue":"7","key":"10.1016\/j.patrec.2023.09.002_b19","first-page":"2864","article-title":"Image fusion with guided filtering","volume":"22","author":"Li","year":"2013","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b20","doi-asserted-by":"crossref","first-page":"85","DOI":"10.1016\/j.infrared.2017.01.026","article-title":"Fusion of infrared and visible images based on nonsubsampled contourlet transform and sparse K-SVD dictionary learning","volume":"82","author":"Cai","year":"2017","journal-title":"Infrared Phys. Technol."},{"key":"10.1016\/j.patrec.2023.09.002_b21","doi-asserted-by":"crossref","first-page":"147","DOI":"10.1016\/j.inffus.2014.09.004","article-title":"A general framework for image fusion based on multi-scale transform and sparse representation","volume":"24","author":"Liu","year":"2015","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.patrec.2023.09.002_b22","doi-asserted-by":"crossref","first-page":"282","DOI":"10.1016\/j.infrared.2017.01.013","article-title":"Fusion of infrared-visible images using improved multi-scale top-hat transform and suitable fusion rules","volume":"81","author":"Zhu","year":"2017","journal-title":"Infrared Phys. Technol."},{"key":"10.1016\/j.patrec.2023.09.002_b23","doi-asserted-by":"crossref","first-page":"94","DOI":"10.1016\/j.infrared.2017.04.018","article-title":"Infrared and visible image fusion method based on saliency detection in sparse domain","volume":"83","author":"Liu","year":"2017","journal-title":"Infrared Phys. Technol."},{"issue":"5","key":"10.1016\/j.patrec.2023.09.002_b24","first-page":"2614","article-title":"NestFuse: An infrared and visible image fusion architecture based on nest connection and spatial\/channel attention models","volume":"28","author":"Li","year":"2019","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b25","doi-asserted-by":"crossref","first-page":"85","DOI":"10.1016\/j.inffus.2019.07.005","article-title":"Infrared and visible image fusion via detail preserving adversarial learning","volume":"54","author":"Ma","year":"2020","journal-title":"Inf. 
Fusion"},{"key":"10.1016\/j.patrec.2023.09.002_b26","series-title":"AAAI","first-page":"12797","article-title":"Rethinking the image fusion: A fast unified image fusion network based on propotional maintenance of gradient and intensity","author":"Zhang","year":"2020"},{"key":"10.1016\/j.patrec.2023.09.002_b27","series-title":"AAAI","first-page":"12484","article-title":"FusionDN: A unified densely connected network for image fusion","author":"Xu","year":"2020"},{"key":"10.1016\/j.patrec.2023.09.002_b28","doi-asserted-by":"crossref","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","article-title":"FusionGAN: A generative adversarial network for infrared and visible image fusion","volume":"48","author":"Ma","year":"2019","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.patrec.2023.09.002_b29","first-page":"4980","article-title":"DDcGAN: A dual-discriminator conditional generative adversarial network for multi-resolution image fusion","volume":"29","author":"Ma","year":"2020","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b30","series-title":"MICCAI","first-page":"234","article-title":"U-Net: Convolutional networks for biomedical image segmentation","author":"Ronneberger","year":"2015"},{"issue":"4","key":"10.1016\/j.patrec.2023.09.002_b31","first-page":"600","article-title":"Image quality assessment: from error visibility to structural similarity","volume":"13","author":"Wang","year":"2004","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b32","first-page":"3845","article-title":"Unsupervised deep image fusion with structure tensor representations","volume":"29","author":"Jung","year":"2020","journal-title":"TIP"},{"key":"10.1016\/j.patrec.2023.09.002_b33","first-page":"1","article-title":"STDFusionNet: An infrared and visible image fusion network based on salient target detection","volume":"70","author":"Ma","year":"2021","journal-title":"TIM"},{"key":"10.1016\/j.patrec.2023.09.002_b34","doi-asserted-by":"crossref","first-page":"28","DOI":"10.1016\/j.inffus.2021.12.004","article-title":"Image fusion in the loop of high-level vision tasks: A semantic-aware real-time infrared and visible image fusion network","volume":"82","author":"Tang","year":"2022","journal-title":"Inf. 
Fusion"},{"key":"10.1016\/j.patrec.2023.09.002_b35","series-title":"AICT","first-page":"1","article-title":"FastFMI: Non-reference image fusion metric","author":"Haghighat","year":"2014"},{"issue":"11","key":"10.1016\/j.patrec.2023.09.002_b36","first-page":"3345","article-title":"Perceptual quality assessment for multi-exposure image fusion","volume":"24","author":"Ma","year":"2015","journal-title":"TIP"}],"container-title":["Pattern Recognition Letters"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0167865523002490?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0167865523002490?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2023,9,30]],"date-time":"2023-09-30T04:30:47Z","timestamp":1696048247000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0167865523002490"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10]]},"references-count":36,"alternative-id":["S0167865523002490"],"URL":"https:\/\/doi.org\/10.1016\/j.patrec.2023.09.002","relation":{},"ISSN":["0167-8655"],"issn-type":[{"value":"0167-8655","type":"print"}],"subject":[],"published":{"date-parts":[[2023,10]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"EasyFuse: Easy-to-learn visible and infrared image fusion framework based on unpaired set","name":"articletitle","label":"Article Title"},{"value":"Pattern Recognition Letters","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.patrec.2023.09.002","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2023 Elsevier B.V. All rights reserved.","name":"copyright","label":"Copyright"}]}}