{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,12]],"date-time":"2024-09-12T13:50:32Z","timestamp":1726149032521},"publisher-location":"Cham","reference-count":24,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030922726"},{"type":"electronic","value":"9783030922733"}],"license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021]]},"DOI":"10.1007\/978-3-030-92273-3_26","type":"book-chapter","created":{"date-parts":[[2021,12,4]],"date-time":"2021-12-04T21:34:27Z","timestamp":1638653667000},"page":"311-322","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["QS-Hyper: A\u00a0Quality-Sensitive Hyper Network for\u00a0the\u00a0No-Reference Image Quality Assessment"],"prefix":"10.1007","author":[{"given":"Xuewen","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Yunye","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Wenxin","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Nie","sequence":"additional","affiliation":[]},{"given":"Ning","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Jun","family":"Gong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,12,5]]},"reference":[{"key":"26_CR1","doi-asserted-by":"crossref","unstructured":"Bosse, S., Maniry, D., M\u00fcller, K.R., Wiegand, T., Samek, W.: Deep neural networks for no-reference and full-reference image quality assessment. IEEE Trans. Image Process. 27(1), 206\u2013219 (2017)","DOI":"10.1109\/TIP.2017.2760518"},{"key":"26_CR2","doi-asserted-by":"crossref","unstructured":"Bromley, J., Guyon, I., LeCun, Y., S\u00e4ckinger, E., Shah, R.: Signature verification using a \u201csiamese\" time delay neural network. Adv. Neural Inf. Process. Syst. 6, 737\u2013744 (1993)","DOI":"10.1142\/9789812797926_0003"},{"key":"26_CR3","doi-asserted-by":"crossref","unstructured":"Caron, M., Bojanowski, P., Joulin, A., Douze, M.: Deep clustering for unsupervised learning of visual features. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 132\u2013149 (2018)","DOI":"10.1007\/978-3-030-01264-9_9"},{"key":"26_CR4","doi-asserted-by":"crossref","unstructured":"Chen, X., He, K.: Exploring simple siamese representation learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15750\u201315758 (2021)","DOI":"10.1109\/CVPR46437.2021.01549"},{"key":"26_CR5","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: 2009 IEEE conference on computer vision and pattern recognition, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"26_CR6","doi-asserted-by":"crossref","unstructured":"Ghadiyaram, D., Bovik, A.C.: Massive online crowdsourced study of subjective and objective picture quality. IEEE Trans. Image Process. 
25(1), 372\u2013387 (2015)","DOI":"10.1109\/TIP.2015.2500021"},{"key":"26_CR7","doi-asserted-by":"crossref","unstructured":"He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9729\u20139738 (2020)","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"26_CR8","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"26_CR9","unstructured":"Hjelm, R.D., et al.: Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670 (2018)"},{"key":"26_CR10","doi-asserted-by":"crossref","unstructured":"Hosu, V., Lin, H., Sziranyi, T., Saupe, D.: Koniq-10k: an ecologically valid database for deep learning of blind image quality assessment. IEEE Trans. Image Process. 29, 4041\u20134056 (2020)","DOI":"10.1109\/TIP.2020.2967829"},{"key":"26_CR11","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. Adv. Neural Inf. Process. Syst. 25, 1097\u20131105 (2012)"},{"key":"26_CR12","doi-asserted-by":"crossref","unstructured":"Lin, K.Y., Wang, G.: Hallucinated-iqa: no-reference image quality assessment via adversarial learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 732\u2013741 (2018)","DOI":"10.1109\/CVPR.2018.00083"},{"key":"26_CR13","doi-asserted-by":"crossref","unstructured":"Liu, X., Van De Weijer, J., Bagdanov, A.D.: Rankiqa: learning from rankings for no-reference image quality assessment. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1040\u20131049 (2017)","DOI":"10.1109\/ICCV.2017.118"},{"key":"26_CR14","doi-asserted-by":"crossref","unstructured":"Mittal, A., Moorthy, A.K., Bovik, A.C.: No-reference image quality assessment in the spatial domain. IEEE Transactions on Image Processing 21(12), 4695\u20134708 (2012)","DOI":"10.1109\/TIP.2012.2214050"},{"key":"26_CR15","doi-asserted-by":"crossref","unstructured":"Moorthy, A.K., Bovik, A.C.: A two-step framework for constructing blind image quality indices. IEEE Signal Process. Lett. 17(5), 513\u2013516 (2010)","DOI":"10.1109\/LSP.2010.2043888"},{"key":"26_CR16","doi-asserted-by":"crossref","unstructured":"Ou, F.Z., et al.: Sdd-fiqa: unsupervised face image quality assessment with similarity distribution distance. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7670\u20137679 (2021)","DOI":"10.1109\/CVPR46437.2021.00758"},{"key":"26_CR17","doi-asserted-by":"crossref","unstructured":"Ponomarenko, N., et al.: Image database tid2013: peculiarities, results and perspectives. Signal Process. Image Commun. 30, 57\u201377 (2015)","DOI":"10.1016\/j.image.2014.10.009"},{"key":"26_CR18","doi-asserted-by":"crossref","unstructured":"Su, S., et al.: Blindly assess image quality in the wild guided by a self-adaptive hyper network. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
3667\u20133676 (2020)","DOI":"10.1109\/CVPR42600.2020.00372"},{"key":"26_CR19","doi-asserted-by":"crossref","unstructured":"Xue, W., Mou, X., Zhang, L., Bovik, A.C., Feng, X.: Blind image quality assessment using joint statistics of gradient magnitude and laplacian features. IEEE Trans. Image Process. 23(11), 4850\u20134862 (2014)","DOI":"10.1109\/TIP.2014.2355716"},{"key":"26_CR20","doi-asserted-by":"crossref","unstructured":"Zhang, L., Zhang, L., Bovik, A.C.: A feature-enriched completely blind image quality evaluator. IEEE Trans. Image Process. 24(8), 2579\u20132591 (2015)","DOI":"10.1109\/TIP.2015.2426416"},{"key":"26_CR21","doi-asserted-by":"crossref","unstructured":"Zhang, X., Yu, W., Jiang, N., Zhang, Y., Zhang, Z.: Sps: A subjective perception score for text-to-image synthesis. In: 2021 IEEE International Symposium on Circuits and Systems (ISCAS), pp. 1\u20135. IEEE (2021)","DOI":"10.1109\/ISCAS51556.2021.9401705"},{"key":"26_CR22","doi-asserted-by":"publisher","unstructured":"Zhang, X., Zhang, Y., Zhang, Z., Yu, W., Jiang, N., He, G.: Deep feature compatibility for generated images quality assessment. In: Yang, H., Pasupa, K., Leung, A.CS., Kwok, J.T., Chan, J.H., King, I. (eds.) Neural Information Processing. ICONIP 2020. Communications in Computer and Information Science, vol. 1332. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-63820-7_40","DOI":"10.1007\/978-3-030-63820-7_40"},{"key":"26_CR23","doi-asserted-by":"publisher","unstructured":"Zhang, Y., Zhang, X., Zhang, Z., Yu, W., Jiang, N., He, G.: No-reference quality assessment based on spatial statistic for generated images. In: Yang, H., Pasupa, K., Leung, A.CS., Kwok, J.T., Chan, J.H., King, I. (eds.) Neural Information Processing. ICONIP 2020. Communications in Computer and Information Science, vol. 1332. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-63820-7_57","DOI":"10.1007\/978-3-030-63820-7_57"},{"key":"26_CR24","doi-asserted-by":"crossref","unstructured":"Zhu, H., Li, L., Wu, J., Dong, W., Shi, G.: Metaiqa: deep meta-learning for no-reference image quality assessment. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
14143\u201314152 (2020)","DOI":"10.1109\/CVPR42600.2020.01415"}],"container-title":["Lecture Notes in Computer Science","Neural Information Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-92273-3_26","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,12,4]],"date-time":"2021-12-04T21:38:08Z","timestamp":1638653888000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-92273-3_26"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"ISBN":["9783030922726","9783030922733"],"references-count":24,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-92273-3_26","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2021]]},"assertion":[{"value":"5 December 2021","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICONIP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Neural Information Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Sanur, Bali","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Indonesia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 December 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 December 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iconip2021","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/iconip2021.apnns.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1093","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"226","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer 
Review Information (provided by the conference organizers)"}},{"value":"177","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"21% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.57","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"6","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Due to the COVID-19 pandemic the conference was held online.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
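For readers who want to retrieve or post-process a record like the one above, here is a minimal sketch that fetches the same work from the public Crossref REST API (https://api.crossref.org/works/{DOI}) and prints a few of the fields shown in the record. The use of the `requests` package and the exact output formatting are illustrative choices, not part of the record itself; the field names ("message", "title", "author", "container-title", "page", "issued", "references-count") follow the Crossref works schema seen above.

```python
# Minimal sketch: fetch the Crossref record for this chapter and print key metadata.
# Assumes the `requests` package is installed and network access is available.
import requests

DOI = "10.1007/978-3-030-92273-3_26"

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=10)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object holds the work metadata

title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))
container = " / ".join(work.get("container-title", []))
year = work["issued"]["date-parts"][0][0]

print(title)
print(authors)
print(f"{container}, pp. {work.get('page', '?')} ({year})")
print(f"References listed: {work.get('references-count', 0)}")
```

Running this should reproduce the bibliographic summary encoded in the JSON above: the chapter title, the six authors, the LNCS / ICONIP 2021 container, pages 311-322, and the 24-entry reference list count.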