{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,19]],"date-time":"2024-09-19T16:24:57Z","timestamp":1726763097514},"publisher-location":"Cham","reference-count":51,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031198328"},{"type":"electronic","value":"9783031198335"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_31","type":"book-chapter","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T00:40:30Z","timestamp":1667522430000},"page":"528-545","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":11,"title":["MORE: Multi-Order RElation Mining for\u00a0Dense Captioning in\u00a03D Scenes"],"prefix":"10.1007","author":[{"given":"Yang","family":"Jiao","sequence":"first","affiliation":[]},{"given":"Shaoxiang","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Zequn","family":"Jie","sequence":"additional","affiliation":[]},{"given":"Jingjing","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Lin","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Yu-Gang","family":"Jiang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"key":"31_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"422","DOI":"10.1007\/978-3-030-58452-8_25","volume-title":"Computer Vision \u2013 ECCV 2020","author":"P Achlioptas","year":"2020","unstructured":"Achlioptas, P., Abdelreheem, A., Xia, F., Elhoseiny, M., Guibas, L.: ReferIt3D: neural listeners for fine-grained 3D object identification in real-world scenes. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 422\u2013440. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_25"},{"key":"31_CR2","doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Bottom-up and top-down attention for image captioning and visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6077\u20136086 (2018)","DOI":"10.1109\/CVPR.2018.00636"},{"key":"31_CR3","doi-asserted-by":"crossref","unstructured":"Armeni, I., et al.: 3D scene graph: a structure for unified semantics, 3D space, and camera. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
5664\u20135673 (2019)","DOI":"10.1109\/ICCV.2019.00576"},{"key":"31_CR4","unstructured":"Banerjee, S., Lavie, A.: Meteor: an automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, pp. 65\u201372 (2005)"},{"key":"31_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"202","DOI":"10.1007\/978-3-030-58565-5_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"DZ Chen","year":"2020","unstructured":"Chen, D.Z., Chang, A.X., Nie\u00dfner, M.: ScanRefer: 3D object localization in RGB-D scans using natural language. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12365, pp. 202\u2013221. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58565-5_13"},{"key":"31_CR6","doi-asserted-by":"crossref","unstructured":"Chen, D.Z., Wu, Q., Nie\u00dfner, M., Chang, A.X.: D3Net: a speaker-listener architecture for semi-supervised dense captioning and visual grounding in RGB-D scans. arXiv preprint arXiv:2112.01551 (2021)","DOI":"10.1007\/978-3-031-19824-3_29"},{"key":"31_CR7","doi-asserted-by":"crossref","unstructured":"Chen, D., Lin, Y., Li, W., Li, P., Zhou, J., Sun, X.: Measuring and relieving the over-smoothing problem for graph neural networks from the topological view. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 3438\u20133445 (2020)","DOI":"10.1609\/aaai.v34i04.5747"},{"key":"31_CR8","doi-asserted-by":"crossref","unstructured":"Chen, J., et al.: Zero-shot ingredient recognition by multi-relational graph convolutional network. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 10542\u201310550 (2020)","DOI":"10.1609\/aaai.v34i07.6626"},{"key":"31_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"333","DOI":"10.1007\/978-3-030-58548-8_20","volume-title":"Computer Vision \u2013 ECCV 2020","author":"S Chen","year":"2020","unstructured":"Chen, S., Jiang, W., Liu, W., Jiang, Y.-G.: Learning modality interaction for temporal sentence localization and event captioning in videos. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12349, pp. 333\u2013351. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58548-8_20"},{"key":"31_CR10","doi-asserted-by":"crossref","unstructured":"Chen, S., Jiang, Y.G.: Towards bridging event captioner and sentence localizer for weakly supervised dense event captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 8425\u20138435, June 2021","DOI":"10.1109\/CVPR46437.2021.00832"},{"key":"31_CR11","doi-asserted-by":"crossref","unstructured":"Chen, Z., Gholami, A., Nie\u00dfner, M., Chang, A.X.: Scan2cap: context-aware dense captioning in RGB-D scans. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3193\u20133203 (2021)","DOI":"10.1109\/CVPR46437.2021.00321"},{"key":"31_CR12","doi-asserted-by":"crossref","unstructured":"Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nie\u00dfner, M.: ScanNet: richly-annotated 3D reconstructions of indoor scenes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
5828\u20135839 (2017)","DOI":"10.1109\/CVPR.2017.261"},{"key":"31_CR13","doi-asserted-by":"crossref","unstructured":"Deng, C., Chen, S., Chen, D., He, Y., Wu, Q.: Sketch, ground, and refine: top-down dense video captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 234\u2013243 (2021)","DOI":"10.1109\/CVPR46437.2021.00030"},{"key":"31_CR14","doi-asserted-by":"crossref","unstructured":"Feng, M., et al.: Free-form description guided 3D visual graph network for object grounding in point cloud. arXiv preprint arXiv:2103.16381 (2021)","DOI":"10.1109\/ICCV48922.2021.00370"},{"key":"31_CR15","doi-asserted-by":"crossref","unstructured":"He, D., et al.: Transrefer3D: entity-and-relation aware transformer for fine-grained 3D visual grounding. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 2344\u20132352 (2021)","DOI":"10.1145\/3474085.3475397"},{"key":"31_CR16","doi-asserted-by":"crossref","unstructured":"Huang, P.H., Lee, H.H., Chen, H.T., Liu, T.L.: Text-guided graph neural networks for referring 3D instance segmentation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 1610\u20131618 (2021)","DOI":"10.1609\/aaai.v35i2.16253"},{"key":"31_CR17","doi-asserted-by":"crossref","unstructured":"Ji, Z., Chen, K., Wang, H.: Step-wise hierarchical alignment network for image-text matching. In: IJCAI, pp. 765\u2013771 (2021)","DOI":"10.24963\/ijcai.2021\/106"},{"key":"31_CR18","doi-asserted-by":"crossref","unstructured":"Jiao, Y., Jie, Z., Chen, J., Ma, L., Jiang, Y.G.: Suspected object matters: rethinking model\u2019s prediction for one-stage visual grounding. arXiv preprint arXiv:2203.05186 (2022)","DOI":"10.1145\/3581783.3611721"},{"key":"31_CR19","doi-asserted-by":"crossref","unstructured":"Jiao, Y., et al.: Two-stage visual cues enhancement network for referring image segmentation. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 1331\u20131340 (2021)","DOI":"10.1145\/3474085.3475222"},{"key":"31_CR20","doi-asserted-by":"crossref","unstructured":"Kim, D.J., Choi, J., Oh, T.H., Kweon, I.S.: Dense relational captioning: triple-stream networks for relationship-based captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6271\u20136280 (2019)","DOI":"10.1109\/CVPR.2019.00643"},{"issue":"11","key":"31_CR21","doi-asserted-by":"publisher","first-page":"2947","DOI":"10.1109\/TVCG.2018.2868591","volume":"24","author":"K Kim","year":"2018","unstructured":"Kim, K., Billinghurst, M., Bruder, G., Duh, H.B.L., Welch, G.F.: Revisiting trends in augmented reality research: a review of the 2nd decade of ISMAR (2008\u20132017). IEEE Trans. Vis. Comput. Graph. 24(11), 2947\u20132962 (2018)","journal-title":"IEEE Trans. Vis. Comput. Graph."},{"key":"31_CR22","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"issue":"8","key":"31_CR23","doi-asserted-by":"publisher","first-page":"2117","DOI":"10.1109\/TMM.2019.2896516","volume":"21","author":"X Li","year":"2019","unstructured":"Li, X., Jiang, S.: Know more say less: image captioning based on scene graphs. IEEE Trans. Multimedia 21(8), 2117\u20132130 (2019)","journal-title":"IEEE Trans. Multimedia"},{"key":"31_CR24","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 
74\u201381 (2004)"},{"key":"31_CR25","unstructured":"Milewski, V., Moens, M.F., Calixto, I.: Are scene graphs good enough to improve image captioning? arXiv preprint arXiv:2009.12313 (2020)"},{"key":"31_CR26","doi-asserted-by":"crossref","unstructured":"Pan, Y., Mei, T., Yao, T., Li, H., Rui, Y.: Jointly modeling embedding and translation to bridge video and language. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4594\u20134602 (2016)","DOI":"10.1109\/CVPR.2016.497"},{"key":"31_CR27","doi-asserted-by":"crossref","unstructured":"Pan, Y., Yao, T., Li, Y., Mei, T.: X-linear attention networks for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10971\u201310980 (2020)","DOI":"10.1109\/CVPR42600.2020.01098"},{"key":"31_CR28","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"31_CR29","doi-asserted-by":"crossref","unstructured":"Pennington, J., Socher, R., Manning, C.D.: Glove: global vectors for word representation. In: Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 1532\u20131543 (2014)","DOI":"10.3115\/v1\/D14-1162"},{"key":"31_CR30","doi-asserted-by":"crossref","unstructured":"Qi, C.R., Litany, O., He, K., Guibas, L.J.: Deep hough voting for 3D object detection in point clouds. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9277\u20139286 (2019)","DOI":"10.1109\/ICCV.2019.00937"},{"key":"31_CR31","unstructured":"Qi, C.R., Yi, L., Su, H., Guibas, L.J.: PointNet++: deep hierarchical feature learning on point sets in a metric space. arXiv preprint arXiv:1706.02413 (2017)"},{"key":"31_CR32","doi-asserted-by":"crossref","unstructured":"Savva, M., et al.: Habitat: a platform for embodied AI research. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9339\u20139347 (2019)","DOI":"10.1109\/ICCV.2019.00943"},{"key":"31_CR33","doi-asserted-by":"crossref","unstructured":"Song, X., Chen, J., Wu, Z., Jiang, Y.G.: Spatial-temporal graphs for cross-modal text2video retrieval. IEEE Trans. Multimedia (2021)","DOI":"10.1109\/TMM.2021.3090595"},{"key":"31_CR34","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems 30 (2017)"},{"key":"31_CR35","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence Zitnick, C., Parikh, D.: CIDEr: consensus-based image description evaluation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4566\u20134575 (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"31_CR36","doi-asserted-by":"crossref","unstructured":"Wald, J., Dhamo, H., Navab, N., Tombari, F.: Learning 3D semantic scene graphs from 3D indoor reconstructions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3961\u20133970 (2020)","DOI":"10.1109\/CVPR42600.2020.00402"},{"key":"31_CR37","doi-asserted-by":"crossref","unstructured":"Wang, D., Beck, D., Cohn, T.: On the role of scene graphs in image captioning. In: Proceedings of the Beyond Vision and Language: Integrating Real-world Knowledge (LANTERN), pp. 
29\u201334 (2019)","DOI":"10.18653\/v1\/D19-6405"},{"key":"31_CR38","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1007\/978-3-030-58586-0_2","volume-title":"Computer Vision \u2013 ECCV 2020","author":"H Wang","year":"2020","unstructured":"Wang, H., Zhang, Y., Ji, Z., Pang, Y., Ma, L.: Consensus-aware visual-semantic embedding for image-text matching. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12369, pp. 18\u201334. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58586-0_2"},{"key":"31_CR39","doi-asserted-by":"crossref","unstructured":"Wang, J., Jiang, W., Ma, L., Liu, W., Xu, Y.: Bidirectional attentive fusion with context gating for dense video captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7190\u20137198 (2018)","DOI":"10.1109\/CVPR.2018.00751"},{"issue":"5","key":"31_CR40","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3326362","volume":"38","author":"Y Wang","year":"2019","unstructured":"Wang, Y., Sun, Y., Liu, Z., Sarma, S.E., Bronstein, M.M., Solomon, J.M.: Dynamic graph CNN for learning on point clouds. ACM Trans. Graph. (TOG) 38(5), 1\u201312 (2019)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"31_CR41","doi-asserted-by":"crossref","unstructured":"Wu, S.C., Wald, J., Tateno, K., Navab, N., Tombari, F.: SceneGraphFusion: incremental 3D scene graph prediction from RGB-D sequences. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7515\u20137525 (2021)","DOI":"10.1109\/CVPR46437.2021.00743"},{"key":"31_CR42","doi-asserted-by":"crossref","unstructured":"Xia, F., Zamir, A.R., He, Z., Sax, A., Malik, J., Savarese, S.: Gibson Env: real-world perception for embodied agents. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 9068\u20139079 (2018)","DOI":"10.1109\/CVPR.2018.00945"},{"key":"31_CR43","doi-asserted-by":"crossref","unstructured":"Xiong, J., Hsiang, E.L., He, Z., Zhan, T., Wu, S.T.: Augmented reality and virtual reality displays: emerging technologies and future perspectives. Light Sci. Appl. 10(1), 1\u201330 (2021)","DOI":"10.1038\/s41377-021-00658-8"},{"key":"31_CR44","doi-asserted-by":"crossref","unstructured":"Yang, L., Tang, K., Yang, J., Li, L.J.: Dense captioning with joint inference and visual context. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2193\u20132202 (2017)","DOI":"10.1109\/CVPR.2017.214"},{"key":"31_CR45","doi-asserted-by":"crossref","unstructured":"Yang, X., Tang, K., Zhang, H., Cai, J.: Auto-encoding scene graphs for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10685\u201310694 (2019)","DOI":"10.1109\/CVPR.2019.01094"},{"key":"31_CR46","doi-asserted-by":"crossref","unstructured":"Yang, Z., Zhang, S., Wang, L., Luo, J.: SAT: 2D semantics assisted training for 3D visual grounding. arXiv preprint arXiv:2105.11450 (2021)","DOI":"10.1109\/ICCV48922.2021.00187"},{"key":"31_CR47","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"711","DOI":"10.1007\/978-3-030-01264-9_42","volume-title":"Computer Vision \u2013 ECCV 2018","author":"T Yao","year":"2018","unstructured":"Yao, T., Pan, Y., Li, Y., Mei, T.: Exploring visual relationship for image captioning. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) Computer Vision \u2013 ECCV 2018. 
LNCS, vol. 11218, pp. 711\u2013727. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01264-9_42"},{"key":"31_CR48","doi-asserted-by":"crossref","unstructured":"Yuan, Z., et al.: InstanceRefer: cooperative holistic understanding for visual grounding on point clouds through instance multi-level contextual referring. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1791\u20131800 (2021)","DOI":"10.1109\/ICCV48922.2021.00181"},{"key":"31_CR49","doi-asserted-by":"crossref","unstructured":"Zhang, H., Niu, Y., Chang, S.F.: Grounding referring expressions in images by variational context. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4158\u20134166 (2018)","DOI":"10.1109\/CVPR.2018.00437"},{"key":"31_CR50","doi-asserted-by":"crossref","unstructured":"Zhao, L., Cai, D., Sheng, L., Xu, D.: 3DVG-transformer: relation modeling for visual grounding on point clouds. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2928\u20132937 (2021)","DOI":"10.1109\/ICCV48922.2021.00292"},{"key":"31_CR51","unstructured":"Zhou, K., Huang, X., Li, Y., Zha, D., Chen, R., Hu, X.: Towards deeper graph neural networks with differentiable group normalization. In: Advances in Neural Information Processing Systems, vol. 33, 4917\u20134928 (2020)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_31","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T02:02:16Z","timestamp":1701309736000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_31"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":51,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_31","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}