{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,17]],"date-time":"2024-10-17T04:24:44Z","timestamp":1729139084584,"version":"3.27.0"},"publisher-location":"Cham","reference-count":54,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031282430"},{"type":"electronic","value":"9783031282447"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-28244-7_32","type":"book-chapter","created":{"date-parts":[[2023,3,16]],"date-time":"2023-03-16T17:03:18Z","timestamp":1678986198000},"page":"504-520","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["An Experimental Study on\u00a0Pretraining Transformers from\u00a0Scratch for\u00a0IR"],"prefix":"10.1007","author":[{"given":"Carlos","family":"Lassance","sequence":"first","affiliation":[]},{"given":"Herv\u00e9","family":"Dejean","sequence":"additional","affiliation":[]},{"given":"St\u00e9phane","family":"Clinchant","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,3,17]]},"reference":[{"key":"32_CR1","doi-asserted-by":"publisher","unstructured":"Aroca-Ouellette, S., Rudzicz, F.: On losses for modern language models. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 4970\u20134981. Association for Computational Linguistics, Online, November 2020. https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.403, https:\/\/aclanthology.org\/2020.emnlp-main.403","DOI":"10.18653\/v1\/2020.emnlp-main.403"},{"key":"32_CR2","doi-asserted-by":"publisher","unstructured":"Bai, B., et al.: Supervised semantic indexing. In: Proceedings of the 18th ACM International Conference on Information and Knowledge Management, pp. 187\u2013196. ACM (2009). https:\/\/doi.org\/10.1145\/1645953.1645979","DOI":"10.1145\/1645953.1645979"},{"key":"32_CR3","doi-asserted-by":"publisher","unstructured":"Beltagy, I., Lo, K., Cohan, A.: SciBERT: a pretrained language model for scientific text (2019). https:\/\/doi.org\/10.48550\/ARXIV.1903.10676, https:\/\/arxiv.org\/abs\/1903.10676","DOI":"10.48550\/ARXIV.1903.10676"},{"key":"32_CR4","doi-asserted-by":"publisher","unstructured":"Bommasani, R., et al.: On the opportunities and risks of foundation models (2021). 
https:\/\/doi.org\/10.48550\/ARXIV.2108.07258, https:\/\/arxiv.org\/abs\/2108.07258","DOI":"10.48550\/ARXIV.2108.07258"},{"key":"32_CR5","unstructured":"Bonifacio, L.H., Campiotti, I., Jeronymo, V., Lotufo, R., Nogueira, R.: MMARCO: a multilingual version of the MS MARCO passage ranking dataset. arXiv preprint arXiv:2108.13897 (2021)"},{"key":"32_CR6","unstructured":"Chang, W.C., Yu, F.X., Chang, Y.W., Yang, Y., Kumar, S.: Pre-training tasks for embedding-based large-scale retrieval. In: International Conference on Learning Representations (2020). https:\/\/openreview.net\/forum?id=rkg-mA4FDr"},{"key":"32_CR7","doi-asserted-by":"publisher","unstructured":"Clinchant, S., Jung, K.W., Nikoulina, V.: On the use of BERT for neural machine translation. In: Proceedings of the 3rd Workshop on Neural Generation and Translation, pp. 108\u2013117. Association for Computational Linguistics, Hong Kong, November 2019. https:\/\/doi.org\/10.18653\/v1\/D19-5611, https:\/\/aclanthology.org\/D19-5611","DOI":"10.18653\/v1\/D19-5611"},{"key":"32_CR8","doi-asserted-by":"crossref","unstructured":"Conneau, A., et al.: Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116 (2019)","DOI":"10.18653\/v1\/2020.acl-main.747"},{"key":"32_CR9","doi-asserted-by":"crossref","unstructured":"Craswell, N., Zoeter, O., Taylor, M., Ramsey, B.: An experimental comparison of click position-bias models. In: Proceedings of the 2008 International Conference on Web Search and Data Mining, pp. 87\u201394 (2008)","DOI":"10.1145\/1341531.1341545"},{"key":"32_CR10","doi-asserted-by":"publisher","unstructured":"Dehghani, M., Zamani, H., Severyn, A., Kamps, J., Croft, W.B.: Neural ranking models with weak supervision. In: Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval. SIGIR 2017, pp. 65\u201374. Association for Computing Machinery, New York (2017). https:\/\/doi.org\/10.1145\/3077136.3080832","DOI":"10.1145\/3077136.3080832"},{"key":"32_CR11","unstructured":"Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. CoRR abs\/1810.04805 (2018), http:\/\/arxiv.org\/abs\/1810.04805"},{"key":"32_CR12","unstructured":"El-Nouby, A., Izacard, G., Touvron, H., Laptev, I., Jegou, H., Grave, E.: Are large-scale datasets necessary for self-supervised pre-training? arXiv preprint arXiv:2112.10740 (2021)"},{"key":"32_CR13","doi-asserted-by":"publisher","unstructured":"Formal, T., Lassance, C., Piwowarski, B., Clinchant, S.: From distillation to hard negative sampling: making sparse neural IR models more effective. In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval. SIGIR 2022, pp. 2353\u20132359. Association for Computing Machinery, New York (2022). https:\/\/doi.org\/10.1145\/3477495.3531857","DOI":"10.1145\/3477495.3531857"},{"key":"32_CR14","doi-asserted-by":"publisher","unstructured":"Gao, L., Callan, J.: Condenser: a pre-training architecture for dense retrieval. In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 981\u2013993. Association for Computational Linguistics, Online and Punta Cana, Dominican Republic, November 2021. 
https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.75, https:\/\/aclanthology.org\/2021.emnlp-main.75","DOI":"10.18653\/v1\/2021.emnlp-main.75"},{"key":"32_CR15","doi-asserted-by":"publisher","unstructured":"Gao, L., Callan, J.: Unsupervised corpus aware language model pre-training for dense passage retrieval. In: Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2843\u20132853. Association for Computational Linguistics, Dublin, Ireland, May 2022. https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.203, https:\/\/aclanthology.org\/2022.acl-long.203","DOI":"10.18653\/v1\/2022.acl-long.203"},{"key":"32_CR16","doi-asserted-by":"publisher","unstructured":"Gao, L., Dai, Z., Callan, J.: COIL: Revisit exact lexical match in information retrieval with contextualized inverted list. In: Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 3030\u20133042. Association for Computational Linguistics, Online, June 2021. https:\/\/doi.org\/10.18653\/v1\/2021.naacl-main.241, https:\/\/aclanthology.org\/2021.naacl-main.241","DOI":"10.18653\/v1\/2021.naacl-main.241"},{"key":"32_CR17","doi-asserted-by":"publisher","unstructured":"Gu, Y., et al.: Domain-specific language model pretraining for biomedical natural language processing. ACM Trans. Comput. Healthc. 3(1), 1\u201323 (2022). https:\/\/doi.org\/10.1145\/3458754","DOI":"10.1145\/3458754"},{"key":"32_CR18","doi-asserted-by":"publisher","unstructured":"Guo, Y., et al.: Webformer: pre-training with web pages for information retrieval. In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval. SIGIR 2022, pp. 1502\u20131512. Association for Computing Machinery, New York (2022). https:\/\/doi.org\/10.1145\/3477495.3532086","DOI":"10.1145\/3477495.3532086"},{"key":"32_CR19","unstructured":"He, P., Liu, X., Gao, J., Chen, W.: DeBERTa: decoding-enhanced BERT with disentangled attention. In: International Conference on Learning Representations (2021). https:\/\/openreview.net\/forum?id=XPZIaotutsD"},{"key":"32_CR20","unstructured":"Hofst\u00e4tter, S., Althammer, S., Schr\u00f6der, M., Sertkan, M., Hanbury, A.: Improving efficient neural ranking models with cross-architecture knowledge distillation (2020)"},{"key":"32_CR21","doi-asserted-by":"crossref","unstructured":"Hofst\u00e4tter, S., Althammer, S., Sertkan, M., Hanbury, A.: Establishing strong baselines for tripclick health retrieval (2022)","DOI":"10.1007\/978-3-030-99739-7_17"},{"key":"32_CR22","doi-asserted-by":"crossref","unstructured":"Hofst\u00e4tter, S., Lin, S.C., Yang, J.H., Lin, J., Hanbury, A.: Efficiently teaching an effective dense retriever with balanced topic aware sampling. In: Proceedings of SIGIR (2021)","DOI":"10.1145\/3404835.3462891"},{"key":"32_CR23","unstructured":"Izacard, G., et al.: Towards unsupervised dense information retrieval with contrastive learning (2021)"},{"key":"32_CR24","unstructured":"Kaplan, J., et al.: Scaling laws for neural language models. arXiv abs\/2001.08361 (2020)"},{"key":"32_CR25","doi-asserted-by":"publisher","unstructured":"Karpukhin, V., et al.: Dense passage retrieval for open-domain question answering. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 6769\u20136781. Association for Computational Linguistics, Online, November 2020. 
https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.550, https:\/\/www.aclweb.org\/anthology\/2020.emnlp-main.550","DOI":"10.18653\/v1\/2020.emnlp-main.550"},{"key":"32_CR26","doi-asserted-by":"publisher","unstructured":"Khattab, O., Zaharia, M.: ColBERT: efficient and effective passage search via contextualized late interaction over BERT. In: Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. SIGIR 2020, pp. 39\u201348. Association for Computing Machinery, New York (2020). https:\/\/doi.org\/10.1145\/3397271.3401075","DOI":"10.1145\/3397271.3401075"},{"key":"32_CR27","doi-asserted-by":"publisher","unstructured":"Kim, T., Yoo, K.M., Lee, S.G.: Self-guided contrastive learning for BERT sentence representations. In: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 2528\u20132540. Association for Computational Linguistics, Online, August 2021. https:\/\/doi.org\/10.18653\/v1\/2021.acl-long.197, https:\/\/aclanthology.org\/2021.acl-long.197","DOI":"10.18653\/v1\/2021.acl-long.197"},{"key":"32_CR28","doi-asserted-by":"publisher","unstructured":"Lassance, C., Clinchant, S.: An efficiency study for splade models. In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval. SIGIR 2022, pp. 2220\u20132226. Association for Computing Machinery, New York (2022). https:\/\/doi.org\/10.1145\/3477495.3531833","DOI":"10.1145\/3477495.3531833"},{"key":"32_CR29","unstructured":"Lin, J., Ma, X.: A few brief notes on deepimpact, coil, and a conceptual framework for information retrieval techniques. CoRR abs\/2106.14807 (2021). https:\/\/arxiv.org\/abs\/2106.14807"},{"key":"32_CR30","unstructured":"Lin, J., Nogueira, R., Yates, A.: Pretrained transformers for text ranking: BERT and beyond. arXiv:2010.06467 [cs] (Oct 2020), http:\/\/arxiv.org\/abs\/2010.06467, zSCC: NoCitationData[s0] arXiv: 2010.06467"},{"key":"32_CR31","doi-asserted-by":"publisher","unstructured":"Lin, S.C., Yang, J.H., Lin, J.: In-batch negatives for knowledge distillation with tightly-coupled teachers for dense retrieval. In: Proceedings of the 6th Workshop on Representation Learning for NLP (RepL4NLP-2021), pp. 163\u2013173. Association for Computational Linguistics, Online, August 2021. https:\/\/doi.org\/10.18653\/v1\/2021.repl4nlp-1.17, https:\/\/aclanthology.org\/2021.repl4nlp-1.17","DOI":"10.18653\/v1\/2021.repl4nlp-1.17"},{"key":"32_CR32","doi-asserted-by":"publisher","unstructured":"Liu, Z., Shao, Y.: Retromae: pre-training retrieval-oriented transformers via masked auto-encoder (2022). https:\/\/doi.org\/10.48550\/ARXIV.2205.12035, https:\/\/arxiv.org\/abs\/2205.12035","DOI":"10.48550\/ARXIV.2205.12035"},{"key":"32_CR33","doi-asserted-by":"crossref","unstructured":"Ma, X., Guo, J., Zhang, R., Fan, Y., Ji, X., Cheng, X.: B-prop: bootstrapped pre-training with representative words prediction for ad-hoc retrieval. Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval (2021)","DOI":"10.1145\/3404835.3462869"},{"key":"32_CR34","doi-asserted-by":"crossref","unstructured":"Ma, X., Guo, J., Zhang, R., Fan, Y., Ji, X., Cheng, X.: Prop: pre-training with representative words prediction for ad-hoc retrieval. 
In: Proceedings of the 14th ACM International Conference on Web Search and Data Mining (2021)","DOI":"10.1145\/3437963.3441777"},{"key":"32_CR35","doi-asserted-by":"crossref","unstructured":"Ma, Z., et al.: Pre-training for ad-hoc retrieval: hyperlink is also you need. In: Proceedings of the 30th ACM International Conference on Information and Knowledge Management (2021)","DOI":"10.1145\/3459637.3482286"},{"key":"32_CR36","unstructured":"Muennighoff, N.: SGPT: GPT sentence embeddings for semantic search. arXiv preprint arXiv:2202.08904 (2022)"},{"key":"32_CR37","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"382","DOI":"10.1007\/978-3-030-99736-6_26","volume-title":"Advances in Information Retrieval","author":"S Nair","year":"2022","unstructured":"Nair, S., et al.: Transfer learning approaches for building cross-language dense retrieval models. In: Hagen, M., et al. (eds.) ECIR 2022. LNCS, vol. 13185, pp. 382\u2013396. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-030-99736-6_26"},{"key":"32_CR38","unstructured":"Nair, S., Yang, E., Lawrie, D., Mayfield, J., Oard, D.W.: Learning a sparse representation model for neural CLIR. In: Design of Experimental Search and Information REtrieval Systems (DESIRES) (2022)"},{"key":"32_CR39","unstructured":"Nguyen, T., et al.: MS MARCO: a human generated machine reading comprehension dataset. In: CoCo@ NIPs (2016)"},{"key":"32_CR40","unstructured":"Nogueira, R., Cho, K.: Passage re-ranking with BERT (2019)"},{"key":"32_CR41","unstructured":"Paria, B., Yeh, C.K., Yen, I.E.H., Xu, N., Ravikumar, P., P\u00f3czos, B.: Minimizing flops to learn efficient sparse representations (2020)"},{"key":"32_CR42","doi-asserted-by":"crossref","unstructured":"Qu, Y., et al: RocketQA: an optimized training approach to dense passage retrieval for open-domain question answering. In: In Proceedings of NAACL (2021)","DOI":"10.18653\/v1\/2021.naacl-main.466"},{"key":"32_CR43","doi-asserted-by":"crossref","unstructured":"Reimers, N., Gurevych, I.: Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, November 2019. http:\/\/arxiv.org\/abs\/1908.10084","DOI":"10.18653\/v1\/D19-1410"},{"key":"32_CR44","doi-asserted-by":"publisher","unstructured":"Rekabsaz, N., Lesota, O., Schedl, M., Brassey, J., Eickhoff, C.: Tripclick: the log files of a large health web search engine. In: Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2507\u20132513 (2021). https:\/\/doi.org\/10.1145\/3404835.3463242","DOI":"10.1145\/3404835.3463242"},{"key":"32_CR45","doi-asserted-by":"publisher","unstructured":"Ren, R., et al.: RocketQAv2: a joint training method for dense passage retrieval and passage re-ranking. In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 2825\u20132835. Association for Computational Linguistics, Online and Punta Cana, Dominican Republic, November 2021. https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.224, https:\/\/aclanthology.org\/2021.emnlp-main.224","DOI":"10.18653\/v1\/2021.emnlp-main.224"},{"key":"32_CR46","doi-asserted-by":"crossref","unstructured":"Robertson, S.E., Walker, S., Beaulieu, M., Gatford, M., Payne, A.: Okapi at TREC-4. Nist Special Publication Sp, pp. 
73\u201396 (1996)","DOI":"10.6028\/NIST.SP.500-236.routing-city"},{"key":"32_CR47","unstructured":"Sanh, V., Debut, L., Chaumond, J., Wolf, T.: DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108 (2019)"},{"key":"32_CR48","doi-asserted-by":"crossref","unstructured":"Santhanam, K., Khattab, O., Saad-Falcon, J., Potts, C., Zaharia, M.: ColBERTv2: effective and efficient retrieval via lightweight late interaction (2021)","DOI":"10.18653\/v1\/2022.naacl-main.272"},{"key":"32_CR49","doi-asserted-by":"publisher","unstructured":"Tay, Y., et al.: Are pretrained convolutions better than pretrained transformers? In: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 4349\u20134359. Association for Computational Linguistics, Online, August 2021. https:\/\/doi.org\/10.18653\/v1\/2021.acl-long.335, https:\/\/aclanthology.org\/2021.acl-long.335","DOI":"10.18653\/v1\/2021.acl-long.335"},{"key":"32_CR50","unstructured":"Tay, Y., et al.: Scale efficiently: insights from pre-training and fine-tuning transformers. arXiv abs\/2109.10686 (2022)"},{"key":"32_CR51","unstructured":"Thakur, N., Reimers, N., R\u00fcckl\u00e9, A., Srivastava, A., Gurevych, I.: BEIR: a heterogenous benchmark for zero-shot evaluation of information retrieval models. CoRR abs\/2104.08663 (2021). https:\/\/arxiv.org\/abs\/2104.08663"},{"key":"32_CR52","unstructured":"Wu, Y., et al.: Google\u2019s neural machine translation system: bridging the gap between human and machine translation (2016)"},{"key":"32_CR53","unstructured":"Xiong, L., et al.: Approximate nearest neighbor negative contrastive learning for dense text retrieval. In: International Conference on Learning Representations (2021). https:\/\/openreview.net\/forum?id=zeFrfgyZln"},{"key":"32_CR54","doi-asserted-by":"crossref","unstructured":"Zhang, X., Ma, X., Shi, P., Lin, J.: Mr. Tydi: a multi-lingual benchmark for dense retrieval. In: Proceedings of the 1st Workshop on Multilingual Representation Learning, pp. 
127\u2013137 (2021)","DOI":"10.18653\/v1\/2021.mrl-1.12"}],"container-title":["Lecture Notes in Computer Science","Advances in Information Retrieval"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-28244-7_32","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,16]],"date-time":"2024-10-16T15:09:33Z","timestamp":1729091373000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-28244-7_32"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031282430","9783031282447"],"references-count":54,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-28244-7_32","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"17 March 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECIR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Information Retrieval","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Dublin","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Ireland","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 April 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6 April 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"45","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ecir2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/ecir2023.org\/index.html?v=1.0","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"489","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"77","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided 
by the conference organizers)"}},{"value":"83","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"16% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
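
The record above is a standard Crossref REST API "work" message, retrievable from the public works endpoint at api.crossref.org. As a quick sanity check, the acceptance rate stated in the "assertion" block can be re-derived from the peer-review counts. The minimal Python sketch below does this against the live API; the only assumptions are network access and the third-party requests package, and every field name is taken directly from the record itself.

import requests  # third-party HTTP client, assumed installed (pip install requests)

# DOI of the chapter, as given in the record above.
DOI = "10.1007/978-3-031-28244-7_32"

# Crossref's public works endpoint returns the same envelope as above:
# {"status":"ok","message-type":"work",...,"message":{...}}
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
msg = resp.json()["message"]

print(msg["title"][0])         # chapter title
print(msg["reference-count"])  # 54 cited references

# Flatten the "assertion" list into a name -> value map and re-derive the
# stated acceptance rate: 77 / 489 * 100 = 15.746..., rounded to 16%.
assertions = {a["name"]: a["value"] for a in msg.get("assertion", [])}
accepted = int(assertions["number_of_full_papers_accepted"])
reviewed = int(assertions["number_of_submissions_sent_for_review"])
print(f"full-paper acceptance rate: {round(accepted / reviewed * 100)}%")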