{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T00:43:31Z","timestamp":1730249011763,"version":"3.28.0"},"reference-count":33,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,12,1]],"date-time":"2023-12-01T00:00:00Z","timestamp":1701388800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,12,1]],"date-time":"2023-12-01T00:00:00Z","timestamp":1701388800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,12,1]]},"DOI":"10.1109\/ickg59574.2023.00012","type":"proceedings-article","created":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T18:29:54Z","timestamp":1706812194000},"page":"52-59","source":"Crossref","is-referenced-by-count":0,"title":["A Study Case of Automatic Archival Research and Compilation using Large Language Models"],"prefix":"10.1109","author":[{"given":"Dongsheng","family":"Guo","sequence":"first","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Aizhen","family":"Yue","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Fanggang","family":"Ning","sequence":"additional","affiliation":[{"name":"Inspur Software Co., Ltd.,Shandong,China"}]},{"given":"Dengrong","family":"Huang","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Bingxin","family":"Chang","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Qiang","family":"Duan","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Lianchao","family":"Zhang","sequence":"additional","affiliation":[{"name":"Inspur Software Co., Ltd.,Shandong,China"}]},{"given":"Zhaoliang","family":"Chen","sequence":"additional","affiliation":[{"name":"Inspur Software Co., Ltd.,Shandong,China"}]},{"given":"Zheng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Enhao","family":"Zhan","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Qilai","family":"Zhang","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Kai","family":"Jiang","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Rui","family":"Li","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and Technology,Shandong,China"}]},{"given":"Shaoxiang","family":"Zhao","sequence":"additional","affiliation":[{"name":"Inspur Software Co., Ltd.,Shandong,China"}]},{"given":"Zizhong","family":"Wei","sequence":"additional","affiliation":[{"name":"Inspur Academy of Science and 
Technology,Shandong,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/BF02435636"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.17723\/aarc.72.2.g513766100731832"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1080\/01576895.2018.1502088"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/s10502-021-09381-0"},{"first-page":"1218","article-title":"A robustly optimized bert pre-training approach with post-training","volume-title":"Proceedings of the 20th chinese national conference on computational linguistics","author":"Zhuang","key":"ref5"},{"key":"ref6","article-title":"Attention is all you need","volume":"30","author":"Vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"issue":"1","key":"ref7","first-page":"5232","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"Fedus","year":"2022","journal-title":"The Journal of Machine Learning Research"},{"journal-title":"arXiv preprint","article-title":"Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension","year":"2019","author":"Lewis","key":"ref8"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1108\/RMJ-09-2019-0055"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2015.7404785"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3479010"},{"issue":"2","key":"ref12","first-page":"3","article-title":"Using computer vision to increase the research potential of photo archives","volume-title":"Journal of Digital Humanities","volume":"3","author":"Resig","year":"2014"},{"journal-title":"Improving language understanding by generative pre-training","year":"2018","author":"Radford","key":"ref13"},{"key":"ref14","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"issue":"8","key":"ref15","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref16","article-title":"Unified language model pre-training for natural language understanding and generation","volume":"32","author":"Dong","year":"2019","journal-title":"Advances in neural information processing systems"},{"first-page":"26 176","article-title":"Examining scaling and transfer of language model architectures for machine translation","volume-title":"International Conference on Machine Learning","author":"Zhang","key":"ref17"},{"issue":"1","key":"ref18","first-page":"5485","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"The Journal of Machine Learning Research"},{"article-title":"GLM-130b: An open bilingual pre-trained model","volume-title":"The Eleventh International Conference on Learning Representations (ICLR)","author":"Zeng","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.91"},{"article-title":"Finetuned language models are zero-shot learners","volume-title":"The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25\u201329, 2022","author":"Wei","key":"ref21"},{"key":"ref22","article-title":"Scaling instruction-finetuned language 
models","volume":"abs\/2210.11416","author":"Chung","year":"2022","journal-title":"CoRR"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-023-00626-4"},{"first-page":"2790","article-title":"Parameter-efficient transfer learning for NLP","volume-title":"Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9\u201315 June 2019, Long Beach, California, USA","author":"Houlsby","key":"ref24"},{"journal-title":"arXiv preprint","article-title":"P-tuning v2: Prompt tuning can be comparable to fine-tuning universally across scales and tasks","year":"2021","author":"Liu","key":"ref25"},{"article-title":"Lora: Low-rank adaptation of large language models","volume-title":"The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25\u201329, 2022","author":"Hu","key":"ref26"},{"key":"ref27","first-page":"27 730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.26"},{"journal-title":"arXiv preprint","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","year":"2018","author":"Devlin","key":"ref29"},{"article-title":"Convolutional neural network for sentence classification","volume-title":"Masters thesis, University of Waterloo","year":"2015","author":"Chen","key":"ref30"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/K16-1028"},{"journal-title":"Roberta: A robustly optimized bert pretraining approach","year":"2019","author":"Liu","key":"ref33"}],"event":{"name":"2023 IEEE International Conference on Knowledge Graph (ICKG)","start":{"date-parts":[[2023,12,1]]},"location":"Shanghai, China","end":{"date-parts":[[2023,12,2]]}},"container-title":["2023 IEEE International Conference on Knowledge Graph (ICKG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10412665\/10412686\/10412786.pdf?arnumber=10412786","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,2]],"date-time":"2024-02-02T00:35:33Z","timestamp":1706834133000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10412786\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,1]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/ickg59574.2023.00012","relation":{},"subject":[],"published":{"date-parts":[[2023,12,1]]}}}