{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,10]],"date-time":"2025-04-10T17:07:19Z","timestamp":1744304839776,"version":"3.40.3"},"reference-count":67,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1109\/tpami.2024.3386927","type":"journal-article","created":{"date-parts":[[2024,4,10]],"date-time":"2024-04-10T18:14:27Z","timestamp":1712772867000},"page":"6761-6774","source":"Crossref","is-referenced-by-count":18,"title":["DeepNet: Scaling Transformers to 1,000 Layers"],"prefix":"10.1109","volume":"46","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1811-3903","authenticated-orcid":false,"given":"Hongyu","family":"Wang","sequence":"first","affiliation":[{"name":"School of Computer and Control Engineering, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1091-1206","authenticated-orcid":false,"given":"Shuming","family":"Ma","sequence":"additional","affiliation":[{"name":"Microsoft Research, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3083-7170","authenticated-orcid":false,"given":"Li","family":"Dong","sequence":"additional","affiliation":[{"name":"Microsoft Research, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4324-6337","authenticated-orcid":false,"given":"Shaohan","family":"Huang","sequence":"additional","affiliation":[{"name":"Microsoft Research, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0833-7903","authenticated-orcid":false,"given":"Dongdong","family":"Zhang","sequence":"additional","affiliation":[{"name":"Microsoft Research, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7810-5852","authenticated-orcid":false,"given":"Furu","family":"Wei","sequence":"additional","affiliation":[{"name":"Microsoft Research, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.747"},{"article-title":"Language models are unsupervised multitask learners","year":"2019","author":"Radford","key":"ref4"},{"key":"ref5","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Brown"},{"key":"ref6","first-page":"103","article-title":"GPipe: Efficient training of giant neural networks using pipeline parallelism","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Huang"},{"issue":"140","key":"ref7","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"article-title":"GShard: Scaling giant models with conditional computation and automatic sharding","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Lepikhin","key":"ref8"},{"article-title":"Scaling language models: Methods, analysis & insights from training gopher","year":"2021","author":"Rae","key":"ref9"},{"article-title":"Few-shot learning with multilingual language models","year":"2021","author":"Lin","key":"ref10"},{"article-title":"Using deepspeed and megatron to train megatron-turing NLG 530b, A large-scale generative language model","year":"2022","author":"Smith","key":"ref11"},{"article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","year":"2021","author":"Fedus","key":"ref12"},{"article-title":"GLaM: Efficient scaling of language models with mixture-of-experts","year":"2021","author":"Du","key":"ref13"},{"article-title":"Transformers without tears: Improving the normalization of self-attention","year":"2019","author":"Nguyen","key":"ref14"},{"article-title":"NormFormer: Improved transformer pretraining with extra normalization","year":"2021","author":"Shleifer","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1083"},{"article-title":"Fixup initialization: Residual learning without normalization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang","key":"ref17"},{"key":"ref18","first-page":"4475","article-title":"Improving transformer optimization through better initialization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Huang"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1176"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.463"},{"article-title":"ReZero is all you need: Fast convergence at large depth","year":"2020","author":"Bachlechner","key":"ref21"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref23","first-page":"6391","article-title":"Visualizing the loss landscape of neural nets","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref24","first-page":"107:1","article-title":"Beyond english-centric multilingual machine translation","volume":"22","author":"Fan","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref25","first-page":"36077","article-title":"Magneto: A foundation transformer","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wang"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.163"},{"key":"ref27","first-page":"10524","article-title":"On layer normalization in the transformer architecture","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Xiong"},{"key":"ref28","first-page":"249","article-title":"Understanding the difficulty of training deep feedforward neural networks","volume-title":"Proc. Int. Conf. Artif. Intell. Statist.","author":"Glorot"},{"article-title":"Adam: A method for stochastic optimization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kingma","key":"ref29"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1424"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Dosovitskiy","key":"ref31"},{"article-title":"LLaMA: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref32"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.148"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-6319"},{"article-title":"Training compute-optimal large language models","year":"2022","author":"Hoffmann","key":"ref35"},{"article-title":"Scaling laws for neural language models","year":"2020","author":"Kaplan","key":"ref36"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.507"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.480"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/W14-3302"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W17-4717"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-6401"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W19-5301"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-2084"},{"key":"ref44","article-title":"The FLORES-101 evaluation benchmark for low-resource and multilingual machine translation","author":"Goyal","year":"2021","journal-title":"arXiv:2106.03193"},{"article-title":"RoBERTa: A robustly optimized BERT pretraining approach","year":"2019","author":"Liu","key":"ref45"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-5446"},{"year":"2023","key":"ref47","article-title":"GPT-4 technical report"},{"article-title":"Llama 2: Open foundation and fine-tuned chat models","year":"2023","author":"Touvron","key":"ref48"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6399"},{"article-title":"The winograd schema challenge","volume-title":"Proc. 13th Int. Conf. Princ. Knowl. Representation Reasoning","author":"Levesque","key":"ref50"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W17-0906"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/p19-1472"},{"article-title":"BEiT: BERT pre-training of image transformers","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bao","key":"ref53"},{"article-title":"BEiT v2: Masked image modeling with vector-quantized visual tokenizers","year":"2022","author":"Peng","key":"ref54"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-018-1140-0"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01501"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00823"},{"key":"ref59","first-page":"10506","article-title":"Learning robust global representations by penalizing local predictive power","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Wang"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01228-1_26"},{"article-title":"DeltaLM: Encoder-decoder pre-training for language generation and translation by augmenting pretrained multilingual encoders","year":"2021","author":"Ma","key":"ref61"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053896"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.01838"},{"article-title":"On the variance of the adaptive learning rate and beyond","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Liu","key":"ref64"},{"key":"ref65","first-page":"19822","article-title":"Cogview: Mastering text-to-image generation via transformers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.: Annu. Conf. Neural Inf. Process. Syst.","author":"Ding"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-021-03819-2"},{"key":"ref67","first-page":"1032","article-title":"Universal statistics of fisher information in deep neural networks: Mean field approach","volume-title":"Proc. 22nd Int. Conf. Artif. Intell. Statist.","author":"Karakida"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/10666888\/10496231.pdf?arnumber=10496231","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,4]],"date-time":"2025-04-04T19:39:33Z","timestamp":1743795573000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10496231\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10]]},"references-count":67,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2024.3386927","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"type":"print","value":"0162-8828"},{"type":"electronic","value":"2160-9292"},{"type":"electronic","value":"1939-3539"}],"subject":[],"published":{"date-parts":[[2024,10]]}}}