Ultima attività 1728788677

Revisione 98b18830013ad4bb43c6e435fd835742324022d4

Colossal-AI-readme-10.bibtex Raw
@inproceedings{10.1145/3605573.3605613,
  author        = {Li, Shenggui and Liu, Hongxin and Bian, Zhengda and Fang, Jiarui and Huang, Haichen and Liu, Yuliang and Wang, Boxiang and You, Yang},
  title         = {{Colossal-AI}: A Unified Deep Learning System For Large-Scale Parallel Training},
  booktitle     = {Proceedings of the 52nd International Conference on Parallel Processing},
  series        = {ICPP '23},
  year          = {2023},
  pages         = {766--775},
  numpages      = {10},
  publisher     = {Association for Computing Machinery},
  address       = {New York, NY, USA},
  location      = {Salt Lake City, UT, USA},
  isbn          = {9798400708435},
  doi           = {10.1145/3605573.3605613},
  url           = {https://doi.org/10.1145/3605573.3605613},
  abstract      = {The success of Transformer models has pushed the deep learning model scale to billions of parameters, but the memory limitation of a single GPU has led to an urgent need for training on multi-GPU clusters. However, the best practice for choosing the optimal parallel strategy is still lacking, as it requires domain expertise in both deep learning and parallel computing. The Colossal-AI system addressed the above challenge by introducing a unified interface to scale your sequential code of model training to distributed environments. It supports parallel training methods such as data, pipeline, tensor, and sequence parallelism and is integrated with heterogeneous training and zero redundancy optimizer. Compared to the baseline system, Colossal-AI can achieve up to 2.76 times training speedup on large-scale models.},
  keywords      = {datasets, gaze detection, text tagging, neural networks},
  internal-note = {NOTE(review): keywords match the well-known ACM DL export placeholder set -- verify against the published paper before relying on them},
}