knox / Llama-Deployment-6.sh
0 likes
0 forks
1 files
Last active
1 | python3 -m venv venv |
2 | source venv/bin/activate |
knox / Llama-Deployment-5.sh
0 likes
0 forks
1 files
Last active
1 | cd ~ |
2 | git clone https://github.com/meta-llama/llama.git llama |
3 | cd llama |
knox / Llama-Deployment-4.sh
0 likes
0 forks
1 files
Last active
1 | sudo apt install -y python3 python3-pip python3-venv git curl |
knox / Llama-Deployment-3.sh
0 likes
0 forks
1 files
Last active
1 | su - deployer |
knox / Llama-Deployment-2.sh
0 likes
0 forks
1 files
Last active
1 | sudo adduser deployer |
2 | sudo usermod -aG sudo deployer |
knox / Llama-Deployment-1.sh
0 likes
0 forks
1 files
Last active
1 | sudo apt update && sudo apt upgrade -y |
knox / LLaMA-Factory-22.bib
0 likes
0 forks
1 files
Last active
1 | @inproceedings{zheng2024llamafactory, |
2 | title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models}, |
3 | author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma}, |
4 | booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)}, |
5 | address={Bangkok, Thailand}, |
6 | publisher={Association for Computational Linguistics}, |
7 | year={2024}, |
8 | url={http://arxiv.org/abs/2403.13372} |
9 | } |
knox / LLaMA-Factory-21.yaml
0 likes
0 forks
1 files
Last active
1 | report_to: wandb |
2 | run_name: test_run # optional |
knox / LLaMA-Factory-20.sh
0 likes
0 forks
1 files
Last active
1 | export USE_OPENMIND_HUB=1 # `set USE_OPENMIND_HUB=1` for Windows |
knox / LLaMA-Factory-19.sh
0 likes
0 forks
1 files
Last active
1 | export USE_MODELSCOPE_HUB=1 # `set USE_MODELSCOPE_HUB=1` for Windows |