knox / LLaMA-Factory-18.sh
0 likes
0 forks
1 files
Last active
1 | API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml |
knox / LLaMA-Factory-17.sh
0 likes
0 forks
1 files
Last active
1 | docker build -f ./docker/docker-rocm/Dockerfile \ |
2 | --build-arg INSTALL_BNB=false \ |
3 | --build-arg INSTALL_VLLM=false \ |
4 | --build-arg INSTALL_DEEPSPEED=false \ |
5 | --build-arg INSTALL_FLASHATTN=false \ |
6 | --build-arg PIP_INDEX=https://pypi.org/simple \ |
7 | -t llamafactory:latest . |
8 | |
9 | docker run -dit \ |
10 | -v ./hf_cache:/root/.cache/huggingface \ |
knox / LLaMA-Factory-16.sh
0 likes
0 forks
1 files
Last active
1 | # Choose the docker image according to your environment |
2 | docker build -f ./docker/docker-npu/Dockerfile \ |
3 | --build-arg INSTALL_DEEPSPEED=false \ |
4 | --build-arg PIP_INDEX=https://pypi.org/simple \ |
5 | -t llamafactory:latest . |
6 | |
7 | # Change `device` according to your available resources |
8 | docker run -dit \ |
9 | -v ./hf_cache:/root/.cache/huggingface \ |
10 | -v ./ms_cache:/root/.cache/modelscope \ |
knox / LLaMA-Factory-15.sh
0 likes
0 forks
1 files
Last active
1 | docker build -f ./docker/docker-cuda/Dockerfile \ |
2 | --build-arg INSTALL_BNB=false \ |
3 | --build-arg INSTALL_VLLM=false \ |
4 | --build-arg INSTALL_DEEPSPEED=false \ |
5 | --build-arg INSTALL_FLASHATTN=false \ |
6 | --build-arg PIP_INDEX=https://pypi.org/simple \ |
7 | -t llamafactory:latest . |
8 | |
9 | docker run -dit --gpus=all \ |
10 | -v ./hf_cache:/root/.cache/huggingface \ |
knox / LLaMA-Factory-14.sh
0 likes
0 forks
1 files
Last active
1 | cd docker/docker-rocm/ |
2 | docker compose up -d |
3 | docker compose exec llamafactory bash |
knox / LLaMA-Factory-13.sh
0 likes
0 forks
1 files
Last active
1 | cd docker/docker-npu/ |
2 | docker compose up -d |
3 | docker compose exec llamafactory bash |
knox / LLaMA-Factory-12.sh
0 likes
0 forks
1 files
Last active
1 | cd docker/docker-cuda/ |
2 | docker compose up -d |
3 | docker compose exec llamafactory bash |
knox / LLaMA-Factory-11.sh
0 likes
0 forks
1 files
Last active
1 | llamafactory-cli webui |
knox / LLaMA-Factory-10.sh
0 likes
0 forks
1 files
Last active
1 | llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml |
2 | llamafactory-cli chat examples/inference/llama3_lora_sft.yaml |
3 | llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml |
knox / LLaMA-Factory-9.md
0 likes
0 forks
1 files
Last active
Requirement | Minimum | Recommended |
---|---|---|
CANN | 8.0.RC1 | 8.0.RC1 |
torch | 2.1.0 | 2.1.0 |
torch-npu | 2.1.0 | 2.1.0.post3 |
deepspeed | 0.13.2 | 0.13.2 |