LLaMA-Factory-16.sh
· 941 B · Bash
Originalformat
# Abort on the first failing command: if the image build fails, the
# later `docker run` must not start a container from a stale image.
set -euo pipefail

# Build the Ascend NPU image. Choose the Dockerfile matching your environment.
# INSTALL_DEEPSPEED=false skips the DeepSpeed install; PIP_INDEX selects the
# pip mirror used inside the build (swap for a regional mirror if needed).
docker build -f ./docker/docker-npu/Dockerfile \
  --build-arg INSTALL_DEEPSPEED=false \
  --build-arg PIP_INDEX=https://pypi.org/simple \
  -t llamafactory:latest .
# Start the container detached with an interactive TTY.
# Change the `--device` flags upon your resources: /dev/davinci0 maps the
# first Ascend NPU; add davinci1, ... to expose more cards.
# Bind-mount sources use "$PWD" absolute paths because Docker CLI versions
# before 23.0 reject relative host paths in -v.
docker run -dit \
  -v "$PWD/hf_cache:/root/.cache/huggingface" \
  -v "$PWD/ms_cache:/root/.cache/modelscope" \
  -v "$PWD/om_cache:/root/.cache/openmind" \
  -v "$PWD/data:/app/data" \
  -v "$PWD/output:/app/output" \
  -v /usr/local/dcmi:/usr/local/dcmi \
  -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
  -v /etc/ascend_install.info:/etc/ascend_install.info \
  -p 7860:7860 \
  -p 8000:8000 \
  --device /dev/davinci0 \
  --device /dev/davinci_manager \
  --device /dev/devmm_svm \
  --device /dev/hisi_hdc \
  --shm-size 16G \
  --name llamafactory \
  llamafactory:latest
# Open an interactive shell inside the running `llamafactory` container.
docker exec -it llamafactory bash
1 | # Choose docker image upon your environment |
2 | docker build -f ./docker/docker-npu/Dockerfile \ |
3 | --build-arg INSTALL_DEEPSPEED=false \ |
4 | --build-arg PIP_INDEX=https://pypi.org/simple \ |
5 | -t llamafactory:latest . |
6 | |
7 | # Change `device` upon your resources |
8 | docker run -dit \ |
9 | -v ./hf_cache:/root/.cache/huggingface \ |
10 | -v ./ms_cache:/root/.cache/modelscope \ |
11 | -v ./om_cache:/root/.cache/openmind \ |
12 | -v ./data:/app/data \ |
13 | -v ./output:/app/output \ |
14 | -v /usr/local/dcmi:/usr/local/dcmi \ |
15 | -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \ |
16 | -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \ |
17 | -v /etc/ascend_install.info:/etc/ascend_install.info \ |
18 | -p 7860:7860 \ |
19 | -p 8000:8000 \ |
20 | --device /dev/davinci0 \ |
21 | --device /dev/davinci_manager \ |
22 | --device /dev/devmm_svm \ |
23 | --device /dev/hisi_hdc \ |
24 | --shm-size 16G \ |
25 | --name llamafactory \ |
26 | llamafactory:latest |
27 | |
28 | docker exec -it llamafactory bash |