LLaMA-Factory-15.sh
· 648 B · Bash
Original file
# Build the CUDA image for LLaMA-Factory with all optional extras disabled
# (bitsandbytes, vLLM, DeepSpeed, FlashAttention) and the default PyPI index.
build_args=(
  --build-arg INSTALL_BNB=false
  --build-arg INSTALL_VLLM=false
  --build-arg INSTALL_DEEPSPEED=false
  --build-arg INSTALL_FLASHATTN=false
  --build-arg PIP_INDEX=https://pypi.org/simple
)
docker build -f ./docker/docker-cuda/Dockerfile \
  "${build_args[@]}" \
  -t llamafactory:latest .
# Start the container detached with all GPUs, mounting model caches and the
# data/output directories, and exposing the WebUI (7860) and API (8000) ports.
#
# Fix: `docker run -v` requires ABSOLUTE host paths on Docker CLI < 23
# (relative `./dir` forms fail with "invalid mount path: must be absolute"),
# so anchor each mount at "$(pwd)". Quoted so paths with spaces survive.
docker run -dit --gpus=all \
  -v "$(pwd)/hf_cache:/root/.cache/huggingface" \
  -v "$(pwd)/ms_cache:/root/.cache/modelscope" \
  -v "$(pwd)/om_cache:/root/.cache/openmind" \
  -v "$(pwd)/data:/app/data" \
  -v "$(pwd)/output:/app/output" \
  -p 7860:7860 \
  -p 8000:8000 \
  --shm-size 16G \
  --name llamafactory \
  llamafactory:latest
# Attach an interactive shell to the running container.
docker exec -it llamafactory bash
docker build -f ./docker/docker-cuda/Dockerfile \
    --build-arg INSTALL_BNB=false \
    --build-arg INSTALL_VLLM=false \
    --build-arg INSTALL_DEEPSPEED=false \
    --build-arg INSTALL_FLASHATTN=false \
    --build-arg PIP_INDEX=https://pypi.org/simple \
    -t llamafactory:latest .

docker run -dit --gpus=all \
    -v ./hf_cache:/root/.cache/huggingface \
    -v ./ms_cache:/root/.cache/modelscope \
    -v ./om_cache:/root/.cache/openmind \
    -v ./data:/app/data \
    -v ./output:/app/output \
    -p 7860:7860 \
    -p 8000:8000 \
    --shm-size 16G \
    --name llamafactory \
    llamafactory:latest

docker exec -it llamafactory bash