I. Starting the CUDA 11.3 container

1. Copy the Dockerfile to any directory on disk, then run the command below:
docker build -t nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda .
docker images
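If the build succeeded, the new tag appears in the image list above. As an optional sanity check (assuming the custom image keeps the base image's CUDA toolkit on its PATH, which depends on your Dockerfile), print the compiler version from a throwaway container:

docker run --rm nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda nvcc --version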

2. Start the container

Run the image (normal mode: with GPU support)
docker run -i -t --gpus all nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda /bin/bash
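To confirm the container can actually see the GPU (this assumes the NVIDIA Container Toolkit is installed on the host), run nvidia-smi in the shell that opens:

# Inside the container started above
nvidia-smi    # should list the host's GPUs; an error here usually points to a missing driver/toolkit setup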
Run the image (enhanced mode: with GPU support, a mapped directory, and a shared-memory limit)
docker run -i -t -v /home/liguopu/:/guopu:rw --gpus all --shm-size 16G nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04 /bin/bash
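Inside the enhanced-mode container you can verify the directory mapping and the --shm-size setting:

# Inside the container
ls /guopu         # should show the contents of /home/liguopu on the host
df -h /dev/shm    # should report roughly 16G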
Test environment (use port mapping to expose the service externally)
docker run -i -td --name metehuman --gpus all -p 8000:8000 nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda /bin/bash
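Once a service has been started inside the container on port 8000 (the image does not start one by itself), the mapping can be checked from the host:

docker port metehuman                                # expect 8000/tcp -> 0.0.0.0:8000
curl -s http://localhost:8000/ || echo "service not up yet"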
Production use (port 8000 is the externally facing service port; add more ports as needed)
docker run -it --rm -p 8886:8888 -p 8000:8000 --gpus all nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda
docker run -itd -p 8886:8888 -p 8000:8000 --gpus all nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda
docker run -itd --name metehuman -p 8886:8888 -p 8000:8000 --gpus all nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda
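For the named variant, confirm that the detached container is running, and clean it up when finished:

docker ps --filter name=metehuman
docker stop metehuman && docker rm metehuman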
docker run --gpus '"device=vgpu,id=0"' -it --rm nvidia/cuda:11.0-base nvidia-smi
docker run -itd --name metehuman \
    -p 8885:8888 -p 8001:8000 \
    -e GRANT_SUDO=yes \
    -e JUPYTER_ENABLE_LAB=yes \
    --user root \
    --gpus all \
    nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda

3. View the Jupyter token
token=$(docker exec -it metehuman jupyter server list | grep -oP '(?<=token=)[a-zA-Z0-9]+')
echo $token
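With the token in hand, the notebook can be opened in a browser; the host port below assumes the 8886:8888 mapping used above (use 8885 for the last variant):

echo "http://<host-ip>:8886/?token=$token"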

II. Start the default test image
docker pull m11007322/cuda11.3.0-cudnn8-devel-ubuntu20.04-jupyterlab
docker run -it \
    -d \
    --gpus all \
    -p 8887:8888 \
    -p 8001:8000 \
    --name metehuman2 \
    --user root \
    -e NB_USER="ubuntu" \
    -e CHOWN_HOME=yes \
    -e GRANT_SUDO=yes \
    -w "/home/${NB_USER}" \
    -v "$PWD":"/home/$USER/work" \
    m11007322/cuda11.3.0-cudnn8-devel-ubuntu20.04-jupyterlab
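This image launches JupyterLab on start; the login URL with its token is normally printed to the container logs (the exact log format depends on the image, so adjust the grep if needed):

docker logs metehuman2 2>&1 | grep -m1 "token="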

III. Start the Jupyter image for testing
docker run -itd --name test \
    -p 8886:8888 -p 8000:8000 \
    -e GRANT_SUDO=yes \
    -e JUPYTER_ENABLE_LAB=yes \
    --user root \
    --gpus '"device=vgpu,id=0"' \
    nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda
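The device selector above is copied as-is from these notes; if it works in your environment, only the selected GPU should be visible inside the container, which can be checked with:

docker exec -it test nvidia-smi -L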
docker run -it --name test --network=host --dns 8.8.8.8 --dns 8.8.4.4 --rm ubuntu
docker run -it --gpus all --network=host --rm registry.cn-hangzhou.aliyuncs.com/lipku/nerfstream:v1.3
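With --network=host the container shares the host's network stack, so -p mappings are not needed and anything the container listens on is reachable directly on the host's ports; for example (the port here is only an assumption about the service inside):

curl -s http://localhost:8000/ || echo "nothing listening on 8000 yet"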

IV. View the container IP
docker inspect bceda087524e | grep IPAddress
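A shorter form that prints only the address uses docker inspect's Go-template flag:

docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' bceda087524e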
curl https://openai.api2d.net/v1/chat/completions \
    -H 'Content-Type: application/json' \
    -H 'Authorization: Bearer fk193752-RlcPi2mBQqPOU5u1F8SFkG2z0gtxD0HS' \
    -d '{
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello! Tell me a joke."}]
    }'
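If jq is installed, the reply text can be pulled straight out of the response; this assumes the endpoint returns the standard OpenAI chat/completions schema (replace the key with your own):

curl -s https://openai.api2d.net/v1/chat/completions \
    -H 'Content-Type: application/json' \
    -H 'Authorization: Bearer <YOUR_API2D_KEY>' \
    -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello! Tell me a joke."}]}' \
    | jq -r '.choices[0].message.content'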