Dockerfile.ubuntu_base
ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
FROM $BASE_IMAGE
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai
ENV CONDA_DIR=/opt/conda
ENV PATH="${CONDA_DIR}/bin:${PATH}"
ENV arch=x86_64
SHELL ["/bin/bash", "-c"]
COPY docker/rcfiles /tmp/resources
COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins
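# Base OS setup in a single layer: switch apt to the Aliyun mirror shipped in
# docker/rcfiles, install build/debug tools and fonts, install git-lfs 3.2.0,
# generate the zh_CN locale, set the Asia/Shanghai timezone, and clean the apt
# cache to keep the layer small.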
RUN apt-get update && apt-get install -y --reinstall ca-certificates && \
apt-get clean && \
cp /tmp/resources/sources.list.aliyun /etc/apt/sources.list && \
apt-get update && \
apt-get install -y locales wget git strace gdb sox libopenmpi-dev curl \
libgeos-dev vim ffmpeg libsm6 tzdata language-pack-zh-hans \
ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build && \
wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \
dpkg -i ./git-lfs_3.2.0_amd64.deb && \
rm -f ./git-lfs_3.2.0_amd64.deb && \
locale-gen zh_CN && \
locale-gen zh_CN.utf8 && \
update-locale LANG=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 && \
ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
dpkg-reconfigure --frontend noninteractive tzdata && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8
# install and configure Python
ARG PYTHON_VERSION=3.7.13
# Miniconda3-py37_23.1.0-1 is the last Miniconda release that ships Python 3.7
RUN if [ "$PYTHON_VERSION" = "3.7.13" ] ; then \
wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-py37_23.1.0-1-Linux-x86_64.sh -O ./miniconda.sh && \
/bin/bash miniconda.sh -b -p /opt/conda && \
rm -f miniconda.sh && \
ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
cp /tmp/resources/conda.tuna ~/.condarc && \
source /root/.bashrc && \
conda install --yes python==${PYTHON_VERSION} && \
pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
pip config set install.trusted-host mirrors.aliyun.com;\
else \
wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-latest-Linux-${arch}.sh -O ./miniconda.sh && \
/bin/bash miniconda.sh -b -p /opt/conda && \
rm -f miniconda.sh && \
ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
cp /tmp/resources/conda.tuna ~/.condarc && \
source /root/.bashrc && \
conda install --yes python==${PYTHON_VERSION} && \
pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
pip config set install.trusted-host mirrors.aliyun.com;\
fi
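# USE_GPU selects between CUDA and CPU-only wheels for every framework
# installed below; it should be set consistently with the chosen BASE_IMAGE.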
ARG USE_GPU=True
# install pytorch
ARG TORCH_VERSION=1.12.0
ARG CUDATOOLKIT_VERSION=cu117
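# CUDATOOLKIT_VERSION is the wheel-index suffix (e.g. cu117) used for the
# PyTorch extra index here and for the DGL wheel repo further below.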
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDATOOLKIT_VERSION; \
else \
pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \
fi
# install tensorflow
ARG TENSORFLOW_VERSION=1.15.5
RUN if [ "$USE_GPU" = "True" ] ; then \
if [ "$TENSORFLOW_VERSION" = "1.15.5" ] ; then \
pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
else \
pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
fi; \
else \
# only Python 3.7 has an official tensorflow 1.15.5 wheel
if [ "$PYTHON_VERSION" = "3.7.13" ] ; then \
pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
elif [ "$TENSORFLOW_VERSION" = "1.15.5" ] ; then \
pip install --no-cache-dir numpy==1.18.5 https://modelscope.oss-cn-beijing.aliyuncs.com/releases/dependencies/tensorflow-1.15.5-cp38-cp38-linux_x86_64.whl; \
else \
pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
fi; \
fi
# mmcv-full<=1.7.0 for mmdet3d compatibility
RUN if [ "$USE_GPU" = "True" ] ; then \
CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6" MMCV_WITH_OPS=1 MAX_JOBS=8 FORCE_CUDA=1 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
else \
MMCV_WITH_OPS=1 MAX_JOBS=8 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
fi
# default shell bash
ENV SHELL=/bin/bash
# install DGL (CUDA wheels on GPU builds, CPU wheels otherwise)
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install dgl -f https://data.dgl.ai/wheels/$CUDATOOLKIT_VERSION/repo.html; \
else \
pip install --no-cache-dir dgl==0.9.0 dglgo -f https://data.dgl.ai/wheels/repo.html; \
fi
# copy install scripts
COPY docker/scripts/install_unifold.sh docker/scripts/install_colmap.sh docker/scripts/install_pytorch3d_nvdiffrast.sh docker/scripts/install_tiny_cuda_nn.sh docker/scripts/install_apex.sh /tmp/
# for Unifold
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_unifold.sh; \
else \
echo 'Unifold is not supported on CPU-only builds'; \
fi
RUN if [ "$USE_GPU" = "True" ] ; then \
export TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0;7.5;8.0;8.6+PTX" && pip install --no-cache-dir git+https://github.com/gxd1994/Pointnet2.PyTorch.git@master#subdirectory=pointnet2; \
else \
echo 'Pointnet2 is not supported on CPU-only builds'; \
fi
# 3D support (colmap, tiny-cuda-nn, pytorch3d, nvdiffrast)
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_colmap.sh; \
else \
echo 'colmap is not supported on CPU-only builds'; \
fi
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_tiny_cuda_nn.sh; \
else \
echo 'tiny-cuda-nn is not supported on CPU-only builds'; \
fi
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_pytorch3d_nvdiffrast.sh; \
else \
echo 'pytorch3d and nvdiffrast are not supported on CPU-only builds'; \
fi
# end of 3D
# install apex after deepspeed
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_apex.sh; \
else \
echo 'apex is not supported on CPU-only builds'; \
fi
ENTRYPOINT []
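# Example build invocation (a sketch: the Dockerfile path and image tag are
# illustrative; run the build from the modelscope repository root so that the
# docker/ paths in the COPY instructions above resolve):
#   docker build -f docker/Dockerfile.ubuntu_base \
#     --build-arg USE_GPU=True \
#     --build-arg PYTHON_VERSION=3.7.13 \
#     --build-arg TORCH_VERSION=1.12.0 \
#     --build-arg CUDATOOLKIT_VERSION=cu117 \
#     --build-arg TENSORFLOW_VERSION=1.15.5 \
#     -t modelscope-ubuntu-base:test .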