# test.yml -- CircleCI configuration (forked from open-mmlab/mmyolo)
version: 2.1

# the default pipeline parameters, which will be updated according to
# the results of the path-filtering orb
parameters:
  lint_only:
    type: boolean
    default: true
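# A hedged sketch (not part of this file) of how a parent .circleci/config.yml
# could flip `lint_only` with CircleCI's path-filtering orb; the orb version and
# the mapping rules below are illustrative assumptions, not the project's actual setup:
#
#   version: 2.1
#   setup: true
#   orbs:
#     path-filtering: circleci/path-filtering@0.1.2
#   workflows:
#     generate-config:
#       jobs:
#         - path-filtering/filter:
#             base-revision: main
#             config-path: .circleci/test.yml
#             mapping: |
#               mmyolo/.* lint_only false
#               requirements/.* lint_only false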
jobs:
  lint:
    docker:
      - image: cimg/python:3.7.4
    steps:
      - checkout
      - run:
          name: Install pre-commit hook
          command: |
            pip install pre-commit
            pre-commit install
      - run:
          name: Linting
          command: pre-commit run --all-files
      - run:
          name: Check docstring coverage
          command: |
            pip install interrogate
            interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 90 mmyolo
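      # interrogate fails this job when docstring coverage of the mmyolo package
      # falls below 90%, ignoring __init__ methods, module docstrings, nested
      # functions, magic methods and names matching "__repr__".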
  build_cpu:
    parameters:
      # The python version must match available image tags in
      # https://circleci.com/developer/images/image/cimg/python
      python:
        type: string
      torch:
        type: string
      torchvision:
        type: string
    docker:
      - image: cimg/python:<< parameters.python >>
    resource_class: large
    steps:
      - checkout
      - run:
          name: Install Libraries
          command: |
            sudo apt-get update
            sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5
      - run:
          name: Configure Python & pip
          command: |
            pip install --upgrade pip
            pip install wheel
      - run:
          name: Install PyTorch
          command: |
            python -V
            pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
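      # With the minimum_version_cpu parameters used in the workflows below, the
      # line above expands to, for example:
      #   pip install torch==1.8.0+cpu torchvision==0.9.0+cpu -f https://download.pytorch.org/whl/torch_stable.html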
      - run:
          name: Install ONNXRuntime
          command: |
            pip install onnxruntime==1.8.1
            wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz
            tar xvf onnxruntime-linux-x64-1.8.1.tgz
      - run:
          name: Install mmyolo dependencies
          command: |
            pip install -U openmim
            mim install 'mmengine >= 0.3.1'
            mim install 'mmcv >= 2.0.0rc1'
            pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x
            pip install -r requirements/albu.txt
            pip install -r requirements/tests.txt
      - run:
          name: Install mmdeploy
          command: |
            pip install setuptools
            git clone -b dev-1.x --depth 1 https://github.com/open-mmlab/mmdeploy.git mmdeploy --recurse-submodules
            wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.tar.gz
            tar -xzvf cmake-3.20.0-linux-x86_64.tar.gz
            sudo ln -sf $(pwd)/cmake-3.20.0-linux-x86_64/bin/* /usr/bin/
            cd mmdeploy && mkdir build && cd build && cmake .. -DMMDEPLOY_TARGET_BACKENDS=ort -DONNXRUNTIME_DIR=/home/circleci/project/onnxruntime-linux-x64-1.8.1 && make -j8 && make install
            export LD_LIBRARY_PATH=/home/circleci/project/onnxruntime-linux-x64-1.8.1/lib:${LD_LIBRARY_PATH}
            cd /home/circleci/project/mmdeploy && python -m pip install -v -e .
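      # ONNXRUNTIME_DIR points at the tarball unpacked in the "Install ONNXRuntime"
      # step above; LD_LIBRARY_PATH is exported again in "Run unittests" because each
      # run step starts in a fresh shell.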
      - run:
          name: Build and install
          command: |
            pip install -e .
      - run:
          name: Run unittests
          command: |
            export LD_LIBRARY_PATH=/home/circleci/project/onnxruntime-linux-x64-1.8.1/lib:${LD_LIBRARY_PATH}
            coverage run --branch --source mmyolo -m pytest tests/
            coverage xml
            coverage report -m
  build_cuda:
    parameters:
      torch:
        type: string
      cuda:
        type: enum
        enum: ["10.1", "10.2", "11.1", "11.0"]
      cudnn:
        type: integer
        default: 7
    machine:
      image: ubuntu-2004-cuda-11.4:202110-01
      # docker_layer_caching: true
    resource_class: gpu.nvidia.small
    steps:
      - checkout
      - run:
          # Cloning repos in VM since Docker doesn't have access to the private key
          name: Clone Repos
          command: |
            git clone -b main --depth 1 https://github.com/open-mmlab/mmengine.git /home/circleci/mmengine
            git clone -b dev-3.x --depth 1 https://github.com/open-mmlab/mmdetection.git /home/circleci/mmdetection
      - run:
          name: Build Docker image
          command: |
            docker build .circleci/docker -t mmyolo:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >>
            docker run --gpus all -t -d -v /home/circleci/project:/mmyolo -v /home/circleci/mmengine:/mmengine -v /home/circleci/mmdetection:/mmdetection -w /mmyolo --name mmyolo mmyolo:gpu
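      # The -v flags mount the project checkout and the mmengine/mmdetection clones
      # from the VM into the container, so the next step can `pip install -e /mmdetection`
      # against the mounted source tree.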
      - run:
          name: Install mmyolo dependencies
          command: |
            docker exec mmyolo pip install -U openmim
            docker exec mmyolo mim install 'mmengine >= 0.3.1'
            docker exec mmyolo mim install 'mmcv >= 2.0.0rc1'
            docker exec mmyolo pip install -e /mmdetection
            docker exec mmyolo pip install -r requirements/albu.txt
            docker exec mmyolo pip install -r requirements/tests.txt
      - run:
          name: Build and install
          command: |
            docker exec mmyolo pip install -e .
      - run:
          name: Run unittests
          command: |
            docker exec mmyolo pytest tests/
workflows:
  pr_stage_lint:
    when: << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - main
  pr_stage_test:
    when:
      not:
        << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - main
      - build_cpu:
          name: minimum_version_cpu
          torch: 1.8.0
          torchvision: 0.9.0
          python: 3.8.0 # The lowest python 3.8.x version available on CircleCI images
          requires:
            - lint
      - build_cpu:
          name: maximum_version_cpu
          torch: 1.12.1
          torchvision: 0.13.1
          python: 3.9.0
          requires:
            - minimum_version_cpu
      - hold:
          type: approval
          requires:
            - maximum_version_cpu
      - build_cuda:
          name: mainstream_version_gpu
          torch: 1.8.1
          # Use double quotation mark to explicitly specify its type
          # as string instead of number
          cuda: "10.2"
          requires:
            - hold
  merge_stage_test:
    when:
      not:
        << pipeline.parameters.lint_only >>
    jobs:
      - build_cuda:
          name: minimum_version_gpu
          torch: 1.7.0
          # Use double quotation mark to explicitly specify its type
          # as string instead of number
          cuda: "11.0"
          cudnn: 8
          filters:
            branches:
              only:
                - main
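
# To sanity-check changes to this file locally, one option (assuming the CircleCI
# CLI is installed) is something like:
#   circleci config validate test.yml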