
Commit

Merge remote-tracking branch 'origin/dev-1.x' into 1.x
mzr1996 committed Sep 30, 2022
2 parents 078f98d + 7237a64 commit 38bea38
Showing 193 changed files with 8,162 additions and 1,319 deletions.
45 changes: 20 additions & 25 deletions .circleci/test.yml
@@ -31,7 +31,7 @@ jobs:
name: Check docstring coverage
command: |
pip install interrogate
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 60 mmcls
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 60 mmcls
build_cpu:
parameters:
# The python version must match available image tags in
@@ -42,8 +42,6 @@ jobs:
type: string
torchvision:
type: string
mmcv:
type: string
docker:
- image: cimg/python:<< parameters.python >>
resource_class: large
@@ -57,31 +55,32 @@ jobs:
- run:
name: Configure Python & pip
command: |
python -m pip install --upgrade pip
python -m pip install wheel
pip install --upgrade pip
pip install wheel
- run:
name: Install PyTorch
command: |
python -V
python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
- run:
name: Install mmcls dependencies
command: |
python -m pip install git+ssh://git@github.com/open-mmlab/mmengine.git@main
python -m pip install << parameters.mmcv >>
python -m pip install timm
python -m pip install -r requirements.txt
pip install git+https://github.com/open-mmlab/mmengine.git@main
pip install -U openmim
mim install 'mmcv >= 2.0.0rc1'
pip install timm
pip install -r requirements.txt
python -c 'import mmcv; print(mmcv.__version__)'
- run:
name: Build and install
command: |
python -m pip install -e .
pip install -e .
- run:
name: Run unittests
command: |
python -m coverage run --branch --source mmcls -m pytest tests/
python -m coverage xml
python -m coverage report -m
coverage run --branch --source mmcls -m pytest tests/
coverage xml
coverage report -m
build_cuda:
machine:
@@ -96,15 +95,13 @@ jobs:
cudnn:
type: integer
default: 7
mmcv:
type: string
steps:
- checkout
- run:
# Cloning repos in VM since Docker doesn't have access to the private key
name: Clone Repos
command: |
git clone -b main --depth 1 ssh://git@github.com/open-mmlab/mmengine.git /home/circleci/mmengine
git clone -b main --depth 1 https://github.com/open-mmlab/mmengine.git /home/circleci/mmengine
- run:
name: Build Docker image
command: |
@@ -114,7 +111,8 @@
name: Install mmcls dependencies
command: |
docker exec mmcls pip install -e /mmengine
docker exec mmcls pip install << parameters.mmcv >>
docker exec mmcls pip install -U openmim
docker exec mmcls mim install 'mmcv >= 2.0.0rc1'
docker exec mmcls pip install -r requirements.txt
docker exec mmcls python -c 'import mmcv; print(mmcv.__version__)'
- run:
@@ -124,7 +122,7 @@
- run:
name: Run unittests
command: |
docker exec mmcls python -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
docker exec mmcls python -m pytest tests/ -k 'not timm'
# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
@@ -138,6 +136,7 @@ workflows:
branches:
ignore:
- dev-1.x
- 1.x
pr_stage_test:
when:
not:
@@ -154,15 +153,13 @@ workflows:
torch: 1.6.0
torchvision: 0.7.0
python: 3.6.9 # The lowest python 3.6.x version available on CircleCI images
mmcv: https://download.openmmlab.com/mmcv/dev-2.x/cpu/torch1.6.0/mmcv_full-2.0.0rc0-cp36-cp36m-manylinux1_x86_64.whl
requires:
- lint
- build_cpu:
name: maximum_version_cpu
torch: 1.9.0 # TODO: Update the version after mmcv provides more pre-compiled packages.
torchvision: 0.10.0
torch: 1.12.1
torchvision: 0.13.1
python: 3.9.0
mmcv: https://download.openmmlab.com/mmcv/dev-2.x/cpu/torch1.9.0/mmcv_full-2.0.0rc0-cp39-cp39-manylinux1_x86_64.whl
requires:
- minimum_version_cpu
- hold:
@@ -175,7 +172,6 @@ workflows:
# Use double quotation mark to explicitly specify its type
# as string instead of number
cuda: "10.2"
mmcv: https://download.openmmlab.com/mmcv/dev-2.x/cu102/torch1.8.0/mmcv_full-2.0.0rc0-cp37-cp37m-manylinux1_x86_64.whl
requires:
- hold
merge_stage_test:
@@ -188,7 +184,6 @@ workflows:
torch: 1.6.0
# Use double quotation mark to explicitly specify its type
# as string instead of number
mmcv: https://download.openmmlab.com/mmcv/dev-2.x/cu101/torch1.6.0/mmcv_full-2.0.0rc0-cp37-cp37m-manylinux1_x86_64.whl
cuda: "10.1"
filters:
branches:
44 changes: 31 additions & 13 deletions .dev_scripts/benchmark_regression/1-benchmark_valid.py
@@ -2,15 +2,17 @@
import re
import tempfile
from argparse import ArgumentParser
from collections import OrderedDict
from pathlib import Path
from time import time
from typing import OrderedDict

import mmcv
import numpy as np
import torch
from mmengine import Config, MMLogger, Runner
from mmengine.dataset import Compose
from mmengine import Config, DictAction, MMLogger
from mmengine.dataset import Compose, default_collate
from mmengine.fileio import FileClient
from mmengine.runner import Runner
from modelindex.load_model_index import load
from rich.console import Console
from rich.table import Table
@@ -52,6 +54,16 @@ def parse_args():
'--flops-str',
action='store_true',
help='Output FLOPs and params counts in a string form.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args

@@ -62,6 +74,8 @@ def inference(config_file, checkpoint, work_dir, args, exp_name):
cfg.load_from = checkpoint
cfg.log_level = 'WARN'
cfg.experiment_name = exp_name
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)

# build the data pipeline
test_dataset = cfg.test_dataloader.dataset
@@ -72,7 +86,8 @@ def inference(config_file, checkpoint, work_dir, args, exp_name):
test_dataset.pipeline.insert(1, dict(type='Resize', scale=32))

data = Compose(test_dataset.pipeline)({'img_path': args.img})
resolution = tuple(data['inputs'].shape[1:])
data = default_collate([data])
resolution = tuple(data['inputs'].shape[-2:])

runner: Runner = Runner.from_cfg(cfg)
model = runner.model
@@ -83,26 +98,30 @@ def inference(config_file, checkpoint, work_dir, args, exp_name):
if args.inference_time:
time_record = []
for _ in range(10):
model.val_step(data) # warmup before profiling
torch.cuda.synchronize()
start = time()
model.val_step([data])
model.val_step(data)
torch.cuda.synchronize()
time_record.append((time() - start) * 1000)
result['time_mean'] = np.mean(time_record[1:-1])
result['time_std'] = np.std(time_record[1:-1])
else:
model.val_step([data])
model.val_step(data)

result['model'] = config_file.stem

if args.flops:
from mmcv.cnn.utils import get_model_complexity_info
from fvcore.nn import FlopCountAnalysis, parameter_count
from fvcore.nn.print_model_statistics import _format_size
_format_size = _format_size if args.flops_str else lambda x: x
with torch.no_grad():
if hasattr(model, 'extract_feat'):
model.forward = model.extract_feat
flops, params = get_model_complexity_info(
model,
input_shape=(3, ) + resolution,
print_per_layer_stat=False,
as_strings=args.flops_str)
model.to('cpu')
inputs = (torch.randn((1, 3, *resolution)), )
flops = _format_size(FlopCountAnalysis(model, inputs).total())
params = _format_size(parameter_count(model)[''])
result['flops'] = flops if args.flops_str else int(flops)
result['params'] = params if args.flops_str else int(params)
else:
@@ -184,7 +203,6 @@ def main(args):
if args.checkpoint_root is not None:
root = args.checkpoint_root
if 's3://' in args.checkpoint_root:
from mmcv.fileio import FileClient
from petrel_client.common.exception import AccessDeniedError
file_client = FileClient.infer_client(uri=root)
checkpoint = file_client.join_path(
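Note: in the hunks above, mmcv.cnn.utils.get_model_complexity_info is replaced by fvcore's counters. The snippet below is a minimal sketch of that fvcore API only, not part of the commit; the toy model and the 224x224 input are illustrative assumptions.

import torch
from fvcore.nn import FlopCountAnalysis, parameter_count

# Stand-in model; the real script reuses the runner's model (or its extract_feat).
model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
    torch.nn.AdaptiveAvgPool2d(1),
    torch.nn.Flatten(),
    torch.nn.Linear(16, 10),
).eval()

inputs = (torch.randn(1, 3, 224, 224), )  # same (1, 3, *resolution) shape the script builds
with torch.no_grad():
    flops = FlopCountAnalysis(model, inputs).total()  # total op count as an integer
    params = parameter_count(model)['']  # '' keys the whole-model parameter count
print(flops, params)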
9 changes: 8 additions & 1 deletion .dev_scripts/benchmark_regression/2-benchmark_test.py
@@ -62,6 +62,12 @@ def parse_args():
action='store_true',
help='Summarize benchmark test results.')
parser.add_argument('--save', action='store_true', help='Save the summary')
parser.add_argument(
'--cfg-options',
nargs='+',
type=str,
default=[],
help='Config options for all config files.')

args = parser.parse_args()
return args
@@ -76,7 +82,7 @@ def create_test_job_batch(commands, model_info, args, port, script_name):

http_prefix = 'https://download.openmmlab.com/mmclassification/'
if 's3://' in args.checkpoint_root:
from mmcv.fileio import FileClient
from mmengine.fileio import FileClient
from petrel_client.common.exception import AccessDeniedError
file_client = FileClient.infer_client(uri=args.checkpoint_root)
checkpoint = file_client.join_path(
@@ -125,6 +131,7 @@ def create_test_job_batch(commands, model_info, args, port, script_name):
f'--work-dir={work_dir} '
f'--out={result_file} '
f'--cfg-option dist_params.port={port} '
f'{" ".join(args.cfg_options)} '
f'--launcher={launcher}\n')

with open(work_dir / 'job.sh', 'w') as f:
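Note: 1-benchmark_valid.py parses --cfg-options with mmengine's DictAction and merges the result into the loaded config, while 2-benchmark_test.py forwards the raw key=value strings to the launched test command. Below is a minimal sketch of the DictAction/merge_from_dict path only; the config dict and override values are made up for illustration.

from argparse import ArgumentParser
from mmengine import Config, DictAction

parser = ArgumentParser()
parser.add_argument('--cfg-options', nargs='+', action=DictAction)
args = parser.parse_args(
    ['--cfg-options', 'train_dataloader.batch_size=64', 'log_level=INFO'])

# Stand-in config; the real scripts load it from the model's config file.
cfg = Config(dict(train_dataloader=dict(batch_size=32), log_level='WARN'))
if args.cfg_options is not None:
    cfg.merge_from_dict(args.cfg_options)  # dotted keys update nested config fields
print(cfg.train_dataloader.batch_size, cfg.log_level)  # -> 64 INFO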
(The remaining changed files in this commit are not shown.)
