Update omegaconf version to 2.3.0 (#631)
Update `omegaconf` version to
[2.3.0](https://pypi.org/project/omegaconf/2.3.0/), as omegaconf 2.0.6
declares the non-standard dependency specifier `PyYAML>=5.1.*`, which
pip 24.1 will start rejecting.
Discussion can be found at pypa/pip#12063.
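
For context, PEP 440 only allows the `.*` wildcard suffix with the `==` and `!=` operators, so strict resolvers reject `>=5.1.*`. A minimal sketch of the check, assuming a recent release of `packaging` (the library pip vendors for requirement parsing):

    from packaging.requirements import InvalidRequirement, Requirement

    # omegaconf 2.0.6 declares this dependency; the ".*" suffix is only
    # legal after == or != under PEP 440, so strict parsers reject it.
    try:
        Requirement('PyYAML>=5.1.*')
    except InvalidRequirement as err:
        print(f'rejected: {err}')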
RyoYang committed Jul 23, 2024
1 parent 7435f10 commit 9a3ce39
Showing 4 changed files with 53 additions and 43 deletions.
4 changes: 2 additions & 2 deletions setup.py
@@ -164,7 +164,7 @@ def run(self):
         'natsort>=7.1.1',
         'networkx>=2.5',
         'numpy>=1.19.2',
-        'omegaconf==2.0.6',
+        'omegaconf==2.3.0',
         'openpyxl>=3.0.7',
         'packaging>=21.0',
         'pandas>=1.1.5',
@@ -198,7 +198,7 @@ def run(self):
         'pydocstyle>=5.1.1',
         'pytest-cov>=2.11.1',
         'pytest-subtests>=0.4.0',
-        'pytest>=6.2.2',
+        'pytest>=6.2.2, <=7.4.4',
         'types-markdown',
         'types-pkg_resources',
         'types-pyyaml',
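
The code changes below all follow one pattern: optional keys are membership-tested before being read. Older omegaconf returned None when reading a key that was never defined, so truthiness checks like `if not mode.env:` worked; newer releases raise instead. A minimal sketch of the motivation, with struct mode enabled explicitly since the default strictness has shifted across omegaconf releases:

    from omegaconf import OmegaConf
    from omegaconf.errors import ConfigAttributeError

    cfg = OmegaConf.create({'superbench': {'benchmarks': {}}})
    OmegaConf.set_struct(cfg, True)    # make missing-key reads raise

    try:
        cfg.superbench.enable    # 'enable' was never defined
    except ConfigAttributeError:
        # the pattern used throughout this commit: test membership first
        enable = 'enable' in cfg.superbench and cfg.superbench.enable
        print(enable)    # False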
47 changes: 26 additions & 21 deletions superbench/executor/executor.py
@@ -71,13 +71,13 @@ def __get_enabled_benchmarks(self):
         Return:
             list: List of benchmarks which will be executed.
         """
-        if self._sb_config.superbench.enable:
+        if 'enable' in self._sb_config.superbench and self._sb_config.superbench.enable:
             if isinstance(self._sb_config.superbench.enable, str):
                 return [self._sb_config.superbench.enable]
             elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
                 return list(self._sb_config.superbench.enable)
         # TODO: may exist order issue
-        return [k for k, v in self._sb_benchmarks.items() if v.enable]
+        return [k for k, v in self._sb_benchmarks.items() if 'enable' in v and v.enable]
 
     def __get_platform(self):
         """Detect running platform by environment."""
@@ -228,32 +228,37 @@ def exec(self):
                 logger.warning('Monitor can not support CPU platform.')
 
             benchmark_real_name = benchmark_name.split(':')[0]
-            for framework in benchmark_config.frameworks or [Framework.NONE.value]:
-                if benchmark_real_name == 'model-benchmarks' or (
-                    ':' not in benchmark_name and benchmark_name.endswith('_models')
-                ):
-                    for model in benchmark_config.models:
-                        full_name = f'{benchmark_name}/{framework}-{model}'
-                        logger.info('Executor is going to execute %s.', full_name)
-                        context = BenchmarkRegistry.create_benchmark_context(
-                            model,
-                            platform=self.__get_platform(),
-                            framework=Framework(framework.lower()),
-                            parameters=self.__get_arguments(benchmark_config.parameters)
-                        )
-                        result = self.__exec_benchmark(full_name, context)
-                        benchmark_results.append(result)
-                else:
-                    full_name = benchmark_name
-                    logger.info('Executor is going to execute %s.', full_name)
-                    context = BenchmarkRegistry.create_benchmark_context(
-                        benchmark_real_name,
-                        platform=self.__get_platform(),
-                        framework=Framework(framework.lower()),
-                        parameters=self.__get_arguments(benchmark_config.parameters)
-                    )
-                    result = self.__exec_benchmark(full_name, context)
-                    benchmark_results.append(result)
+            if 'frameworks' in benchmark_config:
+                for framework in benchmark_config.frameworks or [Framework.NONE.value]:
+                    if benchmark_real_name == 'model-benchmarks' or (
+                        ':' not in benchmark_name and benchmark_name.endswith('_models')
+                    ):
+                        for model in benchmark_config.models:
+                            full_name = f'{benchmark_name}/{framework}-{model}'
+                            logger.info('Executor is going to execute %s.', full_name)
+                            context = BenchmarkRegistry.create_benchmark_context(
+                                model,
+                                platform=self.__get_platform(),
+                                framework=Framework(framework.lower()),
+                                parameters=self.__get_arguments(
+                                    {} if 'parameters' not in benchmark_config else benchmark_config.parameters
+                                )
+                            )
+                            result = self.__exec_benchmark(full_name, context)
+                            benchmark_results.append(result)
+                    else:
+                        full_name = benchmark_name
+                        logger.info('Executor is going to execute %s.', full_name)
+                        context = BenchmarkRegistry.create_benchmark_context(
+                            benchmark_real_name,
+                            platform=self.__get_platform(),
+                            framework=Framework(framework.lower()),
+                            parameters=self.__get_arguments(
+                                {} if 'parameters' not in benchmark_config else benchmark_config.parameters
+                            )
+                        )
+                        result = self.__exec_benchmark(full_name, context)
+                        benchmark_results.append(result)
 
             if monitor:
                 monitor.stop()
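
The optional-`parameters` guard is spelled inline twice above. With plain (non-struct) configs, omegaconf's `DictConfig.get` reads the same way; a sketch of the alternative, where `benchmark_config` is a hypothetical stand-in and this is not what the commit does:

    from omegaconf import OmegaConf

    # hypothetical benchmark entry without a 'parameters' key
    benchmark_config = OmegaConf.create({'frameworks': ['pytorch']})

    # equivalent to: {} if 'parameters' not in benchmark_config else benchmark_config.parameters
    parameters = benchmark_config.get('parameters', {})
    print(parameters)    # {}

The explicit membership test stays the conservative choice, since struct-mode configs have treated `get` on unknown keys differently across omegaconf releases.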
43 changes: 24 additions & 19 deletions superbench/runner/runner.py
@@ -67,24 +67,24 @@ def __validate_sb_config(self):    # noqa: C901
             InvalidConfigError: If input config is invalid.
         """
         # TODO: add validation and defaulting
-        if not self._sb_config.superbench.env:
+        if 'env' not in self._sb_config.superbench:
             self._sb_config.superbench.env = {}
         for name in self._sb_benchmarks:
-            if not self._sb_benchmarks[name].modes:
+            if 'modes' not in self._sb_benchmarks[name]:
                 self._sb_benchmarks[name].modes = []
             for idx, mode in enumerate(self._sb_benchmarks[name].modes):
-                if not mode.env:
+                if 'env' not in mode:
                     self._sb_benchmarks[name].modes[idx].env = {}
                 if mode.name == 'local':
-                    if not mode.proc_num:
+                    if 'proc_num' not in mode:
                         self._sb_benchmarks[name].modes[idx].proc_num = 1
-                    if not mode.prefix:
+                    if 'prefix' not in mode:
                         self._sb_benchmarks[name].modes[idx].prefix = ''
                 elif mode.name == 'torch.distributed':
-                    if not mode.proc_num:
+                    if 'proc_num' not in mode:
                         self._sb_benchmarks[name].modes[idx].proc_num = 8
                 elif mode.name == 'mpi':
-                    if not mode.mca:
+                    if 'mca' not in mode:
                         self._sb_benchmarks[name].modes[idx].mca = {
                             'pml': 'ob1',
                             'btl': '^openib',
@@ -93,8 +93,8 @@
                         }
                 for key in ['PATH', 'LD_LIBRARY_PATH', 'SB_MICRO_PATH', 'SB_WORKSPACE']:
                     self._sb_benchmarks[name].modes[idx].env.setdefault(key, None)
-                if mode.pattern:
-                    if mode.pattern.type == 'topo-aware' and not mode.pattern.ibstat:
+                if 'pattern' in mode:
+                    if mode.pattern.type == 'topo-aware' and 'ibstat' not in mode.pattern:
                         self._sb_benchmarks[name].modes[idx].pattern.ibstat = gen_ibstat(
                             self._ansible_config, str(self._output_path / 'ibstate_file.txt')
                         )
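
The validation above only fills keys that are genuinely absent. A condensed sketch of the same defaulting for a hypothetical 'local' mode entry:

    from omegaconf import OmegaConf

    mode = OmegaConf.create({'name': 'local'})    # hypothetical minimal mode entry

    # fill optional keys only when missing, as __validate_sb_config does
    if 'proc_num' not in mode:
        mode.proc_num = 1
    if 'prefix' not in mode:
        mode.prefix = ''
    if 'env' not in mode:
        mode.env = {}
    for key in ['PATH', 'LD_LIBRARY_PATH', 'SB_MICRO_PATH', 'SB_WORKSPACE']:
        mode.env.setdefault(key, None)

    print(OmegaConf.to_yaml(mode))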
@@ -105,12 +105,12 @@ def __get_enabled_benchmarks(self):
         Return:
             list: List of benchmarks which will be executed.
         """
-        if self._sb_config.superbench.enable:
+        if 'enable' in self._sb_config.superbench and self._sb_config.superbench.enable:
             if isinstance(self._sb_config.superbench.enable, str):
                 return [self._sb_config.superbench.enable]
             elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
                 return list(self._sb_config.superbench.enable)
-        return [k for k, v in self._sb_benchmarks.items() if v.enable]
+        return [k for k, v in self._sb_benchmarks.items() if 'enable' in v and v.enable]
 
     def __get_mode_command(self, benchmark_name, mode, timeout=None):
         """Get runner command for given mode.
@@ -141,7 +141,7 @@ def __get_mode_command(self, benchmark_name, mode, timeout=None):
         elif mode.name == 'torch.distributed':
             # TODO: replace with torch.distributed.run in v1.9
             # TODO: only supports node_num=1 and node_num=all currently
-            torch_dist_params = '' if mode.node_num == 1 else \
+            torch_dist_params = '' if 'node_num' in mode and mode.node_num == 1 else \
                 '--nnodes=$NNODES --node_rank=$NODE_RANK --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '
             mode_command = (
                 f'torchrun'
@@ -158,8 +158,8 @@
                 '-bind-to numa '    # bind processes to numa
                 '{mca_list} {env_list} {command}'
             ).format(
-                host_list=f'-host localhost:{mode.proc_num}' if mode.node_num == 1 else
-                f'-hostfile hostfile -map-by ppr:{mode.proc_num}:node' if mode.host_list is None else '-host ' +
+                host_list=f'-host localhost:{mode.proc_num}' if 'node_num' in mode and mode.node_num == 1 else
+                f'-hostfile hostfile -map-by ppr:{mode.proc_num}:node' if 'host_list' not in mode else '-host ' +
                 ','.join(f'{host}:{mode.proc_num}' for host in mode.host_list),
                 mca_list=' '.join(f'-mca {k} {v}' for k, v in mode.mca.items()),
                 env_list=' '.join(
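
The `host_list` conditional above packs three mpirun launch forms into one expression. Restated as a plain function for readability (a sketch with a hypothetical helper name and mode values, not code from this repo):

    def mpi_host_args(mode):
        # single node: launch all ranks on localhost
        if 'node_num' in mode and mode['node_num'] == 1:
            return f"-host localhost:{mode['proc_num']}"
        # no explicit host list: rely on a generated hostfile
        if 'host_list' not in mode:
            return f"-hostfile hostfile -map-by ppr:{mode['proc_num']}:node"
        # explicit host list: enumerate host:slots pairs
        return '-host ' + ','.join(f"{h}:{mode['proc_num']}" for h in mode['host_list'])

    print(mpi_host_args({'proc_num': 8, 'node_num': 1}))    # -host localhost:8
    print(mpi_host_args({'proc_num': 8}))                   # -hostfile hostfile -map-by ppr:8:node
    print(mpi_host_args({'proc_num': 8, 'host_list': ['n0', 'n1']}))    # -host n0:8,n1:8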
@@ -206,6 +206,9 @@ def run_sys_info(self):
         logger.info('Runner is going to get node system info.')
 
         fcmd = "docker exec sb-workspace bash -c '{command}'"
+
+        if 'skip' not in self._docker_config:
+            self._docker_config.skip = False
         if self._docker_config.skip:
             fcmd = "bash -c 'cd $SB_WORKSPACE && {command}'"
         ansible_runner_config = self._ansible_client.get_shell_config(
@@ -225,7 +228,7 @@ def check_env(self):    # pragma: no cover
             self._ansible_client.get_playbook_config(
                 'check_env.yaml',
                 extravars={
-                    'no_docker': bool(self._docker_config.skip),
+                    'no_docker': False if 'skip' not in self._docker_config else self._docker_config.skip,
                     'output_dir': str(self._output_path),
                     'env': '\n'.join(f'{k}={v}' for k, v in self._sb_config.superbench.env.items()),
                 }
@@ -441,15 +444,17 @@ def _run_proc(self, benchmark_name, mode, vars):
             int: Process return code.
         """
         mode.update(vars)
-        if mode.name == 'mpi' and mode.pattern:
+        if mode.name == 'mpi' and 'pattern' in mode:
             mode.env.update({'SB_MODE_SERIAL_INDEX': mode.serial_index, 'SB_MODE_PARALLEL_INDEX': mode.parallel_index})
         logger.info('Runner is going to run %s in %s mode, proc rank %d.', benchmark_name, mode.name, mode.proc_rank)
 
-        timeout = self._sb_benchmarks[benchmark_name].timeout
+        timeout = self._sb_benchmarks[benchmark_name].get('timeout', 60)
         if isinstance(timeout, int):
             timeout = max(timeout, 60)
 
         env_list = '--env-file /tmp/sb.env'
+        if 'skip' not in self._docker_config:
+            self._docker_config.skip = False
         if self._docker_config.skip:
             env_list = 'set -o allexport && source /tmp/sb.env && set +o allexport'
         for k, v in mode.env.items():
@@ -463,7 +468,7 @@
         ansible_runner_config = self._ansible_client.get_shell_config(
             fcmd.format(env_list=env_list, command=self.__get_mode_command(benchmark_name, mode, timeout))
         )
-        if mode.name == 'mpi' and mode.node_num != 1:
+        if mode.name == 'mpi' and 'node_num' in mode and mode.node_num != 1:
             ansible_runner_config = self._ansible_client.update_mpi_config(ansible_runner_config)
 
         if isinstance(timeout, int):
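
The new timeout handling defaults a missing per-benchmark `timeout` to 60 seconds and floors explicit integer values at 60. A quick check with a hypothetical helper and values:

    def effective_timeout(benchmark_config):
        # mirrors: timeout = self._sb_benchmarks[benchmark_name].get('timeout', 60)
        timeout = benchmark_config.get('timeout', 60)
        if isinstance(timeout, int):
            timeout = max(timeout, 60)
        return timeout

    print(effective_timeout({}))                  # 60
    print(effective_timeout({'timeout': 30}))     # 60, floored
    print(effective_timeout({'timeout': 600}))    # 600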
@@ -495,7 +500,7 @@ def run(self):
                     )
                     ansible_rc = sum(rc_list)
                 elif mode.name == 'torch.distributed' or mode.name == 'mpi':
-                    if not mode.pattern:
+                    if 'pattern' not in mode:
                         ansible_rc = self._run_proc(benchmark_name, mode, {'proc_rank': 0})
                     else:
                         if not os.path.exists(self._output_path / 'hostfile'):
2 changes: 1 addition & 1 deletion tests/executor/test_executor.py
@@ -44,7 +44,7 @@ def test_set_logger(self):
     def test_get_enabled_benchmarks_enable_none(self):
         """Test enabled benchmarks when superbench.enable is none."""
         benchmarks = self.default_config.superbench.benchmarks
-        expected_enabled_benchmarks = [x for x in benchmarks if benchmarks[x]['enable']]
+        expected_enabled_benchmarks = [x for x in benchmarks if 'enable' in benchmarks[x] and benchmarks[x]['enable']]
         self.assertListEqual(self.executor._sb_enabled, expected_enabled_benchmarks)
 
     def test_get_enabled_benchmarks_enable_str(self):
