
add support for distributed data parallel training (#116) #311

Workflow file for this run

name: TorchCFM Tests

on:
  push:
    branches: [main]
  pull_request:
    branches: [main, "release/*"]

jobs:
  run_tests_ubuntu:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, ubuntu-20.04, macos-latest, windows-latest]
        python-version: ["3.8", "3.9", "3.10", "3.11"]
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install sh
          pip install -e .
      - name: List dependencies
        run: |
          python -m pip list
      - name: Run pytest
        run: |
          pytest -v --ignore=examples --ignore=runner

  # upload code coverage report
  code-coverage-torchcfm:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install pytest-cov[toml]
          pip install sh
          pip install -e .
      - name: Run tests and collect coverage
        run: pytest . --cov torchcfm --ignore=runner --ignore=examples --ignore=torchcfm/models/ --cov-fail-under=30
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          name: codecov-torchcfm
          verbose: true
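
Since this run is for a change adding distributed data parallel training, the following is a minimal sketch of what DDP training with torchcfm can look like. It assumes torchcfm's ConditionalFlowMatcher API and the standard torch.distributed / DistributedDataParallel setup; the toy MLP vector field, batch construction, and hyperparameters are illustrative, not the repository's actual runner code.

    # Minimal DDP training sketch (illustrative, not the repo's runner code).
    # Launch with: torchrun --nproc_per_node=2 ddp_train.py
    import torch
    import torch.distributed as dist
    from torch.nn.parallel import DistributedDataParallel as DDP
    from torchcfm.conditional_flow_matching import ConditionalFlowMatcher

    def main():
        dist.init_process_group(backend="gloo")  # use "nccl" on GPU nodes
        rank = dist.get_rank()
        torch.manual_seed(rank)  # different data stream per rank

        dim = 2
        # Toy vector-field network taking (x_t, t) as input
        model = torch.nn.Sequential(
            torch.nn.Linear(dim + 1, 64), torch.nn.SiLU(), torch.nn.Linear(64, dim)
        )
        model = DDP(model)  # gradients are all-reduced across ranks on backward()
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
        fm = ConditionalFlowMatcher(sigma=0.1)

        for step in range(100):
            x0 = torch.randn(256, dim)        # source samples (noise)
            x1 = torch.randn(256, dim) + 4.0  # target samples (shifted Gaussian)
            t, xt, ut = fm.sample_location_and_conditional_flow(x0, x1)
            vt = model(torch.cat([xt, t[:, None]], dim=-1))
            loss = torch.mean((vt - ut) ** 2)  # regress the conditional vector field
            optimizer.zero_grad()
            loss.backward()  # DDP synchronizes gradients here
            optimizer.step()
            if rank == 0 and step % 20 == 0:
                print(f"step {step}: loss {loss.item():.4f}")

        dist.destroy_process_group()

    if __name__ == "__main__":
        main()

Apart from wrapping the model in DDP and initializing the process group, the flow-matching loop is unchanged; each rank draws its own batch and DDP averages gradients across ranks during backward().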