diff --git a/.github/workflows/docker-plugin.yml b/.github/workflows/docker-plugin.yml
index 813740053..5d8783b08 100644
--- a/.github/workflows/docker-plugin.yml
+++ b/.github/workflows/docker-plugin.yml
@@ -26,10 +26,10 @@ jobs:
       MONAI_ZOO_AUTH_TOKEN: ${{ github.token }}
     steps:
       - uses: actions/checkout@v4
-      - name: Set up Python 3.8
+      - name: Set up Python 3.9
        uses: actions/setup-python@v5
         with:
-          python-version: 3.8
+          python-version: 3.9
       - name: clean up
         run: |
           sudo rm -rf /usr/share/dotnet
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 44a1ee86e..ca80efd57 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -28,10 +28,10 @@ jobs:
       MONAI_ZOO_AUTH_TOKEN: ${{ github.token }}
     steps:
       - uses: actions/checkout@v4
-      - name: Set up Python 3.8
+      - name: Set up Python 3.9
         uses: actions/setup-python@v5
         with:
-          python-version: 3.8
+          python-version: 3.9
       - name: clean up
         run: |
           sudo rm -rf /usr/share/dotnet
diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index 9fc15fdfd..c224c0566 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -32,7 +32,7 @@ jobs:
     strategy:
       matrix:
         os: [windows-latest, ubuntu-latest]
-        python-version: ["3.8", "3.9"]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python
@@ -50,7 +50,7 @@ jobs:
       MONAI_ZOO_AUTH_TOKEN: ${{ github.token }}
     strategy:
       matrix:
-        python-version: ["3.8", "3.9"]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python
@@ -83,7 +83,7 @@ jobs:
       MONAI_ZOO_AUTH_TOKEN: ${{ github.token }}
     strategy:
       matrix:
-        python-version: ["3.8", "3.9"]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
     steps:
       - uses: actions/checkout@v4
         with:
diff --git a/monailabel/app.py b/monailabel/app.py
index 2a39851f9..a2714a616 100644
--- a/monailabel/app.py
+++ b/monailabel/app.py
@@ -11,6 +11,7 @@
 
 import os
 import pathlib
+from contextlib import asynccontextmanager
 
 from fastapi import FastAPI
 from fastapi.middleware import Middleware
@@ -41,11 +42,24 @@
 origins = [str(origin) for origin in settings.MONAI_LABEL_CORS_ORIGINS] if settings.MONAI_LABEL_CORS_ORIGINS else ["*"]
 print(f"Allow Origins: {origins}")
 
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    print("App Init...")
+    instance = app_instance()
+    instance.server_mode(True)
+    instance.on_init_complete()
+
+    yield
+    print("App Shutdown...")
+
+
 app = FastAPI(
     title=settings.MONAI_LABEL_PROJECT_NAME,
     openapi_url="/openapi.json",
     docs_url=None,
     redoc_url="/docs",
+    lifespan=lifespan,
     middleware=[
         Middleware(
             CORSMiddleware,
@@ -101,10 +115,3 @@ async def favicon():
 def reload():
     clear_cache()
     return {}
-
-
-@app.on_event("startup")
-async def startup_event():
-    instance = app_instance()
-    instance.server_mode(True)
-    instance.on_init_complete()
diff --git a/monailabel/config.py b/monailabel/config.py
index e041cacce..bf195c7bc 100644
--- a/monailabel/config.py
+++ b/monailabel/config.py
@@ -12,7 +12,8 @@
 import os
 from typing import Any, Dict, List, Optional
 
-from pydantic import AnyHttpUrl, BaseSettings
+from pydantic import AnyHttpUrl
+from pydantic_settings import BaseSettings, SettingsConfigDict
 
 
 class Settings(BaseSettings):
@@ -21,7 +22,7 @@ class Settings(BaseSettings):
     MONAI_LABEL_APP_DIR: str = ""
     MONAI_LABEL_STUDIES: str = ""
 
-    MONAI_LABEL_APP_CONF: Dict[str, str] = {}
+    MONAI_LABEL_APP_CONF: Dict[str, Any] = {}
 
     MONAI_LABEL_AUTH_ENABLE: bool = False
     MONAI_LABEL_AUTH_REALM_URI: str = "http://localhost:8080/realms/monailabel"
@@ -82,7 +83,7 @@ class Settings(BaseSettings):
     MONAI_LABEL_SERVER_PORT: int = 8000
     MONAI_LABEL_CORS_ORIGINS: List[AnyHttpUrl] = []
 
-    MONAI_LABEL_AUTO_UPDATE_SCORING = True
+    MONAI_LABEL_AUTO_UPDATE_SCORING: bool = True
 
     MONAI_LABEL_SESSIONS: bool = True
     MONAI_LABEL_SESSION_PATH: str = ""
@@ -93,13 +94,15 @@ class Settings(BaseSettings):
     MONAI_LABEL_TRACKING_ENABLED: bool = True
     MONAI_LABEL_TRACKING_URI: str = ""
 
-    MONAI_ZOO_SOURCE: str = os.environ.get("BUNDLE_DOWNLOAD_SRC", "github")
+    MONAI_ZOO_SOURCE: str = os.environ.get("BUNDLE_DOWNLOAD_SRC", "monaihosting")
     MONAI_ZOO_REPO: str = "Project-MONAI/model-zoo/hosting_storage_v1"
     MONAI_ZOO_AUTH_TOKEN: str = ""
 
-    class Config:
-        env_file = ".env"
-        case_sensitive = True
+    model_config = SettingsConfigDict(
+        env_file=".env",
+        case_sensitive=True,
+        extra="ignore",
+    )
 
 
 settings = Settings()
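NOTE: The two files above carry the core of this patch: monailabel/app.py moves startup work from the deprecated @app.on_event("startup") hook to FastAPI's lifespan context manager, and monailabel/config.py migrates the settings class to Pydantic v2, where BaseSettings lives in the separate pydantic-settings package. A minimal standalone sketch of the lifespan pattern (the prints stand in for the real app_instance() bootstrap):

    from contextlib import asynccontextmanager
    from fastapi import FastAPI

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        print("App Init...")      # startup work runs before yield
        yield                     # the application serves requests here
        print("App Shutdown...")  # teardown runs after yield

    app = FastAPI(lifespan=lifespan)

On the settings side, the inner `class Config` becomes a `model_config = SettingsConfigDict(...)` attribute; `extra="ignore"` keeps unrelated entries in a shared .env from raising (pydantic-settings 2.x forbids unknown fields by default). Pydantic v2 also requires an explicit type annotation on every field, which is why MONAI_LABEL_AUTO_UPDATE_SCORING gains `: bool`.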
"http://localhost:8080/realms/monailabel" @@ -82,7 +83,7 @@ class Settings(BaseSettings): MONAI_LABEL_SERVER_PORT: int = 8000 MONAI_LABEL_CORS_ORIGINS: List[AnyHttpUrl] = [] - MONAI_LABEL_AUTO_UPDATE_SCORING = True + MONAI_LABEL_AUTO_UPDATE_SCORING: bool = True MONAI_LABEL_SESSIONS: bool = True MONAI_LABEL_SESSION_PATH: str = "" @@ -93,13 +94,15 @@ class Settings(BaseSettings): MONAI_LABEL_TRACKING_ENABLED: bool = True MONAI_LABEL_TRACKING_URI: str = "" - MONAI_ZOO_SOURCE: str = os.environ.get("BUNDLE_DOWNLOAD_SRC", "github") + MONAI_ZOO_SOURCE: str = os.environ.get("BUNDLE_DOWNLOAD_SRC", "monaihosting") MONAI_ZOO_REPO: str = "Project-MONAI/model-zoo/hosting_storage_v1" MONAI_ZOO_AUTH_TOKEN: str = "" - class Config: - env_file = ".env" - case_sensitive = True + model_config = SettingsConfigDict( + env_file=".env", + case_sensitive=True, + extra="ignore", + ) settings = Settings() diff --git a/monailabel/datastore/local.py b/monailabel/datastore/local.py index ac02660ea..d8b0538aa 100644 --- a/monailabel/datastore/local.py +++ b/monailabel/datastore/local.py @@ -398,7 +398,7 @@ def get_dataset_archive(self, limit_cases: Optional[int]) -> str: path = d[key] archive.write(path, arcname=os.path.join(key, os.path.basename(path))) # add metadata - datastore_metadata: str = self._datastore.json(exclude={"base_path"}) + datastore_metadata: str = self._datastore.model_dump_json(exclude={"base_path"}) archive.writestr("metadata.json", datastore_metadata) assert archive.filename is not None, "ZIP archive could not be created" @@ -670,7 +670,8 @@ def _init_from_datastore_file(self, throw_exception=False): ts = os.stat(self._datastore_config_path).st_mtime if self._config_ts != ts: logger.debug(f"Reload Datastore; old ts: {self._config_ts}; new ts: {ts}") - self._datastore = LocalDatastoreModel.parse_file(self._datastore_config_path) + with open(self._datastore_config_path) as fp: + self._datastore = LocalDatastoreModel.model_validate_json(fp.read()) self._datastore.base_path = self._datastore_path self._config_ts = ts logger.debug("Release the Lock...") @@ -684,7 +685,7 @@ def _write_to_file(): logger.debug("+++ Datastore is updated...") self._ignore_event_config = True with open(self._datastore_config_path, "w") as f: - f.write(json.dumps(self._datastore.dict(exclude={"base_path"}), indent=2, default=str)) + f.write(json.dumps(self._datastore.model_dump(exclude={"base_path"}), indent=2, default=str)) self._config_ts = os.stat(self._datastore_config_path).st_mtime if lock: @@ -716,4 +717,4 @@ def status(self) -> Dict[str, Any]: } def json(self): - return self._datastore.dict(exclude={"base_path"}) + return self._datastore.model_dump(exclude={"base_path"}) diff --git a/monailabel/endpoints/login.py b/monailabel/endpoints/login.py index 7a3eb19ad..20d9b840b 100644 --- a/monailabel/endpoints/login.py +++ b/monailabel/endpoints/login.py @@ -61,4 +61,4 @@ async def access_token(form_data: OAuth2PasswordRequestForm = Depends()): @router.get("/token/valid", summary="Check If current token is Valid") async def valid_token(user: User = Depends(get_current_user)): - return user.dict() + return user.model_dump() diff --git a/monailabel/endpoints/wsi_infer.py b/monailabel/endpoints/wsi_infer.py index fcf211685..bd40c1a9a 100644 --- a/monailabel/endpoints/wsi_infer.py +++ b/monailabel/endpoints/wsi_infer.py @@ -96,7 +96,7 @@ def run_wsi_inference( config = instance.info().get("config", {}).get("infer", {}) request.update(config) - request.update(wsi.dict(exclude={"params"})) + 
diff --git a/monailabel/endpoints/wsi_infer.py b/monailabel/endpoints/wsi_infer.py
index fcf211685..bd40c1a9a 100644
--- a/monailabel/endpoints/wsi_infer.py
+++ b/monailabel/endpoints/wsi_infer.py
@@ -96,7 +96,7 @@ def run_wsi_inference(
         config = instance.info().get("config", {}).get("infer", {})
         request.update(config)
 
-    request.update(wsi.dict(exclude={"params"}))
+    request.update(wsi.model_dump(exclude={"params"}))
     if wsi.params:
         request.update(wsi.params)
 
@@ -139,7 +139,7 @@ async def api_run_wsi_v2_inference(
     image: str = "",
     session_id: str = "",
     file: UploadFile = File(None),
-    wsi: str = Form(WSIInput().json()),
+    wsi: str = Form(WSIInput().model_dump_json()),
     output: Optional[ResultType] = None,
     user: User = Depends(RBAC(settings.MONAI_LABEL_AUTH_ROLE_USER)),
 ):
diff --git a/monailabel/interfaces/app.py b/monailabel/interfaces/app.py
index c904ee649..215f8c598 100644
--- a/monailabel/interfaces/app.py
+++ b/monailabel/interfaces/app.py
@@ -416,7 +416,7 @@ def train(self, request):
         if not task:
             raise MONAILabelException(
                 MONAILabelError.INVALID_INPUT,
-                f"Train Task is not Initialized. There is no model '{model}' available",
+                f"Train Task is not Initialized. There is no model '{model}' available; {request}",
             )
 
         request = copy.deepcopy(request)
diff --git a/monailabel/interfaces/utils/app.py b/monailabel/interfaces/utils/app.py
index 1235f23b4..f6ad59da4 100644
--- a/monailabel/interfaces/utils/app.py
+++ b/monailabel/interfaces/utils/app.py
@@ -88,7 +88,7 @@ def run_main():
     logger.debug("------------------------------------------------------")
     logger.debug("SETTINGS")
     logger.debug("------------------------------------------------------")
-    logger.debug(json.dumps(settings.dict(), indent=2))
+    logger.debug(json.dumps(settings.model_dump(), indent=2))
     logger.debug("")
 
     app_dir = args.app if args.app else settings.MONAI_LABEL_APP_DIR
diff --git a/monailabel/main.py b/monailabel/main.py
index 8f692c60f..7d2205a7d 100644
--- a/monailabel/main.py
+++ b/monailabel/main.py
@@ -290,7 +290,7 @@ def start_server_validate_args(self, args):
             "MONAI_LABEL_DATASTORE_PASSWORD",
             "MONAI_LABEL_DATASTORE_API_KEY",
         ]
-        for k, v in settings.dict().items():
+        for k, v in settings.model_dump().items():
             v = f"'{json.dumps(v)}'" if isinstance(v, list) or isinstance(v, dict) else v
             logger.debug(f"ENV SETTINGS:: {k} = {'*' * len(v) if k in sensitive else v}")
         logger.info("")
@@ -316,7 +316,7 @@ def start_server_init_settings(self, args):
         if args.dryrun:
             export_key = "set " if any(platform.win32_ver()) else "export "
             with open("env.bat" if any(platform.win32_ver()) else ".env", "w") as f:
-                for k, v in settings.dict().items():
+                for k, v in settings.model_dump().items():
                     v = f"'{json.dumps(v)}'" if isinstance(v, list) or isinstance(v, dict) else v
                     e = f"{export_key}{k}={v}"
                     f.write(e)
@@ -338,7 +338,7 @@ def start_server_init_settings(self, args):
             logger.debug("**********************************************************")
             logger.debug("                 ENV VARIABLES/SETTINGS                   ")
             logger.debug("**********************************************************")
-            for k, v in settings.dict().items():
+            for k, v in settings.model_dump().items():
                 if isinstance(v, list) or isinstance(v, dict):
                     v = json.dumps(v)
                 elif v is not None:
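NOTE: settings.model_dump() returns a plain dict, so the logging and masking loops in main.py keep working after the rename. A condensed, self-contained sketch of the masking idiom (the dict literal stands in for settings.model_dump(); len(str(v)) is a slight hardening over the original len(v), which assumes string values):

    import json

    sensitive = ["MONAI_LABEL_DATASTORE_PASSWORD", "MONAI_LABEL_DATASTORE_API_KEY"]
    dumped = {"MONAI_LABEL_SERVER_PORT": 8000, "MONAI_LABEL_DATASTORE_PASSWORD": "s3cret"}
    for k, v in dumped.items():
        v = f"'{json.dumps(v)}'" if isinstance(v, (list, dict)) else v
        print(f"ENV SETTINGS:: {k} = {'*' * len(str(v)) if k in sensitive else v}")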
diff --git a/requirements.txt b/requirements.txt
index 92e804c44..5153011a7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,40 +10,46 @@
 # limitations under the License.
 
 monai[nibabel, skimage, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb, psutil, openslide, fire, mlflow]>=1.3.0
-uvicorn==0.21.1
-pydantic>=1.10.7,<2.0.0
-python-dotenv==1.0.0
-fastapi==0.110.0
+uvicorn==0.29.0
+pydantic==2.7.0
+pydantic-settings==2.2.1
+python-dotenv==1.0.1
+fastapi==0.110.2
 python-multipart==0.0.9
-httpx==0.23.3
-schedule==1.1.0
+httpx==0.27.0
+schedule==1.2.1
 dicomweb-client[gcp]==0.59.1
 timeloop==1.0.2
 expiringdict==1.2.2
 expiring_dict==1.1.0
-cachetools==5.3.0
-watchdog==3.0.0
-pydicom==2.3.1
+cachetools==5.3.3
+watchdog==4.0.0
+pydicom==2.4.4
 pydicom-seg==0.4.1
 pynetdicom==2.0.2
 pynrrd==1.0.0
-opencv-python-headless==4.7.0.72
+opencv-python-headless==4.9.0.80
 numpymaxflow==0.0.6
-girder-client==3.1.17
-ninja==1.11.1
-einops>=0.6.0
-pyyaml==6.0
-filelock==3.11.0
+girder-client==3.2.3
+ninja==1.11.1.1
+einops==0.7.0
+pyyaml==6.0.1
+filelock==3.13.4
 passlib==1.7.4
 python-jose[cryptography]==3.3.0
-bcrypt==4.0.1
-shapely==2.0.1
+bcrypt==4.1.2
+shapely==2.0.4
 requests==2.31.0
-urllib3==1.26.11
-requests-toolbelt
+requests-toolbelt==1.0.0
+urllib3==2.2.1
 scikit-learn
 scipy
-google-auth>=1.6.3
+google-auth==2.29.0
 
-#sudo apt-get install openslide-tools -y
+# scipy and scikit-learn latest packages are missing on python 3.8
+# sudo apt-get install openslide-tools -y
+
+# How to auto update versions?
+# pip install pur
+# pur -r requirements.txt
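NOTE: The dependency bumps are coupled: pydantic 2.7.0 needs the new pydantic-settings companion package, and moving urllib3 to 2.2.1 is only safe because requests stays pinned at 2.31.0, a release that supports urllib3 2.x. A quick interpreter check of the resolved pins (the version literals are simply the expected values from this file):

    import pydantic
    import urllib3

    assert pydantic.VERSION == "2.7.0", pydantic.VERSION
    assert urllib3.__version__ == "2.2.1", urllib3.__version__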
diff --git a/runtests.sh b/runtests.sh
index 425fe6a70..9d572549c 100755
--- a/runtests.sh
+++ b/runtests.sh
@@ -115,16 +115,13 @@ function install_deps() {
 }
 
 function clean_py() {
-  TO_CLEAN="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-  echo "Removing temporary files in ${TO_CLEAN}"
-
   rm -rf sample-apps/*/logs
   rm -rf sample-apps/*/.venv
   rm -rf sample-apps/*/bin
   rm -rf monailabel/endpoints/static/ohif
   rm -rf pytest.log
   rm -rf htmlcov
-  rm -rf coverage.xml
+  rm -rf coverage.xml .coverage*
   rm -rf junit
   rm -rf docs/build/
   rm -rf docs/source/apidocs/
@@ -135,26 +132,14 @@ function clean_py() {
   find sample-apps/*/model -type f -not -name *.zip -not -name .gitignore -exec rm -rf "{}" +
   find sample-apps/* -type d -empty -exec rm -rf "{}" +
   find sample-apps/* -type d -empty -exec rm -rf "{}" +
+  find sample-apps/* -type d -empty -exec rm -rf "{}" +
+
+  rm -rf tests/data/*
+  rm -rf build
+  rm -rf dist
 
-  find ${TO_CLEAN} -type f -name "*.py[co]" -delete
-  find ${TO_CLEAN} -type f -name "*.so" -delete
-  find ${TO_CLEAN} -type d -name "__pycache__" -delete
-  find ${TO_CLEAN} -type d -name ".pytest_cache" -exec rm -r "{}" +
-  find ${TO_CLEAN} -maxdepth 1 -type f -name ".coverage.*" -delete
-
-  find ${TO_CLEAN} -type d -name "node_modules" -exec rm -rf "{}" +
-  find ${TO_CLEAN} -type d -name ".gradle" -exec rm -rf "{}" +
-
-  find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".eggs" -exec rm -r "{}" +
-  find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "monailabel.egg-info" -exec rm -r "{}" +
-  find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "build" -exec rm -r "{}" +
-  find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "dist" -exec rm -r "{}" +
-  find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".mypy_cache" -exec rm -r "{}" +
-  find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".pytype" -exec rm -r "{}" +
-  find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".coverage" -exec rm -r "{}" +
-  find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "__pycache__" -exec rm -r "{}" +
+  find sample-apps -type d -name "__pycache__" -exec rm -rf "{}" +
+  find monailabel -type d -name "__pycache__" -exec rm -rf "{}" +
 }
 
 function torch_validate() {
@@ -473,7 +458,7 @@
 if [ $doNetTests = true ]; then
   run_integration_tests "radiology" "tests/data/dataset/local/spleen" "deepedit,segmentation_spleen,segmentation,deepgrow_2d,deepgrow_3d" "."
   run_integration_tests "pathology" "tests/data/pathology" "segmentation_nuclei,nuclick,classification_nuclei" "."
-  #run_integration_tests "monaibundle" "tests/data/dataset/local/spleen" "spleen_ct_segmentation,spleen_deepedit_annotation,swin_unetr_btcv_segmentation" "bundles"
+  run_integration_tests "monaibundle" "tests/data/dataset/local/spleen" "spleen_ct_segmentation" "bundles"
   run_integration_tests "endoscopy" "tests/data/endoscopy" "tooltracking,inbody,deepedit" "."
-  #run_integration_tests "monaibundle" "tests/data/detection" "lung_nodule_ct_detection" "detection"
+  run_integration_tests "monaibundle" "tests/data/detection" "lung_nodule_ct_detection" "detection"
 fi
diff --git a/setup.cfg b/setup.cfg
index fdcf5012f..de8ea8647 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -36,39 +36,41 @@ setup_requires =
     ninja
 install_requires =
     monai[nibabel, skimage, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb, psutil, openslide, fire, mlflow]>=1.3.0
-    uvicorn==0.21.1
-    pydantic>=1.10.7,<2.0.0
-    python-dotenv==1.0.0
-    fastapi==0.110.0
+    uvicorn==0.29.0
+    pydantic==2.7.0
+    pydantic-settings==2.2.1
+    python-dotenv==1.0.1
+    fastapi==0.110.2
     python-multipart==0.0.9
-    requests-toolbelt==0.10.1
-    httpx==0.23.3
-    schedule==1.1.0
+    httpx==0.27.0
+    schedule==1.2.1
     dicomweb-client[gcp]==0.59.1
     timeloop==1.0.2
     expiringdict==1.2.2
     expiring_dict==1.1.0
-    cachetools==5.3.0
-    watchdog==3.0.0
-    pydicom==2.3.1
+    cachetools==5.3.3
+    watchdog==4.0.0
+    pydicom==2.4.4
     pydicom-seg==0.4.1
     pynetdicom==2.0.2
     pynrrd==1.0.0
-    opencv-python-headless==4.7.0.72
+    opencv-python-headless==4.9.0.80
     numpymaxflow==0.0.6
-    girder-client==3.1.17
-    ninja==1.11.1
-    einops>=0.6.0
-    pyyaml==6.0
-    filelock==3.11.0
+    girder-client==3.2.3
+    ninja==1.11.1.1
+    einops==0.7.0
+    pyyaml==6.0.1
+    filelock==3.13.4
     passlib==1.7.4
     python-jose[cryptography]==3.3.0
-    bcrypt==4.0.1
-    shapely==2.0.1
+    bcrypt==4.1.2
+    shapely==2.0.4
     requests==2.31.0
-    urllib3==1.26.11
+    requests-toolbelt==1.0.0
+    urllib3==2.2.1
     scikit-learn
     scipy
+    google-auth==2.29.0
 
 [flake8]
 select = B,C,E,F,N,P,T4,W,B9
diff --git a/tests/integration/monaibundle/bundles/test_infer.py b/tests/integration/monaibundle/bundles/test_infer.py
index 8b97bbd2e..a71fca1d3 100644
--- a/tests/integration/monaibundle/bundles/test_infer.py
+++ b/tests/integration/monaibundle/bundles/test_infer.py
@@ -27,6 +27,7 @@ def test_segmentation_spleen(self):
         response = requests.post(f"{SERVER_URI}/infer/{model}?image={image}")
         assert response.status_code == 200
 
+    @unittest.skip("Skipped BTCV Bundle")
     def test_segmentation(self):
         if not torch.cuda.is_available():
             return
@@ -37,7 +38,8 @@ def test_segmentation(self):
         response = requests.post(f"{SERVER_URI}/infer/{model}?image={image}")
         assert response.status_code == 200
 
-    def disabled_test_segmentation_pancreas(self):
+    @unittest.skip("Skipped DINTS Bundle")
+    def test_segmentation_pancreas(self):
         if not torch.cuda.is_available():
             return
@@ -47,6 +49,7 @@ def test_segmentation_pancreas(self):
         response = requests.post(f"{SERVER_URI}/infer/{model}?image={image}")
         assert response.status_code == 200
 
+    @unittest.skip("Skipped DeepEdit Bundle")
     def test_deepedit(self):
         if not torch.cuda.is_available():
             return
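NOTE: In the integration tests above, renaming disabled_test_segmentation_pancreas back to a test_* method and tagging it with @unittest.skip is the idiomatic way to park a test: the runner now collects it and reports it as skipped with a reason, instead of silently never discovering it. Minimal illustration:

    import unittest

    class BundleTests(unittest.TestCase):
        @unittest.skip("Skipped DINTS Bundle")
        def test_segmentation_pancreas(self):
            self.fail("never executes; reported as skipped")

    if __name__ == "__main__":
        unittest.main(verbosity=2)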
diff --git a/tests/integration/monaibundle/bundles/test_trainer.py b/tests/integration/monaibundle/bundles/test_trainer.py
index c062fc019..eb95b0517 100644
--- a/tests/integration/monaibundle/bundles/test_trainer.py
+++ b/tests/integration/monaibundle/bundles/test_trainer.py
@@ -32,6 +32,7 @@ def test_segmentation_spleen_trainer(self):
         response = requests.post(f"{SERVER_URI}/train/spleen_ct_segmentation?run_sync=True", json=params)
         assert response.status_code == 200
 
+    @unittest.skip("Skipped BTCV Bundle")
     def test_multiorgan_segmentation_trainer(self):
         if not torch.cuda.is_available():
             return
@@ -47,6 +48,7 @@ def test_multiorgan_segmentation_trainer(self):
         response = requests.post(f"{SERVER_URI}/train/swin_unetr_btcv_segmentation?run_sync=True", json=params)
         assert response.status_code == 200
 
+    @unittest.skip("Skipped DeepEdit Bundle")
     def test_deepedit(self):
         if not torch.cuda.is_available():
             return
diff --git a/tests/unit/endpoints/test_infer_v2.py b/tests/unit/endpoints/test_infer_v2.py
index 7fd3b97be..5afbec7c8 100644
--- a/tests/unit/endpoints/test_infer_v2.py
+++ b/tests/unit/endpoints/test_infer_v2.py
@@ -57,7 +57,6 @@ def test_spleen_bundle_infer(self):
 
 
 class TestDetectionBundleInferTask(BasicDetectionBundleTestSuite):
-    @unittest.skip("Bundle needs to be fixed for EnsureChannelFirstd init Arguments")
     def test_lung_nodule_detector_infer(self):
         if not torch.cuda.is_available():
             return
diff --git a/tests/unit/endpoints/test_train.py b/tests/unit/endpoints/test_train.py
index 33097d9be..584d8a6a4 100644
--- a/tests/unit/endpoints/test_train.py
+++ b/tests/unit/endpoints/test_train.py
@@ -84,7 +84,7 @@ def test_005_stop(self):
 
 
 class TestBundleTrainTask(BasicBundleTestSuite):
-    def test_spleen_bundle_train(self):
+    def test_006_spleen_bundle_train(self):
         if not torch.cuda.is_available():
             return
@@ -101,8 +101,7 @@ def test_spleen_bundle_train(self):
 
 
 class TestDetectionBundleTrainTask(BasicDetectionBundleTestSuite):
-    @unittest.skip("Bundle needs to be fixed for EnsureChannelFirstd init Arguments")
-    def test_lung_nodule_detection_train(self):
+    def test_007_lung_nodule_detection_train(self):
         if not torch.cuda.is_available():
             return
@@ -118,8 +117,7 @@ def test_007_lung_nodule_detection_train(self):
         response = self.client.post("/train/?run_sync=True", json=params)
         assert response.status_code == 200
 
-    @unittest.skip("Bundle needs to be fixed for EnsureChannelFirstd init Arguments")
-    def test_bundle_stop(self):
+    def test_008_bundle_stop(self):
         if not torch.cuda.is_available():
             return
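NOTE: The test_006_/test_007_/test_008_ prefixes in test_train.py lean on unittest's default ordering: TestLoader.sortTestMethodsUsing sorts test methods by name within each test class, so numbering the methods pins the train/stop sequence regardless of definition order. A tiny demonstration:

    import unittest

    class OrderedSuite(unittest.TestCase):
        def test_002_second(self):
            pass

        def test_001_first(self):
            pass  # runs before test_002_second despite being defined later

    if __name__ == "__main__":
        unittest.main(verbosity=2)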