Skip to content

Commit

Permalink
UI/app structure and utility implementation.
Browse files Browse the repository at this point in the history
- Initializers for webui/API launch
- Schedulers file for SD scheduling utilities
- Additions to API-level utilities
- Added embeddings module for LoRA, LyCORIS, and similar fine-tuning techniques
- Added image_processing module for resamplers, resize tools,
  transforms, and any image annotation (PNG metadata)
- shared_cmd_opts module -- sorry, this is stable_args.py. It lives on.
  We still want to have some global control over the app exclusively
  from the command-line. At least we will be free from shark_args.
- Moving around some utility pieces.
- Try to make api+webui concurrency possible in index.py
- SD UI -- this is just img2imgUI but hopefully a little better.
- UI utilities for your nod logos and your gradio temps.
  • Loading branch information
monorimet committed Dec 11, 2023
1 parent d2e3ec1 commit edcb5b7
Show file tree
Hide file tree
Showing 12 changed files with 2,109 additions and 147 deletions.
76 changes: 76 additions & 0 deletions apps/shark_studio/api/initializers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
import importlib
import logging
import os
import signal
import sys
import re
import warnings
import json
from threading import Thread

from apps.shark_studio.modules.timer import startup_timer


def imports():
    """Perform the heavy top-level imports once, timing each phase via startup_timer."""
    import torch  # noqa: F401
    startup_timer.record("import torch")
    # Silence known-noisy warnings these libraries emit at import/usage time.
    for category, module in ((DeprecationWarning, "torch"), (UserWarning, "torchvision")):
        warnings.filterwarnings(action="ignore", category=category, module=module)

    import gradio  # noqa: F401
    startup_timer.record("import gradio")

    #from apps.shark_studio.modules import shared_init
    #shared_init.initialize()
    #startup_timer.record("initialize shared")

    from apps.shark_studio.modules import processing, gradio_extensons, ui  # noqa: F401
    startup_timer.record("other imports")

def initialize():
    """One-time app initialization: install the ctrl+c handler (and, eventually, opts hooks)."""
    configure_sigint_handler()
    # Bug fix: `configure_opts_onchange` is not defined or imported anywhere in this
    # module, so calling it raised NameError on every startup. Parked with the other
    # commented-out wiring below until the helper actually exists.
    # configure_opts_onchange()

    #from apps.shark_studio.modules import modelloader
    #modelloader.cleanup_models()

    #from apps.shark_studio.modules import sd_models
    #sd_models.setup_model()
    #startup_timer.record("setup SD model")

    #initialize_rest(reload_script_modules=False)

def initialize_rest(*, reload_script_modules=False):
    """Finish initialization shared by first launch and webui reloads.

    Currently a placeholder, kept so reload options can hook into the webUI later.
    """

def dumpstacks():
    """Print a formatted stack trace for every live thread (used by the SIGINT handler)."""
    import threading
    import traceback

    # Map thread idents to human-readable names for the report header.
    id2name = {th.ident: th.name for th in threading.enumerate()}
    code = []
    for thread_id, stack in sys._current_frames().items():
        code.append(f"\n# Thread: {id2name.get(thread_id, '')}({thread_id})")
        for filename, lineno, name, line in traceback.extract_stack(stack):
            # Bug fix: `filename` was extracted but never used — a literal
            # placeholder was printed instead of the real source file.
            code.append(f"""File: "{filename}", line {lineno}, in {name}""")
            if line:
                code.append(" " + line.strip())

    print("\n".join(code))


def configure_sigint_handler():
    """Install a SIGINT handler that dumps all thread stacks, then exits immediately."""

    def handle_sigint(sig, frame):
        print(f'Interrupted with signal {sig} in {frame}')

        dumpstacks()

        # Hard-exit without atexit/cleanup so ctrl+c never waits on anything.
        os._exit(0)

    signal.signal(signal.SIGINT, handle_sigint)


20 changes: 20 additions & 0 deletions apps/shark_studio/api/schedulers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
#from shark_turbine.turbine_models.schedulers import export_scheduler_model

def export_scheduler_model(model):
    """Placeholder for the turbine scheduler export; returns dummy artifacts for now."""
    placeholder = "None"
    return placeholder, placeholder

# Map of SD scheduler short names (as shown in the UI) to their exported scheduler
# artifacts. Values are placeholder tuples until export_scheduler_model is implemented.
scheduler_model_map = {
    "EulerDiscrete": export_scheduler_model("EulerDiscreteScheduler"),
    "EulerAncestralDiscrete": export_scheduler_model("EulerAncestralDiscreteScheduler"),
    "LCM": export_scheduler_model("LCMScheduler"),
    "LMSDiscrete": export_scheduler_model("LMSDiscreteScheduler"),
    "PNDM": export_scheduler_model("PNDMScheduler"),
    "DDPM": export_scheduler_model("DDPMScheduler"),
    "DDIM": export_scheduler_model("DDIMScheduler"),
    "DPMSolverMultistep": export_scheduler_model("DPMSolverMultistepScheduler"),
    "KDPM2Discrete": export_scheduler_model("KDPM2DiscreteScheduler"),
    "DEISMultistep": export_scheduler_model("DEISMultistepScheduler"),
    # Bug fix: diffusers' class is "DPMSolverSinglestepScheduler" (lowercase "s" in
    # "step"); "DPMSolverSingleStepScheduler" does not exist.
    "DPMSolverSinglestep": export_scheduler_model("DPMSolverSinglestepScheduler"),
    "KDPM2AncestralDiscrete": export_scheduler_model("KDPM2AncestralDiscreteScheduler"),
    "HeunDiscrete": export_scheduler_model("HeunDiscreteScheduler"),
}
270 changes: 269 additions & 1 deletion apps/shark_studio/api/utils.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,78 @@
import json
import os
import sys
from datetime import datetime as dt
from pathlib import Path
from random import (
    randint,
    seed as seed_random,
    getstate as random_getstate,
    setstate as random_setstate,
)

import numpy as np
from safetensors.torch import load_file

from apps.shark_studio.modules.shared_cmd_opts import cmd_opts


def get_available_devices():
    """Enumerate every usable IREE device as "<name> => <driver>[://<index>]" strings.

    NOTE(review): this block was reconstructed from a whitespace-mangled diff; the
    nesting of the helper below is inferred — confirm against the repository.
    NOTE(review): `get_all_devices`, `get_cpu_info`, and `set_iree_runtime_flags`
    are not imported in the visible portion of this module — verify they are in scope.
    """

    def get_devices_by_name(driver_name):
        """List devices for one driver, prettified with CPU brand names where needed."""
        from shark.iree_utils._common import iree_device_map

        device_list = []
        try:
            driver_name = iree_device_map(driver_name)
            device_list_dict = get_all_devices(driver_name)
            print(f"{driver_name} devices are available.")
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        except Exception:
            print(f"{driver_name} devices are not available.")
        else:
            cpu_name = get_cpu_info()["brand_raw"]
            for i, device in enumerate(device_list_dict):
                device_name = (
                    cpu_name if device["name"] == "default" else device["name"]
                )
                if "local" in driver_name:
                    device_list.append(
                        f"{device_name} => {driver_name.replace('local', 'cpu')}"
                    )
                elif len(device_list_dict) == 1:
                    # For drivers with a single device, let the default device be
                    # selected without any indexing.
                    device_list.append(f"{device_name} => {driver_name}")
                else:
                    device_list.append(f"{device_name} => {driver_name}://{i}")
        return device_list

    set_iree_runtime_flags()

    available_devices = []
    from shark.iree_utils.vulkan_utils import get_all_vulkan_devices

    # Vulkan devices are indexed in the order vulkaninfo reports them.
    vulkan_devices = [
        f"{device.strip()} => vulkan://{idx}"
        for idx, device in enumerate(get_all_vulkan_devices())
    ]
    if vulkan_devices:
        print("vulkan devices are available.")
    available_devices.extend(vulkan_devices)

    # Probe the remaining drivers in the original priority order.
    for driver_name in ("metal", "cuda", "rocm", "cpu-sync", "cpu-task"):
        available_devices.extend(get_devices_by_name(driver_name))
    return available_devices


def get_resource_path(relative_path):
Expand All @@ -12,3 +81,202 @@ def get_resource_path(relative_path):
sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))
)
return os.path.join(base_path, relative_path)



def get_generated_imgs_path() -> Path:
    """Return the output-image directory: --output_dir if set, else the bundled default."""
    # Bug fix: the default used backslashes ("..\web\generated_imgs"), which is an
    # invalid-escape hazard ("\w", "\g") and breaks on non-Windows systems; forward
    # slashes work everywhere.
    return Path(
        cmd_opts.output_dir
        if cmd_opts.output_dir
        else get_resource_path("../web/generated_imgs")
    )


def get_generated_imgs_todays_subdir() -> str:
    """Return today's date as a YYYYMMDD string for per-day output subdirectories."""
    # Bug fix: `dt` was never imported at module scope, so this raised NameError.
    from datetime import datetime as dt

    return dt.now().strftime("%Y%m%d")


def get_checkpoints_path(model=""):
    """Return the checkpoints directory for `model` (empty string -> models root)."""
    # Bug fix: the original f-string used backslashes ("..\web\models\{model}");
    # "\m" is an invalid escape sequence and backslash paths fail off Windows.
    return get_resource_path(f"../web/models/{model}")


def get_checkpoints(path):
files = []
for file in


def get_device_mapping(driver, key_combination=3):
    """Build a stable mapping from user-facing device strings to device data.

    Args:
        driver (str): execution driver (vulkan, cuda, rocm, etc).
        key_combination (int, optional): which value each key maps to —
            1: path, 2: name, 3: (name, path). Defaults to 3.

    Returns:
        dict: every device string a user may enter, mapped to the requested
        name/path combination.
    """
    from shark.iree_utils._common import iree_device_map

    driver = iree_device_map(driver)
    device_list = get_all_devices(driver)

    def _value_for(entry):
        path_form = f"{driver}://{entry['path']}"
        if key_combination == 1:
            return path_form
        if key_combination == 2:
            return entry["name"]
        if key_combination == 3:
            return entry["name"], path_form

    # The bare driver name resolves to its default device (driver://0).
    mapping = {f"{driver}": _value_for(device_list[0])}
    for idx, entry in enumerate(device_list):
        mapping[f"{driver}://{idx}"] = _value_for(entry)       # by index
        mapping[f"{driver}://{entry['path']}"] = _value_for(entry)  # by full path
    return mapping


def map_device_to_name_path(device, key_combination=3):
    """Resolve a user-selected device string to its supported name/path data.

    Args:
        device (str): user-provided device string, e.g. "vulkan://0".
        key_combination (int, optional): 1 path, 2 name, 3 (name, path).
            Defaults to 3.

    Raises:
        ValueError: when `device` is not present in the driver's device map.

    Returns:
        str / tuple: the mapped value(s), per `key_combination`.
    """
    driver = device.split("://")[0]
    mapping = get_device_mapping(driver, key_combination)
    try:
        return mapping[device]
    except KeyError:
        raise ValueError(f"Device '{device}' is not a valid device.")

# NOTE(review): everything from here through `return available_devices` appears to
# be a second, whitespace-mangled copy of the body of get_available_devices()
# defined earlier in this module (likely diff-rendering residue). As written, the
# trailing module-level `return` is a syntax error — reconcile against the
# repository and delete the duplicate before merging.
def get_devices_by_name(driver_name):
    """List devices for one driver as "<name> => <driver>[://<index>]" strings."""
    from shark.iree_utils._common import iree_device_map

    device_list = []
    try:
        driver_name = iree_device_map(driver_name)
        # NOTE(review): `get_all_devices` is not imported in this module — confirm.
        device_list_dict = get_all_devices(driver_name)
        print(f"{driver_name} devices are available.")
    except:
        print(f"{driver_name} devices are not available.")
    else:
        # NOTE(review): `get_cpu_info` is not imported in this module — confirm.
        cpu_name = get_cpu_info()["brand_raw"]
        for i, device in enumerate(device_list_dict):
            device_name = (
                cpu_name if device["name"] == "default" else device["name"]
            )
            if "local" in driver_name:
                device_list.append(
                    f"{device_name} => {driver_name.replace('local', 'cpu')}"
                )
            else:
                # for drivers with single devices
                # let the default device be selected without any indexing
                if len(device_list_dict) == 1:
                    device_list.append(f"{device_name} => {driver_name}")
                else:
                    device_list.append(
                        f"{device_name} => {driver_name}://{i}"
                    )
    return device_list

# NOTE(review): the statements below cannot run at module scope (`return` outside
# a function); they belong inside get_available_devices() above.
set_iree_runtime_flags()

available_devices = []
from shark.iree_utils.vulkan_utils import (
    get_all_vulkan_devices,
)

vulkaninfo_list = get_all_vulkan_devices()
vulkan_devices = []
id = 0
for device in vulkaninfo_list:
    vulkan_devices.append(f"{device.strip()} => vulkan://{id}")
    id += 1
if id != 0:
    print(f"vulkan devices are available.")
available_devices.extend(vulkan_devices)
metal_devices = get_devices_by_name("metal")
available_devices.extend(metal_devices)
cuda_devices = get_devices_by_name("cuda")
available_devices.extend(cuda_devices)
rocm_devices = get_devices_by_name("rocm")
available_devices.extend(rocm_devices)
cpu_device = get_devices_by_name("cpu-sync")
available_devices.extend(cpu_device)
cpu_device = get_devices_by_name("cpu-task")
available_devices.extend(cpu_device)
return available_devices


# take a seed expression in an input format and convert it to
# a list of integers, where possible
def parse_seed_input(seed_input: str | list | int):
if isinstance(seed_input, str):
try:
seed_input = json.loads(seed_input)
except (ValueError, TypeError):
seed_input = None

if isinstance(seed_input, int):
return [seed_input]

if isinstance(seed_input, list) and all(
type(seed) is int for seed in seed_input
):
return seed_input

raise TypeError(
"Seed input must be an integer or an array of integers in JSON format"
)

# Generate and return a new seed if the provided one is not in the
# supported range (including -1)
def sanitize_seed(seed: int | str):
seed = int(seed)
uint32_info = np.iinfo(np.uint32)
uint32_min, uint32_max = uint32_info.min, uint32_info.max
if seed < uint32_min or seed >= uint32_max:
seed = randint(uint32_min, uint32_max)
return seed

# take a seed expression in an input format and convert it to
# a list of integers, where possible
def parse_seed_input(seed_input: str | list | int):
if isinstance(seed_input, str):
try:
seed_input = json.loads(seed_input)
except (ValueError, TypeError):
seed_input = None

if isinstance(seed_input, int):
return [seed_input]

if isinstance(seed_input, list) and all(
type(seed) is int for seed in seed_input
):
return seed_input

raise TypeError(
"Seed input must be an integer or an array of integers in JSON format"
)
Loading

0 comments on commit edcb5b7

Please sign in to comment.