diff --git a/apps/stable_diffusion/src/pipelines/pipeline_shark_stable_diffusion_utils.py b/apps/stable_diffusion/src/pipelines/pipeline_shark_stable_diffusion_utils.py
index 20978c72e0..2144fd85e4 100644
--- a/apps/stable_diffusion/src/pipelines/pipeline_shark_stable_diffusion_utils.py
+++ b/apps/stable_diffusion/src/pipelines/pipeline_shark_stable_diffusion_utils.py
@@ -722,6 +722,10 @@ def from_pretrained(
is_fp32_vae,
)
+ return cls(
+ scheduler, sd_model, import_mlir, use_lora, lora_strength, ondemand
+ )
+
# #####################################################
# Implements text embeddings with weights from prompts
# https://huggingface.co/AlanB/lpw_stable_diffusion_mod
diff --git a/apps/stable_diffusion/src/utils/stable_args.py b/apps/stable_diffusion/src/utils/stable_args.py
index 88434ff580..466d854471 100644
--- a/apps/stable_diffusion/src/utils/stable_args.py
+++ b/apps/stable_diffusion/src/utils/stable_args.py
@@ -435,6 +435,13 @@ def is_valid_file(arg):
"file (~3 MB).",
)
+p.add_argument(
+ "--lora_strength",
+ type=float,
+ default=1.0,
+ help="Strength (alpha) scaling factor to use when applying LoRA weights",
+)
+
p.add_argument(
"--use_quantize",
type=str,
diff --git a/apps/stable_diffusion/src/utils/utils.py b/apps/stable_diffusion/src/utils/utils.py
index 2389caee55..c4363b8d99 100644
--- a/apps/stable_diffusion/src/utils/utils.py
+++ b/apps/stable_diffusion/src/utils/utils.py
@@ -925,7 +925,7 @@ def save_output_img(output_img, img_seed, extra_info=None):
img_lora = None
if args.use_lora:
- img_lora = Path(os.path.basename(args.use_lora)).stem
+ img_lora = f"{Path(os.path.basename(args.use_lora)).stem}:{args.lora_strength}"
if args.output_img_format == "jpg":
out_img_path = Path(generated_imgs_path, f"{out_img_name}.jpg")
diff --git a/apps/stable_diffusion/web/api/sdapi_v1.py b/apps/stable_diffusion/web/api/sdapi_v1.py
index 29b54d15a3..a657494794 100644
--- a/apps/stable_diffusion/web/api/sdapi_v1.py
+++ b/apps/stable_diffusion/web/api/sdapi_v1.py
@@ -207,7 +207,7 @@ def txt2img_api(InputData: Txt2ImgInputData):
save_metadata_to_json=frozen_args.save_metadata_to_json,
save_metadata_to_png=frozen_args.write_metadata_to_png,
lora_weights=frozen_args.use_lora,
- lora_strength=1.0,
+ lora_strength=frozen_args.lora_strength,
ondemand=frozen_args.ondemand,
repeatable_seeds=False,
use_hiresfix=InputData.enable_hr,
@@ -306,7 +306,7 @@ def img2img_api(
save_metadata_to_json=frozen_args.save_metadata_to_json,
save_metadata_to_png=frozen_args.write_metadata_to_png,
lora_weights=frozen_args.use_lora,
- lora_strength=1.0,
+ lora_strength=frozen_args.lora_strength,
ondemand=frozen_args.ondemand,
repeatable_seeds=False,
resample_type=frozen_args.resample_type,
@@ -390,7 +390,7 @@ def inpaint_api(
save_metadata_to_json=frozen_args.save_metadata_to_json,
save_metadata_to_png=frozen_args.write_metadata_to_png,
lora_weights=frozen_args.use_lora,
- lora_strength=1.0,
+ lora_strength=frozen_args.lora_strength,
ondemand=frozen_args.ondemand,
repeatable_seeds=False,
)
@@ -480,7 +480,7 @@ def outpaint_api(
save_metadata_to_json=frozen_args.save_metadata_to_json,
save_metadata_to_png=frozen_args.write_metadata_to_png,
lora_weights=frozen_args.use_lora,
- lora_strength=1.0,
+ lora_strength=frozen_args.lora_strength,
ondemand=frozen_args.ondemand,
repeatable_seeds=False,
)
@@ -558,7 +558,7 @@ def upscaler_api(
save_metadata_to_json=frozen_args.save_metadata_to_json,
save_metadata_to_png=frozen_args.write_metadata_to_png,
lora_weights=frozen_args.use_lora,
- lora_strength=1.0,
+ lora_strength=frozen_args.lora_strength,
ondemand=frozen_args.ondemand,
repeatable_seeds=False,
)
diff --git a/apps/stable_diffusion/web/ui/common_ui_events.py b/apps/stable_diffusion/web/ui/common_ui_events.py
index f467f6b0ed..d297a142be 100644
--- a/apps/stable_diffusion/web/ui/common_ui_events.py
+++ b/apps/stable_diffusion/web/ui/common_ui_events.py
@@ -55,3 +55,10 @@ def lora_changed(lora_file):
return [
"
This LoRA has empty tag frequency metadata, or we could not parse it
"
]
+
+
+def lora_strength_changed(strength):
+ if strength > 1.0:
+ return gr.Number(elem_classes="value-out-of-range")
+ else:
+ return gr.Number(elem_classes="")
diff --git a/apps/stable_diffusion/web/ui/css/sd_dark_theme.css b/apps/stable_diffusion/web/ui/css/sd_dark_theme.css
index fa8d50adf2..beda723557 100644
--- a/apps/stable_diffusion/web/ui/css/sd_dark_theme.css
+++ b/apps/stable_diffusion/web/ui/css/sd_dark_theme.css
@@ -244,6 +244,11 @@ footer {
padding-right: 8px;
}
+/* number input value is out of range */
+.value-out-of-range input[type="number"] {
+ color: red !important;
+}
+
/* reduced animation load when generating */
.generating {
animation-play-state: paused !important;
diff --git a/apps/stable_diffusion/web/ui/img2img_ui.py b/apps/stable_diffusion/web/ui/img2img_ui.py
index 7ba6da6157..f77e6a9909 100644
--- a/apps/stable_diffusion/web/ui/img2img_ui.py
+++ b/apps/stable_diffusion/web/ui/img2img_ui.py
@@ -21,7 +21,10 @@
predefined_models,
cancel_sd,
)
-from apps.stable_diffusion.web.ui.common_ui_events import lora_changed
+from apps.stable_diffusion.web.ui.common_ui_events import (
+ lora_changed,
+ lora_strength_changed,
+)
from apps.stable_diffusion.src import (
args,
Image2ImagePipeline,
@@ -821,9 +824,11 @@ def update_cn_input(
label="LoRA Strength",
info="Will be baked into the .vmfb",
step=0.01,
- minimum=0.1,
- maximum=1.0,
- value=1.0,
+ # the value is validated on every change event, so the minimum must be 0;
+ # otherwise intermediate values such as "0.5" could never be typed in
+ minimum=0.0,
+ maximum=2.0,
+ value=args.lora_strength,
scale=1,
)
with gr.Row():
@@ -1051,3 +1056,11 @@ def update_cn_input(
outputs=[lora_tags],
queue=True,
)
+
+ lora_strength.change(
+ fn=lora_strength_changed,
+ inputs=lora_strength,
+ outputs=lora_strength,
+ queue=False,
+ show_progress=False,
+ )
diff --git a/apps/stable_diffusion/web/ui/inpaint_ui.py b/apps/stable_diffusion/web/ui/inpaint_ui.py
index b10719da71..3c13ad79a2 100644
--- a/apps/stable_diffusion/web/ui/inpaint_ui.py
+++ b/apps/stable_diffusion/web/ui/inpaint_ui.py
@@ -21,7 +21,10 @@
predefined_paint_models,
cancel_sd,
)
-from apps.stable_diffusion.web.ui.common_ui_events import lora_changed
+from apps.stable_diffusion.web.ui.common_ui_events import (
+ lora_changed,
+ lora_strength_changed,
+)
from apps.stable_diffusion.src import (
args,
InpaintPipeline,
@@ -364,9 +367,11 @@ def inpaint_inf(
label="LoRA Strength",
info="Will be baked into the .vmfb",
step=0.01,
- minimum=0.1,
- maximum=1.0,
- value=1.0,
+ # the value is validated on every change event, so the minimum must be 0;
+ # otherwise intermediate values such as "0.5" could never be typed in
+ minimum=0.0,
+ maximum=2.0,
+ value=args.lora_strength,
scale=1,
)
with gr.Row():
@@ -618,3 +623,11 @@ def inpaint_inf(
outputs=[lora_tags],
queue=True,
)
+
+ lora_strength.change(
+ fn=lora_strength_changed,
+ inputs=lora_strength,
+ outputs=lora_strength,
+ queue=False,
+ show_progress=False,
+ )
diff --git a/apps/stable_diffusion/web/ui/outpaint_ui.py b/apps/stable_diffusion/web/ui/outpaint_ui.py
index 820031e3b3..8a83201c80 100644
--- a/apps/stable_diffusion/web/ui/outpaint_ui.py
+++ b/apps/stable_diffusion/web/ui/outpaint_ui.py
@@ -4,7 +4,10 @@
import gradio as gr
from PIL import Image
-from apps.stable_diffusion.web.ui.common_ui_events import lora_changed
+from apps.stable_diffusion.web.ui.common_ui_events import (
+ lora_changed,
+ lora_strength_changed,
+)
from apps.stable_diffusion.web.ui.utils import (
available_devices,
nodlogo_loc,
@@ -310,9 +313,11 @@ def outpaint_inf(
label="LoRA Strength",
info="Will be baked into the .vmfb",
step=0.01,
- minimum=0.1,
- maximum=1.0,
- value=1.0,
+ # the value is validated on every change event, so the minimum must be 0;
+ # otherwise intermediate values such as "0.5" could never be typed in
+ minimum=0.0,
+ maximum=2.0,
+ value=args.lora_strength,
scale=1,
)
with gr.Row():
@@ -552,3 +557,11 @@ def outpaint_inf(
outputs=[lora_tags],
queue=True,
)
+
+ lora_strength.change(
+ fn=lora_strength_changed,
+ inputs=lora_strength,
+ outputs=lora_strength,
+ queue=False,
+ show_progress=False,
+ )
diff --git a/apps/stable_diffusion/web/ui/txt2img_sdxl_ui.py b/apps/stable_diffusion/web/ui/txt2img_sdxl_ui.py
index 851adcbdd9..0a0bb2d80f 100644
--- a/apps/stable_diffusion/web/ui/txt2img_sdxl_ui.py
+++ b/apps/stable_diffusion/web/ui/txt2img_sdxl_ui.py
@@ -15,7 +15,10 @@
cancel_sd,
set_model_default_configs,
)
-from apps.stable_diffusion.web.ui.common_ui_events import lora_changed
+from apps.stable_diffusion.web.ui.common_ui_events import (
+ lora_changed,
+ lora_strength_changed,
+)
from apps.stable_diffusion.web.utils.metadata import import_png_metadata
from apps.stable_diffusion.web.utils.common_label_calc import status_label
from apps.stable_diffusion.src import (
@@ -330,9 +333,11 @@ def txt2img_sdxl_inf(
label="LoRA Strength",
info="Will be baked into the .vmfb",
step=0.01,
- minimum=0.1,
- maximum=1.0,
- value=1.0,
+ # the value is validated on every change event, so the minimum must be 0;
+ # otherwise intermediate values such as "0.5" could never be typed in
+ minimum=0.0,
+ maximum=2.0,
+ value=args.lora_strength,
scale=1,
)
with gr.Row():
@@ -645,3 +650,11 @@ def check_last_input(prompt):
outputs=[lora_tags],
queue=True,
)
+
+ lora_strength.change(
+ fn=lora_strength_changed,
+ inputs=lora_strength,
+ outputs=lora_strength,
+ queue=False,
+ show_progress=False,
+ )
diff --git a/apps/stable_diffusion/web/ui/txt2img_ui.py b/apps/stable_diffusion/web/ui/txt2img_ui.py
index cafdc1ea9f..12dbccec00 100644
--- a/apps/stable_diffusion/web/ui/txt2img_ui.py
+++ b/apps/stable_diffusion/web/ui/txt2img_ui.py
@@ -18,7 +18,10 @@
predefined_models,
cancel_sd,
)
-from apps.stable_diffusion.web.ui.common_ui_events import lora_changed
+from apps.stable_diffusion.web.ui.common_ui_events import (
+ lora_changed,
+ lora_strength_changed,
+)
from apps.stable_diffusion.web.utils.metadata import import_png_metadata
from apps.stable_diffusion.web.utils.common_label_calc import status_label
from apps.stable_diffusion.src import (
@@ -387,7 +390,7 @@ def load_settings():
loaded_settings.get("prompt", args.prompts[0]),
loaded_settings.get("negative_prompt", args.negative_prompts[0]),
loaded_settings.get("lora_weights", "None"),
- loaded_settings.get("lora_strength", 1.0),
+ loaded_settings.get("lora_strength", args.lora_strength),
loaded_settings.get("scheduler", args.scheduler),
loaded_settings.get(
"save_metadata_to_png", args.write_metadata_to_png
@@ -504,14 +507,17 @@ def onload_load_settings():
value=default_settings.get("lora_weights"),
choices=["None"] + get_custom_model_files("lora"),
allow_custom_value=True,
+ scale=3,
)
lora_strength = gr.Number(
label="LoRA Strength",
info="Will be baked into the .vmfb",
step=0.01,
- minimum=0.1,
- maximum=1.0,
- value=default_settings.get("lora_strength")
+ # the value is validated on every change event, so the minimum must be 0;
+ # otherwise intermediate values such as "0.5" could never be typed in
+ minimum=0.0,
+ maximum=2.0,
+ value=default_settings.get("lora_strength"),
scale=1,
)
with gr.Row():
@@ -732,7 +738,7 @@ def onload_load_settings():
prompt,
negative_prompt,
lora_weights,
- lora_f,
+ lora_strength,
scheduler,
save_metadata_to_png,
save_metadata_to_json,
@@ -765,7 +771,7 @@ def onload_load_settings():
prompt,
negative_prompt,
lora_weights,
- lora_hf_id,
+ lora_strength,
scheduler,
save_metadata_to_png,
save_metadata_to_json,
@@ -866,6 +872,7 @@ def onload_load_settings():
height,
txt2img_custom_model,
lora_weights,
+ lora_strength,
custom_vae,
],
)
@@ -896,3 +903,11 @@ def set_compatible_schedulers(hires_fix_selected):
outputs=[lora_tags],
queue=True,
)
+
+ lora_strength.change(
+ fn=lora_strength_changed,
+ inputs=lora_strength,
+ outputs=lora_strength,
+ queue=False,
+ show_progress=False,
+ )
diff --git a/apps/stable_diffusion/web/ui/upscaler_ui.py b/apps/stable_diffusion/web/ui/upscaler_ui.py
index 2e02afea4d..4d562e6e90 100644
--- a/apps/stable_diffusion/web/ui/upscaler_ui.py
+++ b/apps/stable_diffusion/web/ui/upscaler_ui.py
@@ -13,7 +13,10 @@
predefined_upscaler_models,
cancel_sd,
)
-from apps.stable_diffusion.web.ui.common_ui_events import lora_changed
+from apps.stable_diffusion.web.ui.common_ui_events import (
+ lora_changed,
+ lora_strength_changed,
+)
from apps.stable_diffusion.web.utils.common_label_calc import status_label
from apps.stable_diffusion.src import (
args,
@@ -332,9 +335,11 @@ def upscaler_inf(
label="LoRA Strength",
info="Will be baked into the .vmfb",
step=0.01,
- minimum=0.1,
- maximum=1.0,
- value=1.0,
+ # the value is validated on every change event, so the minimum must be 0;
+ # otherwise intermediate values such as "0.5" could never be typed in
+ minimum=0.0,
+ maximum=2.0,
+ value=args.lora_strength,
scale=1,
)
with gr.Row():
@@ -548,3 +553,11 @@ def upscaler_inf(
outputs=[lora_tags],
queue=True,
)
+
+ lora_strength.change(
+ fn=lora_strength_changed,
+ inputs=lora_strength,
+ outputs=lora_strength,
+ queue=False,
+ show_progress=False,
+ )
diff --git a/apps/stable_diffusion/web/utils/metadata/png_metadata.py b/apps/stable_diffusion/web/utils/metadata/png_metadata.py
index 96439ba291..1797ed93e5 100644
--- a/apps/stable_diffusion/web/utils/metadata/png_metadata.py
+++ b/apps/stable_diffusion/web/utils/metadata/png_metadata.py
@@ -122,18 +122,26 @@ def find_vae_from_png_metadata(
def find_lora_from_png_metadata(
key: str, metadata: dict[str, str | int]
-) -> tuple[str, str]:
+) -> tuple[str, float]:
lora_custom = ""
+ lora_strength = 1.0
if key in metadata:
- lora_file = metadata[key]
+ split_metadata = metadata[key].split(":")
+ lora_file = split_metadata[0]
+ if len(split_metadata) == 2:
+ try:
+ lora_strength = float(split_metadata[1])
+ except ValueError:
+ pass
+
lora_custom = try_find_model_base_from_png_metadata(lora_file, "lora")
# If nothing had matched, check vendor/hf_model_id
if not lora_custom and lora_file.count("/"):
lora_custom = lora_file
# LoRA input is optional, should not print or throw an error if missing
- return lora_custom
+ return lora_custom, lora_strength
def import_png_metadata(
@@ -157,7 +165,10 @@ def import_png_metadata(
(png_custom_model, png_hf_model_id) = find_model_from_png_metadata(
"Model", metadata
)
- lora_custom_model = find_lora_from_png_metadata("LoRA", metadata)
+ (
+ custom_lora,
+ custom_lora_strength,
+ ) = find_lora_from_png_metadata("LoRA", metadata)
vae_custom_model = find_vae_from_png_metadata("VAE", metadata)
negative_prompt = metadata["Negative prompt"]
@@ -172,9 +183,7 @@ def import_png_metadata(
elif "Model" in metadata and png_hf_model_id:
custom_model = png_hf_model_id
- if "LoRA" in metadata and lora_custom_model:
- custom_lora = lora_custom_model
- else:
+ if not custom_lora:
custom_lora = "None"
if "VAE" in metadata and vae_custom_model:
@@ -208,5 +217,6 @@ def import_png_metadata(
height,
custom_model,
custom_lora,
+ custom_lora_strength,
custom_vae,
)
diff --git a/shark_patch_dict_lora_te_.json b/shark_patch_dict_lora_te_.json
deleted file mode 100644
index 5658e875d9..0000000000
--- a/shark_patch_dict_lora_te_.json
+++ /dev/null
@@ -1,434 +0,0 @@
-{
- "lora_te_text_model_encoder_layers_0_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_0_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_0_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_0_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_0_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_0_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_10_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_10_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_10_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_10_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_10_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_10_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_11_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_11_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_11_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_11_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_11_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_11_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_1_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_1_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_1_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_1_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_1_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_1_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_2_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_2_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_2_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_2_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_2_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_2_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_3_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_3_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_3_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_3_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_3_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_3_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_4_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_4_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_4_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_4_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_4_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_4_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_5_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_5_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_5_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_5_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_5_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_5_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_6_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_6_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_6_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_6_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_6_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_6_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_7_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_7_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_7_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_7_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_7_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_7_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_8_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_8_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_8_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_8_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_8_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_8_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_9_mlp_fc1": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_9_mlp_fc2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_9_self_attn_k_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_9_self_attn_out_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_9_self_attn_q_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_te_text_model_encoder_layers_9_self_attn_v_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- }
-}
\ No newline at end of file
diff --git a/shark_patch_dict_lora_unet_.json b/shark_patch_dict_lora_unet_.json
deleted file mode 100644
index e296b60551..0000000000
--- a/shark_patch_dict_lora_unet_.json
+++ /dev/null
@@ -1,1154 +0,0 @@
-{
- "lora_unet_down_blocks_0_attentions_0_proj_in": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_proj_out": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_ff_net_2": {
- "up": "",
- "down": "",
- "alpha": "0.015625",
- "mid": ""
- },
- "lora_unet_down_blocks_0_attentions_1_proj_in": {
- "up": "",
- "down": "