Commit a8d6a1d

fix lint

liuwenran committed Dec 18, 2023
1 parent 1bc0100 commit a8d6a1d
Showing 1 changed file with 71 additions and 47 deletions.
118 changes: 71 additions & 47 deletions projects/powerpaint/gradio_PowerPaint.py
@@ -11,6 +11,7 @@
     StableDiffusionInpaintPipeline as Pipeline
 from pipeline.pipeline_PowerPaint_ControlNet import \
     StableDiffusionControlNetInpaintPipeline as controlnetPipeline
+from safetensors.torch import load_model
 from transformers import DPTFeatureExtractor, DPTForDepthEstimation
 from utils.utils import TokenizerWrapper, add_tokens
 
@@ -19,8 +20,7 @@
 weight_dtype = torch.float16
 global pipe
 pipe = Pipeline.from_pretrained(
-    'runwayml/stable-diffusion-inpainting',
-    torch_dtype=weight_dtype)
+    'runwayml/stable-diffusion-inpainting', torch_dtype=weight_dtype)
 pipe.tokenizer = TokenizerWrapper(
     from_pretrained='runwayml/stable-diffusion-v1-5',
     subfolder='tokenizer',
@@ -33,12 +33,11 @@
     initialize_tokens=['a', 'a', 'a'],
     num_vectors_per_token=10)
 
-from safetensors.torch import load_model
-load_model(pipe.unet, "./models/unet/diffusion_pytorch_model.safetensors")
-pipe.text_encoder.load_state_dict(torch.load("./models/text_encoder/pytorch_model.bin"), strict=False)
+load_model(pipe.unet, './models/unet/diffusion_pytorch_model.safetensors')
+pipe.text_encoder.load_state_dict(
+    torch.load('./models/text_encoder/pytorch_model.bin'), strict=False)
 pipe = pipe.to('cuda')
 
-
 depth_estimator = DPTForDepthEstimation.from_pretrained(
     'Intel/dpt-hybrid-midas').to('cuda')
 feature_extractor = DPTFeatureExtractor.from_pretrained(
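A note on the two loading calls this hunk reflows (the `load_model` import also moves to the top of the file, presumably to satisfy the linter's import-placement rule): `safetensors.torch.load_model` loads a .safetensors checkpoint directly into an already-built module, while `load_state_dict(..., strict=False)` tolerates key mismatches, which the fine-tuned text encoder needs because of its extra learned-token embeddings. A minimal sketch of the same pattern, assuming `pipe` and the repository's checkpoint paths; the printout is illustrative only:

    import torch
    from safetensors.torch import load_model

    # Load UNet weights stored as safetensors into the existing module.
    load_model(pipe.unet, './models/unet/diffusion_pytorch_model.safetensors')

    # strict=False: keys may differ from the base text encoder because of
    # the newly added placeholder-token embeddings.
    state = torch.load('./models/text_encoder/pytorch_model.bin')
    missing, unexpected = pipe.text_encoder.load_state_dict(state, strict=False)
    print(f'missing: {len(missing)}, unexpected: {len(unexpected)}')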
@@ -107,12 +106,12 @@ def add_task(prompt, negative_prompt, control_type):
     return promptA, promptB, negative_promptA, negative_promptB
 
 
-
 def predict(input_image, prompt, fitting_degree, ddim_steps, scale, seed,
-            negative_prompt, task,vertical_expansion_ratio,horizontal_expansion_ratio):
+            negative_prompt, task, vertical_expansion_ratio,
+            horizontal_expansion_ratio):
     size1, size2 = input_image['image'].convert('RGB').size
 
-    if task!='image-outpainting':
+    if task != 'image-outpainting':
         if size1 < size2:
             input_image['image'] = input_image['image'].convert('RGB').resize(
                 (640, int(size2 / size1 * 640)))
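The resize logic in this hunk keeps the aspect ratio while pinning the shorter side to 640 pixels (512 in the outpainting branch below). A hypothetical helper expressing the same idea:

    from PIL import Image

    def resize_short_side(img: Image.Image, target: int) -> Image.Image:
        # Pin the shorter side to `target`, scaling the other to match.
        w, h = img.size
        if w < h:
            return img.resize((target, int(h / w * target)))
        return img.resize((int(w / h * target), target))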
@@ -127,31 +126,41 @@ def predict(input_image, prompt, fitting_degree, ddim_steps, scale, seed,
             input_image['image'] = input_image['image'].convert('RGB').resize(
                 (int(size1 / size2 * 512), 512))
 
-    if vertical_expansion_ratio!=None and horizontal_expansion_ratio!=None:
-        o_W,o_H = input_image['image'].convert('RGB').size
-        c_W = int(horizontal_expansion_ratio*o_W)
-        c_H = int(vertical_expansion_ratio*o_H)
+    if (vertical_expansion_ratio is not None
+            and horizontal_expansion_ratio is not None):
+        o_W, o_H = input_image['image'].convert('RGB').size
+        c_W = int(horizontal_expansion_ratio * o_W)
+        c_H = int(vertical_expansion_ratio * o_H)
 
-        expand_img = np.ones((c_H, c_W,3), dtype=np.uint8)*127
+        expand_img = np.ones((c_H, c_W, 3), dtype=np.uint8) * 127
         original_img = np.array(input_image['image'])
-        expand_img[int((c_H-o_H)/2.0):int((c_H-o_H)/2.0)+o_H,int((c_W-o_W)/2.0):int((c_W-o_W)/2.0)+o_W,:] = original_img
+        expand_img[int((c_H - o_H) / 2.0):int((c_H - o_H) / 2.0) + o_H,
+                   int((c_W - o_W) / 2.0):int((c_W - o_W) / 2.0) +
+                   o_W, :] = original_img
 
         blurry_gap = 10
 
-        expand_mask = np.ones((c_H, c_W,3), dtype=np.uint8)*255
-        if vertical_expansion_ratio == 1 and horizontal_expansion_ratio!=1:
-            expand_mask[int((c_H-o_H)/2.0):int((c_H-o_H)/2.0)+o_H,int((c_W-o_W)/2.0)+blurry_gap:int((c_W-o_W)/2.0)+o_W-blurry_gap,:] = 0 #noqa
-        elif vertical_expansion_ratio != 1 and horizontal_expansion_ratio!=1:
-            expand_mask[int((c_H-o_H)/2.0)+blurry_gap:int((c_H-o_H)/2.0)+o_H-blurry_gap,int((c_W-o_W)/2.0)+blurry_gap:int((c_W-o_W)/2.0)+o_W-blurry_gap,:] = 0 #noqa
-        elif vertical_expansion_ratio != 1 and horizontal_expansion_ratio==1:
-            expand_mask[int((c_H-o_H)/2.0)+blurry_gap:int((c_H-o_H)/2.0)+o_H-blurry_gap,int((c_W-o_W)/2.0):int((c_W-o_W)/2.0)+o_W,:] = 0 #noqa
+        expand_mask = np.ones((c_H, c_W, 3), dtype=np.uint8) * 255
+        if vertical_expansion_ratio == 1 and horizontal_expansion_ratio != 1:
+            expand_mask[int((c_H - o_H) / 2.0):int((c_H - o_H) / 2.0) + o_H,
+                        int((c_W - o_W) / 2.0) +
+                        blurry_gap:int((c_W - o_W) / 2.0) + o_W -
+                        blurry_gap, :] = 0  # noqa
+        elif vertical_expansion_ratio != 1 and horizontal_expansion_ratio != 1:
+            expand_mask[int((c_H - o_H) / 2.0) +
+                        blurry_gap:int((c_H - o_H) / 2.0) + o_H - blurry_gap,
+                        int((c_W - o_W) / 2.0) +
+                        blurry_gap:int((c_W - o_W) / 2.0) + o_W -
+                        blurry_gap, :] = 0  # noqa
+        elif vertical_expansion_ratio != 1 and horizontal_expansion_ratio == 1:
+            expand_mask[int((c_H - o_H) / 2.0) +
+                        blurry_gap:int((c_H - o_H) / 2.0) + o_H - blurry_gap,
+                        int((c_W - o_W) /
+                            2.0):int((c_W - o_W) / 2.0) + o_W, :] = 0  # noqa
 
         input_image['image'] = Image.fromarray(expand_img)
         input_image['mask'] = Image.fromarray(expand_mask)
 
-
-
-
     promptA, promptB, negative_promptA, negative_promptB = add_task(
         prompt, negative_prompt, task)
     print(promptA, promptB, negative_promptA, negative_promptB)
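The slicing in this hunk is dense, but the geometry is simple: the original image is pasted centered on a gray (127) canvas whose size is scaled by the two expansion ratios, and the mask is white (255 = repaint) everywhere except the pasted region, which is shrunk inward by `blurry_gap` pixels along each expanded axis so the seam itself gets repainted. A sketch with named offsets, assuming expansion ratios >= 1 (so `//` matches `int(x / 2.0)` here, the differences being non-negative):

    import numpy as np

    def make_outpaint_canvas(original_img, v_ratio, h_ratio, blurry_gap=10):
        o_H, o_W = original_img.shape[:2]
        c_H, c_W = int(v_ratio * o_H), int(h_ratio * o_W)
        top, left = (c_H - o_H) // 2, (c_W - o_W) // 2

        # Gray canvas with the original pasted at the center.
        canvas = np.full((c_H, c_W, 3), 127, dtype=np.uint8)
        canvas[top:top + o_H, left:left + o_W] = original_img

        mask = np.full((c_H, c_W, 3), 255, dtype=np.uint8)
        # Shrink the keep-region by blurry_gap only along expanded axes, so a
        # thin border of the original is repainted to blend the seam.
        gy = blurry_gap if v_ratio != 1 else 0
        gx = blurry_gap if h_ratio != 1 else 0
        if v_ratio != 1 or h_ratio != 1:
            mask[top + gy:top + o_H - gy, left + gx:left + o_W - gx] = 0
        return canvas, mask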
@@ -201,7 +210,8 @@ def predict(input_image, prompt, fitting_degree, ddim_steps, scale, seed,
 
 
 def predict_controlnet(input_image, input_control_image, control_type, prompt,
-                       ddim_steps, scale, seed, negative_prompt,controlnet_conditioning_scale):
+                       ddim_steps, scale, seed, negative_prompt,
+                       controlnet_conditioning_scale):
     promptA = prompt + ' P_obj'
     promptB = prompt + ' P_obj'
     negative_promptA = negative_prompt
@@ -278,7 +288,7 @@ def predict_controlnet(input_image, input_control_image, control_type, prompt,
         width=H,
         height=W,
         guidance_scale=scale,
-        controlnet_conditioning_scale = controlnet_conditioning_scale,
+        controlnet_conditioning_scale=controlnet_conditioning_scale,
         num_inference_steps=ddim_steps).images[0]
     red = np.array(result).astype('float') * 1
     red[:, :, 0] = 180.0
@@ -297,14 +307,16 @@ def predict_controlnet(input_image, input_control_image, control_type, prompt,
     ours_np = np.asarray(result) / 255.0
     ours_np = ours_np * m_img + (1 - m_img) * img_np
     result_paste = Image.fromarray(np.uint8(ours_np * 255))
-    return [input_image['image'].convert('RGB'), result_paste], [controlnet_image, result_m]
+    return [input_image['image'].convert('RGB'),
+            result_paste], [controlnet_image, result_m]
 
 
 def infer(input_image, text_guided_prompt, text_guided_negative_prompt,
           shape_guided_prompt, shape_guided_negative_prompt, fitting_degree,
           ddim_steps, scale, seed, task, enable_control, input_control_image,
-          control_type,vertical_expansion_ratio,horizontal_expansion_ratio,outpaint_prompt,
-          outpaint_negative_prompt,controlnet_conditioning_scale,removal_prompt,
+          control_type, vertical_expansion_ratio, horizontal_expansion_ratio,
+          outpaint_prompt, outpaint_negative_prompt,
+          controlnet_conditioning_scale, removal_prompt,
           removal_negative_prompt):
     if task == 'text-guided':
         prompt = text_guided_prompt
@@ -319,7 +331,8 @@ def infer(input_image, text_guided_prompt, text_guided_negative_prompt,
         prompt = outpaint_prompt
         negative_prompt = outpaint_negative_prompt
         return predict(input_image, prompt, fitting_degree, ddim_steps, scale,
-                       seed, negative_prompt, task,vertical_expansion_ratio,horizontal_expansion_ratio)
+                       seed, negative_prompt, task, vertical_expansion_ratio,
+                       horizontal_expansion_ratio)
     else:
         task = 'text-guided'
         prompt = text_guided_prompt
@@ -328,10 +341,11 @@ def infer(input_image, text_guided_prompt, text_guided_negative_prompt,
     if enable_control and task == 'text-guided':
         return predict_controlnet(input_image, input_control_image,
                                   control_type, prompt, ddim_steps, scale,
-                                  seed, negative_prompt,controlnet_conditioning_scale)
+                                  seed, negative_prompt,
+                                  controlnet_conditioning_scale)
     else:
         return predict(input_image, prompt, fitting_degree, ddim_steps, scale,
-                       seed, negative_prompt, task,None,None)
+                       seed, negative_prompt, task, None, None)
 
 
 def select_tab_text_guided():
@@ -341,6 +355,7 @@ def select_tab_text_guided():
 def select_tab_object_removal():
     return 'object-removal'
 
+
 def select_tab_image_outpainting():
     return 'image-outpainting'
 
@@ -362,14 +377,17 @@ def select_tab_shape_guided():
     )
     with gr.Row():
         gr.Markdown(
-            "**Note:** Due to network-related factors, the page may experience occasional bugs! If the inpainting results deviate significantly from expectations, consider toggling between task options to refresh the content."  # noqa
+            '**Note:** Due to network-related factors, the page may experience occasional bugs! If the inpainting results deviate significantly from expectations, consider toggling between task options to refresh the content.'  # noqa
         )
     with gr.Row():
         with gr.Column():
             gr.Markdown('### Input image and draw mask')
             input_image = gr.Image(source='upload', tool='sketch', type='pil')
 
-            task = gr.Radio(['text-guided', 'object-removal', 'shape-guided', 'image-outpainting'],
+            task = gr.Radio([
+                'text-guided', 'object-removal', 'shape-guided',
+                'image-outpainting'
+            ],
                             show_label=False,
                             visible=False)
 
@@ -404,13 +422,14 @@ def select_tab_shape_guided():
                 enable_object_removal = gr.Checkbox(
                     label='Enable object removal inpainting',
                     value=True,
-                    info='The recommended configuration for the Guidance Scale is 10 or higher. \
-                    If undesired objects appear in the masked area, \
-                    you can address this by specifically increasing the Guidance Scale.',
+                    info='The recommended configuration for '
+                    'the Guidance Scale is 10 or higher.'
+                    'If undesired objects appear in the masked area, '
+                    'you can address this by specifically increasing '
+                    'the Guidance Scale.',
                     interactive=False)
                 removal_prompt = gr.Textbox(label='Prompt')
-                removal_negative_prompt = gr.Textbox(
-                    label='negative_prompt')
+                removal_negative_prompt = gr.Textbox(label='negative_prompt')
             tab_object_removal.select(
                 fn=select_tab_object_removal, inputs=None, outputs=task)
 
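One caveat on the implicit string concatenation that replaces the backslash continuations above (the continuations embedded their leading whitespace in the rendered text, which is what the lint fix removes): adjacent fragments join with no separator, and the added fragment ending 'is 10 or higher.' carries no trailing space, so the rendered info text reads 'higher.If'. A pattern that keeps the spaces explicit, as a hypothetical rewrite rather than part of the commit:

    # Adjacent string literals concatenate at compile time; each fragment
    # must carry its own separating space.
    info = ('The recommended configuration for '
            'the Guidance Scale is 10 or higher. '
            'If undesired objects appear in the masked area, '
            'you can address this by specifically increasing '
            'the Guidance Scale.')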
@@ -419,9 +438,12 @@ def select_tab_shape_guided():
                 enable_object_removal = gr.Checkbox(
                     label='Enable image outpainting',
                     value=True,
-                    info='The recommended configuration for the Guidance Scale is 10 or higher. \
-                    If unwanted random objects appear in the extended image region, \
-                    you can enhance the cleanliness of the extension area by increasing the Guidance Scale.',
+                    info='The recommended configuration for the Guidance '
+                    'Scale is 10 or higher. '
+                    'If unwanted random objects appear in '
+                    'the extended image region, '
+                    'you can enhance the cleanliness of the extension '
+                    'area by increasing the Guidance Scale.',
                     interactive=False)
                 outpaint_prompt = gr.Textbox(label='Outpainting_prompt')
                 outpaint_negative_prompt = gr.Textbox(
@@ -468,7 +490,8 @@ def select_tab_shape_guided():
                     label='Steps', minimum=1, maximum=50, value=45, step=1)
                 scale = gr.Slider(
                     label='Guidance Scale',
-                    info='For object removal and image outpainting, it is recommended to set the value at 10 or above.', #noqa
+                    info='For object removal and image outpainting, '
+                    'it is recommended to set the value at 10 or above.',
                     minimum=0.1,
                     maximum=30.0,
                     value=7.5,
@@ -494,9 +517,10 @@ def select_tab_shape_guided():
             input_image, text_guided_prompt, text_guided_negative_prompt,
             shape_guided_prompt, shape_guided_negative_prompt, fitting_degree,
             ddim_steps, scale, seed, task, enable_control, input_control_image,
-            control_type,vertical_expansion_ratio,horizontal_expansion_ratio,
-            outpaint_prompt,outpaint_negative_prompt,controlnet_conditioning_scale,
-            removal_prompt,removal_negative_prompt
+            control_type, vertical_expansion_ratio, horizontal_expansion_ratio,
+            outpaint_prompt, outpaint_negative_prompt,
+            controlnet_conditioning_scale, removal_prompt,
+            removal_negative_prompt
         ],
         outputs=[inpaint_result, gallery])
 
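For context, the argument list in this final hunk feeds a standard Gradio event binding: `click()` passes the listed input components' values to `infer` positionally and routes its return values to the output components. A self-contained, hypothetical sketch of the same wiring pattern (component names here are illustrative, not the file's):

    import gradio as gr

    with gr.Blocks() as demo:
        inp = gr.Textbox(label='Prompt')
        out = gr.Textbox(label='Echo')
        run = gr.Button('Run')
        # Input values go to fn positionally; return values fill the outputs.
        run.click(fn=lambda p: p, inputs=[inp], outputs=[out])

    demo.launch()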