From 726d73d6ba0c7a093763ce3a9b243ebf047f5ffa Mon Sep 17 00:00:00 2001
From: powderluv
Date: Fri, 23 Jun 2023 10:29:00 -0700
Subject: [PATCH] Revert "[vicuna] Add streaming of tokens (#1587)" (#1588)

This reverts commit 4d55e51d46f5e6d8ac2a348d8d989456c89345c0.
---
 apps/language_models/src/pipelines/vicuna_pipeline.py | 3 ---
 apps/stable_diffusion/web/ui/stablelm_ui.py           | 8 +++++---
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/apps/language_models/src/pipelines/vicuna_pipeline.py b/apps/language_models/src/pipelines/vicuna_pipeline.py
index f5d22e52a1..60cf258f4e 100644
--- a/apps/language_models/src/pipelines/vicuna_pipeline.py
+++ b/apps/language_models/src/pipelines/vicuna_pipeline.py
@@ -464,7 +464,6 @@ def generate(self, prompt, cli=False):
             logits = generated_token_op["logits"]
             pkv = generated_token_op["pkv"]
             detok = generated_token_op["detok"]
-            yield detok

             res.append(detok)
             res_tokens.append(token)
@@ -506,8 +505,6 @@ def generate(self, prompt, cli=False):
             res.append(detok)
             if cli:
                 print(f"{detok}", end=" ", flush=True)
-            yield detok
-
         if self.device == "cuda":
             del sec_vic, pkv, logits
             torch.cuda.empty_cache()
diff --git a/apps/stable_diffusion/web/ui/stablelm_ui.py b/apps/stable_diffusion/web/ui/stablelm_ui.py
index b9edbbc08b..369d864a31 100644
--- a/apps/stable_diffusion/web/ui/stablelm_ui.py
+++ b/apps/stable_diffusion/web/ui/stablelm_ui.py
@@ -65,14 +65,16 @@ def chat(curr_system_message, history, model, device, precision):
         )
         prompt = messages.strip()
         print("prompt = ", prompt)
+        sentence = vicuna_model.generate(prompt)
         partial_text = ""
-        for new_text in vicuna_model.generate(prompt):
+        for new_text in sentence.split(" "):
             # print(new_text)
             partial_text += new_text + " "
             history[-1][1] = partial_text
             # Yield an empty string to cleanup the message textbox and the updated conversation history
             yield history

+        history[-1][1] = sentence
         return history

     # else Model is StableLM
@@ -122,13 +124,13 @@
                 "TheBloke/vicuna-7B-1.1-HF",
             ],
         )
-        supported_devices = available_devices + ["AMD-AIE"]
+        supported_devices = available_devices
         enabled = len(supported_devices) > 0
         device = gr.Dropdown(
             label="Device",
             value=supported_devices[0]
             if enabled
-            else "No devices supported for now",
+            else "Only CUDA Supported for now",
             choices=supported_devices,
             interactive=enabled,
         )
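
For context, a minimal self-contained sketch of the two chat() styles this patch toggles between, assuming a Gradio-style history of [user, bot] pairs. This is not code from the repo: fake_generate, chat_streaming, and chat_blocking are hypothetical stand-ins for vicuna_model.generate and the two versions of chat().

# A minimal sketch, not code from this repo. It contrasts the streaming
# loop that #1587 added with the blocking loop this revert restores.
# fake_generate is a hypothetical stand-in for vicuna_model.generate.

def fake_generate(prompt):
    """Yield detokenized words one at a time, like the reverted generate()."""
    for word in ["Streaming", "tokens", "to", "the", "UI"]:
        yield word

def chat_streaming(prompt, history):
    # Reverted behavior: update the chat history as each token arrives,
    # so the UI can render the answer while it is still being generated.
    partial_text = ""
    for new_text in fake_generate(prompt):
        partial_text += new_text + " "
        history[-1][1] = partial_text
        yield history

def chat_blocking(prompt, history):
    # Restored behavior: wait for the complete sentence, then replay it
    # word by word from the already-finished string.
    sentence = " ".join(fake_generate(prompt))
    partial_text = ""
    for new_text in sentence.split(" "):
        partial_text += new_text + " "
        history[-1][1] = partial_text
        yield history
    history[-1][1] = sentence

if __name__ == "__main__":
    history = [["prompt", ""]]
    for snapshot in chat_blocking("prompt", history):
        print(snapshot[-1][1])

Both functions are generators from the UI's point of view; the difference is that chat_blocking only starts yielding after generation has fully completed, which is exactly what removing the yield detok lines from generate() implies.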