diff --git a/.gitignore b/.gitignore
index 5b84386..56f0236 100644
--- a/.gitignore
+++ b/.gitignore
@@ -186,4 +186,7 @@ playground
log_reload.py
*competition*
-apikey_local.yml
\ No newline at end of file
+apikey_local.yml
+llava-old/
+mmte/models/llava/serve/
+test_env/
diff --git a/env/requirements.txt b/env/requirements.txt
index 70c90b3..37b878d 100644
--- a/env/requirements.txt
+++ b/env/requirements.txt
@@ -35,7 +35,7 @@ ninja==1.11.1.1
torch==2.3.0+cu121
torchvision==0.18.0+cu121
tqdm==4.66.1
-transformers==4.33.2
+transformers==4.37.2
webdataset==0.2.86
datasets==2.18.0
openai==1.9.0
diff --git a/mmte/models/__init__.py b/mmte/models/__init__.py
index b8da7c9..cedb218 100644
--- a/mmte/models/__init__.py
+++ b/mmte/models/__init__.py
@@ -1,22 +1,20 @@
from mmte.utils.registry import registry
from mmte.models.base import BaseChat, Response
-import transformers
+from mmte.models.llava_chat import LLaVAChat
+from mmte.models.llava_rlhf_chat import LLaVARLHFChat
+from mmte.models.mplug_owl2_chat import mPLUGOwl2Chat
+from mmte.models.internvl_chat import InternVLChat
+from mmte.models.lrv_instruction_chat import LRVInstructionChat
from mmte.models.openai_chat import OpenAIChat
from mmte.models.google_chat import GoogleChat
from mmte.models.claude3_chat import ClaudeChat
from mmte.models.qwen_plus_chat import QwenPlusChat
from mmte.models.minigpt4_chat import MiniGPT4Chat
from mmte.models.instructblip_chat import InstructBLIPChat
-if str(transformers.__version__)<'4.37.2':
- from mmte.models.llava_chat import LLaVAChat
- from mmte.models.llava_rlhf_chat import LLaVARLHFChat
- from mmte.models.mplug_owl2_chat import mPLUGOwl2Chat
- from mmte.models.internvl_chat import InternVLChat
from mmte.models.qwen_chat import QwenChat
from mmte.models.otter_chat import OtterChat
from mmte.models.mplug_owl_chat import mPLUGOwlChat
from mmte.models.internlm_xcomposer_chat import InternLMXComposerChat
-from mmte.models.lrv_instruction_chat import LRVInstructionChat
from mmte.models.sharegpt4v_chat import ShareGPT4VChat
from mmte.models.cogvlm_chat import CogVLMChat
from mmte.models.phi3_chat import Phi3Chat
diff --git a/mmte/models/llava/conversation.py b/mmte/models/llava/conversation.py
index 0025f5b..00c5686 100644
--- a/mmte/models/llava/conversation.py
+++ b/mmte/models/llava/conversation.py
@@ -1,6 +1,9 @@
import dataclasses
from enum import auto, Enum
from typing import List, Tuple
+import base64
+from io import BytesIO
+from PIL import Image
class SeparatorStyle(Enum):
@@ -68,7 +71,7 @@ def get_prompt(self):
else:
ret += role
elif self.sep_style == SeparatorStyle.LLAMA_2:
- wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n"
+ wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
ret = ""
@@ -106,54 +109,54 @@ def get_prompt(self):
def append_message(self, role, message):
self.messages.append([role, message])
+ def process_image(self, image, image_process_mode, return_pil=False, image_format='PNG', max_len=1344, min_len=672):
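+ # Normalize the image for this turn: pad to square, keep as-is, or resize per image_process_mode,
+ # cap oversized images at max_len/min_len, and return either a PIL image or a base64-encoded string.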
+ if image_process_mode == "Pad":
+ def expand2square(pil_img, background_color=(122, 116, 104)):
+ width, height = pil_img.size
+ if width == height:
+ return pil_img
+ elif width > height:
+ result = Image.new(pil_img.mode, (width, width), background_color)
+ result.paste(pil_img, (0, (width - height) // 2))
+ return result
+ else:
+ result = Image.new(pil_img.mode, (height, height), background_color)
+ result.paste(pil_img, ((height - width) // 2, 0))
+ return result
+ image = expand2square(image)
+ elif image_process_mode in ["Default", "Crop"]:
+ pass
+ elif image_process_mode == "Resize":
+ image = image.resize((336, 336))
+ else:
+ raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
+ if max(image.size) > max_len:
+ max_hw, min_hw = max(image.size), min(image.size)
+ aspect_ratio = max_hw / min_hw
+ shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
+ longest_edge = int(shortest_edge * aspect_ratio)
+ W, H = image.size
+ if H > W:
+ H, W = longest_edge, shortest_edge
+ else:
+ H, W = shortest_edge, longest_edge
+ image = image.resize((W, H))
+ if return_pil:
+ return image
+ else:
+ buffered = BytesIO()
+ image.save(buffered, format=image_format)
+ img_b64_str = base64.b64encode(buffered.getvalue()).decode()
+ return img_b64_str
+
def get_images(self, return_pil=False):
images = []
for i, (role, msg) in enumerate(self.messages[self.offset:]):
if i % 2 == 0:
if type(msg) is tuple:
- import base64
- from io import BytesIO
- from PIL import Image
msg, image, image_process_mode = msg
- if image_process_mode == "Pad":
- def expand2square(pil_img, background_color=(122, 116, 104)):
- width, height = pil_img.size
- if width == height:
- return pil_img
- elif width > height:
- result = Image.new(pil_img.mode, (width, width), background_color)
- result.paste(pil_img, (0, (width - height) // 2))
- return result
- else:
- result = Image.new(pil_img.mode, (height, height), background_color)
- result.paste(pil_img, ((height - width) // 2, 0))
- return result
- image = expand2square(image)
- elif image_process_mode in ["Default", "Crop"]:
- pass
- elif image_process_mode == "Resize":
- image = image.resize((336, 336))
- else:
- raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
- max_hw, min_hw = max(image.size), min(image.size)
- aspect_ratio = max_hw / min_hw
- max_len, min_len = 800, 400
- shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
- longest_edge = int(shortest_edge * aspect_ratio)
- W, H = image.size
- if longest_edge != max(image.size):
- if H > W:
- H, W = longest_edge, shortest_edge
- else:
- H, W = shortest_edge, longest_edge
- image = image.resize((W, H))
- if return_pil:
- images.append(image)
- else:
- buffered = BytesIO()
- image.save(buffered, format="PNG")
- img_b64_str = base64.b64encode(buffered.getvalue()).decode()
- images.append(img_b64_str)
+ image = self.process_image(image, image_process_mode, return_pil=return_pil)
+ images.append(image)
return images
def to_gradio_chatbot(self):
@@ -161,24 +164,11 @@ def to_gradio_chatbot(self):
for i, (role, msg) in enumerate(self.messages[self.offset:]):
if i % 2 == 0:
if type(msg) is tuple:
- import base64
- from io import BytesIO
msg, image, image_process_mode = msg
- max_hw, min_hw = max(image.size), min(image.size)
- aspect_ratio = max_hw / min_hw
- max_len, min_len = 800, 400
- shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
- longest_edge = int(shortest_edge * aspect_ratio)
- W, H = image.size
- if H > W:
- H, W = longest_edge, shortest_edge
- else:
- H, W = shortest_edge, longest_edge
- image = image.resize((W, H))
- buffered = BytesIO()
- image.save(buffered, format="JPEG")
- img_b64_str = base64.b64encode(buffered.getvalue()).decode()
- img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
+ img_b64_str = self.process_image(
+ image, "Default", return_pil=False,
+ image_format='JPEG')
+ img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
msg = img_str + msg.replace('<image>', '').strip()
ret.append([msg, None])
else:
@@ -357,6 +347,28 @@ def dict(self):
version="v1_mmtag",
)
+conv_mistral_instruct = Conversation(
+ system="",
+ roles=("USER", "ASSISTANT"),
+ version="llama_v2",
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.LLAMA_2,
+ sep="",
+ sep2="</s>",
+)
+
+conv_chatml_direct = Conversation(
+ system="""<|im_start|>system
+Answer the questions.""",
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
+ version="mpt",
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.MPT,
+ sep="<|im_end|>",
+)
+
default_conversation = conv_vicuna_v1
conv_templates = {
"default": conv_vicuna_v0,
@@ -364,6 +376,9 @@ def dict(self):
"v1": conv_vicuna_v1,
"vicuna_v1": conv_vicuna_v1,
"llama_2": conv_llama_2,
+ "mistral_instruct": conv_mistral_instruct,
+ "chatml_direct": conv_chatml_direct,
+ "mistral_direct": conv_chatml_direct,
"plain": conv_llava_plain,
"v0_plain": conv_llava_plain,
diff --git a/mmte/models/llava/eval/eval_gpt_review.py b/mmte/models/llava/eval/eval_gpt_review.py
new file mode 100644
index 0000000..8af4559
--- /dev/null
+++ b/mmte/models/llava/eval/eval_gpt_review.py
@@ -0,0 +1,113 @@
+import argparse
+import json
+import os
+
+import openai
+import tqdm
+import ray
+import time
+
+NUM_SECONDS_TO_SLEEP = 3
+
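+# Query GPT-4 as a Ray task; retry until the request succeeds, sleeping briefly on non-rate-limit errors.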
+@ray.remote(num_cpus=4)
+def get_eval(content: str, max_tokens: int):
+ while True:
+ try:
+ response = openai.ChatCompletion.create(
+ model='gpt-4',
+ messages=[{
+ 'role': 'system',
+ 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
+ }, {
+ 'role': 'user',
+ 'content': content,
+ }],
+ temperature=0.2, # TODO: figure out which temperature is best for evaluation
+ max_tokens=max_tokens,
+ )
+ break
+ except openai.error.RateLimitError:
+ pass
+ except Exception as e:
+ print(e)
+ time.sleep(NUM_SECONDS_TO_SLEEP)
+
+ print('success!')
+ return response['choices'][0]['message']['content']
+
+
+def parse_score(review):
+ try:
+ score_pair = review.split('\n')[0]
+ score_pair = score_pair.replace(',', ' ')
+ sp = score_pair.split(' ')
+ if len(sp) == 2:
+ return [float(sp[0]), float(sp[1])]
+ else:
+ print('error', review)
+ return [-1, -1]
+ except Exception as e:
+ print(e)
+ print('error', review)
+ return [-1, -1]
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
+ parser.add_argument('-q', '--question')
+ # parser.add_argument('-a', '--answer')
+ parser.add_argument('-a', '--answer-list', nargs='+', default=[])
+ parser.add_argument('-r', '--rule')
+ parser.add_argument('-o', '--output')
+ parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
+ args = parser.parse_args()
+
+ ray.init()
+
+ f_q = open(os.path.expanduser(args.question))
+ f_ans1 = open(os.path.expanduser(args.answer_list[0]))
+ f_ans2 = open(os.path.expanduser(args.answer_list[1]))
+ rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
+
+ review_file = open(f'{args.output}', 'w')
+
+ js_list = []
+ handles = []
+ idx = 0
+ for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
+ # if idx == 1:
+ # break
+
+ ques = json.loads(ques_js)
+ ans1 = json.loads(ans1_js)
+ ans2 = json.loads(ans2_js)
+
+ category = json.loads(ques_js)['category']
+ if category in rule_dict:
+ rule = rule_dict[category]
+ else:
+ rule = rule_dict['default']
+ prompt = rule['prompt']
+ role = rule['role']
+ content = (f'[Question]\n{ques["text"]}\n\n'
+ f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
+ f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
+ f'[System]\n{prompt}\n\n')
+ js_list.append({
+ 'id': idx+1,
+ 'question_id': ques['question_id'],
+ 'answer1_id': ans1['answer_id'],
+ 'answer2_id': ans2['answer_id'],
+ 'category': category})
+ idx += 1
+ handles.append(get_eval.remote(content, args.max_tokens))
+ # To avoid the rate limit set by OpenAI
+ time.sleep(NUM_SECONDS_TO_SLEEP)
+
+ reviews = ray.get(handles)
+ for idx, review in enumerate(reviews):
+ scores = parse_score(review)
+ js_list[idx]['content'] = review
+ js_list[idx]['tuple'] = scores
+ review_file.write(json.dumps(js_list[idx]) + '\n')
+ review_file.close()
diff --git a/mmte/models/llava/eval/eval_gpt_review_bench.py b/mmte/models/llava/eval/eval_gpt_review_bench.py
new file mode 100644
index 0000000..06160f2
--- /dev/null
+++ b/mmte/models/llava/eval/eval_gpt_review_bench.py
@@ -0,0 +1,121 @@
+import argparse
+import json
+import os
+
+import openai
+import time
+
+NUM_SECONDS_TO_SLEEP = 0.5
+
+
+def get_eval(content: str, max_tokens: int):
+ while True:
+ try:
+ response = openai.ChatCompletion.create(
+ model='gpt-4-0314',
+ messages=[{
+ 'role': 'system',
+ 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
+ }, {
+ 'role': 'user',
+ 'content': content,
+ }],
+ temperature=0.2, # TODO: figure out which temperature is best for evaluation
+ max_tokens=max_tokens,
+ )
+ break
+ except openai.error.RateLimitError:
+ pass
+ except Exception as e:
+ print(e)
+ time.sleep(NUM_SECONDS_TO_SLEEP)
+
+ return response['choices'][0]['message']['content']
+
+
+def parse_score(review):
+ try:
+ score_pair = review.split('\n')[0]
+ score_pair = score_pair.replace(',', ' ')
+ sp = score_pair.split(' ')
+ if len(sp) == 2:
+ return [float(sp[0]), float(sp[1])]
+ else:
+ print('error', review)
+ return [-1, -1]
+ except Exception as e:
+ print(e)
+ print('error', review)
+ return [-1, -1]
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
+ parser.add_argument('-q', '--question')
+ parser.add_argument('-c', '--context')
+ parser.add_argument('-a', '--answer-list', nargs='+', default=[])
+ parser.add_argument('-r', '--rule')
+ parser.add_argument('-o', '--output')
+ parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
+ args = parser.parse_args()
+
+ f_q = open(os.path.expanduser(args.question))
+ f_ans1 = open(os.path.expanduser(args.answer_list[0]))
+ f_ans2 = open(os.path.expanduser(args.answer_list[1]))
+ rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
+
+ if os.path.isfile(os.path.expanduser(args.output)):
+ cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
+ else:
+ cur_reviews = []
+
+ review_file = open(f'{args.output}', 'a')
+
+ context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
+ image_to_context = {context['image']: context for context in context_list}
+
+ handles = []
+ idx = 0
+ for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
+ ques = json.loads(ques_js)
+ ans1 = json.loads(ans1_js)
+ ans2 = json.loads(ans2_js)
+
+ inst = image_to_context[ques['image']]
+
+ if isinstance(inst['caption'], list):
+ cap_str = '\n'.join(inst['caption'])
+ else:
+ cap_str = inst['caption']
+
+ category = 'llava_bench_' + json.loads(ques_js)['category']
+ if category in rule_dict:
+ rule = rule_dict[category]
+ else:
+ assert False, f"Visual QA category not found in rule file: {category}."
+ prompt = rule['prompt']
+ role = rule['role']
+ content = (f'[Context]\n{cap_str}\n\n'
+ f'[Question]\n{ques["text"]}\n\n'
+ f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
+ f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
+ f'[System]\n{prompt}\n\n')
+ cur_js = {
+ 'id': idx+1,
+ 'question_id': ques['question_id'],
+ 'answer1_id': ans1.get('answer_id', ans1['question_id']),
+ 'answer2_id': ans2.get('answer_id', ans2['answer_id']),
+ 'category': category
+ }
+ if idx >= len(cur_reviews):
+ review = get_eval(content, args.max_tokens)
+ scores = parse_score(review)
+ cur_js['content'] = review
+ cur_js['tuple'] = scores
+ review_file.write(json.dumps(cur_js) + '\n')
+ review_file.flush()
+ else:
+ print(f'Skipping {idx} as we already have it.')
+ idx += 1
+ print(idx)
+ review_file.close()
diff --git a/mmte/models/llava/eval/eval_gpt_review_visual.py b/mmte/models/llava/eval/eval_gpt_review_visual.py
new file mode 100644
index 0000000..d6e407a
--- /dev/null
+++ b/mmte/models/llava/eval/eval_gpt_review_visual.py
@@ -0,0 +1,118 @@
+import argparse
+import json
+import os
+
+import openai
+import time
+
+NUM_SECONDS_TO_SLEEP = 0.5
+
+
+def get_eval(content: str, max_tokens: int):
+ while True:
+ try:
+ response = openai.ChatCompletion.create(
+ model='gpt-4-0314',
+ messages=[{
+ 'role': 'system',
+ 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
+ }, {
+ 'role': 'user',
+ 'content': content,
+ }],
+ temperature=0.2, # TODO: figure out which temperature is best for evaluation
+ max_tokens=max_tokens,
+ )
+ break
+ except openai.error.RateLimitError:
+ pass
+ except Exception as e:
+ print(e)
+ time.sleep(NUM_SECONDS_TO_SLEEP)
+
+ return response['choices'][0]['message']['content']
+
+
+def parse_score(review):
+ try:
+ score_pair = review.split('\n')[0]
+ score_pair = score_pair.replace(',', ' ')
+ sp = score_pair.split(' ')
+ if len(sp) == 2:
+ return [float(sp[0]), float(sp[1])]
+ else:
+ print('error', review)
+ return [-1, -1]
+ except Exception as e:
+ print(e)
+ print('error', review)
+ return [-1, -1]
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
+ parser.add_argument('-q', '--question')
+ parser.add_argument('-c', '--context')
+ parser.add_argument('-a', '--answer-list', nargs='+', default=[])
+ parser.add_argument('-r', '--rule')
+ parser.add_argument('-o', '--output')
+ parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
+ args = parser.parse_args()
+
+ f_q = open(os.path.expanduser(args.question))
+ f_ans1 = open(os.path.expanduser(args.answer_list[0]))
+ f_ans2 = open(os.path.expanduser(args.answer_list[1]))
+ rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
+
+ if os.path.isfile(os.path.expanduser(args.output)):
+ cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
+ else:
+ cur_reviews = []
+
+ review_file = open(f'{args.output}', 'a')
+
+ context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
+ image_to_context = {context['image']: context for context in context_list}
+
+ handles = []
+ idx = 0
+ for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
+ ques = json.loads(ques_js)
+ ans1 = json.loads(ans1_js)
+ ans2 = json.loads(ans2_js)
+
+ inst = image_to_context[ques['image']]
+ cap_str = '\n'.join(inst['captions'])
+ box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']])
+
+ category = json.loads(ques_js)['category']
+ if category in rule_dict:
+ rule = rule_dict[category]
+ else:
+ assert False, f"Visual QA category not found in rule file: {category}."
+ prompt = rule['prompt']
+ role = rule['role']
+ content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n'
+ f'[Question]\n{ques["text"]}\n\n'
+ f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
+ f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
+ f'[System]\n{prompt}\n\n')
+ cur_js = {
+ 'id': idx+1,
+ 'question_id': ques['question_id'],
+ 'answer1_id': ans1.get('answer_id', ans1['question_id']),
+ 'answer2_id': ans2.get('answer_id', ans2['answer_id']),
+ 'category': category
+ }
+ if idx >= len(cur_reviews):
+ review = get_eval(content, args.max_tokens)
+ scores = parse_score(review)
+ cur_js['content'] = review
+ cur_js['tuple'] = scores
+ review_file.write(json.dumps(cur_js) + '\n')
+ review_file.flush()
+ else:
+ print(f'Skipping {idx} as we already have it.')
+ idx += 1
+ print(idx)
+ review_file.close()
diff --git a/mmte/models/llava/eval/eval_pope.py b/mmte/models/llava/eval/eval_pope.py
new file mode 100644
index 0000000..b115b8f
--- /dev/null
+++ b/mmte/models/llava/eval/eval_pope.py
@@ -0,0 +1,81 @@
+import os
+import json
+import argparse
+
+def eval_pope(answers, label_file):
+ label_list = [json.loads(q)['label'] for q in open(label_file, 'r')]
+
+ for answer in answers:
+ text = answer['text']
+
+ # Only keep the first sentence
+ if text.find('.') != -1:
+ text = text.split('.')[0]
+
+ text = text.replace(',', '')
+ words = text.split(' ')
+ if 'No' in words or 'not' in words or 'no' in words:
+ answer['text'] = 'no'
+ else:
+ answer['text'] = 'yes'
+
+ for i in range(len(label_list)):
+ if label_list[i] == 'no':
+ label_list[i] = 0
+ else:
+ label_list[i] = 1
+
+ pred_list = []
+ for answer in answers:
+ if answer['text'] == 'no':
+ pred_list.append(0)
+ else:
+ pred_list.append(1)
+
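+ # POPE is a binary yes/no task: treat "yes" as the positive class and accumulate a confusion matrix.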
+ pos = 1
+ neg = 0
+ yes_ratio = pred_list.count(1) / len(pred_list)
+
+ TP, TN, FP, FN = 0, 0, 0, 0
+ for pred, label in zip(pred_list, label_list):
+ if pred == pos and label == pos:
+ TP += 1
+ elif pred == pos and label == neg:
+ FP += 1
+ elif pred == neg and label == neg:
+ TN += 1
+ elif pred == neg and label == pos:
+ FN += 1
+
+ print('TP\tFP\tTN\tFN\t')
+ print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN))
+
+ precision = float(TP) / float(TP + FP)
+ recall = float(TP) / float(TP + FN)
+ f1 = 2*precision*recall / (precision + recall)
+ acc = (TP + TN) / (TP + TN + FP + FN)
+ print('Accuracy: {}'.format(acc))
+ print('Precision: {}'.format(precision))
+ print('Recall: {}'.format(recall))
+ print('F1 score: {}'.format(f1))
+ print('Yes ratio: {}'.format(yes_ratio))
+ print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio) )
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--annotation-dir", type=str)
+ parser.add_argument("--question-file", type=str)
+ parser.add_argument("--result-file", type=str)
+ args = parser.parse_args()
+
+ questions = [json.loads(line) for line in open(args.question_file)]
+ questions = {question['question_id']: question for question in questions}
+ answers = [json.loads(q) for q in open(args.result_file)]
+ for file in os.listdir(args.annotation_dir):
+ assert file.startswith('coco_pope_')
+ assert file.endswith('.json')
+ category = file[10:-5]
+ cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category]
+ print('Category: {}, # samples: {}'.format(category, len(cur_answers)))
+ eval_pope(cur_answers, os.path.join(args.annotation_dir, file))
+ print("====================================")
diff --git a/mmte/models/llava/eval/eval_science_qa.py b/mmte/models/llava/eval/eval_science_qa.py
new file mode 100644
index 0000000..ccf206b
--- /dev/null
+++ b/mmte/models/llava/eval/eval_science_qa.py
@@ -0,0 +1,114 @@
+import argparse
+import json
+import os
+import re
+import random
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--base-dir', type=str)
+ parser.add_argument('--result-file', type=str)
+ parser.add_argument('--output-file', type=str)
+ parser.add_argument('--output-result', type=str)
+ parser.add_argument('--split', type=str, default='test')
+ parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
+ return parser.parse_args()
+
+
+def convert_caps(results):
+ fakecaps = []
+ for result in results:
+ image_id = result['question_id']
+ caption = result['text']
+ fakecaps.append({"image_id": int(image_id), "caption": caption})
+ return fakecaps
+
+
+def get_pred_idx(prediction, choices, options):
+ """
+ Get the index (e.g. 2) from the prediction (e.g. 'C')
+ """
+ if prediction in options[:len(choices)]:
+ return options.index(prediction)
+ else:
+ return -1
+
+
+if __name__ == "__main__":
+ args = get_args()
+
+ base_dir = args.base_dir
+ split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
+ problems = json.load(open(os.path.join(base_dir, "problems.json")))
+ predictions = [json.loads(line) for line in open(args.result_file)]
+ predictions = {pred['question_id']: pred for pred in predictions}
+ split_problems = {idx: problems[idx] for idx in split_indices}
+
+ results = {'correct': [], 'incorrect': []}
+ sqa_results = {}
+ sqa_results['acc'] = None
+ sqa_results['correct'] = None
+ sqa_results['count'] = None
+ sqa_results['results'] = {}
+ sqa_results['outputs'] = {}
+
+ for prob_id, prob in split_problems.items():
+ if prob_id not in predictions:
+ pred = {'text': 'FAILED', 'prompt': 'Unknown'}
+ pred_text = 'FAILED'
+ else:
+ pred = predictions[prob_id]
+ pred_text = pred['text']
+
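+ # Parse the predicted letter: an exact option, an "X. ..." prefix, or a "The answer is X." sentence; otherwise FAILED.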
+ if pred_text in args.options:
+ answer = pred_text
+ elif len(pred_text) >= 3 and pred_text[0] in args.options and pred_text[1:3] == ". ":
+ answer = pred_text[0]
+ else:
+ pattern = re.compile(r'The answer is ([A-Z]).')
+ res = pattern.findall(pred_text)
+ if len(res) == 1:
+ answer = res[0] # 'A', 'B', ...
+ else:
+ answer = "FAILED"
+
+ pred_idx = get_pred_idx(answer, prob['choices'], args.options)
+
+ analysis = {
+ 'question_id': prob_id,
+ 'parsed_ans': answer,
+ 'ground_truth': args.options[prob['answer']],
+ 'question': pred['prompt'],
+ 'pred': pred_text,
+ 'is_multimodal': '<image>' in pred['prompt'],
+ }
+
+ sqa_results['results'][prob_id] = get_pred_idx(answer, prob['choices'], args.options)
+ sqa_results['outputs'][prob_id] = pred_text
+
+ if pred_idx == prob['answer']:
+ results['correct'].append(analysis)
+ else:
+ results['incorrect'].append(analysis)
+
+ correct = len(results['correct'])
+ total = len(results['correct']) + len(results['incorrect'])
+
+ ###### IMG ######
+ multimodal_correct = len([x for x in results['correct'] if x['is_multimodal']])
+ multimodal_incorrect = len([x for x in results['incorrect'] if x['is_multimodal']])
+ multimodal_total = multimodal_correct + multimodal_incorrect
+ ###### IMG ######
+
+ print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%, IMG-Accuracy: {multimodal_correct / multimodal_total * 100:.2f}%')
+
+ sqa_results['acc'] = correct / total * 100
+ sqa_results['correct'] = correct
+ sqa_results['count'] = total
+
+ with open(args.output_file, 'w') as f:
+ json.dump(results, f, indent=2)
+ with open(args.output_result, 'w') as f:
+ json.dump(sqa_results, f, indent=2)
diff --git a/mmte/models/llava/eval/eval_science_qa_gpt4.py b/mmte/models/llava/eval/eval_science_qa_gpt4.py
new file mode 100644
index 0000000..c2ff17c
--- /dev/null
+++ b/mmte/models/llava/eval/eval_science_qa_gpt4.py
@@ -0,0 +1,104 @@
+import argparse
+import json
+import os
+import re
+import random
+from collections import defaultdict
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--base-dir', type=str)
+ parser.add_argument('--gpt4-result', type=str)
+ parser.add_argument('--our-result', type=str)
+ parser.add_argument('--split', type=str, default='test')
+ parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
+ return parser.parse_args()
+
+
+def convert_caps(results):
+ fakecaps = []
+ for result in results:
+ image_id = result['question_id']
+ caption = result['text']
+ fakecaps.append({"image_id": int(image_id), "caption": caption})
+ return fakecaps
+
+
+def get_pred_idx(prediction, choices, options):
+ """
+ Get the index (e.g. 2) from the prediction (e.g. 'C')
+ """
+ if prediction in options[:len(choices)]:
+ return options.index(prediction)
+ else:
+ return random.choice(range(len(choices)))
+
+
+if __name__ == "__main__":
+ args = get_args()
+
+ base_dir = args.base_dir
+ split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
+ problems = json.load(open(os.path.join(base_dir, "problems.json")))
+ our_predictions = [json.loads(line) for line in open(args.our_result)]
+ our_predictions = {pred['question_id']: pred for pred in our_predictions}
+ split_problems = {idx: problems[idx] for idx in split_indices}
+
+ gpt4_predictions = json.load(open(args.gpt4_result))['outputs']
+
+ results = defaultdict(lambda: 0)
+
+ for prob_id, prob in split_problems.items():
+ if prob_id not in our_predictions:
+ continue
+ if prob_id not in gpt4_predictions:
+ continue
+ our_pred = our_predictions[prob_id]['text']
+ gpt4_pred = gpt4_predictions[prob_id]
+
+ pattern = re.compile(r'The answer is ([A-Z]).')
+ our_res = pattern.findall(our_pred)
+ if len(our_res) == 1:
+ our_answer = our_res[0] # 'A', 'B', ...
+ else:
+ our_answer = "FAILED"
+ gpt4_res = pattern.findall(gpt4_pred)
+ if len(gpt4_res) == 1:
+ gpt4_answer = gpt4_res[0] # 'A', 'B', ...
+ else:
+ gpt4_answer = "FAILED"
+
+ our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
+ gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)
+
+ if gpt4_answer == 'FAILED':
+ results['gpt4_failed'] += 1
+ # continue
+ gpt4_pred_idx = our_pred_idx
+ # if our_pred_idx != prob['answer']:
+ # print(our_predictions[prob_id]['prompt'])
+ # print('-----------------')
+ # print(f'LECTURE: {prob["lecture"]}')
+ # print(f'SOLUTION: {prob["solution"]}')
+ # print('=====================')
+ else:
+ # continue
+ pass
+ # gpt4_pred_idx = our_pred_idx
+
+ if gpt4_pred_idx == prob['answer']:
+ results['correct'] += 1
+ else:
+ results['incorrect'] += 1
+
+
+ if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
+ results['correct_upperbound'] += 1
+
+ correct = results['correct']
+ total = results['correct'] + results['incorrect']
+ print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%')
+ print(f'Total: {total}, Correct (upper): {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
+ print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
+
diff --git a/mmte/models/llava/eval/eval_science_qa_gpt4_requery.py b/mmte/models/llava/eval/eval_science_qa_gpt4_requery.py
new file mode 100644
index 0000000..698546e
--- /dev/null
+++ b/mmte/models/llava/eval/eval_science_qa_gpt4_requery.py
@@ -0,0 +1,149 @@
+import argparse
+import json
+import os
+import re
+import random
+from collections import defaultdict
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--base-dir', type=str)
+ parser.add_argument('--gpt4-result', type=str)
+ parser.add_argument('--requery-result', type=str)
+ parser.add_argument('--our-result', type=str)
+ parser.add_argument('--output-result', type=str)
+ parser.add_argument('--split', type=str, default='test')
+ parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
+ return parser.parse_args()
+
+
+def convert_caps(results):
+ fakecaps = []
+ for result in results:
+ image_id = result['question_id']
+ caption = result['text']
+ fakecaps.append({"image_id": int(image_id), "caption": caption})
+ return fakecaps
+
+
+def get_pred_idx(prediction, choices, options):
+ """
+ Get the index (e.g. 2) from the prediction (e.g. 'C')
+ """
+ if prediction in options[:len(choices)]:
+ return options.index(prediction)
+ else:
+ return random.choice(range(len(choices)))
+
+
+if __name__ == "__main__":
+ args = get_args()
+
+ base_dir = args.base_dir
+ split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
+ problems = json.load(open(os.path.join(base_dir, "problems.json")))
+ our_predictions = [json.loads(line) for line in open(args.our_result)]
+ our_predictions = {pred['question_id']: pred for pred in our_predictions}
+ split_problems = {idx: problems[idx] for idx in split_indices}
+
+ requery_predictions = [json.loads(line) for line in open(args.requery_result)]
+ requery_predictions = {pred['question_id']: pred for pred in requery_predictions}
+
+ gpt4_predictions = json.load(open(args.gpt4_result))['outputs']
+
+ results = defaultdict(lambda: 0)
+
+ sqa_results = {}
+ sqa_results['acc'] = None
+ sqa_results['correct'] = None
+ sqa_results['count'] = None
+ sqa_results['results'] = {}
+ sqa_results['outputs'] = {}
+
+ for prob_id, prob in split_problems.items():
+ if prob_id not in our_predictions:
+ assert False
+ if prob_id not in gpt4_predictions:
+ assert False
+ our_pred = our_predictions[prob_id]['text']
+ gpt4_pred = gpt4_predictions[prob_id]
+ if prob_id not in requery_predictions:
+ results['missing_requery'] += 1
+ requery_pred = "MISSING"
+ else:
+ requery_pred = requery_predictions[prob_id]['text']
+
+ pattern = re.compile(r'The answer is ([A-Z]).')
+ our_res = pattern.findall(our_pred)
+ if len(our_res) == 1:
+ our_answer = our_res[0] # 'A', 'B', ...
+ else:
+ our_answer = "FAILED"
+
+ requery_res = pattern.findall(requery_pred)
+ if len(requery_res) == 1:
+ requery_answer = requery_res[0] # 'A', 'B', ...
+ else:
+ requery_answer = "FAILED"
+
+ gpt4_res = pattern.findall(gpt4_pred)
+ if len(gpt4_res) == 1:
+ gpt4_answer = gpt4_res[0] # 'A', 'B', ...
+ else:
+ gpt4_answer = "FAILED"
+
+ our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
+ gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)
+ requery_pred_idx = get_pred_idx(requery_answer, prob['choices'], args.options)
+
+ results['total'] += 1
+
+ if gpt4_answer == 'FAILED':
+ results['gpt4_failed'] += 1
+ if gpt4_pred_idx == prob['answer']:
+ results['gpt4_correct'] += 1
+ if our_pred_idx == prob['answer']:
+ results['gpt4_ourvisual_correct'] += 1
+ elif gpt4_pred_idx == prob['answer']:
+ results['gpt4_correct'] += 1
+ results['gpt4_ourvisual_correct'] += 1
+
+ if our_pred_idx == prob['answer']:
+ results['our_correct'] += 1
+
+ if requery_answer == 'FAILED':
+ sqa_results['results'][prob_id] = our_pred_idx
+ if our_pred_idx == prob['answer']:
+ results['requery_correct'] += 1
+ else:
+ sqa_results['results'][prob_id] = requery_pred_idx
+ if requery_pred_idx == prob['answer']:
+ results['requery_correct'] += 1
+ else:
+ print(f"""
+Question ({args.options[prob['answer']]}): {our_predictions[prob_id]['prompt']}
+Our ({our_answer}): {our_pred}
+GPT-4 ({gpt4_answer}): {gpt4_pred}
+Requery ({requery_answer}): {requery_pred}
+=====================================
+""")
+
+ if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
+ results['correct_upperbound'] += 1
+
+ total = results['total']
+ print(f'Total: {total}, Our-Correct: {results["our_correct"]}, Accuracy: {results["our_correct"] / total * 100:.2f}%')
+ print(f'Total: {total}, GPT-4-Correct: {results["gpt4_correct"]}, Accuracy: {results["gpt4_correct"] / total * 100:.2f}%')
+ print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
+ print(f'Total: {total}, GPT-4-OursVisual-Correct: {results["gpt4_ourvisual_correct"]}, Accuracy: {results["gpt4_ourvisual_correct"] / total * 100:.2f}%')
+ print(f'Total: {total}, Requery-Correct: {results["requery_correct"]}, Accuracy: {results["requery_correct"] / total * 100:.2f}%')
+ print(f'Total: {total}, Correct upper: {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
+
+ sqa_results['acc'] = results["requery_correct"] / total * 100
+ sqa_results['correct'] = results["requery_correct"]
+ sqa_results['count'] = total
+
+ with open(args.output_result, 'w') as f:
+ json.dump(sqa_results, f, indent=2)
+
diff --git a/mmte/models/llava/eval/eval_textvqa.py b/mmte/models/llava/eval/eval_textvqa.py
new file mode 100644
index 0000000..72d8d52
--- /dev/null
+++ b/mmte/models/llava/eval/eval_textvqa.py
@@ -0,0 +1,65 @@
+import os
+import argparse
+import json
+import re
+
+from ..eval.m4c_evaluator import TextVQAAccuracyEvaluator
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--annotation-file', type=str)
+ parser.add_argument('--result-file', type=str)
+ parser.add_argument('--result-dir', type=str)
+ return parser.parse_args()
+
+
+def prompt_processor(prompt):
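+ # Recover the original question from the different prompt templates used for TextVQA inference.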
+ if prompt.startswith('OCR tokens: '):
+ pattern = r"Question: (.*?) Short answer:"
+ match = re.search(pattern, prompt, re.DOTALL)
+ question = match.group(1)
+ elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3:
+ if prompt.startswith('Reference OCR token:'):
+ question = prompt.split('\n')[1]
+ else:
+ question = prompt.split('\n')[0]
+ elif len(prompt.split('\n')) == 2:
+ question = prompt.split('\n')[0]
+ else:
+ assert False
+
+ return question.lower()
+
+
+def eval_single(annotation_file, result_file):
+ experiment_name = os.path.splitext(os.path.basename(result_file))[0]
+ print(experiment_name)
+ annotations = json.load(open(annotation_file))['data']
+ annotations = {(annotation['image_id'], annotation['question'].lower()): annotation for annotation in annotations}
+ results = [json.loads(line) for line in open(result_file)]
+
+ pred_list = []
+ for result in results:
+ annotation = annotations[(result['question_id'], prompt_processor(result['prompt']))]
+ pred_list.append({
+ "pred_answer": result['text'],
+ "gt_answers": annotation['answers'],
+ })
+
+ evaluator = TextVQAAccuracyEvaluator()
+ print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), 100. * evaluator.eval_pred_list(pred_list)))
+
+
+if __name__ == "__main__":
+ args = get_args()
+
+ if args.result_file is not None:
+ eval_single(args.annotation_file, args.result_file)
+
+ if args.result_dir is not None:
+ for result_file in sorted(os.listdir(args.result_dir)):
+ if not result_file.endswith('.jsonl'):
+ print(f'Skipping {result_file}')
+ continue
+ eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))
diff --git a/mmte/models/llava/eval/generate_webpage_data_from_table.py b/mmte/models/llava/eval/generate_webpage_data_from_table.py
new file mode 100644
index 0000000..9260225
--- /dev/null
+++ b/mmte/models/llava/eval/generate_webpage_data_from_table.py
@@ -0,0 +1,111 @@
+"""Generate json file for webpage."""
+import json
+import os
+import re
+
+# models = ['llama', 'alpaca', 'gpt35', 'bard']
+models = ['vicuna']
+
+
+def read_jsonl(path: str, key: str=None):
+ data = []
+ with open(os.path.expanduser(path)) as f:
+ for line in f:
+ if not line:
+ continue
+ data.append(json.loads(line))
+ if key is not None:
+ data.sort(key=lambda x: x[key])
+ data = {item[key]: item for item in data}
+ return data
+
+
+def trim_hanging_lines(s: str, n: int) -> str:
+ s = s.strip()
+ for _ in range(n):
+ s = s.split('\n', 1)[1].strip()
+ return s
+
+
+if __name__ == '__main__':
+ questions = read_jsonl('table/question.jsonl', key='question_id')
+
+ # alpaca_answers = read_jsonl('table/answer/answer_alpaca-13b.jsonl', key='question_id')
+ # bard_answers = read_jsonl('table/answer/answer_bard.jsonl', key='question_id')
+ # gpt35_answers = read_jsonl('table/answer/answer_gpt35.jsonl', key='question_id')
+ # llama_answers = read_jsonl('table/answer/answer_llama-13b.jsonl', key='question_id')
+ vicuna_answers = read_jsonl('table/answer/answer_vicuna-13b.jsonl', key='question_id')
+ ours_answers = read_jsonl('table/results/llama-13b-hf-alpaca.jsonl', key='question_id')
+
+ review_vicuna = read_jsonl('table/review/review_vicuna-13b_llama-13b-hf-alpaca.jsonl', key='question_id')
+ # review_alpaca = read_jsonl('table/review/review_alpaca-13b_vicuna-13b.jsonl', key='question_id')
+ # review_bard = read_jsonl('table/review/review_bard_vicuna-13b.jsonl', key='question_id')
+ # review_gpt35 = read_jsonl('table/review/review_gpt35_vicuna-13b.jsonl', key='question_id')
+ # review_llama = read_jsonl('table/review/review_llama-13b_vicuna-13b.jsonl', key='question_id')
+
+ records = []
+ for qid in questions.keys():
+ r = {
+ 'id': qid,
+ 'category': questions[qid]['category'],
+ 'question': questions[qid]['text'],
+ 'answers': {
+ # 'alpaca': alpaca_answers[qid]['text'],
+ # 'llama': llama_answers[qid]['text'],
+ # 'bard': bard_answers[qid]['text'],
+ # 'gpt35': gpt35_answers[qid]['text'],
+ 'vicuna': vicuna_answers[qid]['text'],
+ 'ours': ours_answers[qid]['text'],
+ },
+ 'evaluations': {
+ # 'alpaca': review_alpaca[qid]['text'],
+ # 'llama': review_llama[qid]['text'],
+ # 'bard': review_bard[qid]['text'],
+ 'vicuna': review_vicuna[qid]['content'],
+ # 'gpt35': review_gpt35[qid]['text'],
+ },
+ 'scores': {
+ 'vicuna': review_vicuna[qid]['tuple'],
+ # 'alpaca': review_alpaca[qid]['score'],
+ # 'llama': review_llama[qid]['score'],
+ # 'bard': review_bard[qid]['score'],
+ # 'gpt35': review_gpt35[qid]['score'],
+ },
+ }
+
+ # cleanup data
+ cleaned_evals = {}
+ for k, v in r['evaluations'].items():
+ v = v.strip()
+ lines = v.split('\n')
+ # trim the first line if it's a pair of numbers
+ if re.match(r'\d+[, ]+\d+', lines[0]):
+ lines = lines[1:]
+ v = '\n'.join(lines)
+ cleaned_evals[k] = v.replace('Assistant 1', "**Assistant 1**").replace('Assistant 2', '**Assistant 2**')
+
+ r['evaluations'] = cleaned_evals
+ records.append(r)
+
+ # Reorder the records, this is optional
+ for r in records:
+ if r['id'] <= 20:
+ r['id'] += 60
+ else:
+ r['id'] -= 20
+ for r in records:
+ if r['id'] <= 50:
+ r['id'] += 10
+ elif 50 < r['id'] <= 60:
+ r['id'] -= 50
+ for r in records:
+ if r['id'] == 7:
+ r['id'] = 1
+ elif r['id'] < 7:
+ r['id'] += 1
+
+ records.sort(key=lambda x: x['id'])
+
+ # Write to file
+ with open('webpage/data.json', 'w') as f:
+ json.dump({'questions': records, 'models': models}, f, indent=2)
diff --git a/mmte/models/llava/eval/m4c_evaluator.py b/mmte/models/llava/eval/m4c_evaluator.py
new file mode 100644
index 0000000..e30e958
--- /dev/null
+++ b/mmte/models/llava/eval/m4c_evaluator.py
@@ -0,0 +1,334 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import re
+
+from tqdm import tqdm
+
+
+class EvalAIAnswerProcessor:
+ """
+ Processes an answer similar to Eval AI
+ copied from
+ https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897
+ """
+
+ CONTRACTIONS = {
+ "aint": "ain't",
+ "arent": "aren't",
+ "cant": "can't",
+ "couldve": "could've",
+ "couldnt": "couldn't",
+ "couldn'tve": "couldn't've",
+ "couldnt've": "couldn't've",
+ "didnt": "didn't",
+ "doesnt": "doesn't",
+ "dont": "don't",
+ "hadnt": "hadn't",
+ "hadnt've": "hadn't've",
+ "hadn'tve": "hadn't've",
+ "hasnt": "hasn't",
+ "havent": "haven't",
+ "hed": "he'd",
+ "hed've": "he'd've",
+ "he'dve": "he'd've",
+ "hes": "he's",
+ "howd": "how'd",
+ "howll": "how'll",
+ "hows": "how's",
+ "Id've": "I'd've",
+ "I'dve": "I'd've",
+ "Im": "I'm",
+ "Ive": "I've",
+ "isnt": "isn't",
+ "itd": "it'd",
+ "itd've": "it'd've",
+ "it'dve": "it'd've",
+ "itll": "it'll",
+ "let's": "let's",
+ "maam": "ma'am",
+ "mightnt": "mightn't",
+ "mightnt've": "mightn't've",
+ "mightn'tve": "mightn't've",
+ "mightve": "might've",
+ "mustnt": "mustn't",
+ "mustve": "must've",
+ "neednt": "needn't",
+ "notve": "not've",
+ "oclock": "o'clock",
+ "oughtnt": "oughtn't",
+ "ow's'at": "'ow's'at",
+ "'ows'at": "'ow's'at",
+ "'ow'sat": "'ow's'at",
+ "shant": "shan't",
+ "shed've": "she'd've",
+ "she'dve": "she'd've",
+ "she's": "she's",
+ "shouldve": "should've",
+ "shouldnt": "shouldn't",
+ "shouldnt've": "shouldn't've",
+ "shouldn'tve": "shouldn't've",
+ "somebody'd": "somebodyd",
+ "somebodyd've": "somebody'd've",
+ "somebody'dve": "somebody'd've",
+ "somebodyll": "somebody'll",
+ "somebodys": "somebody's",
+ "someoned": "someone'd",
+ "someoned've": "someone'd've",
+ "someone'dve": "someone'd've",
+ "someonell": "someone'll",
+ "someones": "someone's",
+ "somethingd": "something'd",
+ "somethingd've": "something'd've",
+ "something'dve": "something'd've",
+ "somethingll": "something'll",
+ "thats": "that's",
+ "thered": "there'd",
+ "thered've": "there'd've",
+ "there'dve": "there'd've",
+ "therere": "there're",
+ "theres": "there's",
+ "theyd": "they'd",
+ "theyd've": "they'd've",
+ "they'dve": "they'd've",
+ "theyll": "they'll",
+ "theyre": "they're",
+ "theyve": "they've",
+ "twas": "'twas",
+ "wasnt": "wasn't",
+ "wed've": "we'd've",
+ "we'dve": "we'd've",
+ "weve": "we've",
+ "werent": "weren't",
+ "whatll": "what'll",
+ "whatre": "what're",
+ "whats": "what's",
+ "whatve": "what've",
+ "whens": "when's",
+ "whered": "where'd",
+ "wheres": "where's",
+ "whereve": "where've",
+ "whod": "who'd",
+ "whod've": "who'd've",
+ "who'dve": "who'd've",
+ "wholl": "who'll",
+ "whos": "who's",
+ "whove": "who've",
+ "whyll": "why'll",
+ "whyre": "why're",
+ "whys": "why's",
+ "wont": "won't",
+ "wouldve": "would've",
+ "wouldnt": "wouldn't",
+ "wouldnt've": "wouldn't've",
+ "wouldn'tve": "wouldn't've",
+ "yall": "y'all",
+ "yall'll": "y'all'll",
+ "y'allll": "y'all'll",
+ "yall'd've": "y'all'd've",
+ "y'alld've": "y'all'd've",
+ "y'all'dve": "y'all'd've",
+ "youd": "you'd",
+ "youd've": "you'd've",
+ "you'dve": "you'd've",
+ "youll": "you'll",
+ "youre": "you're",
+ "youve": "you've",
+ }
+
+ NUMBER_MAP = {
+ "none": "0",
+ "zero": "0",
+ "one": "1",
+ "two": "2",
+ "three": "3",
+ "four": "4",
+ "five": "5",
+ "six": "6",
+ "seven": "7",
+ "eight": "8",
+ "nine": "9",
+ "ten": "10",
+ }
+ ARTICLES = ["a", "an", "the"]
+ PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)")
+ COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)")
+ PUNCTUATIONS = [
+ ";",
+ r"/",
+ "[",
+ "]",
+ '"',
+ "{",
+ "}",
+ "(",
+ ")",
+ "=",
+ "+",
+ "\\",
+ "_",
+ "-",
+ ">",
+ "<",
+ "@",
+ "`",
+ ",",
+ "?",
+ "!",
+ ]
+
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def word_tokenize(self, word):
+ word = word.lower()
+ word = word.replace(",", "").replace("?", "").replace("'s", " 's")
+ return word.strip()
+
+ def process_punctuation(self, in_text):
+ out_text = in_text
+ for p in self.PUNCTUATIONS:
+ if (p + " " in in_text or " " + p in in_text) or (
+ re.search(self.COMMA_STRIP, in_text) is not None
+ ):
+ out_text = out_text.replace(p, "")
+ else:
+ out_text = out_text.replace(p, " ")
+ out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE)
+ return out_text
+
+ def process_digit_article(self, in_text):
+ out_text = []
+ temp_text = in_text.lower().split()
+ for word in temp_text:
+ word = self.NUMBER_MAP.setdefault(word, word)
+ if word not in self.ARTICLES:
+ out_text.append(word)
+ else:
+ pass
+ for word_id, word in enumerate(out_text):
+ if word in self.CONTRACTIONS:
+ out_text[word_id] = self.CONTRACTIONS[word]
+ out_text = " ".join(out_text)
+ return out_text
+
+ def __call__(self, item):
+ item = self.word_tokenize(item)
+ item = item.replace("\n", " ").replace("\t", " ").strip()
+ item = self.process_punctuation(item)
+ item = self.process_digit_article(item)
+ return item
+
+
+class TextVQAAccuracyEvaluator:
+ def __init__(self):
+ self.answer_processor = EvalAIAnswerProcessor()
+
+ def _compute_answer_scores(self, raw_answers):
+ """
+ compute the accuracy (soft score) of human answers
+ """
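+ # Standard VQA soft accuracy: a candidate answer scores min(#other annotators who gave it / 3, 1),
+ # averaged over the ten leave-one-out splits of the human answers.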
+ answers = [self.answer_processor(a) for a in raw_answers]
+ assert len(answers) == 10
+ gt_answers = list(enumerate(answers))
+ unique_answers = set(answers)
+ unique_answer_scores = {}
+
+ for unique_answer in unique_answers:
+ accs = []
+ for gt_answer in gt_answers:
+ other_answers = [item for item in gt_answers if item != gt_answer]
+ matching_answers = [
+ item for item in other_answers if item[1] == unique_answer
+ ]
+ acc = min(1, float(len(matching_answers)) / 3)
+ accs.append(acc)
+ unique_answer_scores[unique_answer] = sum(accs) / len(accs)
+
+ return unique_answer_scores
+
+ def eval_pred_list(self, pred_list):
+ pred_scores = []
+ for entry in tqdm(pred_list):
+ pred_answer = self.answer_processor(entry["pred_answer"])
+ unique_answer_scores = self._compute_answer_scores(entry["gt_answers"])
+ score = unique_answer_scores.get(pred_answer, 0.0)
+ pred_scores.append(score)
+
+ accuracy = sum(pred_scores) / len(pred_scores)
+ return accuracy
+
+
+class STVQAAccuracyEvaluator:
+ def __init__(self):
+ self.answer_processor = EvalAIAnswerProcessor()
+
+ def eval_pred_list(self, pred_list):
+ pred_scores = []
+ for entry in pred_list:
+ pred_answer = self.answer_processor(entry["pred_answer"])
+ gts = [self.answer_processor(a) for a in entry["gt_answers"]]
+ score = 1.0 if pred_answer in gts else 0.0
+ pred_scores.append(score)
+
+ accuracy = sum(pred_scores) / len(pred_scores)
+ return accuracy
+
+
+class STVQAANLSEvaluator:
+ def __init__(self):
+ import editdistance # install with `pip install editdistance`
+
+ self.get_edit_distance = editdistance.eval
+
+ def get_anls(self, s1, s2):
+ s1 = s1.lower().strip()
+ s2 = s2.lower().strip()
+ iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2))
+ anls = iou if iou >= 0.5 else 0.0
+ return anls
+
+ def eval_pred_list(self, pred_list):
+ pred_scores = []
+ for entry in pred_list:
+ anls = max(
+ self.get_anls(entry["pred_answer"], gt) for gt in entry["gt_answers"]
+ )
+ pred_scores.append(anls)
+
+ accuracy = sum(pred_scores) / len(pred_scores)
+ return accuracy
+
+
+class TextCapsBleu4Evaluator:
+ def __init__(self):
+ # The following script requires Java 1.8.0 and pycocotools installed.
+ # The pycocoevalcap can be installed with pip as
+ # pip install git+https://github.com/ronghanghu/coco-caption.git@python23
+ # Original pycocoevalcap code is at https://github.com/tylin/coco-caption
+ # but has no python3 support yet.
+ try:
+ from pycocoevalcap.bleu.bleu import Bleu
+ from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
+ except ModuleNotFoundError:
+ print(
+ "Please install pycocoevalcap module using "
+ "pip install git+https://github.com/ronghanghu/coco-caption.git@python23" # noqa
+ )
+ raise
+
+ self.tokenizer = PTBTokenizer()
+ self.scorer = Bleu(4)
+
+ def eval_pred_list(self, pred_list):
+ # Create reference and hypotheses captions.
+ gts = {}
+ res = {}
+ for idx, entry in enumerate(pred_list):
+ gts[idx] = [{"caption": a} for a in entry["gt_answers"]]
+ res[idx] = [{"caption": entry["pred_answer"]}]
+
+ gts = self.tokenizer.tokenize(gts)
+ res = self.tokenizer.tokenize(res)
+ score, _ = self.scorer.compute_score(gts, res)
+
+ bleu4 = score[3] # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4)
+ return bleu4
diff --git a/mmte/models/llava/eval/model_qa.py b/mmte/models/llava/eval/model_qa.py
new file mode 100644
index 0000000..e2575fd
--- /dev/null
+++ b/mmte/models/llava/eval/model_qa.py
@@ -0,0 +1,64 @@
+import argparse
+from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria
+import torch
+import os
+import json
+from tqdm import tqdm
+import shortuuid
+
+from ..conversation import default_conversation
+from ..utils import disable_torch_init
+
+
+@torch.inference_mode()
+def eval_model(model_name, questions_file, answers_file):
+ # Model
+ disable_torch_init()
+ model_name = os.path.expanduser(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
+ model = AutoModelForCausalLM.from_pretrained(model_name,
+ torch_dtype=torch.float16).cuda()
+
+
+ ques_file = open(os.path.expanduser(questions_file), "r")
+ ans_file = open(os.path.expanduser(answers_file), "w")
+ for i, line in enumerate(tqdm(ques_file)):
+ idx = json.loads(line)["question_id"]
+ qs = json.loads(line)["text"]
+ cat = json.loads(line)["category"]
+ conv = default_conversation.copy()
+ conv.append_message(conv.roles[0], qs)
+ prompt = conv.get_prompt()
+ inputs = tokenizer([prompt])
+ input_ids = torch.as_tensor(inputs.input_ids).cuda()
+ output_ids = model.generate(
+ input_ids,
+ do_sample=True,
+ use_cache=True,
+ temperature=0.7,
+ max_new_tokens=1024,)
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
+ try:
+ index = outputs.index(conv.sep, len(prompt))
+ except ValueError:
+ outputs += conv.sep
+ index = outputs.index(conv.sep, len(prompt))
+
+ outputs = outputs[len(prompt) + len(conv.roles[1]) + 2:index].strip()
+ ans_id = shortuuid.uuid()
+ ans_file.write(json.dumps({"question_id": idx,
+ "text": outputs,
+ "answer_id": ans_id,
+ "model_id": model_name,
+ "metadata": {}}) + "\n")
+ ans_file.flush()
+ ans_file.close()
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
+ parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
+ parser.add_argument("--answers-file", type=str, default="answer.jsonl")
+ args = parser.parse_args()
+
+ eval_model(args.model_name, args.question_file, args.answers_file)
diff --git a/mmte/models/llava/eval/model_vqa.py b/mmte/models/llava/eval/model_vqa.py
new file mode 100644
index 0000000..6f35f06
--- /dev/null
+++ b/mmte/models/llava/eval/model_vqa.py
@@ -0,0 +1,101 @@
+import argparse
+import torch
+import os
+import json
+from tqdm import tqdm
+import shortuuid
+
+from ..constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from ..conversation import conv_templates, SeparatorStyle
+from ..model.builder import load_pretrained_model
+from ..utils import disable_torch_init
+from ..mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
+
+from PIL import Image
+import math
+
+
+def split_list(lst, n):
+ """Split a list into n (roughly) equal-sized chunks"""
+ chunk_size = math.ceil(len(lst) / n)  # ceiling division so every item lands in a chunk
+ return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
+
+
+def get_chunk(lst, n, k):
+ chunks = split_list(lst, n)
+ return chunks[k]
+
+
+def eval_model(args):
+ # Model
+ disable_torch_init()
+ model_path = os.path.expanduser(args.model_path)
+ model_name = get_model_name_from_path(model_path)
+ tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
+
+ questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
+ questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
+ answers_file = os.path.expanduser(args.answers_file)
+ os.makedirs(os.path.dirname(answers_file), exist_ok=True)
+ ans_file = open(answers_file, "w")
+ for line in tqdm(questions):
+ idx = line["question_id"]
+ image_file = line["image"]
+ qs = line["text"]
+ cur_prompt = qs
+ if model.config.mm_use_im_start_end:
+ qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
+ else:
+ qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
+
+ conv = conv_templates[args.conv_mode].copy()
+ conv.append_message(conv.roles[0], qs)
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
+
+ image = Image.open(os.path.join(args.image_folder, image_file)).convert('RGB')
+ image_tensor = process_images([image], image_processor, model.config)[0]
+
+ with torch.inference_mode():
+ output_ids = model.generate(
+ input_ids,
+ images=image_tensor.unsqueeze(0).half().cuda(),
+ image_sizes=[image.size],
+ do_sample=True if args.temperature > 0 else False,
+ temperature=args.temperature,
+ top_p=args.top_p,
+ num_beams=args.num_beams,
+ # no_repeat_ngram_size=3,
+ max_new_tokens=1024,
+ use_cache=True)
+
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+
+ ans_id = shortuuid.uuid()
+ ans_file.write(json.dumps({"question_id": idx,
+ "prompt": cur_prompt,
+ "text": outputs,
+ "answer_id": ans_id,
+ "model_id": model_name,
+ "metadata": {}}) + "\n")
+ ans_file.flush()
+ ans_file.close()
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
+ parser.add_argument("--model-base", type=str, default=None)
+ parser.add_argument("--image-folder", type=str, default="")
+ parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
+ parser.add_argument("--answers-file", type=str, default="answer.jsonl")
+ parser.add_argument("--conv-mode", type=str, default="llava_v1")
+ parser.add_argument("--num-chunks", type=int, default=1)
+ parser.add_argument("--chunk-idx", type=int, default=0)
+ parser.add_argument("--temperature", type=float, default=0.2)
+ parser.add_argument("--top_p", type=float, default=None)
+ parser.add_argument("--num_beams", type=int, default=1)
+ args = parser.parse_args()
+
+ eval_model(args)
diff --git a/mmte/models/llava/eval/model_vqa_loader.py b/mmte/models/llava/eval/model_vqa_loader.py
new file mode 100644
index 0000000..e57b118
--- /dev/null
+++ b/mmte/models/llava/eval/model_vqa_loader.py
@@ -0,0 +1,144 @@
+import argparse
+import torch
+import os
+import json
+from tqdm import tqdm
+import shortuuid
+
+from ..constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from ..conversation import conv_templates, SeparatorStyle
+from ..model.builder import load_pretrained_model
+from ..utils import disable_torch_init
+from ..mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
+from torch.utils.data import Dataset, DataLoader
+
+from PIL import Image
+import math
+
+
+def split_list(lst, n):
+ """Split a list into n (roughly) equal-sized chunks"""
+ chunk_size = math.ceil(len(lst) / n)  # ceiling division so every item lands in a chunk
+ return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
+
+
+def get_chunk(lst, n, k):
+ chunks = split_list(lst, n)
+ return chunks[k]
+
+
+# Custom dataset class
+class CustomDataset(Dataset):
+ def __init__(self, questions, image_folder, tokenizer, image_processor, model_config):
+ self.questions = questions
+ self.image_folder = image_folder
+ self.tokenizer = tokenizer
+ self.image_processor = image_processor
+ self.model_config = model_config
+
+ def __getitem__(self, index):
+ line = self.questions[index]
+ image_file = line["image"]
+ qs = line["text"]
+ if self.model_config.mm_use_im_start_end:
+ qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
+ else:
+ qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
+
+ conv = conv_templates[args.conv_mode].copy()
+ conv.append_message(conv.roles[0], qs)
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+
+ image = Image.open(os.path.join(self.image_folder, image_file)).convert('RGB')
+ image_tensor = process_images([image], self.image_processor, self.model_config)[0]
+
+ input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
+
+ return input_ids, image_tensor, image.size
+
+ def __len__(self):
+ return len(self.questions)
+
+
+def collate_fn(batch):
+ input_ids, image_tensors, image_sizes = zip(*batch)
+ input_ids = torch.stack(input_ids, dim=0)
+ image_tensors = torch.stack(image_tensors, dim=0)
+ return input_ids, image_tensors, image_sizes
+
+
+# DataLoader
+def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=4):
+ assert batch_size == 1, "batch_size must be 1"
+ dataset = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config)
+ data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False, collate_fn=collate_fn)
+ return data_loader
+
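+# Note: collate_fn stacks input_ids and image tensors directly, which assumes every
+# sample in a batch has identical shapes; with variable-length prompts this is why
+# create_data_loader asserts batch_size == 1.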
+
+def eval_model(args):
+ # Model
+ disable_torch_init()
+ model_path = os.path.expanduser(args.model_path)
+ model_name = get_model_name_from_path(model_path)
+ tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
+
+ questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
+ questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
+ answers_file = os.path.expanduser(args.answers_file)
+ os.makedirs(os.path.dirname(answers_file), exist_ok=True)
+ ans_file = open(answers_file, "w")
+
+ if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
+ args.conv_mode = args.conv_mode + '_mmtag'
+        print(f'It seems that this is a plain model, but it is not using an mmtag prompt, auto switching to {args.conv_mode}.')
+
+ data_loader = create_data_loader(questions, args.image_folder, tokenizer, image_processor, model.config)
+
+ for (input_ids, image_tensor, image_sizes), line in tqdm(zip(data_loader, questions), total=len(questions)):
+ idx = line["question_id"]
+ cur_prompt = line["text"]
+
+ input_ids = input_ids.to(device='cuda', non_blocking=True)
+
+ with torch.inference_mode():
+ output_ids = model.generate(
+ input_ids,
+ images=image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True),
+ image_sizes=image_sizes,
+ do_sample=True if args.temperature > 0 else False,
+ temperature=args.temperature,
+ top_p=args.top_p,
+ num_beams=args.num_beams,
+ max_new_tokens=args.max_new_tokens,
+ use_cache=True)
+
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+
+ ans_id = shortuuid.uuid()
+ ans_file.write(json.dumps({"question_id": idx,
+ "prompt": cur_prompt,
+ "text": outputs,
+ "answer_id": ans_id,
+ "model_id": model_name,
+ "metadata": {}}) + "\n")
+ # ans_file.flush()
+ ans_file.close()
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
+ parser.add_argument("--model-base", type=str, default=None)
+ parser.add_argument("--image-folder", type=str, default="")
+ parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
+ parser.add_argument("--answers-file", type=str, default="answer.jsonl")
+ parser.add_argument("--conv-mode", type=str, default="llava_v1")
+ parser.add_argument("--num-chunks", type=int, default=1)
+ parser.add_argument("--chunk-idx", type=int, default=0)
+ parser.add_argument("--temperature", type=float, default=0.2)
+ parser.add_argument("--top_p", type=float, default=None)
+ parser.add_argument("--num_beams", type=int, default=1)
+ parser.add_argument("--max_new_tokens", type=int, default=128)
+ args = parser.parse_args()
+
+ eval_model(args)
diff --git a/mmte/models/llava/eval/model_vqa_mmbench.py b/mmte/models/llava/eval/model_vqa_mmbench.py
new file mode 100644
index 0000000..ae11007
--- /dev/null
+++ b/mmte/models/llava/eval/model_vqa_mmbench.py
@@ -0,0 +1,160 @@
+import argparse
+import torch
+import os
+import json
+import pandas as pd
+from tqdm import tqdm
+import shortuuid
+
+from ..constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from ..conversation import conv_templates, SeparatorStyle
+from ..model.builder import load_pretrained_model
+from ..utils import disable_torch_init
+from ..mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path
+
+from PIL import Image
+import math
+
+
+all_options = ['A', 'B', 'C', 'D']
+
+
+def split_list(lst, n):
+ """Split a list into n (roughly) equal-sized chunks"""
+    chunk_size = math.ceil(len(lst) / n)  # ceiling division
+ return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
+
+
+def get_chunk(lst, n, k):
+ chunks = split_list(lst, n)
+ return chunks[k]
+
+
+def is_none(value):
+ if value is None:
+ return True
+ if type(value) is float and math.isnan(value):
+ return True
+ if type(value) is str and value.lower() == 'nan':
+ return True
+ if type(value) is str and value.lower() == 'none':
+ return True
+ return False
+
+def get_options(row, options):
+ parsed_options = []
+ for option in options:
+ option_value = row[option]
+ if is_none(option_value):
+ break
+ parsed_options.append(option_value)
+ return parsed_options
+
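+# Example (illustrative): a row with A='cat', B='dog', C=NaN, D=NaN yields
+# ['cat', 'dog'], so only the first two choices are used for that question.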
+
+def eval_model(args):
+ # Model
+ disable_torch_init()
+ model_path = os.path.expanduser(args.model_path)
+ model_name = get_model_name_from_path(model_path)
+ tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
+
+ questions = pd.read_table(os.path.expanduser(args.question_file))
+ questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
+ answers_file = os.path.expanduser(args.answers_file)
+ os.makedirs(os.path.dirname(answers_file), exist_ok=True)
+ ans_file = open(answers_file, "w")
+
+ if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
+ args.conv_mode = args.conv_mode + '_mmtag'
+        print(f'It seems that this is a plain model, but it is not using an mmtag prompt, auto switching to {args.conv_mode}.')
+
+ for index, row in tqdm(questions.iterrows(), total=len(questions)):
+ options = get_options(row, all_options)
+ cur_option_char = all_options[:len(options)]
+
+ if args.all_rounds:
+ num_rounds = len(options)
+ else:
+ num_rounds = 1
+
+ for round_idx in range(num_rounds):
+ idx = row['index']
+ question = row['question']
+ hint = row['hint']
+ image = load_image_from_base64(row['image'])
+ if not is_none(hint):
+ question = hint + '\n' + question
+ for option_char, option in zip(all_options[:len(options)], options):
+ question = question + '\n' + option_char + '. ' + option
+ qs = cur_prompt = question
+ if model.config.mm_use_im_start_end:
+ qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
+ else:
+ qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
+
+ if args.single_pred_prompt:
+ if args.lang == 'cn':
+ qs = qs + '\n' + "请直接回答选项字母。"
+ else:
+ qs = qs + '\n' + "Answer with the option's letter from the given choices directly."
+
+ conv = conv_templates[args.conv_mode].copy()
+ conv.append_message(conv.roles[0], qs)
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
+
+ image_tensor = process_images([image], image_processor, model.config)[0]
+
+ with torch.inference_mode():
+ output_ids = model.generate(
+ input_ids,
+ images=image_tensor.unsqueeze(0).half().cuda(),
+ image_sizes=[image.size],
+ do_sample=True if args.temperature > 0 else False,
+ temperature=args.temperature,
+ top_p=args.top_p,
+ num_beams=args.num_beams,
+ # no_repeat_ngram_size=3,
+ max_new_tokens=1024,
+ use_cache=True)
+
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+
+ ans_id = shortuuid.uuid()
+ ans_file.write(json.dumps({"question_id": idx,
+ "round_id": round_idx,
+ "prompt": cur_prompt,
+ "text": outputs,
+ "options": options,
+ "option_char": cur_option_char,
+ "answer_id": ans_id,
+ "model_id": model_name,
+ "metadata": {}}) + "\n")
+ ans_file.flush()
+
+ # rotate options
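+            # e.g. ['cat', 'dog', 'bird'] -> ['dog', 'bird', 'cat']; with --all-rounds this
+            # presents each choice under every letter across rounds, which is intended to
+            # reduce position bias.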
+ options = options[1:] + options[:1]
+ cur_option_char = cur_option_char[1:] + cur_option_char[:1]
+ ans_file.close()
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
+ parser.add_argument("--model-base", type=str, default=None)
+ parser.add_argument("--image-folder", type=str, default="")
+ parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
+ parser.add_argument("--answers-file", type=str, default="answer.jsonl")
+ parser.add_argument("--conv-mode", type=str, default="llava_v1")
+ parser.add_argument("--num-chunks", type=int, default=1)
+ parser.add_argument("--chunk-idx", type=int, default=0)
+ parser.add_argument("--temperature", type=float, default=0.2)
+ parser.add_argument("--top_p", type=float, default=None)
+ parser.add_argument("--num_beams", type=int, default=1)
+ parser.add_argument("--all-rounds", action="store_true")
+ parser.add_argument("--single-pred-prompt", action="store_true")
+ parser.add_argument("--lang", type=str, default="en")
+ args = parser.parse_args()
+
+ eval_model(args)
diff --git a/mmte/models/llava/eval/model_vqa_science.py b/mmte/models/llava/eval/model_vqa_science.py
new file mode 100644
index 0000000..27a2b30
--- /dev/null
+++ b/mmte/models/llava/eval/model_vqa_science.py
@@ -0,0 +1,111 @@
+import argparse
+import torch
+import os
+import json
+from tqdm import tqdm
+import shortuuid
+
+from ..constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from ..conversation import conv_templates, SeparatorStyle
+from ..model.builder import load_pretrained_model
+from ..utils import disable_torch_init
+from ..mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
+
+from PIL import Image
+import math
+
+
+def split_list(lst, n):
+ """Split a list into n (roughly) equal-sized chunks"""
+    chunk_size = math.ceil(len(lst) / n)  # ceiling division
+ return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
+
+
+def get_chunk(lst, n, k):
+ chunks = split_list(lst, n)
+ return chunks[k]
+
+
+def eval_model(args):
+ # Model
+ disable_torch_init()
+ model_path = os.path.expanduser(args.model_path)
+ model_name = get_model_name_from_path(model_path)
+ tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
+
+ questions = json.load(open(os.path.expanduser(args.question_file), "r"))
+ questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
+ answers_file = os.path.expanduser(args.answers_file)
+ os.makedirs(os.path.dirname(answers_file), exist_ok=True)
+ ans_file = open(answers_file, "w")
+ for i, line in enumerate(tqdm(questions)):
+ idx = line["id"]
+ question = line['conversations'][0]
+        qs = question['value'].replace('<image>', '').strip()
+ cur_prompt = qs
+
+ if 'image' in line:
+ image_file = line["image"]
+ image = Image.open(os.path.join(args.image_folder, image_file))
+ image_tensor = process_images([image], image_processor, model.config)[0]
+ images = image_tensor.unsqueeze(0).half().cuda()
+ image_sizes = [image.size]
+ if getattr(model.config, 'mm_use_im_start_end', False):
+ qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
+ else:
+ qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
+            cur_prompt = '<image>' + '\n' + cur_prompt
+ else:
+ images = None
+ image_sizes = None
+
+ if args.single_pred_prompt:
+ qs = qs + '\n' + "Answer with the option's letter from the given choices directly."
+ cur_prompt = cur_prompt + '\n' + "Answer with the option's letter from the given choices directly."
+
+ conv = conv_templates[args.conv_mode].copy()
+ conv.append_message(conv.roles[0], qs)
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
+
+ with torch.inference_mode():
+ output_ids = model.generate(
+ input_ids,
+ images=images,
+ image_sizes=image_sizes,
+ do_sample=True if args.temperature > 0 else False,
+ temperature=args.temperature,
+ max_new_tokens=1024,
+ use_cache=True,
+ )
+
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+
+ ans_id = shortuuid.uuid()
+ ans_file.write(json.dumps({"question_id": idx,
+ "prompt": cur_prompt,
+ "text": outputs,
+ "answer_id": ans_id,
+ "model_id": model_name,
+ "metadata": {}}) + "\n")
+ ans_file.flush()
+ ans_file.close()
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
+ parser.add_argument("--model-base", type=str, default=None)
+ parser.add_argument("--image-folder", type=str, default="")
+ parser.add_argument("--question-file", type=str, default="tables/question.json")
+ parser.add_argument("--answers-file", type=str, default="answer.jsonl")
+ parser.add_argument("--conv-mode", type=str, default="llava_v0")
+ parser.add_argument("--num-chunks", type=int, default=1)
+ parser.add_argument("--chunk-idx", type=int, default=0)
+ parser.add_argument("--temperature", type=float, default=0.2)
+ parser.add_argument("--answer-prompter", action="store_true")
+ parser.add_argument("--single-pred-prompt", action="store_true")
+ args = parser.parse_args()
+
+ eval_model(args)
diff --git a/mmte/models/llava/eval/qa_baseline_gpt35.py b/mmte/models/llava/eval/qa_baseline_gpt35.py
new file mode 100644
index 0000000..babab6e
--- /dev/null
+++ b/mmte/models/llava/eval/qa_baseline_gpt35.py
@@ -0,0 +1,74 @@
+"""Generate answers with GPT-3.5"""
+# Note: you need to be using OpenAI Python v0.27.0 for the code below to work
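+# Example invocation (illustrative paths):
+#   python -m mmte.models.llava.eval.qa_baseline_gpt35 -q question.jsonl -o answers_gpt35.jsonl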
+import argparse
+import json
+import os
+import time
+import concurrent.futures
+
+import openai
+import tqdm
+import shortuuid
+
+MODEL = 'gpt-3.5-turbo'
+MODEL_ID = 'gpt-3.5-turbo:20230327'
+
+def get_answer(question_id: int, question: str, max_tokens: int):
+ ans = {
+ 'answer_id': shortuuid.uuid(),
+ 'question_id': question_id,
+ 'model_id': MODEL_ID,
+ }
+ for _ in range(3):
+ try:
+ response = openai.ChatCompletion.create(
+ model=MODEL,
+ messages=[{
+ 'role': 'system',
+ 'content': 'You are a helpful assistant.'
+ }, {
+ 'role': 'user',
+ 'content': question,
+ }],
+ max_tokens=max_tokens,
+ )
+ ans['text'] = response['choices'][0]['message']['content']
+ return ans
+ except Exception as e:
+ print('[ERROR]', e)
+ ans['text'] = '#ERROR#'
+ time.sleep(1)
+ return ans
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='ChatGPT answer generation.')
+ parser.add_argument('-q', '--question')
+ parser.add_argument('-o', '--output')
+ parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
+ args = parser.parse_args()
+
+ questions_dict = {}
+ with open(os.path.expanduser(args.question)) as f:
+ for line in f:
+ if not line:
+ continue
+ q = json.loads(line)
+ questions_dict[q['question_id']] = q['text']
+
+ answers = []
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
+ futures = []
+ for qid, question in questions_dict.items():
+ future = executor.submit(get_answer, qid, question, args.max_tokens)
+ futures.append(future)
+
+ for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
+ answers.append(future.result())
+
+ answers.sort(key=lambda x: x['question_id'])
+
+ with open(os.path.expanduser(args.output), 'w') as f:
+ table = [json.dumps(ans) for ans in answers]
+ f.write('\n'.join(table))
diff --git a/mmte/models/llava/eval/run_llava.py b/mmte/models/llava/eval/run_llava.py
index 5086479..9f442c4 100644
--- a/mmte/models/llava/eval/run_llava.py
+++ b/mmte/models/llava/eval/run_llava.py
@@ -15,9 +15,10 @@
process_images,
tokenizer_image_token,
get_model_name_from_path,
- KeywordsStoppingCriteria,
)
+from PIL import Image
+
import requests
from PIL import Image
from io import BytesIO
@@ -49,24 +50,11 @@ def load_images(image_files):
def eval_model(args):
# Model
disable_torch_init()
- print(args)
- # model_name = get_model_name_from_path(args.model_path)
- model_name = args.model_name
+ model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(
args.model_path, args.model_base, model_name
)
- print(f"Loaded model {model_name}")
- print(tokenizer, model, image_processor)
-
- if image_processor is None:
- _, _, image_processor, _ = load_pretrained_model(
- model_path="../model_weights/llava-v1.5-7b",
- model_base=None,
- model_name=model_name
- )
- print(model_name)
- print(f"Loaded image processor {image_processor}")
qs = args.query
image_token_se = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
@@ -83,6 +71,10 @@ def eval_model(args):
if "llama-2" in model_name.lower():
conv_mode = "llava_llama_2"
+ elif "mistral" in model_name.lower():
+ conv_mode = "mistral_instruct"
+ elif "v1.6-34b" in model_name.lower():
+ conv_mode = "chatml_direct"
elif "v1" in model_name.lower():
conv_mode = "llava_v1"
elif "mpt" in model_name.lower():
@@ -106,6 +98,7 @@ def eval_model(args):
image_files = image_parser(args)
images = load_images(image_files)
+ image_sizes = [x.size for x in images]
images_tensor = process_images(
images,
image_processor,
@@ -118,36 +111,20 @@ def eval_model(args):
.cuda()
)
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
- keywords = [stop_str]
- stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
-
with torch.inference_mode():
output_ids = model.generate(
input_ids,
images=images_tensor,
+ image_sizes=image_sizes,
do_sample=True if args.temperature > 0 else False,
temperature=args.temperature,
top_p=args.top_p,
num_beams=args.num_beams,
max_new_tokens=args.max_new_tokens,
use_cache=True,
- stopping_criteria=[stopping_criteria],
)
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(
- f"[Warning] {n_diff_input_output} output_ids are not the same as the input_ids"
- )
- outputs = tokenizer.batch_decode(
- output_ids[:, input_token_len:], skip_special_tokens=True
- )[0]
- outputs = outputs.strip()
- if outputs.endswith(stop_str):
- outputs = outputs[: -len(stop_str)]
- outputs = outputs.strip()
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
print(outputs)
def chat_model(tokenizer, model, image_processor, args):
@@ -192,66 +169,40 @@ def chat_model(tokenizer, model, image_processor, args):
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
-
if args.image_file is not None:
image_files = image_parser(args)
images = load_images(image_files)
+ image_sizes = [x.size for x in images]
images_tensor = process_images(
images,
image_processor,
model.config
- ).to(model.device, dtype=args.dtype)
+ ).to(model.device, dtype=torch.float16)
else:
images_tensor = None
+ image_sizes = None
input_ids = (
tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
- .unsqueeze(0).to(model.device)
- # .cuda()
+ .unsqueeze(0)
+ .cuda()
)
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
- # keywords = [stop_str]
- # stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
-
- # with torch.inference_mode():
- # output_ids = model.generate(
- # input_ids,
- # images=images_tensor,
- # do_sample=True if args.temperature > 0 else False,
- # temperature=args.temperature,
- # top_p=args.top_p,
- # num_beams=args.num_beams,
- # max_new_tokens=args.max_new_tokens,
- # use_cache=True,
- # # stopping_criteria=[stopping_criteria],
- # )
with torch.inference_mode():
output_ids = model.generate(
-            input_ids=input_ids,  # fix: pass as a keyword argument rather than positionally
+ inputs=input_ids,
images=images_tensor,
+ image_sizes=image_sizes,
do_sample=True if args.temperature > 0 else False,
temperature=args.temperature,
top_p=args.top_p,
num_beams=args.num_beams,
max_new_tokens=args.max_new_tokens,
- use_cache=True
- # stopping_criteria=[stopping_criteria],
+ use_cache=True,
)
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(
- f"[Warning] {n_diff_input_output} output_ids are not the same as the input_ids"
- )
- outputs = tokenizer.batch_decode(
- output_ids[:, input_token_len:], skip_special_tokens=True
- )[0]
- outputs = outputs.strip()
- if outputs.endswith(stop_str):
- outputs = outputs[: -len(stop_str)]
- outputs = outputs.strip()
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+
return outputs
diff --git a/mmte/models/llava/eval/summarize_gpt_review.py b/mmte/models/llava/eval/summarize_gpt_review.py
new file mode 100644
index 0000000..0f796a3
--- /dev/null
+++ b/mmte/models/llava/eval/summarize_gpt_review.py
@@ -0,0 +1,60 @@
+import json
+import os
+from collections import defaultdict
+
+import numpy as np
+
+import argparse
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
+ parser.add_argument('-d', '--dir', default=None)
+ parser.add_argument('-v', '--version', default=None)
+ parser.add_argument('-s', '--select', nargs='*', default=None)
+ parser.add_argument('-f', '--files', nargs='*', default=[])
+ parser.add_argument('-i', '--ignore', nargs='*', default=[])
+ return parser.parse_args()
+
+
+if __name__ == '__main__':
+ args = parse_args()
+
+ if args.ignore is not None:
+ args.ignore = [int(x) for x in args.ignore]
+
+ if len(args.files) > 0:
+ review_files = args.files
+ else:
+ review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_') or 'review' in args.dir)]
+
+ for review_file in sorted(review_files):
+ config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '')
+ if args.select is not None and any(x not in config for x in args.select):
+ continue
+ if '0613' in config:
+ version = '0613'
+ else:
+ version = '0314'
+ if args.version is not None and args.version != version:
+ continue
+ scores = defaultdict(list)
+ print(config)
+ with open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f:
+ for review_str in f:
+ review = json.loads(review_str)
+ if review['question_id'] in args.ignore:
+ continue
+ if 'category' in review:
+ scores[review['category']].append(review['tuple'])
+ scores['all'].append(review['tuple'])
+ else:
+ if 'tuple' in review:
+ scores['all'].append(review['tuple'])
+ else:
+ scores['all'].append(review['score'])
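+        # Each review 'tuple' is assumed to hold [reference score, model score]; the
+        # lines below print the relative score (model / reference * 100) followed by
+        # both mean scores scaled by 10.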
+ for k, v in sorted(scores.items()):
+ stats = np.asarray(v).mean(0).tolist()
+ stats = [round(x, 3) for x in stats]
+ # print(k, stats, round(stats[1]/stats[0]*100, 1))
+ print(k, round(stats[1]/stats[0]*100, 1), round(stats[0] * 10, 1), round(stats[1] * 10, 1))
+ print('=================================')
diff --git a/mmte/models/llava/mm_utils.py b/mmte/models/llava/mm_utils.py
index d62462e..7e3ccef 100644
--- a/mmte/models/llava/mm_utils.py
+++ b/mmte/models/llava/mm_utils.py
@@ -1,12 +1,150 @@
from PIL import Image
from io import BytesIO
import base64
-
import torch
+import math
+import ast
+
from transformers import StoppingCriteria
from mmte.models.llava.constants import IMAGE_TOKEN_INDEX
+def select_best_resolution(original_size, possible_resolutions):
+ """
+ Selects the best resolution from a list of possible resolutions based on the original size.
+
+ Args:
+ original_size (tuple): The original size of the image in the format (width, height).
+ possible_resolutions (list): A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
+
+ Returns:
+ tuple: The best fit resolution in the format (width, height).
+ """
+ original_width, original_height = original_size
+ best_fit = None
+ max_effective_resolution = 0
+ min_wasted_resolution = float('inf')
+
+ for width, height in possible_resolutions:
+ scale = min(width / original_width, height / original_height)
+ downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
+ effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
+ wasted_resolution = (width * height) - effective_resolution
+
+ if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
+ max_effective_resolution = effective_resolution
+ min_wasted_resolution = wasted_resolution
+ best_fit = (width, height)
+
+ return best_fit
+
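+# Worked example (hypothetical numbers): for a 1000x800 image and candidate
+# resolutions [(672, 672), (336, 1344), (1344, 336)], the (672, 672) target keeps
+# the most downscaled pixels (672x537) while wasting the least padding, so it is
+# returned as the best fit.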
+
+def resize_and_pad_image(image, target_resolution):
+ """
+ Resize and pad an image to a target resolution while maintaining aspect ratio.
+
+ Args:
+ image (PIL.Image.Image): The input image.
+ target_resolution (tuple): The target resolution (width, height) of the image.
+
+ Returns:
+ PIL.Image.Image: The resized and padded image.
+ """
+ original_width, original_height = image.size
+ target_width, target_height = target_resolution
+
+ scale_w = target_width / original_width
+ scale_h = target_height / original_height
+
+ if scale_w < scale_h:
+ new_width = target_width
+ new_height = min(math.ceil(original_height * scale_w), target_height)
+ else:
+ new_height = target_height
+ new_width = min(math.ceil(original_width * scale_h), target_width)
+
+ # Resize the image
+ resized_image = image.resize((new_width, new_height))
+
+ new_image = Image.new('RGB', (target_width, target_height), (0, 0, 0))
+ paste_x = (target_width - new_width) // 2
+ paste_y = (target_height - new_height) // 2
+ new_image.paste(resized_image, (paste_x, paste_y))
+
+ return new_image
+
+
+def divide_to_patches(image, patch_size):
+ """
+ Divides an image into patches of a specified size.
+
+ Args:
+ image (PIL.Image.Image): The input image.
+ patch_size (int): The size of each patch.
+
+ Returns:
+ list: A list of PIL.Image.Image objects representing the patches.
+ """
+ patches = []
+ width, height = image.size
+ for i in range(0, height, patch_size):
+ for j in range(0, width, patch_size):
+ box = (j, i, j + patch_size, i + patch_size)
+ patch = image.crop(box)
+ patches.append(patch)
+
+ return patches
+
+
+def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
+ """
+ Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
+
+ Args:
+ image_size (tuple): The size of the input image in the format (width, height).
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
+ patch_size (int): The size of each image patch.
+
+ Returns:
+ tuple: The shape of the image patch grid in the format (width, height).
+ """
+ if type(grid_pinpoints) is list:
+ possible_resolutions = grid_pinpoints
+ else:
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
+ width, height = select_best_resolution(image_size, possible_resolutions)
+ return width // patch_size, height // patch_size
+
+
+def process_anyres_image(image, processor, grid_pinpoints):
+ """
+ Process an image with variable resolutions.
+
+ Args:
+ image (PIL.Image.Image): The input image to be processed.
+ processor: The image processor object.
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
+
+ Returns:
+ torch.Tensor: A tensor containing the processed image patches.
+ """
+ if type(grid_pinpoints) is list:
+ possible_resolutions = grid_pinpoints
+ else:
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
+ best_resolution = select_best_resolution(image.size, possible_resolutions)
+ image_padded = resize_and_pad_image(image, best_resolution)
+
+ patches = divide_to_patches(image_padded, processor.crop_size['height'])
+
+ image_original_resize = image.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))
+
+ image_patches = [image_original_resize] + patches
+ image_patches = [processor.preprocess(image_patch, return_tensors='pt')['pixel_values'][0]
+ for image_patch in image_patches]
+ return torch.stack(image_patches, dim=0)
+
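+# Example (assuming a CLIP ViT-L/14-336 processor: crop_size 336, shortest_edge 336):
+# a 1000x800 image whose grid_pinpoints include (672, 672) is padded to 672x672,
+# split into four 336x336 patches, and prepended with the 336x336 global view,
+# yielding a stacked tensor of shape (5, 3, 336, 336).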
+
def load_image_from_base64(image):
return Image.open(BytesIO(base64.b64decode(image)))
@@ -33,6 +171,10 @@ def process_images(images, image_processor, model_cfg):
image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))
image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
new_images.append(image)
+ elif image_aspect_ratio == "anyres":
+ for image in images:
+ image = process_anyres_image(image, image_processor, model_cfg.image_grid_pinpoints)
+ new_images.append(image)
else:
return image_processor(images, return_tensors='pt')['pixel_values']
if all(x.shape == new_images[0].shape for x in new_images):
@@ -89,7 +231,8 @@ def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor
offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)
self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
for keyword_id in self.keyword_ids:
- if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():
+ truncated_output_ids = output_ids[0, -keyword_id.shape[0]:]
+ if torch.equal(truncated_output_ids, keyword_id):
return True
outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
for keyword in self.keywords:
diff --git a/mmte/models/llava/model/__init__.py b/mmte/models/llava/model/__init__.py
index fa79960..dbd9178 100644
--- a/mmte/models/llava/model/__init__.py
+++ b/mmte/models/llava/model/__init__.py
@@ -1,2 +1,6 @@
-from .language_model.llava_llama import LlavaLlamaForCausalLM, LlavaConfig
-from .language_model.llava_mpt import LlavaMPTForCausalLM, LlavaMPTConfig
+try:
+ from .language_model.llava_llama import LlavaLlamaForCausalLM, LlavaConfig
+ from .language_model.llava_mpt import LlavaMptForCausalLM, LlavaMptConfig
+ from .language_model.llava_mistral import LlavaMistralForCausalLM, LlavaMistralConfig
+except:
+ pass
diff --git a/mmte/models/llava/model/apply_delta.py b/mmte/models/llava/model/apply_delta.py
index 666dd96..05a8197 100644
--- a/mmte/models/llava/model/apply_delta.py
+++ b/mmte/models/llava/model/apply_delta.py
@@ -7,7 +7,7 @@
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
-from llava import LlavaLlamaForCausalLM
+from .. import LlavaLlamaForCausalLM
def apply_delta(base_model_path, target_model_path, delta_path):
diff --git a/mmte/models/llava/model/builder.py b/mmte/models/llava/model/builder.py
index 64c3f7f..2d9c8d9 100644
--- a/mmte/models/llava/model/builder.py
+++ b/mmte/models/llava/model/builder.py
@@ -23,7 +23,7 @@
from mmte.models.llava.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
-def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda", **kwargs):
+def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda", use_flash_attn=False, **kwargs):
kwargs = {"device_map": device_map, **kwargs}
if device != "cuda":
@@ -42,12 +42,16 @@ def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, l
else:
kwargs['torch_dtype'] = torch.float16
+ if use_flash_attn:
+ kwargs['attn_implementation'] = 'flash_attention_2'
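+        # 'flash_attention_2' assumes the flash-attn package is installed; newer
+        # transformers releases forward attn_implementation through from_pretrained.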
+
if 'llava' in model_name.lower():
# Load LLaVA model
if 'lora' in model_name.lower() and model_base is None:
warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')
if 'lora' in model_name.lower() and model_base is not None:
- lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
+ from ..model.language_model.llava_llama import LlavaConfig
+ lora_cfg_pretrained = LlavaConfig.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
print('Loading LLaVA from base model...')
model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
@@ -88,7 +92,7 @@ def load_from_hf(repo_id, filename, subfolder=None):
shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
- model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
+ model = LlavaMptForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
cfg_pretrained = AutoConfig.from_pretrained(model_path)
@@ -100,10 +104,21 @@ def load_from_hf(repo_id, filename, subfolder=None):
else:
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
- model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
+ model = LlavaMptForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
+ elif 'mistral' in model_name.lower():
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ model = LlavaMistralForCausalLM.from_pretrained(
+ model_path,
+ low_cpu_mem_usage=True,
+ **kwargs
+ )
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
- model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
+ model = LlavaLlamaForCausalLM.from_pretrained(
+ model_path,
+ low_cpu_mem_usage=True,
+ **kwargs
+ )
else:
# Load language model
if model_base is not None:
@@ -139,8 +154,9 @@ def load_from_hf(repo_id, filename, subfolder=None):
vision_tower = model.get_vision_tower()
if not vision_tower.is_loaded:
- vision_tower.load_model()
- vision_tower.to(device=device, dtype=torch.float16)
+ vision_tower.load_model(device_map=device_map)
+ if device_map != 'auto':
+ vision_tower.to(device=device_map, dtype=torch.float16)
image_processor = vision_tower.image_processor
if hasattr(model.config, "max_sequence_length"):
diff --git a/mmte/models/llava/model/consolidate.py b/mmte/models/llava/model/consolidate.py
index 1e32421..b7023bb 100644
--- a/mmte/models/llava/model/consolidate.py
+++ b/mmte/models/llava/model/consolidate.py
@@ -6,8 +6,8 @@
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
-from llava.model import *
-from llava.model.utils import auto_upgrade
+from ..model import *
+from ..model.utils import auto_upgrade
def consolidate_ckpt(src_path, dst_path):
diff --git a/mmte/models/llava/model/language_model/llava_llama.py b/mmte/models/llava/model/language_model/llava_llama.py
index 58ccb30..069d0d1 100644
--- a/mmte/models/llava/model/language_model/llava_llama.py
+++ b/mmte/models/llava/model/language_model/llava_llama.py
@@ -22,12 +22,13 @@
LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.generation.utils import GenerateOutput
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
class LlavaConfig(LlamaConfig):
- model_type = "llava"
+ model_type = "llava_llama"
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
@@ -65,6 +66,7 @@ def forward(
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
images: Optional[torch.FloatTensor] = None,
+ image_sizes: Optional[List[List[int]]] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
@@ -82,7 +84,8 @@ def forward(
attention_mask,
past_key_values,
labels,
- images
+ images,
+ image_sizes
)
return super().forward(
@@ -98,14 +101,58 @@ def forward(
return_dict=return_dict
)
- def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+ @torch.no_grad()
+ def generate(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ images: Optional[torch.Tensor] = None,
+ image_sizes: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> Union[GenerateOutput, torch.LongTensor]:
+ position_ids = kwargs.pop("position_ids", None)
+ attention_mask = kwargs.pop("attention_mask", None)
+ if "inputs_embeds" in kwargs:
+ raise NotImplementedError("`inputs_embeds` is not supported")
+
+ if images is not None:
+ (
+ inputs,
+ position_ids,
+ attention_mask,
+ _,
+ inputs_embeds,
+ _
+ ) = self.prepare_inputs_labels_for_multimodal(
+ inputs,
+ position_ids,
+ attention_mask,
+ None,
+ None,
+ images,
+ image_sizes=image_sizes
+ )
+ else:
+ inputs_embeds = self.get_model().embed_tokens(inputs)
+
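+        # By this point any image features have been spliced into inputs_embeds, so
+        # generation runs purely on embeddings and no token ids are forwarded.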
+ return super().generate(
+ position_ids=position_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ **kwargs
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None,
+ inputs_embeds=None, **kwargs):
images = kwargs.pop("images", None)
- _inputs = super().prepare_inputs_for_generation(
+ image_sizes = kwargs.pop("image_sizes", None)
+ inputs = super().prepare_inputs_for_generation(
input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
)
if images is not None:
- _inputs['images'] = images
- return _inputs
+ inputs['images'] = images
+ if image_sizes is not None:
+ inputs['image_sizes'] = image_sizes
+ return inputs
-AutoConfig.register("llava", LlavaConfig)
+AutoConfig.register("llava_llama", LlavaConfig)
AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM)
diff --git a/mmte/models/llava/model/language_model/llava_mistral.py b/mmte/models/llava/model/language_model/llava_mistral.py
new file mode 100644
index 0000000..0def682
--- /dev/null
+++ b/mmte/models/llava/model/language_model/llava_mistral.py
@@ -0,0 +1,158 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+
+from transformers import AutoConfig, AutoModelForCausalLM, \
+ MistralConfig, MistralModel, MistralForCausalLM
+
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.generation.utils import GenerateOutput
+
+from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+
+
+class LlavaMistralConfig(MistralConfig):
+ model_type = "llava_mistral"
+
+
+class LlavaMistralModel(LlavaMetaModel, MistralModel):
+ config_class = LlavaMistralConfig
+
+ def __init__(self, config: MistralConfig):
+ super(LlavaMistralModel, self).__init__(config)
+
+
+class LlavaMistralForCausalLM(MistralForCausalLM, LlavaMetaForCausalLM):
+ config_class = LlavaMistralConfig
+
+ def __init__(self, config):
+ super(MistralForCausalLM, self).__init__(config)
+ self.model = LlavaMistralModel(config)
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_model(self):
+ return self.model
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ images: Optional[torch.FloatTensor] = None,
+ image_sizes: Optional[List[List[int]]] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+
+ if inputs_embeds is None:
+ (
+ input_ids,
+ position_ids,
+ attention_mask,
+ past_key_values,
+ inputs_embeds,
+ labels
+ ) = self.prepare_inputs_labels_for_multimodal(
+ input_ids,
+ position_ids,
+ attention_mask,
+ past_key_values,
+ labels,
+ images,
+ image_sizes
+ )
+
+ return super().forward(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ labels=labels,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ images: Optional[torch.Tensor] = None,
+ image_sizes: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> Union[GenerateOutput, torch.LongTensor]:
+ position_ids = kwargs.pop("position_ids", None)
+ attention_mask = kwargs.pop("attention_mask", None)
+ if "inputs_embeds" in kwargs:
+ raise NotImplementedError("`inputs_embeds` is not supported")
+
+ if images is not None:
+ (
+ inputs,
+ position_ids,
+ attention_mask,
+ _,
+ inputs_embeds,
+ _
+ ) = self.prepare_inputs_labels_for_multimodal(
+ inputs,
+ position_ids,
+ attention_mask,
+ None,
+ None,
+ images,
+ image_sizes=image_sizes
+ )
+ else:
+ inputs_embeds = self.get_model().embed_tokens(inputs)
+
+ return super().generate(
+ position_ids=position_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ **kwargs
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None,
+ inputs_embeds=None, **kwargs):
+ images = kwargs.pop("images", None)
+ image_sizes = kwargs.pop("image_sizes", None)
+ inputs = super().prepare_inputs_for_generation(
+ input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
+ )
+ if images is not None:
+ inputs['images'] = images
+ if image_sizes is not None:
+ inputs['image_sizes'] = image_sizes
+ return inputs
+
+AutoConfig.register("llava_mistral", LlavaMistralConfig)
+AutoModelForCausalLM.register(LlavaMistralConfig, LlavaMistralForCausalLM)
diff --git a/mmte/models/llava/model/language_model/llava_mpt.py b/mmte/models/llava/model/language_model/llava_mpt.py
index 1b77712..7211575 100644
--- a/mmte/models/llava/model/language_model/llava_mpt.py
+++ b/mmte/models/llava/model/language_model/llava_mpt.py
@@ -13,101 +13,85 @@
# limitations under the License.
-from typing import List, Optional, Tuple
-import warnings
+from typing import Optional, Tuple
import torch
-import torch.nn.functional as F
-import math
-from transformers import AutoConfig, AutoModelForCausalLM
-from transformers.modeling_outputs import CausalLMOutputWithPast
-
-from .mpt.modeling_mpt import MPTConfig, MPTForCausalLM, MPTModel
+from transformers import AutoConfig, AutoModelForCausalLM, \
+ MptConfig, MptForCausalLM, MptModel
from mmte.models.llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
-class LlavaMPTConfig(MPTConfig):
+class LlavaMptConfig(MptConfig):
model_type = "llava_mpt"
-class LlavaMPTModel(LlavaMetaModel, MPTModel):
- config_class = LlavaMPTConfig
+class LlavaMptModel(LlavaMetaModel, MptModel):
+ config_class = LlavaMptConfig
- def __init__(self, config: MPTConfig):
+ def __init__(self, config: MptConfig):
config.hidden_size = config.d_model
- super(LlavaMPTModel, self).__init__(config)
+ super(LlavaMptModel, self).__init__(config)
def embed_tokens(self, x):
return self.wte(x)
-class LlavaMPTForCausalLM(MPTForCausalLM, LlavaMetaForCausalLM):
- config_class = LlavaMPTConfig
+class LlavaMptForCausalLM(MptForCausalLM, LlavaMetaForCausalLM):
+ config_class = LlavaMptConfig
supports_gradient_checkpointing = True
def __init__(self, config):
- super(MPTForCausalLM, self).__init__(config)
-
- if not config.tie_word_embeddings:
- raise ValueError('MPTForCausalLM only supports tied word embeddings')
- self.transformer = LlavaMPTModel(config)
- self.logit_scale = None
- if config.logit_scale is not None:
- logit_scale = config.logit_scale
- if isinstance(logit_scale, str):
- if logit_scale == 'inv_sqrt_d_model':
- logit_scale = 1 / math.sqrt(config.d_model)
- else:
- raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
- self.logit_scale = logit_scale
+ super(MptForCausalLM, self).__init__(config)
+
+ self.transformer = LlavaMptModel(config)
+ self.lm_head = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
def get_model(self):
return self.transformer
def _set_gradient_checkpointing(self, module, value=False):
- if isinstance(module, LlavaMPTModel):
+ if isinstance(module, LlavaMptModel):
module.gradient_checkpointing = value
- def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, images=None):
- return_dict = return_dict if return_dict is not None else self.config.return_dict
- use_cache = use_cache if use_cache is not None else self.config.use_cache
-
- input_ids, _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, None, attention_mask, past_key_values, labels, images)
- outputs = self.transformer(input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache)
- # FIXME: this is a hack to fix the multiple gpu inference issue in https://github.com/haotian-liu/LLaVA/issues/338
- logits = F.linear(outputs.last_hidden_state.to(self.transformer.wte.weight.device), self.transformer.wte.weight)
- if self.logit_scale is not None:
- if self.logit_scale == 0:
- warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
- logits *= self.logit_scale
- loss = None
- if labels is not None:
- labels = torch.roll(labels, shifts=-1)
- labels[:, -1] = -100
- loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1))
- return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ images=None):
+
+ input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)
+
+ return super().forward(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ labels=labels,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
- if inputs_embeds is not None:
- raise NotImplementedError('inputs_embeds is not implemented for MPT yet')
- attention_mask = kwargs['attention_mask'].bool()
- if attention_mask[:, -1].sum() != attention_mask.shape[0]:
- raise NotImplementedError('MPT does not support generation with right padding.')
- if self.transformer.attn_uses_sequence_id and self.training:
- sequence_id = torch.zeros_like(input_ids[:1])
- else:
- sequence_id = None
- if past_key_values is not None:
- input_ids = input_ids[:, -1].unsqueeze(-1)
- if self.transformer.prefix_lm:
- prefix_mask = torch.ones_like(attention_mask)
- if kwargs.get('use_cache') == False:
- raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.')
- else:
- prefix_mask = None
- return {'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True), "images": kwargs.get("images", None)}
-
-
-AutoConfig.register("llava_mpt", LlavaMPTConfig)
-AutoModelForCausalLM.register(LlavaMPTConfig, LlavaMPTForCausalLM)
+ images = kwargs.pop("images", None)
+ _inputs = super().prepare_inputs_for_generation(
+ input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
+ )
+ _inputs['images'] = images
+ return _inputs
+
+
+AutoConfig.register("llava_mpt", LlavaMptConfig)
+AutoModelForCausalLM.register(LlavaMptConfig, LlavaMptForCausalLM)
diff --git a/mmte/models/llava/model/language_model/mpt/adapt_tokenizer.py b/mmte/models/llava/model/language_model/mpt/adapt_tokenizer.py
deleted file mode 100644
index e640c15..0000000
--- a/mmte/models/llava/model/language_model/mpt/adapt_tokenizer.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from typing import Union
-from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
-Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
-NUM_SENTINEL_TOKENS: int = 100
-
-def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
- """Adds sentinel tokens and padding token (if missing).
-
- Expands the tokenizer vocabulary to include sentinel tokens
- used in mixture-of-denoiser tasks as well as a padding token.
-
- All added tokens are added as special tokens. No tokens are
- added if sentinel tokens and padding token already exist.
- """
-    sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]
-    tokenizer.add_tokens(sentinels_to_add, special_tokens=True)
-    if tokenizer.pad_token is None:
-        tokenizer.add_tokens('<pad>', special_tokens=True)
-        tokenizer.pad_token = '<pad>'
-    assert tokenizer.pad_token_id is not None
-    sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])
- _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids
- tokenizer.sentinel_token_ids = _sentinel_token_ids
-
-class AutoTokenizerForMOD(AutoTokenizer):
- """AutoTokenizer + Adaptation for MOD.
-
- A simple wrapper around AutoTokenizer to make instantiating
- an MOD-adapted tokenizer a bit easier.
-
-    MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),
- a padding token, and a property to get the token ids of the
- sentinel tokens.
- """
-
- @classmethod
- def from_pretrained(cls, *args, **kwargs):
- """See `AutoTokenizer.from_pretrained` docstring."""
- tokenizer = super().from_pretrained(*args, **kwargs)
- adapt_tokenizer_for_denoising(tokenizer)
- return tokenizer
\ No newline at end of file
diff --git a/mmte/models/llava/model/language_model/mpt/attention.py b/mmte/models/llava/model/language_model/mpt/attention.py
deleted file mode 100644
index 3810e7e..0000000
--- a/mmte/models/llava/model/language_model/mpt/attention.py
+++ /dev/null
@@ -1,300 +0,0 @@
-"""Attention layers."""
-import math
-import warnings
-from typing import Optional
-import torch
-import torch.nn as nn
-from einops import rearrange
-from packaging import version
-from torch import nn
-from .norm import LPLayerNorm
-
-def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool):
- if original_is_causal and num_query_tokens != num_key_tokens:
- if num_query_tokens != 1:
- raise NotImplementedError('MPT does not support query and key with different number of tokens, unless number of query tokens is 1.')
- else:
- return False
- return original_is_causal
-
-def scaled_multihead_dot_product_attention(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
- q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)
- kv_n_heads = 1 if multiquery else n_heads
- k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads)
- v = rearrange(value, 'b s (h d) -> b h s d', h=kv_n_heads)
- if past_key_value is not None:
- if len(past_key_value) != 0:
- k = torch.cat([past_key_value[0], k], dim=3)
- v = torch.cat([past_key_value[1], v], dim=2)
- past_key_value = (k, v)
- (b, _, s_q, d) = q.shape
- s_k = k.size(-1)
- if softmax_scale is None:
- softmax_scale = 1 / math.sqrt(d)
- attn_weight = q.matmul(k) * softmax_scale
- if attn_bias is not None:
- _s_q = max(0, attn_bias.size(2) - s_q)
- _s_k = max(0, attn_bias.size(3) - s_k)
- attn_bias = attn_bias[:, :, _s_q:, _s_k:]
- if attn_bias.size(-1) != 1 and attn_bias.size(-1) != s_k or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q):
- raise RuntimeError(f'attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.')
- attn_weight = attn_weight + attn_bias
- min_val = torch.finfo(q.dtype).min
- if key_padding_mask is not None:
- if attn_bias is not None:
-            warnings.warn('Propagating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 'unnecessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.')
- attn_weight = attn_weight.masked_fill(~key_padding_mask.view((b, 1, 1, s_k)), min_val)
- if is_causal and (not q.size(2) == 1):
- s = max(s_q, s_k)
- causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
- causal_mask = causal_mask.tril()
- causal_mask = causal_mask.to(torch.bool)
- causal_mask = ~causal_mask
- causal_mask = causal_mask[-s_q:, -s_k:]
- attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val)
- attn_weight = torch.softmax(attn_weight, dim=-1)
- if dropout_p:
- attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p, training=training, inplace=True)
- out = attn_weight.to(v.dtype).matmul(v)
- out = rearrange(out, 'b h s d -> b s (h d)')
- if needs_weights:
- return (out, attn_weight, past_key_value)
- return (out, None, past_key_value)
-
-def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
- for tensor in tensors:
- if tensor.dtype not in valid_dtypes:
- raise TypeError(f'tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}.')
- if not tensor.is_cuda:
- raise TypeError(f'Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).')
-
-def flash_attn_fn(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
- try:
- from flash_attn import bert_padding, flash_attn_interface
- except:
- raise RuntimeError('Please install flash-attn==1.0.3.post0')
- check_valid_inputs(query, key, value)
- if past_key_value is not None:
- if len(past_key_value) != 0:
- key = torch.cat([past_key_value[0], key], dim=1)
- value = torch.cat([past_key_value[1], value], dim=1)
- past_key_value = (key, value)
- if attn_bias is not None:
- _s_q = max(0, attn_bias.size(2) - query.size(1))
- _s_k = max(0, attn_bias.size(3) - key.size(1))
- attn_bias = attn_bias[:, :, _s_q:, _s_k:]
- if attn_bias is not None:
- raise NotImplementedError(f'attn_bias not implemented for flash attn.')
- (batch_size, seqlen) = query.shape[:2]
- if key_padding_mask is None:
- key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
- query_padding_mask = key_padding_mask[:, -query.size(1):]
- (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(query, query_padding_mask)
- query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
- (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(key, key_padding_mask)
- key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads)
- (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask)
- value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads)
- if multiquery:
- key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
- value_unpad = value_unpad.expand(value_unpad.size(0), n_heads, value_unpad.size(-1))
- dropout_p = dropout_p if training else 0.0
- reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
- output_unpad = flash_attn_interface.flash_attn_varlen_func(query_unpad, key_unpad, value_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights)
- output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)
- return (output, None, past_key_value)
-
-def triton_flash_attn_fn(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
- try:
- from .flash_attn_triton import flash_attn_func
- except:
- _installed = False
- if version.parse(torch.__version__) < version.parse('2.0.0'):
- _installed = True
- try:
- from flash_attn.flash_attn_triton import flash_attn_func
- except:
- _installed = False
- if not _installed:
- raise RuntimeError('Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]` if installing from llm-foundry source or `pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). Note: (1) requires you have CMake and PyTorch already installed.')
- check_valid_inputs(query, key, value)
- if past_key_value is not None:
- if len(past_key_value) != 0:
- key = torch.cat([past_key_value[0], key], dim=1)
- value = torch.cat([past_key_value[1], value], dim=1)
- past_key_value = (key, value)
- if attn_bias is not None:
- _s_q = max(0, attn_bias.size(2) - query.size(1))
- _s_k = max(0, attn_bias.size(3) - key.size(1))
- attn_bias = attn_bias[:, :, _s_q:, _s_k:]
- if dropout_p:
- raise NotImplementedError(f'Dropout not implemented for attn_impl: triton.')
- if needs_weights:
- raise NotImplementedError(f'attn_impl: triton cannot return attn weights.')
- if key_padding_mask is not None:
- warnings.warn('Propagating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 'unnecessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.')
- (b_size, s_k) = key_padding_mask.shape[:2]
- if attn_bias is None:
- attn_bias = query.new_zeros(b_size, 1, 1, s_k)
- attn_bias = attn_bias.masked_fill(~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min)
- query = rearrange(query, 'b s (h d) -> b s h d', h=n_heads)
- key = rearrange(key, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
- value = rearrange(value, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
- if multiquery:
- key = key.expand(*key.shape[:2], n_heads, key.size(-1))
- value = value.expand(*value.shape[:2], n_heads, value.size(-1))
- reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
- attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal, softmax_scale)
- output = attn_output.view(*attn_output.shape[:2], -1)
- return (output, None, past_key_value)
-
-class MultiheadAttention(nn.Module):
- """Multi-head self attention.
-
-    Using the torch or triton attention implementation enables the user to also
-    use an additive bias.
- """
-
- def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, low_precision_layernorm: bool=False, verbose: int=0, device: Optional[str]=None):
- super().__init__()
- self.attn_impl = attn_impl
- self.clip_qkv = clip_qkv
- self.qk_ln = qk_ln
- self.d_model = d_model
- self.n_heads = n_heads
- self.softmax_scale = softmax_scale
- if self.softmax_scale is None:
- self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
- self.attn_dropout_p = attn_pdrop
- self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device)
- fuse_splits = (d_model, 2 * d_model)
- self.Wqkv._fused = (0, fuse_splits)
- if self.qk_ln:
- layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
- self.q_ln = layernorm_class(self.d_model, device=device)
- self.k_ln = layernorm_class(self.d_model, device=device)
- if self.attn_impl == 'flash':
- self.attn_fn = flash_attn_fn
- elif self.attn_impl == 'triton':
- self.attn_fn = triton_flash_attn_fn
- if verbose:
- warnings.warn('While `attn_impl: triton` can be faster than `attn_impl: flash` ' + 'it uses more memory. When training larger models this can trigger ' + 'alloc retries which hurts performance. If encountered, we recommend ' + 'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.')
- elif self.attn_impl == 'torch':
- self.attn_fn = scaled_multihead_dot_product_attention
- if torch.cuda.is_available() and verbose:
- warnings.warn('Using `attn_impl: torch`. If your model does not use `alibi` or ' + '`prefix_lm` we recommend using `attn_impl: flash` otherwise ' + 'we recommend using `attn_impl: triton`.')
- else:
- raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
- self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
- self.out_proj._is_residual = True
-
- def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False):
- qkv = self.Wqkv(x)
- if self.clip_qkv:
- qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
- (query, key, value) = qkv.chunk(3, dim=2)
- key_padding_mask = attention_mask
- if self.qk_ln:
- dtype = query.dtype
- query = self.q_ln(query).to(dtype)
- key = self.k_ln(key).to(dtype)
- (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights)
- return (self.out_proj(context), attn_weights, past_key_value)
-
-class MultiQueryAttention(nn.Module):
- """Multi-Query self attention.
-
-    Using the torch or triton attention implementation enables the user to also
-    use an additive bias.
- """
-
- def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, low_precision_layernorm: bool=False, verbose: int=0, device: Optional[str]=None):
- super().__init__()
- self.attn_impl = attn_impl
- self.clip_qkv = clip_qkv
- self.qk_ln = qk_ln
- self.d_model = d_model
- self.n_heads = n_heads
- self.head_dim = d_model // n_heads
- self.softmax_scale = softmax_scale
- if self.softmax_scale is None:
- self.softmax_scale = 1 / math.sqrt(self.head_dim)
- self.attn_dropout_p = attn_pdrop
- self.Wqkv = nn.Linear(d_model, d_model + 2 * self.head_dim, device=device)
- fuse_splits = (d_model, d_model + self.head_dim)
- self.Wqkv._fused = (0, fuse_splits)
- if self.qk_ln:
- layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
- self.q_ln = layernorm_class(d_model, device=device)
- self.k_ln = layernorm_class(self.head_dim, device=device)
- if self.attn_impl == 'flash':
- self.attn_fn = flash_attn_fn
- elif self.attn_impl == 'triton':
- self.attn_fn = triton_flash_attn_fn
- if verbose:
- warnings.warn('While `attn_impl: triton` can be faster than `attn_impl: flash` ' + 'it uses more memory. When training larger models this can trigger ' + 'alloc retries which hurts performance. If encountered, we recommend ' + 'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.')
- elif self.attn_impl == 'torch':
- self.attn_fn = scaled_multihead_dot_product_attention
- if torch.cuda.is_available() and verbose:
- warnings.warn('Using `attn_impl: torch`. If your model does not use `alibi` or ' + '`prefix_lm` we recommend using `attn_impl: flash` otherwise ' + 'we recommend using `attn_impl: triton`.')
- else:
- raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
- self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
- self.out_proj._is_residual = True
-
- def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False):
- qkv = self.Wqkv(x)
- if self.clip_qkv:
- qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
- (query, key, value) = qkv.split([self.d_model, self.head_dim, self.head_dim], dim=2)
- key_padding_mask = attention_mask
- if self.qk_ln:
- dtype = query.dtype
- query = self.q_ln(query).to(dtype)
- key = self.k_ln(key).to(dtype)
- (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, multiquery=True)
- return (self.out_proj(context), attn_weights, past_key_value)
-
-def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):
- if attn_impl == 'flash':
- return None
- elif attn_impl in ['torch', 'triton']:
- if alibi:
- if (prefix_lm or not causal) or use_sequence_id:
- return (1, n_heads, seq_len, seq_len)
- return (1, n_heads, 1, seq_len)
- elif prefix_lm or use_sequence_id:
- return (1, 1, seq_len, seq_len)
- return None
- else:
- raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
-
-def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):
- if attn_impl == 'flash':
- return None
- elif attn_impl in ['torch', 'triton']:
- if alibi:
- (device, dtype) = (attn_bias.device, attn_bias.dtype)
- attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))
- return attn_bias
- else:
- raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
-
-def gen_slopes(n_heads, alibi_bias_max=8, device=None):
- _n_heads = 2 ** math.ceil(math.log2(n_heads))
- m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
- m = m.mul(alibi_bias_max / _n_heads)
- slopes = 1.0 / torch.pow(2, m)
- if _n_heads != n_heads:
- slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
- return slopes.view(1, n_heads, 1, 1)
-
-def build_alibi_bias(n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None):
- alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, 1, seq_len)
- if full:
- alibi_bias = alibi_bias - torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, seq_len, 1)
- alibi_bias = alibi_bias.abs().mul(-1)
- slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
- alibi_bias = alibi_bias * slopes
- return alibi_bias.to(dtype=dtype)
-ATTN_CLASS_REGISTRY = {'multihead_attention': MultiheadAttention, 'multiquery_attention': MultiQueryAttention}
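For reference, the gen_slopes and build_alibi_bias helpers deleted above implement the ALiBi position bias: each head gets a geometric slope, and the bias added to the attention logits is that slope times the (negative) query-key distance. A minimal standalone sketch of the same math, assuming plain PyTorch and illustrative function names rather than the removed module:

import math
import torch

def alibi_slopes(n_heads: int, alibi_bias_max: int = 8) -> torch.Tensor:
    # Round the head count up to a power of two, space the exponents evenly,
    # then interleave and truncate when n_heads is not itself a power of two.
    _n_heads = 2 ** math.ceil(math.log2(n_heads))
    m = torch.arange(1, _n_heads + 1, dtype=torch.float32) * (alibi_bias_max / _n_heads)
    slopes = 1.0 / torch.pow(2, m)
    if _n_heads != n_heads:
        slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
    return slopes.view(1, n_heads, 1, 1)

def alibi_bias(n_heads: int, seq_len: int, full: bool = False) -> torch.Tensor:
    # Causal case: distances 1 - seq_len .. 0 along the key axis only.
    # Full (non-causal) case: negated absolute distance between every query/key pair.
    bias = torch.arange(1 - seq_len, 1, dtype=torch.float32).view(1, 1, 1, seq_len)
    if full:
        bias = bias - torch.arange(1 - seq_len, 1, dtype=torch.float32).view(1, 1, seq_len, 1)
        bias = bias.abs().mul(-1)
    return bias * alibi_slopes(n_heads)

print(alibi_bias(8, 16).shape)   # torch.Size([1, 8, 1, 16]), broadcast over queries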
diff --git a/mmte/models/llava/model/language_model/mpt/blocks.py b/mmte/models/llava/model/language_model/mpt/blocks.py
deleted file mode 100644
index 537e7f9..0000000
--- a/mmte/models/llava/model/language_model/mpt/blocks.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""GPT Blocks used for the GPT Model."""
-from typing import Dict, Optional, Tuple
-import torch
-import torch.nn as nn
-from .attention import ATTN_CLASS_REGISTRY
-from .norm import NORM_CLASS_REGISTRY
-
-class MPTMLP(nn.Module):
-
- def __init__(self, d_model: int, expansion_ratio: int, device: Optional[str]=None):
- super().__init__()
- self.up_proj = nn.Linear(d_model, expansion_ratio * d_model, device=device)
- self.act = nn.GELU(approximate='none')
- self.down_proj = nn.Linear(expansion_ratio * d_model, d_model, device=device)
- self.down_proj._is_residual = True
-
- def forward(self, x):
- return self.down_proj(self.act(self.up_proj(x)))
-
-class MPTBlock(nn.Module):
-
- def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):
- del kwargs
- super().__init__()
- norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
- attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]
- self.norm_1 = norm_class(d_model, device=device)
- self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)
- self.norm_2 = norm_class(d_model, device=device)
- self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)
- self.resid_attn_dropout = nn.Dropout(resid_pdrop)
- self.resid_ffn_dropout = nn.Dropout(resid_pdrop)
-
- def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
- a = self.norm_1(x)
- (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)
- x = x + self.resid_attn_dropout(b)
- m = self.norm_2(x)
- n = self.ffn(m)
- x = x + self.resid_ffn_dropout(n)
- return (x, attn_weights, past_key_value)
\ No newline at end of file
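The MPTBlock removed above is a standard pre-norm residual block: LayerNorm, attention, residual add, then LayerNorm, MLP, residual add. A self-contained sketch of that layout built from stock PyTorch modules (hypothetical class name, not the deleted implementation):

import torch
import torch.nn as nn

class PreNormBlock(nn.Module):
    def __init__(self, d_model: int = 64, n_heads: int = 4, expansion_ratio: int = 4):
        super().__init__()
        self.norm_1 = nn.LayerNorm(d_model)
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.norm_2 = nn.LayerNorm(d_model)
        self.ffn = nn.Sequential(
            nn.Linear(d_model, expansion_ratio * d_model),
            nn.GELU(),
            nn.Linear(expansion_ratio * d_model, d_model),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        a = self.norm_1(x)
        b, _ = self.attn(a, a, a, need_weights=False)
        x = x + b                          # residual around attention
        x = x + self.ffn(self.norm_2(x))   # residual around the MLP
        return x

out = PreNormBlock()(torch.randn(2, 8, 64))   # (batch, seq, d_model) in and out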
diff --git a/mmte/models/llava/model/language_model/mpt/configuration_mpt.py b/mmte/models/llava/model/language_model/mpt/configuration_mpt.py
deleted file mode 100644
index e9eb6fc..0000000
--- a/mmte/models/llava/model/language_model/mpt/configuration_mpt.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""A HuggingFace-style model configuration."""
-from typing import Dict, Optional, Union
-from transformers import PretrainedConfig
-attn_config_defaults: Dict = {'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}
-init_config_defaults: Dict = {'name': 'kaiming_normal_', 'fan_mode': 'fan_in', 'init_nonlinearity': 'relu', 'init_div_is_residual': True, 'emb_init_std': None, 'emb_init_uniform_lim': None, 'init_std': None, 'init_gain': 0.0}
-
-class MPTConfig(PretrainedConfig):
- model_type = 'mpt'
-
- def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):
- """The MPT configuration class.
-
- Args:
- d_model (int): The size of the embedding dimension of the model.
- n_heads (int): The number of attention heads.
- n_layers (int): The number of layers in the model.
- expansion_ratio (int): The ratio of the up/down scale in the MLP.
- max_seq_len (int): The maximum sequence length of the model.
- vocab_size (int): The size of the vocabulary.
- resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.
- emb_pdrop (float): The dropout probability for the embedding layer.
- learned_pos_emb (bool): Whether to use learned positional embeddings
- attn_config (Dict): A dictionary used to configure the model's attention module:
- attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention
- attn_pdrop (float): The dropout probability for the attention layers.
- attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.
- qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.
- clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to
- this value.
- softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,
- use the default scale of ``1/sqrt(d_keys)``.
- prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an
- extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix
- can attend to one another bi-directionally. Tokens outside the prefix use causal attention.
- attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.
- When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates
- which sub-sequence each token belongs to.
- Defaults to ``False`` meaning any provided `sequence_id` will be ignored.
- alibi (bool): Whether to use the alibi bias instead of position embeddings.
- alibi_bias_max (int): The maximum value of the alibi bias.
- init_device (str): The device to use for parameter initialization.
- logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.
-            no_bias (bool): Whether to disable bias in all layers.
- verbose (int): The verbosity level. 0 is silent.
- embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.
- norm_type (str): choose type of norm to use
- multiquery_attention (bool): Whether to use multiquery attention implementation.
- use_cache (bool): Whether or not the model should return the last key/values attentions
- init_config (Dict): A dictionary used to configure the model initialization:
- init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',
- 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or
- 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.
- init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.
- emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.
- emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution
- used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.
- init_std (float): The standard deviation of the normal distribution used to initialize the model,
- if using the baseline_ parameter initialization scheme.
- init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.
- fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.
- init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.
- ---
- See llmfoundry.models.utils.param_init_fns.py for info on other param init config options
- """
- self.d_model = d_model
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.expansion_ratio = expansion_ratio
- self.max_seq_len = max_seq_len
- self.vocab_size = vocab_size
- self.resid_pdrop = resid_pdrop
- self.emb_pdrop = emb_pdrop
- self.learned_pos_emb = learned_pos_emb
- self.attn_config = attn_config
- self.init_device = init_device
- self.logit_scale = logit_scale
- self.no_bias = no_bias
- self.verbose = verbose
- self.embedding_fraction = embedding_fraction
- self.norm_type = norm_type
- self.use_cache = use_cache
- self.init_config = init_config
- if 'name' in kwargs:
- del kwargs['name']
- if 'loss_fn' in kwargs:
- del kwargs['loss_fn']
- super().__init__(**kwargs)
- self._validate_config()
-
- def _set_config_defaults(self, config, config_defaults):
- for (k, v) in config_defaults.items():
- if k not in config:
- config[k] = v
- return config
-
- def _validate_config(self):
- self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)
- self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)
- if self.d_model % self.n_heads != 0:
- raise ValueError('d_model must be divisible by n_heads')
- if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):
- raise ValueError("self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1")
- if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:
- raise ValueError(f"Unknown attn_impl={self.attn_config['attn_impl']}")
- if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
- raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')
- if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
- raise NotImplementedError('alibi only implemented with torch and triton attention.')
- if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
- raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')
- if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
- raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')
- if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':
- raise ValueError(f"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
- if self.init_config.get('name', None) is None:
- raise ValueError(f"self.init_config={self.init_config!r} 'name' needs to be set.")
- if not self.learned_pos_emb and (not self.attn_config['alibi']):
- raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')
\ No newline at end of file
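The deleted _set_config_defaults/_validate_config pair first fills any missing keys of the user-supplied attn_config and init_config from the module defaults, then validates the merged result. A small illustrative sketch of that merge step, using plain dicts and hypothetical names:

attn_config_defaults = {'attn_type': 'multihead_attention', 'attn_impl': 'triton',
                        'attn_pdrop': 0.0, 'alibi': False}

def set_config_defaults(config: dict, config_defaults: dict) -> dict:
    # Keys the user omitted are filled from the defaults; keys the user set win.
    for k, v in config_defaults.items():
        if k not in config:
            config[k] = v
    return config

user_attn_config = {'attn_impl': 'torch', 'alibi': True}
merged = set_config_defaults(user_attn_config, attn_config_defaults)
# {'attn_impl': 'torch', 'alibi': True, 'attn_type': 'multihead_attention', 'attn_pdrop': 0.0}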
diff --git a/mmte/models/llava/model/language_model/mpt/custom_embedding.py b/mmte/models/llava/model/language_model/mpt/custom_embedding.py
deleted file mode 100644
index ab35795..0000000
--- a/mmte/models/llava/model/language_model/mpt/custom_embedding.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch import Tensor
-
-class SharedEmbedding(nn.Embedding):
-
- def forward(self, input: Tensor, unembed: bool=False) -> Tensor:
- if unembed:
- return F.linear(input, self.weight)
- return super().forward(input)
\ No newline at end of file
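The deleted SharedEmbedding ties the input and output projections: the same embedding matrix embeds token ids on the way in and, via F.linear, maps hidden states back to vocabulary logits on the way out. A standalone sketch of that weight-tying trick:

import torch
import torch.nn as nn
import torch.nn.functional as F

emb = nn.Embedding(num_embeddings=100, embedding_dim=16)
tokens = torch.randint(0, 100, (2, 5))

hidden = emb(tokens)                   # embed:   (2, 5, 16)
logits = F.linear(hidden, emb.weight)  # unembed: (2, 5, 100), no separate output matrix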
diff --git a/mmte/models/llava/model/language_model/mpt/flash_attn_triton.py b/mmte/models/llava/model/language_model/mpt/flash_attn_triton.py
deleted file mode 100644
index c0a4218..0000000
--- a/mmte/models/llava/model/language_model/mpt/flash_attn_triton.py
+++ /dev/null
@@ -1,484 +0,0 @@
-"""
-Copied from https://github.com/HazyResearch/flash-attention/blob/eff9fe6b8076df59d64d7a3f464696738a3c7c24/flash_attn/flash_attn_triton.py
-update imports to use 'triton_pre_mlir'
-
-*Experimental* implementation of FlashAttention in Triton.
-Tested with triton==2.0.0.dev20221202.
-Triton 2.0 has a new backend (MLIR), but it doesn't yet seem to work for head dimensions
-other than 64:
-https://github.com/openai/triton/blob/d376020f90002757eea3ea9475d4f7cfc2ec5ead/python/triton/ops/flash_attention.py#L207
-We'll update this implementation with the new Triton backend once this is fixed.
-
-We use the FlashAttention implementation from Phil Tillet as a starting point.
-https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
-
-Changes:
-- Implement both causal and non-causal attention.
-- Implement both self-attention and cross-attention.
-- Support arbitrary seqlens (not just multiples of 128), for both forward and backward.
-- Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward.
-- Support attention bias.
-- Speed up the forward pass a bit, and only store the LSE instead of m and l.
-- Make the backward for d=128 much faster by reducing register spilling.
-- Optionally parallelize the backward pass across seqlen_k, to deal with the case of
-small batch size * nheads.
-
-Caution:
-- This is an *experimental* implementation. The forward pass should be quite robust but
-I'm not 100% sure that the backward pass doesn't have race conditions (due to the Triton compiler).
-- This implementation has only been tested on A100.
-- If you plan to use headdim other than 64 and 128, you should test for race conditions
-(due to the Triton compiler), as done in tests/test_flash_attn.py
-"test_flash_attn_triton_race_condition". I've tested and fixed many race conditions
-for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I'm still not 100% confident
-that there are none left for other head dimensions.
-
-Differences between this Triton version and the CUDA version:
-- Triton version doesn't support dropout.
-- Triton forward is generally faster than CUDA forward, while Triton backward is
-generally slower than CUDA backward. Overall Triton forward + backward is slightly slower
-than CUDA forward + backward.
-- Triton version doesn't support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor).
-- Triton version supports attention bias, while CUDA version doesn't.
-"""
-import math
-import torch
-import triton_pre_mlir as triton
-import triton_pre_mlir.language as tl
-
-@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0, 'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM']})
-@triton.jit
-def _fwd_kernel(Q, K, V, Bias, Out, Lse, TMP, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
- start_m = tl.program_id(0)
- off_hb = tl.program_id(1)
- off_b = off_hb // nheads
- off_h = off_hb % nheads
- offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
- offs_n = tl.arange(0, BLOCK_N)
- offs_d = tl.arange(0, BLOCK_HEADDIM)
- q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
- k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :])
- v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :])
- if BIAS_TYPE == 'vector':
- b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
- elif BIAS_TYPE == 'matrix':
- b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :])
- t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
- lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
- m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
- acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
- if EVEN_M & EVEN_N:
- if EVEN_HEADDIM:
- q = tl.load(q_ptrs)
- else:
- q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
- elif EVEN_HEADDIM:
- q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
- else:
- q = tl.load(q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
- end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
- for start_n in range(0, end_n, BLOCK_N):
- start_n = tl.multiple_of(start_n, BLOCK_N)
- if EVEN_N & EVEN_M:
- if EVEN_HEADDIM:
- k = tl.load(k_ptrs + start_n * stride_kn)
- else:
- k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None, :] < headdim, other=0.0)
- elif EVEN_HEADDIM:
- k = tl.load(k_ptrs + start_n * stride_kn, mask=(start_n + offs_n)[:, None] < seqlen_k, other=0.0)
- else:
- k = tl.load(k_ptrs + start_n * stride_kn, mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
- qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
- qk += tl.dot(q, k, trans_b=True)
- if not EVEN_N:
- qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float('-inf'))
- if IS_CAUSAL:
- qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float('-inf'))
- if BIAS_TYPE != 'none':
- if BIAS_TYPE == 'vector':
- if EVEN_N:
- bias = tl.load(b_ptrs + start_n).to(tl.float32)
- else:
- bias = tl.load(b_ptrs + start_n, mask=start_n + offs_n < seqlen_k, other=0.0).to(tl.float32)
- bias = bias[None, :]
- elif BIAS_TYPE == 'matrix':
- if EVEN_M & EVEN_N:
- bias = tl.load(b_ptrs + start_n).to(tl.float32)
- else:
- bias = tl.load(b_ptrs + start_n, mask=(offs_m[:, None] < seqlen_q) & ((start_n + offs_n)[None, :] < seqlen_k), other=0.0).to(tl.float32)
- qk = qk * softmax_scale + bias
- m_ij = tl.maximum(tl.max(qk, 1), lse_i)
- p = tl.exp(qk - m_ij[:, None])
- else:
- m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
- p = tl.exp(qk * softmax_scale - m_ij[:, None])
- l_ij = tl.sum(p, 1)
- acc_o_scale = tl.exp(m_i - m_ij)
- tl.store(t_ptrs, acc_o_scale)
- acc_o_scale = tl.load(t_ptrs)
- acc_o = acc_o * acc_o_scale[:, None]
- if EVEN_N & EVEN_M:
- if EVEN_HEADDIM:
- v = tl.load(v_ptrs + start_n * stride_vn)
- else:
- v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None, :] < headdim, other=0.0)
- elif EVEN_HEADDIM:
- v = tl.load(v_ptrs + start_n * stride_vn, mask=(start_n + offs_n)[:, None] < seqlen_k, other=0.0)
- else:
- v = tl.load(v_ptrs + start_n * stride_vn, mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
- p = p.to(v.dtype)
- acc_o += tl.dot(p, v)
- m_i = m_ij
- l_i_new = tl.exp(lse_i - m_ij) + l_ij
- lse_i = m_ij + tl.log(l_i_new)
- o_scale = tl.exp(m_i - lse_i)
- tl.store(t_ptrs, o_scale)
- o_scale = tl.load(t_ptrs)
- acc_o = acc_o * o_scale[:, None]
- start_m = tl.program_id(0)
- offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
- lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
- tl.store(lse_ptrs, lse_i)
- offs_d = tl.arange(0, BLOCK_HEADDIM)
- out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_d[None, :])
- if EVEN_M:
- if EVEN_HEADDIM:
- tl.store(out_ptrs, acc_o)
- else:
- tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
- elif EVEN_HEADDIM:
- tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
- else:
- tl.store(out_ptrs, acc_o, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
-
-@triton.jit
-def _bwd_preprocess_do_o_dot(Out, DO, Delta, stride_ob, stride_oh, stride_om, stride_dob, stride_doh, stride_dom, nheads, seqlen_q, seqlen_q_rounded, headdim, BLOCK_M: tl.constexpr, BLOCK_HEADDIM: tl.constexpr):
- start_m = tl.program_id(0)
- off_hb = tl.program_id(1)
- off_b = off_hb // nheads
- off_h = off_hb % nheads
- offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
- offs_d = tl.arange(0, BLOCK_HEADDIM)
- o = tl.load(Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :], mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
- do = tl.load(DO + off_b * stride_dob + off_h * stride_doh + offs_m[:, None] * stride_dom + offs_d[None, :], mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
- delta = tl.sum(o * do, axis=1)
- tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
-
-@triton.jit
-def _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr):
- if EVEN_N & EVEN_M:
- if EVEN_HEADDIM:
- tl.store(dv_ptrs, dv)
- tl.store(dk_ptrs, dk)
- else:
- tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
- tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
- elif EVEN_HEADDIM:
- tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
- tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
- else:
- tl.store(dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
- tl.store(dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
-
-@triton.jit
-def _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD: tl.constexpr, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
- begin_m = 0 if not IS_CAUSAL else start_n * BLOCK_N // BLOCK_M * BLOCK_M
- offs_qm = begin_m + tl.arange(0, BLOCK_M)
- offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
- offs_m = tl.arange(0, BLOCK_M)
- offs_d = tl.arange(0, BLOCK_HEADDIM)
- q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :])
- k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :])
- v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :])
- do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :])
- dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :])
- if BIAS_TYPE == 'vector':
- b_ptrs = Bias + offs_n
- elif BIAS_TYPE == 'matrix':
- b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :])
- dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
- dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
- if begin_m >= seqlen_q:
- dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
- dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
- _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
- return
- if EVEN_N & EVEN_M:
- if EVEN_HEADDIM:
- k = tl.load(k_ptrs)
- v = tl.load(v_ptrs)
- else:
- k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
- v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
- elif EVEN_HEADDIM:
- k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
- v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
- else:
- k = tl.load(k_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
- v = tl.load(v_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
- num_block_m = tl.cdiv(seqlen_q, BLOCK_M)
- for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M):
- start_m = tl.multiple_of(start_m, BLOCK_M)
- offs_m_curr = start_m + offs_m
- if EVEN_M & EVEN_HEADDIM:
- q = tl.load(q_ptrs)
- elif EVEN_HEADDIM:
- q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
- else:
- q = tl.load(q_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
- qk = tl.dot(q, k, trans_b=True)
- if not EVEN_N:
- qk = tl.where(offs_n[None, :] < seqlen_k, qk, float('-inf'))
- if IS_CAUSAL:
- qk = tl.where(offs_m_curr[:, None] >= offs_n[None, :], qk, float('-inf'))
- if BIAS_TYPE != 'none':
- tl.debug_barrier()
- if BIAS_TYPE == 'vector':
- if EVEN_N:
- bias = tl.load(b_ptrs).to(tl.float32)
- else:
- bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0).to(tl.float32)
- bias = bias[None, :]
- elif BIAS_TYPE == 'matrix':
- if EVEN_M & EVEN_N:
- bias = tl.load(b_ptrs).to(tl.float32)
- else:
- bias = tl.load(b_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_n[None, :] < seqlen_k), other=0.0).to(tl.float32)
- qk = qk * softmax_scale + bias
- if not EVEN_M & EVEN_HEADDIM:
- tl.debug_barrier()
- lse_i = tl.load(LSE + offs_m_curr)
- if BIAS_TYPE == 'none':
- p = tl.exp(qk * softmax_scale - lse_i[:, None])
- else:
- p = tl.exp(qk - lse_i[:, None])
- if EVEN_M & EVEN_HEADDIM:
- do = tl.load(do_ptrs)
- else:
- do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
- dv += tl.dot(p.to(do.dtype), do, trans_a=True)
- if not EVEN_M & EVEN_HEADDIM:
- tl.debug_barrier()
- dp = tl.dot(do, v, trans_b=True)
- if not EVEN_HEADDIM:
- tl.debug_barrier()
- Di = tl.load(D + offs_m_curr)
- ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
- dk += tl.dot(ds, q, trans_a=True)
- if not EVEN_M & EVEN_HEADDIM:
- tl.debug_barrier()
- if not ATOMIC_ADD:
- if EVEN_M & EVEN_HEADDIM:
- dq = tl.load(dq_ptrs, eviction_policy='evict_last')
- dq += tl.dot(ds, k)
- tl.store(dq_ptrs, dq, eviction_policy='evict_last')
- elif EVEN_HEADDIM:
- dq = tl.load(dq_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0, eviction_policy='evict_last')
- dq += tl.dot(ds, k)
- tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q, eviction_policy='evict_last')
- else:
- dq = tl.load(dq_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0, eviction_policy='evict_last')
- dq += tl.dot(ds, k)
- tl.store(dq_ptrs, dq, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), eviction_policy='evict_last')
- else:
- dq = tl.dot(ds, k)
- if EVEN_M & EVEN_HEADDIM:
- tl.atomic_add(dq_ptrs, dq)
- elif EVEN_HEADDIM:
- tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q)
- else:
- tl.atomic_add(dq_ptrs, dq, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
- dq_ptrs += BLOCK_M * stride_dqm
- q_ptrs += BLOCK_M * stride_qm
- do_ptrs += BLOCK_M * stride_dom
- if BIAS_TYPE == 'matrix':
- b_ptrs += BLOCK_M * stride_bm
- dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
- dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
- _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
-
-def init_to_zero(name):
- return lambda nargs: nargs[name].zero_()
-
-@triton.autotune(configs=[triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'SEQUENCE_PARALLEL': False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'SEQUENCE_PARALLEL': True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ'))], key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM'])
-@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0, 'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM']})
-@triton.jit
-def _bwd_kernel(Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_dob, stride_doh, stride_dom, stride_dqb, stride_dqh, stride_dqm, stride_dkb, stride_dkh, stride_dkn, stride_dvb, stride_dvh, stride_dvn, nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, SEQUENCE_PARALLEL: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
- off_hb = tl.program_id(1)
- off_b = off_hb // nheads
- off_h = off_hb % nheads
- Q += off_b * stride_qb + off_h * stride_qh
- K += off_b * stride_kb + off_h * stride_kh
- V += off_b * stride_vb + off_h * stride_vh
- DO += off_b * stride_dob + off_h * stride_doh
- DQ += off_b * stride_dqb + off_h * stride_dqh
- DK += off_b * stride_dkb + off_h * stride_dkh
- DV += off_b * stride_dvb + off_h * stride_dvh
- if BIAS_TYPE != 'none':
- Bias += off_b * stride_bb + off_h * stride_bh
- D += off_hb * seqlen_q_rounded
- LSE += off_hb * seqlen_q_rounded
- if not SEQUENCE_PARALLEL:
- num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
- for start_n in range(0, num_block_n):
- _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD=False, BIAS_TYPE=BIAS_TYPE, IS_CAUSAL=IS_CAUSAL, BLOCK_HEADDIM=BLOCK_HEADDIM, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)
- else:
- start_n = tl.program_id(0)
- _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD=True, BIAS_TYPE=BIAS_TYPE, IS_CAUSAL=IS_CAUSAL, BLOCK_HEADDIM=BLOCK_HEADDIM, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)
-
-def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
- (batch, seqlen_q, nheads, d) = q.shape
- (_, seqlen_k, _, _) = k.shape
- assert k.shape == (batch, seqlen_k, nheads, d)
- assert v.shape == (batch, seqlen_k, nheads, d)
-    assert d <= 128, 'FlashAttention only supports head dimensions up to 128'
- assert q.dtype == k.dtype == v.dtype, 'All tensors must have the same type'
- assert q.dtype in [torch.float16, torch.bfloat16], 'Only support fp16 and bf16'
- assert q.is_cuda and k.is_cuda and v.is_cuda
- softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
- has_bias = bias is not None
- bias_type = 'none'
- if has_bias:
- assert bias.dtype in [q.dtype, torch.float]
- assert bias.is_cuda
- assert bias.dim() == 4
- if bias.stride(-1) != 1:
- bias = bias.contiguous()
- if bias.shape[2:] == (1, seqlen_k):
- bias_type = 'vector'
- elif bias.shape[2:] == (seqlen_q, seqlen_k):
- bias_type = 'matrix'
- else:
- raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)')
- bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
- bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
- seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
- lse = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
- tmp = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
- o = torch.empty_like(q)
- BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
- BLOCK = 128
- num_warps = 4 if d <= 64 else 8
- grid = lambda META: (triton.cdiv(seqlen_q, META['BLOCK_M']), batch * nheads)
- _fwd_kernel[grid](q, k, v, bias, o, lse, tmp, softmax_scale, q.stride(0), q.stride(2), q.stride(1), k.stride(0), k.stride(2), k.stride(1), v.stride(0), v.stride(2), v.stride(1), *bias_strides, o.stride(0), o.stride(2), o.stride(1), nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d, seqlen_q // 32, seqlen_k // 32, bias_type, causal, BLOCK_HEADDIM, BLOCK_M=BLOCK, BLOCK_N=BLOCK, num_warps=num_warps, num_stages=1)
- return (o, lse, softmax_scale)
-
-def _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None):
- if do.stride(-1) != 1:
- do = do.contiguous()
- (batch, seqlen_q, nheads, d) = q.shape
- (_, seqlen_k, _, _) = k.shape
- assert d <= 128
- seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
- assert lse.shape == (batch, nheads, seqlen_q_rounded)
- assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
- assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
- softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
- dq_accum = torch.empty_like(q, dtype=torch.float32)
- delta = torch.empty_like(lse)
- BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
- grid = lambda META: (triton.cdiv(seqlen_q, META['BLOCK_M']), batch * nheads)
- _bwd_preprocess_do_o_dot[grid](o, do, delta, o.stride(0), o.stride(2), o.stride(1), do.stride(0), do.stride(2), do.stride(1), nheads, seqlen_q, seqlen_q_rounded, d, BLOCK_M=128, BLOCK_HEADDIM=BLOCK_HEADDIM)
- has_bias = bias is not None
- bias_type = 'none'
- if has_bias:
- assert bias.dtype in [q.dtype, torch.float]
- assert bias.is_cuda
- assert bias.dim() == 4
- assert bias.stride(-1) == 1
- if bias.shape[2:] == (1, seqlen_k):
- bias_type = 'vector'
- elif bias.shape[2:] == (seqlen_q, seqlen_k):
- bias_type = 'matrix'
- else:
- raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)')
- bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
- bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
- grid = lambda META: (triton.cdiv(seqlen_k, META['BLOCK_N']) if META['SEQUENCE_PARALLEL'] else 1, batch * nheads)
- _bwd_kernel[grid](q, k, v, bias, do, dq_accum, dk, dv, lse, delta, softmax_scale, q.stride(0), q.stride(2), q.stride(1), k.stride(0), k.stride(2), k.stride(1), v.stride(0), v.stride(2), v.stride(1), *bias_strides, do.stride(0), do.stride(2), do.stride(1), dq_accum.stride(0), dq_accum.stride(2), dq_accum.stride(1), dk.stride(0), dk.stride(2), dk.stride(1), dv.stride(0), dv.stride(2), dv.stride(1), nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d, seqlen_q // 32, seqlen_k // 32, bias_type, causal, BLOCK_HEADDIM)
- dq.copy_(dq_accum)
-
-class FlashAttnQKVPackedFunc(torch.autograd.Function):
-
- @staticmethod
- def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
- """
- qkv: (batch, seqlen, 3, nheads, headdim)
-        bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).
- For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
- ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
- """
- if qkv.stride(-1) != 1:
- qkv = qkv.contiguous()
- (o, lse, ctx.softmax_scale) = _flash_attn_forward(qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], bias=bias, causal=causal, softmax_scale=softmax_scale)
- ctx.save_for_backward(qkv, o, lse, bias)
- ctx.causal = causal
- return o
-
- @staticmethod
- def backward(ctx, do):
- (qkv, o, lse, bias) = ctx.saved_tensors
- assert not ctx.needs_input_grad[1], 'FlashAttention does not support bias gradient yet'
- with torch.inference_mode():
- dqkv = torch.empty_like(qkv)
- _flash_attn_backward(do, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], o, lse, dqkv[:, :, 0], dqkv[:, :, 1], dqkv[:, :, 2], bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
- return (dqkv, None, None, None)
-flash_attn_qkvpacked_func = FlashAttnQKVPackedFunc.apply
-
-class FlashAttnKVPackedFunc(torch.autograd.Function):
-
- @staticmethod
- def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None):
- """
- q: (batch, seqlen_q, nheads, headdim)
- kv: (batch, seqlen_k, 2, nheads, headdim)
-        bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
- For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
- ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
- """
- (q, kv) = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]]
- (o, lse, ctx.softmax_scale) = _flash_attn_forward(q, kv[:, :, 0], kv[:, :, 1], bias=bias, causal=causal, softmax_scale=softmax_scale)
- ctx.save_for_backward(q, kv, o, lse, bias)
- ctx.causal = causal
- return o
-
- @staticmethod
- def backward(ctx, do):
- (q, kv, o, lse, bias) = ctx.saved_tensors
- if len(ctx.needs_input_grad) >= 3:
- assert not ctx.needs_input_grad[2], 'FlashAttention does not support bias gradient yet'
- with torch.inference_mode():
- dq = torch.empty_like(q)
- dkv = torch.empty_like(kv)
- _flash_attn_backward(do, q, kv[:, :, 0], kv[:, :, 1], o, lse, dq, dkv[:, :, 0], dkv[:, :, 1], bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
- return (dq, dkv, None, None, None)
-flash_attn_kvpacked_func = FlashAttnKVPackedFunc.apply
-
-class FlashAttnFunc(torch.autograd.Function):
-
- @staticmethod
- def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
- """
- q: (batch_size, seqlen_q, nheads, headdim)
- k, v: (batch_size, seqlen_k, nheads, headdim)
-        bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
- For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
- ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
- """
- (q, k, v) = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]]
- (o, lse, ctx.softmax_scale) = _flash_attn_forward(q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale)
- ctx.save_for_backward(q, k, v, o, lse, bias)
- ctx.causal = causal
- return o
-
- @staticmethod
- def backward(ctx, do):
- (q, k, v, o, lse, bias) = ctx.saved_tensors
- assert not ctx.needs_input_grad[3], 'FlashAttention does not support bias gradient yet'
- with torch.inference_mode():
- dq = torch.empty_like(q)
- dk = torch.empty_like(k)
- dv = torch.empty_like(v)
- _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
- return (dq, dk, dv, None, None, None)
-flash_attn_func = FlashAttnFunc.apply
\ No newline at end of file
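Functionally, the FlashAttnFunc family removed above computes ordinary scaled-dot-product attention over (batch, seqlen, nheads, headdim) tensors with an optional additive bias and causal mask; the Triton kernels only change how that result is produced, not what it is. A plain-PyTorch numerical reference, offered as a sketch only, with none of the kernels' tiling or memory savings:

import math
import torch

def attention_reference(q, k, v, bias=None, causal=False, softmax_scale=None):
    # q: (b, s_q, h, d); k, v: (b, s_k, h, d); bias broadcastable to (b, h, s_q, s_k)
    softmax_scale = softmax_scale or 1.0 / math.sqrt(q.size(-1))
    scores = torch.einsum('bqhd,bkhd->bhqk', q, k) * softmax_scale
    if bias is not None:
        scores = scores + bias
    if causal:
        s_q, s_k = scores.shape[-2:]
        keep = torch.ones(s_q, s_k, dtype=torch.bool, device=q.device).tril()
        scores = scores.masked_fill(~keep, float('-inf'))
    probs = torch.softmax(scores, dim=-1)
    return torch.einsum('bhqk,bkhd->bqhd', probs, v)

out = attention_reference(torch.randn(1, 4, 2, 8), torch.randn(1, 4, 2, 8),
                          torch.randn(1, 4, 2, 8), causal=True)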
diff --git a/mmte/models/llava/model/language_model/mpt/hf_prefixlm_converter.py b/mmte/models/llava/model/language_model/mpt/hf_prefixlm_converter.py
deleted file mode 100644
index 8c1a648..0000000
--- a/mmte/models/llava/model/language_model/mpt/hf_prefixlm_converter.py
+++ /dev/null
@@ -1,415 +0,0 @@
-"""Converts Huggingface Causal LM to Prefix LM.
-
-Conversion does lightweight surgery on a HuggingFace
-Causal LM to convert it to a Prefix LM.
-
-Prefix LMs accept a `bidirectional_mask` input in `forward`
-and treat the input prompt as the prefix in `generate`.
-"""
-import math
-import warnings
-from types import MethodType
-from typing import Any, Dict, List, Optional, Tuple, Union
-import torch
-from transformers.models.bloom.modeling_bloom import BaseModelOutputWithPastAndCrossAttentions, BloomForCausalLM, BloomModel, CausalLMOutputWithCrossAttentions, CrossEntropyLoss
-from transformers.models.bloom.modeling_bloom import _expand_mask as _expand_mask_bloom
-from transformers.models.bloom.modeling_bloom import _make_causal_mask as _make_causal_mask_bloom
-from transformers.models.bloom.modeling_bloom import logging
-from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
-from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM
-from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
-from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
-from transformers.models.opt.modeling_opt import OPTForCausalLM
-from transformers.models.opt.modeling_opt import _expand_mask as _expand_mask_opt
-from transformers.models.opt.modeling_opt import _make_causal_mask as _make_causal_mask_opt
-logger = logging.get_logger(__name__)
-_SUPPORTED_GPT_MODELS = (GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM)
-CAUSAL_GPT_TYPES = Union[GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM]
-
-def _convert_gpt_causal_lm_to_prefix_lm(model: CAUSAL_GPT_TYPES) -> CAUSAL_GPT_TYPES:
- """Converts a GPT-style Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `GPT2LMHeadModel`
- - `GPTNeoForCausalLM`
- - `GPTNeoXForCausalLM`
- - `GPTJForCausalLM`
-
- See `convert_hf_causal_lm_to_prefix_lm` for more details.
- """
- if hasattr(model, '_prefix_lm_converted'):
- return model
- assert isinstance(model, _SUPPORTED_GPT_MODELS)
- assert model.config.add_cross_attention == False, 'Only supports GPT-style decoder-only models'
-
- def _get_attn_modules(model: CAUSAL_GPT_TYPES) -> List[torch.nn.Module]:
- """Helper that gets a list of the model's attention modules.
-
- Each module has a `bias` buffer used for causal masking. The Prefix LM
- conversion adds logic to dynamically manipulate these biases to support
- Prefix LM attention masking.
- """
- attn_modules = []
- if isinstance(model, GPTNeoXForCausalLM):
- blocks = model.gpt_neox.layers
- else:
- blocks = model.transformer.h
- for block in blocks:
- if isinstance(model, GPTNeoForCausalLM):
- if block.attn.attention_type != 'global':
- continue
- attn_module = block.attn.attention
- elif isinstance(model, GPTNeoXForCausalLM):
- attn_module = block.attention
- else:
- attn_module = block.attn
- attn_modules.append(attn_module)
- return attn_modules
- setattr(model, '_original_forward', getattr(model, 'forward'))
- setattr(model, '_original_generate', getattr(model, 'generate'))
-
- def forward(self: CAUSAL_GPT_TYPES, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, bidirectional_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
- """Wraps original forward to enable PrefixLM attention."""
-
- def call_og_forward():
- if isinstance(self, GPTNeoXForCausalLM):
- return self._original_forward(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
- else:
- return self._original_forward(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
- if bidirectional_mask is None:
- return call_og_forward()
- assert isinstance(bidirectional_mask, torch.Tensor)
- attn_modules = _get_attn_modules(model)
- (b, s) = bidirectional_mask.shape
- max_length = attn_modules[0].bias.shape[-1]
- if s > max_length:
- raise ValueError(f'bidirectional_mask sequence length (={s}) exceeds the ' + f'max length allowed by the model ({max_length}).')
- assert s <= max_length
- if s < max_length:
- pad = torch.zeros((int(b), int(max_length - s)), dtype=bidirectional_mask.dtype, device=bidirectional_mask.device)
- bidirectional_mask = torch.cat([bidirectional_mask, pad], dim=1)
- bidirectional = bidirectional_mask.unsqueeze(1).unsqueeze(1)
- for attn_module in attn_modules:
- attn_module.bias.data = torch.logical_or(attn_module.bias.data, bidirectional)
- output = call_og_forward()
- for attn_module in attn_modules:
- attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
- return output
-
- def generate(self: CAUSAL_GPT_TYPES, *args: tuple, **kwargs: Dict[str, Any]):
- """Wraps original generate to enable PrefixLM attention."""
- attn_modules = _get_attn_modules(model)
- for attn_module in attn_modules:
- attn_module.bias.data[:] = 1
- output = self._original_generate(*args, **kwargs)
- for attn_module in attn_modules:
- attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
- return output
- setattr(model, 'forward', MethodType(forward, model))
- setattr(model, 'generate', MethodType(generate, model))
- setattr(model, '_prefix_lm_converted', True)
- return model
-
-def _convert_bloom_causal_lm_to_prefix_lm(model: BloomForCausalLM) -> BloomForCausalLM:
- """Converts a BLOOM Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `BloomForCausalLM`
-
- See `convert_hf_causal_lm_to_prefix_lm` for more details.
- """
- if hasattr(model, '_prefix_lm_converted'):
- return model
- assert isinstance(model, BloomForCausalLM)
- assert model.config.add_cross_attention == False, 'Only supports BLOOM decoder-only models'
-
- def _prepare_attn_mask(self: BloomModel, attention_mask: torch.Tensor, bidirectional_mask: Optional[torch.Tensor], input_shape: Tuple[int, int], past_key_values_length: int) -> torch.BoolTensor:
- combined_attention_mask = None
- device = attention_mask.device
- (_, src_length) = input_shape
- if src_length > 1:
- combined_attention_mask = _make_causal_mask_bloom(input_shape, device=device, past_key_values_length=past_key_values_length)
- if bidirectional_mask is not None:
- assert attention_mask.shape == bidirectional_mask.shape
- expanded_bidirectional_mask = _expand_mask_bloom(bidirectional_mask, tgt_length=src_length)
- combined_attention_mask = torch.logical_and(combined_attention_mask, expanded_bidirectional_mask)
- expanded_attn_mask = _expand_mask_bloom(attention_mask, tgt_length=src_length)
- combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask
- return combined_attention_mask
-
- def _build_alibi_tensor(self: BloomModel, batch_size: int, query_length: int, key_length: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
- num_heads = self.config.n_head
- closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
- base = torch.tensor(2 ** (-2 ** (-(math.log2(closest_power_of_2) - 3))), device=device, dtype=torch.float32)
- powers = torch.arange(1, 1 + closest_power_of_2, device=device, dtype=torch.int32)
- slopes = torch.pow(base, powers)
- if closest_power_of_2 != num_heads:
- extra_base = torch.tensor(2 ** (-2 ** (-(math.log2(2 * closest_power_of_2) - 3))), device=device, dtype=torch.float32)
- num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
- extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=device, dtype=torch.int32)
- slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
- qa = torch.arange(query_length, device=device, dtype=torch.int32).view(-1, 1)
- ka = torch.arange(key_length, device=device, dtype=torch.int32).view(1, -1)
- diffs = qa - ka + key_length - query_length
- diffs = -diffs.abs()
- alibi = slopes.view(1, num_heads, 1, 1) * diffs.view(1, 1, query_length, key_length)
- alibi = alibi.expand(batch_size, -1, -1, -1).reshape(-1, query_length, key_length)
- return alibi.to(dtype)
- KeyValueT = Tuple[torch.Tensor, torch.Tensor]
-
- def forward(self: BloomModel, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[KeyValueT, ...]]=None, attention_mask: Optional[torch.Tensor]=None, bidirectional_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
- if deprecated_arguments.pop('position_ids', False) is not False:
- warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. ' + 'You can safely ignore passing `position_ids`.', FutureWarning)
- if len(deprecated_arguments) > 0:
- raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
- elif input_ids is not None:
- (batch_size, seq_length) = input_ids.shape
- elif inputs_embeds is not None:
- (batch_size, seq_length, _) = inputs_embeds.shape
- else:
- raise ValueError('You have to specify either input_ids or inputs_embeds')
- if past_key_values is None:
- past_key_values = tuple([None] * len(self.h))
- head_mask = self.get_head_mask(head_mask, self.config.n_layer)
- if inputs_embeds is None:
- inputs_embeds = self.word_embeddings(input_ids)
- hidden_states = self.word_embeddings_layernorm(inputs_embeds)
- presents = () if use_cache else None
- all_self_attentions = () if output_attentions else None
- all_hidden_states = () if output_hidden_states else None
- seq_length_with_past = seq_length
- past_key_values_length = 0
- if past_key_values[0] is not None:
- tmp = past_key_values[0][0]
- past_key_values_length = tmp.shape[2]
- seq_length_with_past = seq_length_with_past + past_key_values_length
- if attention_mask is None:
- attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
- else:
- attention_mask = attention_mask.to(hidden_states.device)
- alibi = self._build_alibi_tensor(batch_size=batch_size, query_length=seq_length, key_length=seq_length_with_past, dtype=hidden_states.dtype, device=hidden_states.device)
- causal_mask = self._prepare_attn_mask(attention_mask, bidirectional_mask, input_shape=(batch_size, seq_length), past_key_values_length=past_key_values_length)
- for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)):
- if output_hidden_states:
- hst = (hidden_states,)
- all_hidden_states = all_hidden_states + hst
- if self.gradient_checkpointing and self.training:
- if use_cache:
- logger.warning('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
- use_cache = False
-
- def create_custom_forward(module):
-
- def custom_forward(*inputs):
- return module(*inputs, use_cache=use_cache, output_attentions=output_attentions)
- return custom_forward
- outputs = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, alibi, causal_mask, head_mask[i])
- else:
- outputs = block(hidden_states, layer_past=layer_past, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi)
- hidden_states = outputs[0]
- if use_cache is True:
- presents = presents + (outputs[1],)
- if output_attentions:
- oa = (outputs[2 if use_cache else 1],)
- all_self_attentions = all_self_attentions + oa
- hidden_states = self.ln_f(hidden_states)
- if output_hidden_states:
- hst = (hidden_states,)
- all_hidden_states = all_hidden_states + hst
- if not return_dict:
- return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None))
- return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions)
- setattr(model.transformer, '_prepare_attn_mask', MethodType(_prepare_attn_mask, model.transformer))
- setattr(model.transformer, '_build_alibi_tensor', MethodType(_build_alibi_tensor, model.transformer))
- setattr(model.transformer, 'forward', MethodType(forward, model.transformer))
- KeyValueT = Tuple[torch.Tensor, torch.Tensor]
-
- def forward(self: BloomForCausalLM, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[KeyValueT, ...]]=None, attention_mask: Optional[torch.Tensor]=None, bidirectional_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
- """Replacement forward method for BloomCausalLM."""
- if deprecated_arguments.pop('position_ids', False) is not False:
- warnings.warn('`position_ids` have no functionality in BLOOM and will be removed ' + 'in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning)
- if len(deprecated_arguments) > 0:
- raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, bidirectional_mask=bidirectional_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
- hidden_states = transformer_outputs[0]
- lm_logits = self.lm_head(hidden_states)
- loss = None
- if labels is not None:
- shift_logits = lm_logits[..., :-1, :].contiguous()
- shift_labels = labels[..., 1:].contiguous()
- (batch_size, seq_length, vocab_size) = shift_logits.shape
- loss_fct = CrossEntropyLoss()
- loss = loss_fct(shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length))
- if not return_dict:
- output = (lm_logits,) + transformer_outputs[1:]
- return (loss,) + output if loss is not None else output
- return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
-
- def prepare_inputs_for_generation(self: BloomForCausalLM, input_ids: torch.LongTensor, past: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> dict:
- if past:
- input_ids = input_ids[:, -1].unsqueeze(-1)
- bidirectional_mask = None
- if past[0][0].shape[0] == input_ids.shape[0]:
- past = self._convert_to_bloom_cache(past)
- else:
- bidirectional_mask = torch.ones_like(input_ids)
- return {'input_ids': input_ids, 'past_key_values': past, 'use_cache': True, 'attention_mask': attention_mask, 'bidirectional_mask': bidirectional_mask}
- setattr(model, 'forward', MethodType(forward, model))
- setattr(model, 'prepare_inputs_for_generation', MethodType(prepare_inputs_for_generation, model))
- setattr(model, '_prefix_lm_converted', True)
- return model
-
-def _convert_opt_causal_lm_to_prefix_lm(model: OPTForCausalLM) -> OPTForCausalLM:
- """Converts an OPT Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `OPTForCausalLM`
-
- See `convert_hf_causal_lm_to_prefix_lm` for more details.
- """
- if hasattr(model, '_prefix_lm_converted'):
- return model
- assert isinstance(model, OPTForCausalLM)
- assert model.config.add_cross_attention == False, 'Only supports OPT decoder-only models'
- setattr(model, '_original_forward', getattr(model, 'forward'))
- setattr(model, '_original_generate', getattr(model, 'generate'))
- model.model.decoder.bidirectional_mask = None
-
- def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
- combined_attention_mask = None
- if input_shape[-1] > 1:
- if self.bidirectional_mask == 'g':
- (bsz, src_length) = input_shape
- combined_attention_mask = torch.zeros((bsz, 1, src_length, src_length + past_key_values_length), dtype=inputs_embeds.dtype, device=inputs_embeds.device)
- else:
- combined_attention_mask = _make_causal_mask_opt(input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length).to(inputs_embeds.device)
- if self.bidirectional_mask is not None:
- assert attention_mask.shape == self.bidirectional_mask.shape
- expanded_bidirectional_mask = _expand_mask_opt(self.bidirectional_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device)
- combined_attention_mask = torch.maximum(expanded_bidirectional_mask, combined_attention_mask)
- if attention_mask is not None:
- expanded_attn_mask = _expand_mask_opt(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device)
- combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
- return combined_attention_mask
- setattr(model.model.decoder, '_prepare_decoder_attention_mask', MethodType(_prepare_decoder_attention_mask, model.model.decoder))
-
- def forward(self: OPTForCausalLM, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, bidirectional_mask: Optional[torch.ByteTensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
-
- def call_og_forward():
- return self._original_forward(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
- if bidirectional_mask is None:
- return call_og_forward()
- self.model.decoder.bidirectional_mask = bidirectional_mask
- try:
- outputs = call_og_forward()
- except:
- self.model.decoder.bidirectional_mask = None
- raise
- self.model.decoder.bidirectional_mask = None
- return outputs
-
- def generate(self: OPTForCausalLM, *args: tuple, **kwargs: Dict[str, Any]):
- """Wraps original generate to enable PrefixLM-style attention."""
- self.model.decoder.bidirectional_mask = 'g'
- try:
- output = self._original_generate(*args, **kwargs)
- except:
- self.model.decoder.bidirectional_mask = None
- raise
- self.model.decoder.bidirectional_mask = None
- return output
- setattr(model, 'forward', MethodType(forward, model))
- setattr(model, 'generate', MethodType(generate, model))
- setattr(model, '_prefix_lm_converted', True)
- return model
-_SUPPORTED_HF_MODELS = _SUPPORTED_GPT_MODELS + (BloomForCausalLM, OPTForCausalLM)
-CAUSAL_LM_TYPES = Union[GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM, BloomForCausalLM, OPTForCausalLM]
-
-def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:
- """Converts a HuggingFace Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `GPT2LMHeadModel`
- - `GPTNeoForCausalLM`
- - `GPTNeoXForCausalLM`
- - `GPTJForCausalLM`
- - `BloomForCausalLM`
- - `OPTForCausalLM`
-
- Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the
- `generate` method and/or select underlying methods depending on the model class.
-
- These changes preserve the model API, but add a new input to `forward`: "bidirectional_mask".
-
- Notes on training:
- To actually train the converted model as a Prefix LM, training batches will need to indicate
- the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.
-
- **This is not a standard input and requires custom layers either within or after your dataloader.**
-
- In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`
- such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.
- That is, the prefix portion of the sequence should not generate any loss. Loss should only be
- generated by the target portion of the sequence.
-
- Notes on `GPTNeoForCausalLM`:
- To simplify the implementation, "global" and "local" attention layers are handled differently.
- For "global" layers, we handle conversion as described above. For "local" layers, which use a
- causal attention mask within a restricted local window, we do not alter the masking.
-
- Notes on `forward` method conversion:
- After conversion, the `forward` method will handle a new input, `bidirectional_mask`,
- which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions
- belonging to the prefix (prefix tokens can attend to one another bidirectionally), and
- 0 indicates token positions belonging to the target.
-
- The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing
- causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset
- the causal masks before returning the result.
-
- Notes on `generate` method conversion:
- After conversion, the `generate` method will have the same signature but will internally
- convert all causal masks to be purely bidirectional, call the original `generate` method, and
- (where appropriate) reset the causal masks before returning the result.
-
- This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token
- "prompt" passed to `generate` (which is treated as the prefix) and then sequentially generates
- each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one
- another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and
- previously-generated tokens (also as expected in a Prefix LM).
-
- To preserve the API, the original methods are renamed to `_original_forward` and
- `_original_generate`, and replaced with new `forward` and `generate` methods that wrap
- them, respectively, though implementation details vary by model class.
- """
- if isinstance(model, _SUPPORTED_GPT_MODELS):
- return _convert_gpt_causal_lm_to_prefix_lm(model)
- elif isinstance(model, BloomForCausalLM):
- return _convert_bloom_causal_lm_to_prefix_lm(model)
- elif isinstance(model, OPTForCausalLM):
- return _convert_opt_causal_lm_to_prefix_lm(model)
- else:
- raise TypeError(f'Cannot convert model to Prefix LM. ' + f'Model does not belong to set of supported HF models:' + f'\n{_SUPPORTED_HF_MODELS}')
-
-def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):
- """Attempts to add bidirectional_mask to batch if missing.
-
- Raises:
- KeyError if bidirectional_mask is missing and can't be inferred
- """
- if 'bidirectional_mask' not in batch:
- if batch.get('mode', None) == 'icl_task':
- batch['bidirectional_mask'] = batch['attention_mask'].clone()
- for (i, continuation_indices) in enumerate(batch['continuation_indices']):
- batch['bidirectional_mask'][i, continuation_indices] = 0
- elif 'labels' in batch and 'attention_mask' in batch:
- batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])
- else:
- raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')
\ No newline at end of file
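
Note: the docstring of `convert_hf_causal_lm_to_prefix_lm` removed above spells out the batch convention (a `bidirectional_mask` marking prefix tokens, with prefix labels set to -100). A minimal sketch of that convention, assuming illustrative helpers; `make_prefix_lm_batch` and `prefix_lens` are not part of the repo:

```python
import torch

def make_prefix_lm_batch(input_ids: torch.Tensor, attention_mask: torch.Tensor,
                         prefix_lens: torch.Tensor) -> dict:
    """Illustrative only: build a Prefix-LM batch per the deleted converter's docstring.

    input_ids / attention_mask: [batch, seq]; prefix_lens: [batch] number of prefix tokens.
    """
    positions = torch.arange(input_ids.shape[1], device=input_ids.device).unsqueeze(0)  # [1, seq]
    # 1 marks prefix positions (attend bidirectionally), 0 marks target positions.
    bidirectional_mask = (positions < prefix_lens.unsqueeze(1)).long() * attention_mask
    labels = input_ids.clone()
    labels[bidirectional_mask == 1] = -100    # the prefix generates no loss
    labels[attention_mask == 0] = -100        # neither does padding
    return {'input_ids': input_ids, 'attention_mask': attention_mask,
            'bidirectional_mask': bidirectional_mask, 'labels': labels}
```
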
diff --git a/mmte/models/llava/model/language_model/mpt/meta_init_context.py b/mmte/models/llava/model/language_model/mpt/meta_init_context.py
deleted file mode 100644
index 6cba6ff..0000000
--- a/mmte/models/llava/model/language_model/mpt/meta_init_context.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from contextlib import contextmanager
-import torch
-import torch.nn as nn
-
-@contextmanager
-def init_empty_weights(include_buffers: bool=False):
- """Meta initialization context manager.
-
- A context manager under which models are initialized with all parameters
- on the meta device, therefore creating an empty model. Useful when just
- initializing the model would blow the available RAM.
-
- Args:
- include_buffers (`bool`, *optional*, defaults to `False`): Whether or
- not to also put all buffers on the meta device while initializing.
-
- Example:
- ```python
- import torch.nn as nn
-
- # Initialize a model with 100 billion parameters in no time and without using any RAM.
- with init_empty_weights():
- tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
- ```
-
-
-
- Any model created under this context manager has no weights. As such you can't do something like
- `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
-
-
- """
- with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:
- yield f
-
-@contextmanager
-def init_on_device(device: torch.device, include_buffers: bool=False):
- """Device initialization context manager.
-
- A context manager under which models are initialized with all parameters
- on the specified device.
-
- Args:
- device (`torch.device`): Device to initialize all parameters on.
- include_buffers (`bool`, *optional*, defaults to `False`): Whether or
- not to also put all buffers on the meta device while initializing.
-
- Example:
- ```python
- import torch.nn as nn
-
- with init_on_device(device=torch.device("cuda")):
- tst = nn.Linear(100, 100) # on `cuda` device
- ```
- """
- old_register_parameter = nn.Module.register_parameter
- if include_buffers:
- old_register_buffer = nn.Module.register_buffer
-
- def register_empty_parameter(module, name, param):
- old_register_parameter(module, name, param)
- if param is not None:
- param_cls = type(module._parameters[name])
- kwargs = module._parameters[name].__dict__
- module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
-
- def register_empty_buffer(module, name, buffer):
- old_register_buffer(module, name, buffer)
- if buffer is not None:
- module._buffers[name] = module._buffers[name].to(device)
- if include_buffers:
- tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']}
- else:
- tensor_constructors_to_patch = {}
-
- def patch_tensor_constructor(fn):
-
- def wrapper(*args, **kwargs):
- kwargs['device'] = device
- return fn(*args, **kwargs)
- return wrapper
- try:
- nn.Module.register_parameter = register_empty_parameter
- if include_buffers:
- nn.Module.register_buffer = register_empty_buffer
- for torch_function_name in tensor_constructors_to_patch.keys():
- setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
- yield
- finally:
- nn.Module.register_parameter = old_register_parameter
- if include_buffers:
- nn.Module.register_buffer = old_register_buffer
- for (torch_function_name, old_torch_function) in tensor_constructors_to_patch.items():
- setattr(torch, torch_function_name, old_torch_function)
\ No newline at end of file
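
Note: the removed `init_empty_weights` mirrors a pattern that recent PyTorch exposes directly. A minimal sketch of the same idea using the built-in meta-device context; the module sizes are made up, and relying on the torch >= 2.0 context manager is an assumption, not something this diff requires:

```python
import torch
import torch.nn as nn

# Allocate a large module with parameters on the meta device, so no real memory is used.
with torch.device('meta'):
    tst = nn.Sequential(*[nn.Linear(4096, 4096) for _ in range(8)])

print(all(p.is_meta for p in tst.parameters()))   # True: weights are shape-only placeholders
# Real weights would be materialized later, e.g. via load_state_dict(..., assign=True)
# on recent PyTorch, or accelerate's load_checkpoint_and_dispatch as the removed
# docstring suggests.
```
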
diff --git a/mmte/models/llava/model/language_model/mpt/modeling_mpt.py b/mmte/models/llava/model/language_model/mpt/modeling_mpt.py
deleted file mode 100644
index 1331344..0000000
--- a/mmte/models/llava/model/language_model/mpt/modeling_mpt.py
+++ /dev/null
@@ -1,331 +0,0 @@
-"""A simple, flexible implementation of a GPT model.
-
-Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
-"""
-import math
-import warnings
-from typing import List, Optional, Tuple, Union
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
-from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
-from .attention import attn_bias_shape, build_attn_bias
-from .blocks import MPTBlock
-from .custom_embedding import SharedEmbedding
-from .norm import NORM_CLASS_REGISTRY
-from .configuration_mpt import MPTConfig
-from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
-from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
-from .meta_init_context import init_empty_weights
-from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
-try:
- from .flash_attn_triton import flash_attn_func
-except:
- pass
-Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
-
-class MPTPreTrainedModel(PreTrainedModel):
- config_class = MPTConfig
- base_model_prefix = 'model'
- _no_split_modules = ['MPTBlock']
-
-class MPTModel(MPTPreTrainedModel):
-
- def __init__(self, config: MPTConfig):
- config._validate_config()
- super().__init__(config)
- self.attn_impl = config.attn_config['attn_impl']
- self.prefix_lm = config.attn_config['prefix_lm']
- self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
- self.alibi = config.attn_config['alibi']
- self.alibi_bias_max = config.attn_config['alibi_bias_max']
- if config.init_device == 'mixed':
- if dist.get_local_rank() == 0:
- config.init_device = 'cpu'
- else:
- config.init_device = 'meta'
- if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
- norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
- raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
- norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
- self.embedding_fraction = config.embedding_fraction
- self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
- if not self.alibi:
- self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
- self.emb_drop = nn.Dropout(config.emb_pdrop)
- self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
- self.norm_f = norm_class(config.d_model, device=config.init_device)
- if config.init_device != 'meta':
- print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
- self.apply(self.param_init_fn)
- self.is_causal = not self.prefix_lm
- self._attn_bias_initialized = False
- self.attn_bias = None
- self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
- if config.no_bias:
- for module in self.modules():
- if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
- if config.verbose:
- warnings.warn(f'Removing bias ({module.bias}) from {module}.')
- module.register_parameter('bias', None)
- if config.verbose and config.verbose > 2:
- print(self)
- if 'verbose' not in self.config.init_config:
- self.config.init_config['verbose'] = self.config.verbose
- if self.config.init_config['verbose'] > 1:
- init_fn_name = self.config.init_config['name']
- warnings.warn(f'Using {init_fn_name} initialization.')
- self.gradient_checkpointing = False
-
- def get_input_embeddings(self):
- return self.wte
-
- def set_input_embeddings(self, value):
- self.wte = value
-
- @torch.no_grad()
- def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
- if not self._attn_bias_initialized:
- if self.attn_bias_shape:
- self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
- self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
- self._attn_bias_initialized = True
- if self.attn_impl == 'flash':
- return (self.attn_bias, attention_mask)
- if self.attn_bias is not None:
- self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
- attn_bias = self.attn_bias
- if self.prefix_lm:
- assert isinstance(attn_bias, torch.Tensor)
- assert isinstance(prefix_mask, torch.Tensor)
- attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
- if self.attn_uses_sequence_id and sequence_id is not None:
- assert isinstance(attn_bias, torch.Tensor)
- attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
- if attention_mask is not None:
- s_k = attention_mask.shape[-1]
- if attn_bias is None:
- attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
- else:
- _s_k = max(0, attn_bias.size(-1) - s_k)
- attn_bias = attn_bias[:, :, :, _s_k:]
- if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
- raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
- min_val = torch.finfo(attn_bias.dtype).min
- attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
- return (attn_bias, None)
-
- def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
- (s_k, s_q) = attn_bias.shape[-2:]
- if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
- raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_seq_len} ' + f'but are {s_k} and {s_q}.')
- seq_len = prefix_mask.shape[-1]
- if seq_len > self.config.max_seq_len:
- raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
- attn_bias = attn_bias[..., :seq_len, :seq_len]
- causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
- prefix = prefix_mask.view(-1, 1, 1, seq_len)
- cannot_attend = ~torch.logical_or(causal, prefix.bool())
- min_val = torch.finfo(attn_bias.dtype).min
- attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
- return attn_bias
-
- def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
- seq_len = sequence_id.shape[-1]
- if seq_len > self.config.max_seq_len:
- raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
- attn_bias = attn_bias[..., :seq_len, :seq_len]
- cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
- min_val = torch.finfo(attn_bias.dtype).min
- attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
- return attn_bias
-
- def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
- return_dict = return_dict if return_dict is not None else self.config.return_dict
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- if attention_mask is not None:
- attention_mask = attention_mask.bool()
- if prefix_mask is not None:
- prefix_mask = prefix_mask.bool()
- if not return_dict:
- raise NotImplementedError('return_dict False is not implemented yet for MPT')
- if output_attentions:
- if self.attn_impl != 'torch':
- raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
- if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
- raise NotImplementedError('MPT does not support training with left padding.')
- if self.prefix_lm and prefix_mask is None:
- raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
- if self.training:
- if self.attn_uses_sequence_id and sequence_id is None:
- raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
- elif self.attn_uses_sequence_id is False and sequence_id is not None:
- warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
- if input_ids is not None:
- S = input_ids.size(1)
- assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
- tok_emb = self.wte(input_ids)
- else:
- assert inputs_embeds is not None
- assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
- S = inputs_embeds.size(1)
- tok_emb = inputs_embeds
- if self.alibi:
- x = tok_emb
- else:
- past_position = 0
- if past_key_values is not None:
- if len(past_key_values) != self.config.n_layers:
- raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
- past_position = past_key_values[0][0].size(1)
- if self.attn_impl == 'torch':
- past_position = past_key_values[0][0].size(3)
- if S + past_position > self.config.max_seq_len:
- raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
- pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
- if attention_mask is not None:
- pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
- pos_emb = self.wpe(pos)
- x = tok_emb + pos_emb
- if self.embedding_fraction == 1:
- x = self.emb_drop(x)
- else:
- x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
- assert isinstance(self.emb_drop, nn.Module)
- x = self.emb_drop(x_shrunk)
- (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
- if use_cache and past_key_values is None:
- past_key_values = [() for _ in range(self.config.n_layers)]
- all_hidden_states = () if output_hidden_states else None
- all_self_attns = () if output_attentions else None
- for (b_idx, block) in enumerate(self.blocks):
- if output_hidden_states:
- assert all_hidden_states is not None
- all_hidden_states = all_hidden_states + (x,)
- past_key_value = past_key_values[b_idx] if past_key_values is not None else None
- if self.gradient_checkpointing and self.training:
- (x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
- else:
- (x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
- if past_key_values is not None:
- past_key_values[b_idx] = past_key_value
- if output_attentions:
- assert all_self_attns is not None
- all_self_attns = all_self_attns + (attn_weights,)
- x = self.norm_f(x)
- if output_hidden_states:
- assert all_hidden_states is not None
- all_hidden_states = all_hidden_states + (x,)
- return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
-
- def param_init_fn(self, module):
- init_fn_name = self.config.init_config['name']
- MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)
-
- def fsdp_wrap_fn(self, module):
- return isinstance(module, MPTBlock)
-
- def activation_checkpointing_fn(self, module):
- return isinstance(module, MPTBlock)
-
-class MPTForCausalLM(MPTPreTrainedModel):
-
- def __init__(self, config: MPTConfig):
- super().__init__(config)
- if not config.tie_word_embeddings:
- raise ValueError('MPTForCausalLM only supports tied word embeddings')
- print(f'Instantiating an MPTForCausalLM model from {__file__}')
- self.transformer = MPTModel(config)
- for child in self.transformer.children():
- if isinstance(child, torch.nn.ModuleList):
- continue
- if isinstance(child, torch.nn.Module):
- child._fsdp_wrap = True
- self.logit_scale = None
- if config.logit_scale is not None:
- logit_scale = config.logit_scale
- if isinstance(logit_scale, str):
- if logit_scale == 'inv_sqrt_d_model':
- logit_scale = 1 / math.sqrt(config.d_model)
- else:
- raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
- self.logit_scale = logit_scale
-
- def get_input_embeddings(self):
- return self.transformer.wte
-
- def set_input_embeddings(self, value):
- self.transformer.wte = value
-
- def get_output_embeddings(self):
- return self.transformer.wte
-
- def set_output_embeddings(self, new_embeddings):
- self.transformer.wte = new_embeddings
-
- def set_decoder(self, decoder):
- self.transformer = decoder
-
- def get_decoder(self):
- return self.transformer
-
- def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor]=None):
- return_dict = return_dict if return_dict is not None else self.config.return_dict
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- if inputs_embeds is not None:
- raise NotImplementedError('inputs_embeds has to be None (for hf/peft support).')
- outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache)
- logits = self.transformer.wte(outputs.last_hidden_state.to(self.transformer.wte.weight.device), True)
- if self.logit_scale is not None:
- if self.logit_scale == 0:
- warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
- logits *= self.logit_scale
- loss = None
- if labels is not None:
- labels = torch.roll(labels, shifts=-1)
- labels[:, -1] = -100
- loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1))
- return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
-
- def param_init_fn(self, module):
- init_fn_name = self.config.init_config['name']
- MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)
-
- def fsdp_wrap_fn(self, module):
- return isinstance(module, MPTBlock)
-
- def activation_checkpointing_fn(self, module):
- return isinstance(module, MPTBlock)
-
- def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
- if inputs_embeds is not None:
- raise NotImplementedError('inputs_embeds is not implemented for MPT yet')
- attention_mask = kwargs['attention_mask'].bool()
- if attention_mask[:, -1].sum() != attention_mask.shape[0]:
- raise NotImplementedError('MPT does not support generation with right padding.')
- if self.transformer.attn_uses_sequence_id and self.training:
- sequence_id = torch.zeros_like(input_ids[:1])
- else:
- sequence_id = None
- if past_key_values is not None:
- input_ids = input_ids[:, -1].unsqueeze(-1)
- if self.transformer.prefix_lm:
- prefix_mask = torch.ones_like(attention_mask)
- if kwargs.get('use_cache') == False:
- raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.')
- else:
- prefix_mask = None
- return {'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True)}
-
- @staticmethod
- def _reorder_cache(past_key_values, beam_idx):
- """Used by HuggingFace generate when using beam search with kv-caching.
-
- See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133
- for an example in transformers.
- """
- reordered_past = []
- for layer_past in past_key_values:
- reordered_past += [tuple((past_state.index_select(0, beam_idx) for past_state in layer_past))]
- return reordered_past
\ No newline at end of file
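
Note: the deleted `MPTForCausalLM.forward` computes the LM loss by rolling `labels` left and masking the final position, rather than slicing logits and labels as the BLOOM code earlier in this diff does. A small check, with illustrative shapes and values only, that the two formulations give the same loss:

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
B, S, V = 2, 5, 11
logits = torch.randn(B, S, V)
labels = torch.randint(0, V, (B, S))

# MPT style: roll labels left by one and ignore the final position
# (-100 is the default ignore_index of F.cross_entropy).
rolled = torch.roll(labels, shifts=-1)
rolled[:, -1] = -100
loss_mpt = F.cross_entropy(logits.view(-1, V), rolled.view(-1))

# Conventional style: drop the last logit and the first label.
loss_std = F.cross_entropy(logits[:, :-1, :].reshape(-1, V), labels[:, 1:].reshape(-1))
assert torch.allclose(loss_mpt, loss_std, atol=1e-6)
```
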
diff --git a/mmte/models/llava/model/language_model/mpt/norm.py b/mmte/models/llava/model/language_model/mpt/norm.py
deleted file mode 100644
index 067b614..0000000
--- a/mmte/models/llava/model/language_model/mpt/norm.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import torch
-
-def _cast_if_autocast_enabled(tensor):
- if torch.is_autocast_enabled():
- if tensor.device.type == 'cuda':
- dtype = torch.get_autocast_gpu_dtype()
- elif tensor.device.type == 'cpu':
- dtype = torch.get_autocast_cpu_dtype()
- else:
- raise NotImplementedError()
- return tensor.to(dtype=dtype)
- return tensor
-
-class LPLayerNorm(torch.nn.LayerNorm):
-
- def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None):
- super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype)
-
- def forward(self, x):
- module_device = x.device
- downcast_x = _cast_if_autocast_enabled(x)
- downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
- downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
- with torch.autocast(enabled=False, device_type=module_device.type):
- return torch.nn.functional.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps)
-
-def rms_norm(x, weight=None, eps=1e-05):
- output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
- if weight is not None:
- return output * weight
- return output
-
-class RMSNorm(torch.nn.Module):
-
- def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
- super().__init__()
- self.eps = eps
- if weight:
- self.weight = torch.nn.Parameter(torch.ones(normalized_shape, dtype=dtype, device=device))
- else:
- self.register_parameter('weight', None)
-
- def forward(self, x):
- return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)
-
-class LPRMSNorm(RMSNorm):
-
- def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
- super().__init__(normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device)
-
- def forward(self, x):
- downcast_x = _cast_if_autocast_enabled(x)
- downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
- with torch.autocast(enabled=False, device_type=x.device.type):
- return rms_norm(downcast_x, downcast_weight, self.eps).to(dtype=x.dtype)
-NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}
\ No newline at end of file
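
Note: the removed `rms_norm` normalizes by the root mean square without mean-centering or a bias term. A quick illustrative check (not from the repo) that on zero-mean rows it coincides with standard layer norm:

```python
import torch
import torch.nn.functional as F

def rms_norm(x, weight=None, eps=1e-05):
    out = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
    return out * weight if weight is not None else out

x = torch.randn(4, 16)
x = x - x.mean(-1, keepdim=True)        # force zero mean per row
w = torch.ones(16)
ln = F.layer_norm(x, (16,), weight=w, eps=1e-05)
assert torch.allclose(rms_norm(x, w), ln, atol=1e-5)
```
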
diff --git a/mmte/models/llava/model/language_model/mpt/param_init_fns.py b/mmte/models/llava/model/language_model/mpt/param_init_fns.py
deleted file mode 100644
index 418b83c..0000000
--- a/mmte/models/llava/model/language_model/mpt/param_init_fns.py
+++ /dev/null
@@ -1,181 +0,0 @@
-import math
-import warnings
-from collections.abc import Sequence
-from functools import partial
-from typing import Optional, Tuple, Union
-import torch
-from torch import nn
-from .norm import NORM_CLASS_REGISTRY
-
-def torch_default_param_init_fn_(module: nn.Module, verbose: int=0, **kwargs):
- del kwargs
- if verbose > 1:
- warnings.warn(f"Initializing network using module's reset_parameters attribute")
- if hasattr(module, 'reset_parameters'):
- module.reset_parameters()
-
-def fused_init_helper_(module: nn.Module, init_fn_):
- _fused = getattr(module, '_fused', None)
- if _fused is None:
- raise RuntimeError(f'Internal logic error')
- (dim, splits) = _fused
- splits = (0, *splits, module.weight.size(dim))
- for (s, e) in zip(splits[:-1], splits[1:]):
- slice_indices = [slice(None)] * module.weight.ndim
- slice_indices[dim] = slice(s, e)
- init_fn_(module.weight[slice_indices])
-
-def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- del kwargs
- if verbose > 1:
- warnings.warn(f'If model has bias parameters they are initialized to 0.')
- init_div_is_residual = init_div_is_residual
- if init_div_is_residual is False:
- div_is_residual = 1.0
- elif init_div_is_residual is True:
- div_is_residual = math.sqrt(2 * n_layers)
- elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
- div_is_residual = init_div_is_residual
- elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
- div_is_residual = float(init_div_is_residual)
- else:
- div_is_residual = 1.0
- raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')
- if init_div_is_residual is not False:
- if verbose > 1:
- warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')
- if isinstance(module, nn.Linear):
- if hasattr(module, '_fused'):
- fused_init_helper_(module, init_fn_)
- else:
- init_fn_(module.weight)
- if module.bias is not None:
- torch.nn.init.zeros_(module.bias)
- if init_div_is_residual is not False and getattr(module, '_is_residual', False):
- with torch.no_grad():
- module.weight.div_(div_is_residual)
- elif isinstance(module, nn.Embedding):
- if emb_init_std is not None:
- std = emb_init_std
- if std == 0:
- warnings.warn(f'Embedding layer initialized to 0.')
- emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
- if verbose > 1:
- warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')
- elif emb_init_uniform_lim is not None:
- lim = emb_init_uniform_lim
- if isinstance(lim, Sequence):
- if len(lim) > 2:
- raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')
- if lim[0] == lim[1]:
- warnings.warn(f'Embedding layer initialized to {lim[0]}.')
- else:
- if lim == 0:
- warnings.warn(f'Embedding layer initialized to 0.')
- lim = [-lim, lim]
- (a, b) = lim
- emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
- if verbose > 1:
- warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')
- else:
- emb_init_fn_ = init_fn_
- emb_init_fn_(module.weight)
- elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
- if verbose > 1:
- warnings.warn(f'Norm weights are set to 1. If norm layer has a bias it is initialized to 0.')
- if hasattr(module, 'weight') and module.weight is not None:
- torch.nn.init.ones_(module.weight)
- if hasattr(module, 'bias') and module.bias is not None:
- torch.nn.init.zeros_(module.bias)
- elif isinstance(module, nn.MultiheadAttention):
- if module._qkv_same_embed_dim:
- assert module.in_proj_weight is not None
- assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)
- assert d_model is not None
- _d = d_model
- splits = (0, _d, 2 * _d, 3 * _d)
- for (s, e) in zip(splits[:-1], splits[1:]):
- init_fn_(module.in_proj_weight[s:e])
- else:
- assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)
- assert module.in_proj_weight is None
- init_fn_(module.q_proj_weight)
- init_fn_(module.k_proj_weight)
- init_fn_(module.v_proj_weight)
- if module.in_proj_bias is not None:
- torch.nn.init.zeros_(module.in_proj_bias)
- if module.bias_k is not None:
- torch.nn.init.zeros_(module.bias_k)
- if module.bias_v is not None:
- torch.nn.init.zeros_(module.bias_v)
- init_fn_(module.out_proj.weight)
- if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):
- with torch.no_grad():
- module.out_proj.weight.div_(div_is_residual)
- if module.out_proj.bias is not None:
- torch.nn.init.zeros_(module.out_proj.bias)
- else:
- for _ in module.parameters(recurse=False):
- raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')
-
-def _normal_init_(std, mean=0.0):
- return partial(torch.nn.init.normal_, mean=mean, std=std)
-
-def _normal_param_init_fn_(module: nn.Module, std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- del kwargs
- init_fn_ = _normal_init_(std=std)
- if verbose > 1:
- warnings.warn(f'Using torch.nn.init.normal_ init fn mean=0.0, std={std}')
- generic_param_init_fn_(module=module, init_fn_=init_fn_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def baseline_param_init_fn_(module: nn.Module, init_std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- del kwargs
- if init_std is None:
- raise ValueError("You must set model.init_config['init_std'] to a float value to use the default initialization scheme.")
- _normal_param_init_fn_(module=module, std=init_std, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def small_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- del kwargs
- std = math.sqrt(2 / (5 * d_model))
- _normal_param_init_fn_(module=module, std=std, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def neox_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):
- """From section 2.3.1 of GPT-NeoX-20B:
-
- An Open-Source Autoregressive Language Model, Black et al. (2022)
- see https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151
- and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py
- """
- del kwargs
- residual_div = n_layers / math.sqrt(10)
- if verbose > 1:
- warnings.warn(f'setting init_div_is_residual to {residual_div}')
- small_param_init_fn_(module=module, d_model=d_model, n_layers=n_layers, init_div_is_residual=residual_div, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def kaiming_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
- del kwargs
- if verbose > 1:
- warnings.warn(f'Using nn.init.kaiming_uniform_ init fn with parameters: ' + f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}')
- kaiming_uniform_ = partial(nn.init.kaiming_uniform_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
- generic_param_init_fn_(module=module, init_fn_=kaiming_uniform_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def kaiming_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
- del kwargs
- if verbose > 1:
- warnings.warn(f'Using nn.init.kaiming_normal_ init fn with parameters: ' + f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}')
- kaiming_normal_ = partial(torch.nn.init.kaiming_normal_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
- generic_param_init_fn_(module=module, init_fn_=kaiming_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def xavier_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, verbose: int=0, **kwargs):
- del kwargs
- xavier_uniform_ = partial(torch.nn.init.xavier_uniform_, gain=init_gain)
- if verbose > 1:
- warnings.warn(f'Using torch.nn.init.xavier_uniform_ init fn with parameters: ' + f'gain={init_gain}')
- generic_param_init_fn_(module=module, init_fn_=xavier_uniform_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-
-def xavier_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, init_gain: float=0, verbose: int=0, **kwargs):
- xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain)
- if verbose > 1:
- warnings.warn(f'Using torch.nn.init.xavier_normal_ init fn with parameters: ' + f'gain={init_gain}')
- generic_param_init_fn_(module=module, init_fn_=xavier_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
-MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}
\ No newline at end of file
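
Note: the removed init functions mostly differ only in the scale they feed to `generic_param_init_fn_`. The concrete numbers, for illustration only (`d_model` and `n_layers` are made-up values):

```python
import math

d_model, n_layers = 4096, 32
small_init_std = math.sqrt(2 / (5 * d_model))   # small_param_init_fn_ ("small init")
residual_div = math.sqrt(2 * n_layers)          # default init_div_is_residual=True divisor
neox_residual_div = n_layers / math.sqrt(10)    # neox_param_init_fn_ residual divisor
print(small_init_std, residual_div, neox_residual_div)  # ~0.00988, 8.0, ~10.12
```
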
diff --git a/mmte/models/llava/model/llava_arch.py b/mmte/models/llava/model/llava_arch.py
index 0a82fbf..4b95590 100644
--- a/mmte/models/llava/model/llava_arch.py
+++ b/mmte/models/llava/model/llava_arch.py
@@ -21,7 +21,9 @@
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
-from mmte.models.llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from ..constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+
+from ..mm_utils import get_anyres_image_grid_shape
class LlavaMetaModel:
@@ -33,6 +35,11 @@ def __init__(self, config):
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
+ if 'unpad' in getattr(config, 'mm_patch_merge_type', ''):
+ self.image_newline = nn.Parameter(
+ torch.empty(config.hidden_size, dtype=self.dtype)
+ )
+
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
@@ -44,6 +51,7 @@ def initialize_vision_modules(self, model_args, fsdp=None):
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
+ mm_patch_merge_type = model_args.mm_patch_merge_type
self.config.mm_vision_tower = vision_tower
@@ -66,9 +74,16 @@ def initialize_vision_modules(self, model_args, fsdp=None):
self.config.mm_hidden_size = vision_tower.hidden_size
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
+ self.config.mm_patch_merge_type = mm_patch_merge_type
if getattr(self, 'mm_projector', None) is None:
self.mm_projector = build_vision_projector(self.config)
+
+ if 'unpad' in mm_patch_merge_type:
+ embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
+ self.image_newline = nn.Parameter(
+ torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std
+ )
else:
# In case it is frozen by LoRA
for p in self.mm_projector.parameters():
@@ -82,6 +97,37 @@ def get_w(weights, keyword):
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
+def unpad_image(tensor, original_size):
+ """
+ Unpads a PyTorch tensor of a padded and resized image.
+
+ Args:
+ tensor (torch.Tensor): The image tensor, assumed to be in CxHxW format.
+ original_size (tuple): The original size of the PIL image (width, height).
+
+ Returns:
+ torch.Tensor: The unpadded image tensor.
+ """
+ original_width, original_height = original_size
+ current_height, current_width = tensor.shape[1:]
+
+ original_aspect_ratio = original_width / original_height
+ current_aspect_ratio = current_width / current_height
+
+ if original_aspect_ratio > current_aspect_ratio:
+ scale_factor = current_width / original_width
+ new_height = int(original_height * scale_factor)
+ padding = (current_height - new_height) // 2
+ unpadded_tensor = tensor[:, padding:current_height - padding, :]
+ else:
+ scale_factor = current_height / original_height
+ new_width = int(original_width * scale_factor)
+ padding = (current_width - new_width) // 2
+ unpadded_tensor = tensor[:, :, padding:current_width - padding]
+
+ return unpadded_tensor
+
+
class LlavaMetaForCausalLM(ABC):
@abstractmethod
@@ -97,28 +143,63 @@ def encode_images(self, images):
return image_features
def prepare_inputs_labels_for_multimodal(
- self, input_ids, position_ids, attention_mask, past_key_values, labels, images
+ self, input_ids, position_ids, attention_mask, past_key_values, labels,
+ images, image_sizes=None
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
- if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
- target_shape = past_key_values[-1][-1].shape[-2] + 1
- attention_mask = torch.cat((attention_mask, torch.ones(
- (attention_mask.shape[0], target_shape - attention_mask.shape[1]),
- dtype=attention_mask.dtype,
- device=attention_mask.device
- )), dim=1)
- position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
return input_ids, position_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
+ if type(images) is list:
+ images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images]
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
- image_features = [x.flatten(0, 1).to(self.device) for x in image_features]
+ mm_patch_merge_type = getattr(self.config, 'mm_patch_merge_type', 'flat')
+ image_aspect_ratio = getattr(self.config, 'image_aspect_ratio', 'square')
+ if mm_patch_merge_type == 'flat':
+ image_features = [x.flatten(0, 1) for x in image_features]
+ elif mm_patch_merge_type.startswith('spatial'):
+ new_image_features = []
+ for image_idx, image_feature in enumerate(image_features):
+ if image_feature.shape[0] > 1:
+ base_image_feature = image_feature[0]
+ image_feature = image_feature[1:]
+ height = width = self.get_vision_tower().num_patches_per_side
+ assert height * width == base_image_feature.shape[0]
+ if image_aspect_ratio == 'anyres':
+ num_patch_width, num_patch_height = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, self.get_vision_tower().config.image_size)
+ image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
+ else:
+ raise NotImplementedError
+ if 'unpad' in mm_patch_merge_type:
+ image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
+ image_feature = image_feature.flatten(1, 2).flatten(2, 3)
+ image_feature = unpad_image(image_feature, image_sizes[image_idx])
+ image_feature = torch.cat((
+ image_feature,
+ self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)
+ ), dim=-1)
+ image_feature = image_feature.flatten(1, 2).transpose(0, 1)
+ else:
+ image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
+ image_feature = image_feature.flatten(0, 3)
+ image_feature = torch.cat((base_image_feature, image_feature), dim=0)
+ else:
+ image_feature = image_feature[0]
+ if 'unpad' in mm_patch_merge_type:
+ image_feature = torch.cat((
+ image_feature,
+ self.model.image_newline[None].to(image_feature.device)
+ ), dim=0)
+ new_image_features.append(image_feature)
+ image_features = new_image_features
+ else:
+ raise ValueError(f"Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}")
else:
- image_features = self.encode_images(images).to(self.device)
+ image_features = self.encode_images(images)
# TODO: image start / end is not implemented here to support pretraining.
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
@@ -140,7 +221,8 @@ def prepare_inputs_labels_for_multimodal(
if labels is None:
labels = torch.full_like(input_ids, IGNORE_INDEX)
- # remove the padding using attention_mask -- TODO: double check
+ # remove the padding using attention_mask -- FIXME
+ _input_ids = input_ids
input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
@@ -180,6 +262,8 @@ def prepare_inputs_labels_for_multimodal(
cur_new_input_embeds.append(cur_image_features)
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))
+ cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]
+
cur_new_input_embeds = torch.cat(cur_new_input_embeds)
cur_new_labels = torch.cat(cur_new_labels)
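
Note: the new `unpad_image` helper reverses the pad-to-square step on the patch-grid features, cropping whichever dimension was padded so the grid matches the original aspect ratio again. A minimal sketch with assumed shapes (the 24x24 grid and 1000x500 image are illustrative, not taken from the patch):

    import torch

    feat = torch.randn(3, 24, 24)   # C x H x W feature grid (assumed size)
    orig_w, orig_h = 1000, 500      # original image size (assumed)

    # original aspect ratio 2.0 > current 1.0, so the height was padded:
    # scale = 24 / 1000 = 0.024, new_height = int(500 * 0.024) = 12,
    # padding = (24 - 12) // 2 = 6 rows cropped from top and bottom.
    unpadded = feat[:, 6:24 - 6, :]
    assert unpadded.shape == (3, 12, 24)
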
diff --git a/mmte/models/llava/model/make_delta.py b/mmte/models/llava/model/make_delta.py
index 4ae55d5..c4e2a78 100644
--- a/mmte/models/llava/model/make_delta.py
+++ b/mmte/models/llava/model/make_delta.py
@@ -7,7 +7,7 @@
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
-from llava.model.utils import auto_upgrade
+from ..model.utils import auto_upgrade
def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):
diff --git a/mmte/models/llava/model/multimodal_encoder/builder.py b/mmte/models/llava/model/multimodal_encoder/builder.py
index 2b13589..29f63a2 100644
--- a/mmte/models/llava/model/multimodal_encoder/builder.py
+++ b/mmte/models/llava/model/multimodal_encoder/builder.py
@@ -1,11 +1,15 @@
import os
-from .clip_encoder import CLIPVisionTower
+from .clip_encoder import CLIPVisionTower, CLIPVisionTowerS2
def build_vision_tower(vision_tower_cfg, **kwargs):
vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
is_absolute_path_exists = os.path.exists(vision_tower)
- if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"):
- return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
+ use_s2 = getattr(vision_tower_cfg, 's2', False)
+ if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion") or "ShareGPT4V" in vision_tower:
+ if use_s2:
+ return CLIPVisionTowerS2(vision_tower, args=vision_tower_cfg, **kwargs)
+ else:
+ return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
raise ValueError(f'Unknown vision tower: {vision_tower}')
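
Note: the builder now routes to the S2 tower on an opt-in flag. A tiny sketch of the dispatch predicate using a hypothetical config object (the SimpleNamespace and model name are illustrative assumptions, not repo values):

    import os
    from types import SimpleNamespace

    cfg = SimpleNamespace(mm_vision_tower="openai/clip-vit-large-patch14-336", s2=True)  # hypothetical

    name = getattr(cfg, 'mm_vision_tower', getattr(cfg, 'vision_tower', None))
    recognised = (os.path.exists(name) or name.startswith("openai")
                  or name.startswith("laion") or "ShareGPT4V" in name)
    use_s2 = getattr(cfg, 's2', False)
    print(recognised, use_s2)  # True True -> CLIPVisionTowerS2 would be constructed
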
diff --git a/mmte/models/llava/model/multimodal_encoder/clip_encoder.py b/mmte/models/llava/model/multimodal_encoder/clip_encoder.py
index dbb9015..2c81415 100644
--- a/mmte/models/llava/model/multimodal_encoder/clip_encoder.py
+++ b/mmte/models/llava/model/multimodal_encoder/clip_encoder.py
@@ -16,12 +16,18 @@ def __init__(self, vision_tower, args, delay_load=False):
if not delay_load:
self.load_model()
+ elif getattr(args, 'unfreeze_mm_vision_tower', False):
+ self.load_model()
else:
self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name)
- def load_model(self):
+ def load_model(self, device_map=None):
+ if self.is_loaded:
+ print('{} is already loaded, `load_model` called again, skipping.'.format(self.vision_tower_name))
+ return
+
self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)
- self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name)
+ self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name, device_map=device_map)
self.vision_tower.requires_grad_(False)
self.is_loaded = True
@@ -73,6 +79,69 @@ def config(self):
def hidden_size(self):
return self.config.hidden_size
+ @property
+ def num_patches_per_side(self):
+ return self.config.image_size // self.config.patch_size
+
@property
def num_patches(self):
return (self.config.image_size // self.config.patch_size) ** 2
+
+
+
+class CLIPVisionTowerS2(CLIPVisionTower):
+ def __init__(self, vision_tower, args, delay_load=False):
+ super().__init__(vision_tower, args, delay_load)
+
+ self.s2_scales = getattr(args, 's2_scales', '336,672,1008')
+ self.s2_scales = list(map(int, self.s2_scales.split(',')))
+ self.s2_scales.sort()
+ self.s2_split_size = self.s2_scales[0]
+ self.s2_image_size = self.s2_scales[-1]
+
+ try:
+ from s2wrapper import forward as multiscale_forward
+ except ImportError:
+ raise ImportError('Package s2wrapper not found! Please install by running: \npip install git+https://github.com/bfshi/scaling_on_scales.git')
+ self.multiscale_forward = multiscale_forward
+
+ # change resize/crop size in preprocessing to the largest image size in s2_scale
+ if not delay_load or getattr(args, 'unfreeze_mm_vision_tower', False):
+ self.image_processor.size['shortest_edge'] = self.s2_image_size
+ self.image_processor.crop_size['height'] = self.image_processor.crop_size['width'] = self.s2_image_size
+
+ def load_model(self, device_map=None):
+ if self.is_loaded:
+ print('{} is already loaded, `load_model` called again, skipping.'.format(self.vision_tower_name))
+ return
+
+ self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)
+ self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name, device_map=device_map)
+ self.vision_tower.requires_grad_(False)
+
+ self.image_processor.size['shortest_edge'] = self.s2_image_size
+ self.image_processor.crop_size['height'] = self.image_processor.crop_size['width'] = self.s2_image_size
+
+ self.is_loaded = True
+
+ @torch.no_grad()
+ def forward_feature(self, images):
+ image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
+ image_features = self.feature_select(image_forward_outs).to(images.dtype)
+ return image_features
+
+ @torch.no_grad()
+ def forward(self, images):
+ if type(images) is list:
+ image_features = []
+ for image in images:
+ image_feature = self.multiscale_forward(self.forward_feature, image.unsqueeze(0), img_sizes=self.s2_scales, max_split_size=self.s2_split_size)
+ image_features.append(image_feature)
+ else:
+ image_features = self.multiscale_forward(self.forward_feature, images, img_sizes=self.s2_scales, max_split_size=self.s2_split_size)
+
+ return image_features
+
+ @property
+ def hidden_size(self):
+ return self.config.hidden_size * len(self.s2_scales)
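
Note: because `CLIPVisionTowerS2.forward` concatenates features from every scale, the reported `hidden_size` is the base width times the number of scales, and the projector input width must match it. Rough arithmetic (the 1024 base width is an assumption for a CLIP ViT-L tower):

    base_hidden = 1024                    # assumed CLIP ViT-L/14 hidden size
    s2_scales = [336, 672, 1008]          # default scales from the code above
    print(base_hidden * len(s2_scales))   # 3072 -> width the mm_projector sees with S2 enabled
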
diff --git a/mmte/models/llava/train/llava_trainer.py b/mmte/models/llava/train/llava_trainer.py
index fa40d72..ce2853a 100644
--- a/mmte/models/llava/train/llava_trainer.py
+++ b/mmte/models/llava/train/llava_trainer.py
@@ -1,5 +1,6 @@
import os
import torch
+import torch.nn as nn
from torch.utils.data import Sampler
@@ -9,7 +10,6 @@
get_parameter_names,
has_length,
ALL_LAYERNORM_LAYERS,
- ShardedDDPOption,
logger,
)
from typing import List, Optional
@@ -156,8 +156,6 @@ def create_optimizer(self):
"""
if is_sagemaker_mp_enabled():
return super().create_optimizer()
- if self.sharded_ddp == ShardedDDPOption.SIMPLE:
- return super().create_optimizer()
opt_model = self.model
@@ -212,27 +210,20 @@ def create_optimizer(self):
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)
- if self.sharded_ddp == ShardedDDPOption.SIMPLE:
- self.optimizer = OSS(
- params=optimizer_grouped_parameters,
- optim=optimizer_cls,
- **optimizer_kwargs,
- )
- else:
- self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
- if optimizer_cls.__name__ == "Adam8bit":
- import bitsandbytes
-
- manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
-
- skipped = 0
- for module in opt_model.modules():
- if isinstance(module, nn.Embedding):
- skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
- logger.info(f"skipped {module}: {skipped/2**20}M params")
- manager.register_module_override(module, "weight", {"optim_bits": 32})
- logger.debug(f"bitsandbytes: will optimize {module} in fp32")
- logger.info(f"skipped: {skipped/2**20}M params")
+ self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
+ if optimizer_cls.__name__ == "Adam8bit":
+ import bitsandbytes
+
+ manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
+
+ skipped = 0
+ for module in opt_model.modules():
+ if isinstance(module, nn.Embedding):
+ skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
+ logger.info(f"skipped {module}: {skipped/2**20}M params")
+ manager.register_module_override(module, "weight", {"optim_bits": 32})
+ logger.debug(f"bitsandbytes: will optimize {module} in fp32")
+ logger.info(f"skipped: {skipped/2**20}M params")
return self.optimizer
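
Note: `ShardedDDPOption` (and the fairscale `OSS` optimizer path) is no longer exported by recent transformers releases, which is why the branch is deleted outright rather than guarded. If one instead wanted to keep the old behaviour on old versions, a hedged compatibility shim (not what this patch does) could look like:

    try:
        from transformers.trainer import ShardedDDPOption  # removed in newer transformers
    except ImportError:
        ShardedDDPOption = None

    def wants_simple_sharded_ddp(trainer) -> bool:
        # Only meaningful on old transformers where the option still exists.
        return (ShardedDDPOption is not None
                and getattr(trainer, "sharded_ddp", None) == ShardedDDPOption.SIMPLE)
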
diff --git a/mmte/models/llava/train/train.py b/mmte/models/llava/train/train.py
index 2306c3a..183bde5 100644
--- a/mmte/models/llava/train/train.py
+++ b/mmte/models/llava/train/train.py
@@ -25,14 +25,15 @@
import torch
import transformers
+import tokenizers
-from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from ..constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from torch.utils.data import Dataset
-from llava.train.llava_trainer import LLaVATrainer
+from ..train.llava_trainer import LLaVATrainer
-from llava import conversation as conversation_lib
-from llava.model import *
-from llava.mm_utils import tokenizer_image_token
+from .. import conversation as conversation_lib
+from ..model import *
+from ..mm_utils import tokenizer_image_token
from PIL import Image
@@ -45,6 +46,10 @@ def rank0_print(*args):
print(*args)
+from packaging import version
+IS_TOKENIZER_GREATER_THAN_0_14 = version.parse(tokenizers.__version__) >= version.parse('0.14')
+
+
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
@@ -57,6 +62,7 @@ class ModelArguments:
mm_projector_type: Optional[str] = field(default='linear')
mm_use_im_start_end: bool = field(default=False)
mm_use_im_patch_token: bool = field(default=True)
+ mm_patch_merge_type: Optional[str] = field(default='flat')
mm_vision_select_feature: Optional[str] = field(default="patch")
@@ -468,6 +474,10 @@ def preprocess_v1(
round_len = len(tokenizer(rou).input_ids)
instruction_len = len(tokenizer(parts[0]).input_ids) - 2
+ if i != 0 and not tokenizer.legacy and IS_TOKENIZER_GREATER_THAN_0_14:
+ round_len -= 1
+ instruction_len -= 1
+
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
@@ -490,6 +500,7 @@ def preprocess_v1(
def preprocess_mpt(
sources,
tokenizer: transformers.PreTrainedTokenizer,
+ has_image: bool = False
) -> Dict:
conv = conversation_lib.default_conversation.copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
@@ -509,7 +520,18 @@ def preprocess_mpt(
conversations.append(conv.get_prompt())
# Tokenize conversations
- input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
+
+ if has_image:
+ input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
+ else:
+ input_ids = tokenizer(
+ conversations,
+ return_tensors="pt",
+ padding="longest",
+ max_length=tokenizer.model_max_length,
+ truncation=True,
+ ).input_ids
+
targets = input_ids.clone()
assert conv.sep_style == conversation_lib.SeparatorStyle.MPT
@@ -532,8 +554,18 @@ def preprocess_mpt(
if len(parts) != 2:
break
parts[0] += sep
- round_len = len(tokenizer_image_token(rou, tokenizer)) + len(tokenizer_image_token(conv.sep, tokenizer))
- instruction_len = len(tokenizer_image_token(parts[0], tokenizer))
+
+ if has_image:
+ round_len = len(tokenizer_image_token(rou, tokenizer))
+ instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 1
+ else:
+ round_len = len(tokenizer(rou).input_ids)
+ instruction_len = len(tokenizer(parts[0]).input_ids) - 1
+
+ if i != 0 and getattr(tokenizer, 'legacy', False) and IS_TOKENIZER_GREATER_THAN_0_14:
+ round_len += 1
+ instruction_len += 1
+
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
@@ -594,7 +626,7 @@ def preprocess(
if conversation_lib.default_conversation.version.startswith("v1"):
return preprocess_v1(sources, tokenizer, has_image=has_image)
if conversation_lib.default_conversation.version == "mpt":
- return preprocess_mpt(sources, tokenizer)
+ return preprocess_mpt(sources, tokenizer, has_image=has_image)
# add end signal and concatenate together
conversations = []
for source in sources:
@@ -753,7 +785,7 @@ def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_collator=data_collator)
-def train():
+def train(attn_implementation=None):
global local_rank
parser = transformers.HfArgumentParser(
@@ -785,7 +817,7 @@ def train():
if 'mpt' in model_args.model_name_or_path:
config = transformers.AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
config.attn_config['attn_impl'] = training_args.mpt_attn_impl
- model = LlavaMPTForCausalLM.from_pretrained(
+ model = LlavaMptForCausalLM.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=training_args.cache_dir,
@@ -795,12 +827,16 @@ def train():
model = LlavaLlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
+ attn_implementation=attn_implementation,
+ torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
**bnb_model_from_pretrained_args
)
else:
model = transformers.LlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
+ attn_implementation=attn_implementation,
+ torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
**bnb_model_from_pretrained_args
)
model.config.use_cache = False
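
Note: the off-by-one adjustments above are gated on the installed tokenizers version and the tokenizer's `legacy` flag. A standalone check of the gate (the printed value depends on whatever is installed locally):

    import tokenizers
    from packaging import version

    IS_TOKENIZER_GREATER_THAN_0_14 = version.parse(tokenizers.__version__) >= version.parse('0.14')
    print(tokenizers.__version__, IS_TOKENIZER_GREATER_THAN_0_14)
    # When this is True and the tokenizer is non-legacy, rounds after the first lose one
    # leading token, so preprocess_v1 shortens round_len and instruction_len by 1.
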
diff --git a/mmte/models/llava/train/train_mem.py b/mmte/models/llava/train/train_mem.py
index 2487d31..31c2db8 100644
--- a/mmte/models/llava/train/train_mem.py
+++ b/mmte/models/llava/train/train_mem.py
@@ -1,13 +1,4 @@
-# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
-# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
-# Make it more memory efficient by monkey patching the LLaMA model with FlashAttn.
-
-# Need to call this before importing transformers.
-from llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn
-
-replace_llama_attn_with_flash_attn()
-
-from llava.train.train import train
+from ..train.train import train
if __name__ == "__main__":
- train()
+ train(attn_implementation="flash_attention_2")
diff --git a/mmte/models/llava/train/train_xformers.py b/mmte/models/llava/train/train_xformers.py
index 23a59bf..9c69d9e 100644
--- a/mmte/models/llava/train/train_xformers.py
+++ b/mmte/models/llava/train/train_xformers.py
@@ -1,13 +1,13 @@
# Make it more memory efficient by monkey patching the LLaMA model with xformers attention.
# Need to call this before importing transformers.
-from llava.train.llama_xformers_attn_monkey_patch import (
+from ..train.llama_xformers_attn_monkey_patch import (
replace_llama_attn_with_xformers_attn,
)
replace_llama_attn_with_xformers_attn()
-from llava.train.train import train
+from ..train.train import train
if __name__ == "__main__":
train()
diff --git a/mmte/models/phi3_chat.py b/mmte/models/phi3_chat.py
index 5857c6b..7e89ad6 100644
--- a/mmte/models/phi3_chat.py
+++ b/mmte/models/phi3_chat.py
@@ -21,8 +21,6 @@ class Phi3Chat(BaseChat):
def __init__(self, model_id: str, device: str="cuda:0", bf16: bool=True):
super().__init__(model_id)
- if str(transformers.__version__) < '4.40.2':
- raise ImportError("Transformers version not compatible. '4.40.2' is expected.")
self.model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-vision-128k-instruct" , device_map=device, trust_remote_code=True, torch_dtype="auto", _attn_implementation='eager') # use _attn_implementation='eager' to disable flash attention
self.processor = AutoProcessor.from_pretrained("microsoft/Phi-3-vision-128k-instruct" , trust_remote_code=True)