Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Updated with huggingface #3

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10,401 changes: 10,401 additions & 0 deletions Clarifying_questions/6-non_generic_train_snippets_cluster.csv

Large diffs are not rendered by default.

123 changes: 123 additions & 0 deletions Clarifying_questions/LitModel_trainer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
import transformers
from torch.utils.data import DataLoader, TensorDataset, random_split, RandomSampler, Dataset
import pandas as pd
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import ModelCheckpoint
import math
import random
import re
import argparse
import wandb
from rouge import Rouge
from nltk.translate.bleu_score import sentence_bleu
from bert_score import score
from Mischallenaous import shift_tokens_right
import logging

class LitModel(pl.LightningModule):
    """Lightning wrapper for fine-tuning a seq2seq model (BART layout).

    Args:
        learning_rate: Adam learning rate.
        tokenizer: tokenizer exposing ``pad_token_id`` and ``decode``.
        model: a ``*ForConditionalGeneration``-style model with ``generate()``
            and ``get_encoder()``.
        hparams: namespace with boolean ``freeze_encoder`` / ``freeze_embeds``
            flags (saved via ``save_hyperparameters``).
    """

    def __init__(self, learning_rate, tokenizer, model, hparams):
        super().__init__()
        self.tokenizer = tokenizer
        self.model = model
        self.learning_rate = learning_rate
        self.save_hyperparameters(hparams)

        # Optionally freeze the encoder and/or the embedding tables so only
        # the remaining parameters are updated during fine-tuning.
        if self.hparams.freeze_encoder:
            self.freeze_params(self.model.get_encoder())

        if self.hparams.freeze_embeds:
            self.freeze_embeds()

    def freeze_embeds(self):
        """Freeze the shared, positional and token embeddings.

        Assumes the BART module layout (``model.model.shared``,
        ``model.model.encoder`` / ``decoder``); adapted from finetune.py.
        """
        self.freeze_params(self.model.model.shared)
        for sub in [self.model.model.encoder, self.model.model.decoder]:
            self.freeze_params(sub.embed_positions)
            self.freeze_params(sub.embed_tokens)

    def forward(self, input_ids, **kwargs):
        """Forward pass straight through the wrapped model."""
        return self.model(input_ids, **kwargs)

    def configure_optimizers(self):
        """Plain Adam over all (non-frozen) parameters."""
        return torch.optim.Adam(self.parameters(), lr=self.learning_rate)

    def _compute_loss(self, batch):
        """Shared loss computation for training and validation.

        ``batch`` is ``(src_ids, src_mask, tgt_ids)``. The decoder input is
        the right-shifted targets, while the loss is computed against the
        un-shifted ``tgt_ids`` (padding positions ignored).
        """
        src_ids, src_mask = batch[0], batch[1]
        tgt_ids = batch[2]

        # Shift the decoder tokens right (but NOT the tgt_ids used as labels).
        decoder_input_ids = shift_tokens_right(tgt_ids, self.tokenizer.pad_token_id)

        # Run the model and get the logits.
        outputs = self(src_ids, attention_mask=src_mask,
                       decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs[0]

        # Cross-entropy on the un-shifted tokens, ignoring padding.
        ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=self.tokenizer.pad_token_id)
        return ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))

    def training_step(self, batch, batch_idx):
        return {'loss': self._compute_loss(batch)}

    def validation_step(self, batch, batch_idx):
        val_loss = self._compute_loss(batch)
        wandb.log({"val_loss": val_loss})
        return {'loss': val_loss}

    # Method that generates text using the wrapped model's generate() method.
    def generate_text(self, text, eval_beams, early_stopping = True, max_len = 40):
        """Beam-search generate from tokenized input ``text`` and decode.

        Args:
            text: mapping with an ``input_ids`` tensor (tokenizer output).
            eval_beams: number of beams for beam search.
            early_stopping: kept for interface compatibility (currently unused,
                as in the original).
            max_len: maximum generated length.

        Returns:
            List of decoded strings, one per generated sequence.
        """
        generated_ids = self.model.generate(
            # Use the module's own device instead of the original hard-coded
            # 'cuda', so generation also works on CPU or other accelerators.
            input_ids= text["input_ids"].to(self.device),
            num_beams= eval_beams,
            max_length = max_len,
        )
        return [self.tokenizer.decode(w, skip_special_tokens=True, clean_up_tokenization_spaces=True) for w in generated_ids]

    def freeze_params(self, model):
        """Disable gradients for every parameter of ``model`` (or sub-module).

        Adapted from finetune.py. Returns 1 to preserve the original
        "checker" return convention.
        """
        for param in model.parameters():
            # BUG FIX: the original set 'requires_grade' (a typo), which just
            # created an unused attribute and left every parameter trainable.
            param.requires_grad = False
        return 1





165 changes: 165 additions & 0 deletions Clarifying_questions/Main_trainer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,165 @@
import transformers
from torch.utils.data import DataLoader, TensorDataset, random_split, RandomSampler, Dataset
import pandas as pd
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import ModelCheckpoint
import math
import random
import re
import argparse
import wandb
from rouge import Rouge
from nltk.translate.bleu_score import sentence_bleu
from bert_score import score
from pytorch_lightning.callbacks import TQDMProgressBar
from transformers import BartTokenizer, BartForConditionalGeneration, AdamW, BartConfig, AutoTokenizer
from SummaryDataModule import SummaryDataModule
from LitModel_trainer import LitModel
import logging
from rouge import Rouge
from nltk.translate.bleu_score import sentence_bleu
from evaluate import load
from transformers import convert_tf_checkpoint_to_pytorch


# Release any cached GPU memory left over from a previous run.
torch.cuda.empty_cache()

# HuggingFace `evaluate` BERTScore metric, used by run_tests() below.
bertscore = load("bertscore")

model_name = "facebook/bart-large"
model_path = "Models/facebook-bart-large.ckpt"
pytorch_checkpoint_path = "Models/facebook-bart-large.pth"
# NOTE(review): `convert_tf_checkpoint_to_pytorch` is imported from
# `transformers`, where that name is a module rather than a callable, and the
# .ckpt path looks like a Lightning checkpoint, not a TensorFlow one —
# confirm this call actually runs and does what is intended.
convert_tf_checkpoint_to_pytorch(model_path, pytorch_checkpoint_path)
def generate_prediction(seed_line, model_):
    """Tokenize *seed_line* and return the model's beam-search generations.

    Args:
        seed_line: a string or list of strings to prompt the model with.
        model_: a LitModel instance exposing ``generate_text``.

    Returns:
        List of decoded generated strings.
    """
    # Move to the GPU and switch to inference mode before generating.
    model_.to("cuda")
    model_.eval()

    encoded_prompt = tokenizer(seed_line, max_length = 192, return_tensors = "pt", padding=True,truncation = True)

    return model_.generate_text(encoded_prompt, eval_beams = 8)


def run_tests(testfile,model_loaded):
    """Evaluate *model_loaded* on a test CSV and write per-row metrics.

    Expects columns 'source' (model input) and 'target' (reference text).
    For every row a prediction is generated and BLEU (cumulative plus
    1/2/3-gram), ROUGE-1/2/L and BERTScore are recorded; the augmented
    DataFrame is written to 'facebook_bart-large_metrics.csv'.
    """
    df = pd.read_csv(testfile)
    # Pre-create the metric columns so the .loc writes below cannot KeyError.
    df[['predicted', 'Blue', 'Blue_1gram', 'Blue_2gram', 'Blue_3gram']] = ''
    df[['rouge_1_r', 'rouge_1_p', 'rouge_1_f', 'rouge_2_r','rouge_2_p', 'rouge_2_f', 'rouge_l_r', 'rouge_l_p' ,'rouge_l_f']] = ''
    df[['bert_p', 'bert_r', 'bert_f1']] = ''
    # Hoisted out of the loop: the scorer does not need rebuilding per row.
    rouge = Rouge()
    for i in range(len(df)):
        line = df.iloc[i]['source']
        question_pred = generate_prediction(seed_line = line, model_ = model_loaded)
        question_true = df.iloc[i]['target']
        # NOTE(review): sentence_bleu takes (references, hypothesis); here the
        # prediction is passed as the reference and the target as the
        # hypothesis — confirm this ordering is intentional.
        Blue_score = sentence_bleu([question_pred[0].split()], question_true.split())
        Blue_score_1n = sentence_bleu([question_pred[0].split()], question_true.split(), weights=(1, 0, 0, 0))
        Blue_score_2n = sentence_bleu([question_pred[0].split()], question_true.split(), weights=(0, 1, 0, 0))
        Blue_score_3n = sentence_bleu([question_pred[0].split()], question_true.split(), weights=(0, 0, 1, 0))

        rouge_res = rouge.get_scores(question_pred[0], question_true)

        # BUG FIX: the original used chained indexing (df.loc[i]['col'] = ...),
        # which assigns into a temporary copy and silently discards the value,
        # leaving every metric column empty in the output CSV. Single-step
        # df.loc[row, col] assignment writes into the DataFrame itself.
        df.loc[i, 'predicted'] = question_pred[0]
        df.loc[i, 'Blue'] = Blue_score
        df.loc[i, 'Blue_1gram'] = Blue_score_1n
        df.loc[i, 'Blue_2gram'] = Blue_score_2n
        df.loc[i, 'Blue_3gram'] = Blue_score_3n
        df.loc[i, 'rouge_1_r'] = rouge_res[0]["rouge-1"]['r']
        df.loc[i, 'rouge_1_p'] = rouge_res[0]["rouge-1"]['p']
        df.loc[i, 'rouge_1_f'] = rouge_res[0]["rouge-1"]['f']
        df.loc[i, 'rouge_2_r'] = rouge_res[0]["rouge-2"]['r']
        df.loc[i, 'rouge_2_p'] = rouge_res[0]["rouge-2"]['p']
        df.loc[i, 'rouge_2_f'] = rouge_res[0]["rouge-2"]['f']
        df.loc[i, 'rouge_l_r'] = rouge_res[0]["rouge-l"]['r']
        df.loc[i, 'rouge_l_p'] = rouge_res[0]["rouge-l"]['p']
        df.loc[i, 'rouge_l_f'] = rouge_res[0]["rouge-l"]['f']
        print("iteration")

    output_file = "facebook_bart-large"+'_metrics.csv'

    # NOTE(review): 'predictions' receives the gold targets and 'references'
    # the model outputs — this looks swapped relative to the bertscore API;
    # verify before trusting bert_p / bert_r.
    results = bertscore.compute(predictions =df['target'].values.tolist(), references = df['predicted'].values.tolist(), lang="en", verbose=True)
    df['bert_p'] = results['precision']
    df['bert_r'] = results['recall']
    df['bert_f1'] = results['f1']
    df.to_csv(output_file,index=False)


# --- Script setup: logging, experiment tracking, hyperparameters ---
logging.basicConfig(filename = "logss.log", level =logging.INFO, filemode = "w")
base_dir = ''
wandb.init()
# NOTE(review): these config values are recorded to wandb but not all are
# actually used below — the trainer runs max_epochs=1 and the data module
# uses batch_size=4, not epochs=10 / batch_size=64. Confirm which values are
# authoritative.
wandb.config = {
  "learning_rate": 0.00002,
  "epochs": 10,
  "batch_size": 64
}
logging.info("The run name on wandb is {}".format(wandb.run.name))

# Flags consumed by LitModel (encoder/embedding freezing, beam count).
hparams = argparse.Namespace()
hparams.freeze_encoder = True
hparams.freeze_embeds = True
hparams.eval_beams = 4


tokenizer = BartTokenizer.from_pretrained(model_name, add_prefix_space=True)
# tokenizer = BartTokenizer.from_pretrained(model_path)



bart_model = BartForConditionalGeneration.from_pretrained(model_name)


# Data module reading the clustered training CSV (project-local class).
summary_data = SummaryDataModule(tokenizer, base_dir + '6-non_generic_train_snippets_cluster.csv',
                                 batch_size = 4)

model = LitModel(learning_rate = 2e-5, tokenizer = tokenizer, model = bart_model, hparams = hparams)


checkpoint = ModelCheckpoint(dirpath=base_dir)

trainer = pl.Trainer(gpus = 1,
                     max_epochs = 1,
                     min_epochs = 1,
                     auto_lr_find = True,
                     callbacks=[checkpoint,TQDMProgressBar(refresh_rate=100)])

trainer.fit(model, summary_data)

# NOTE(review): concatenating base_dir ('') with a "./"-prefixed path yields
# "./Models/facebook-bart-large" — confirm this is the intended location.
trainer.save_checkpoint(base_dir + "./Models/facebook-bart-large")




#run_tests("test.csv",model)
# run_tests("7-openAI-clustered.csv",model)









# Smoke test: generate clarifying questions for a few hand-written prompts.
line_pred = generate_prediction(seed_line = ["Samsung | Television , Smartphone, Soundbox , Computer , Vaccum ",
                                             "Samsung | Stockmarket, CEO, Devices, Headquarter",
                                             "mercedes cla class convertible | exterior , interior , engine , prices , competition",
                                             "Selena Gomez | Age , Birthday , Albums , Livingplace",
                                             "Weather | wind , temperature, precipitation, humidity , visibility | Weather is controlled by many factors, "],
                                model_ = model)

print(line_pred)







Loading