diff --git a/convert-bloom-hf-to-gguf.py b/convert-bloom-hf-to-gguf.py
index 14dbd793c8444..6e866d9434818 100755
--- a/convert-bloom-hf-to-gguf.py
+++ b/convert-bloom-hf-to-gguf.py
@@ -118,15 +118,24 @@ def parse_args() -> argparse.Namespace:
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size

+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}

 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0) # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)

 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

 special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens))
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
index f1599b0c44e34..02d1fdf164eea 100755
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -123,15 +123,24 @@ def parse_args() -> argparse.Namespace:
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size

+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}

 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0) # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)

 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

 special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
diff --git a/convert-mpt-hf-to-gguf.py b/convert-mpt-hf-to-gguf.py
index 2d2fa2329dd1b..70d154b3f5c01 100755
--- a/convert-mpt-hf-to-gguf.py
+++ b/convert-mpt-hf-to-gguf.py
@@ -136,9 +136,11 @@ def parse_args() -> argparse.Namespace:
         tokens.append(f"[PAD{i}]")
         toktypes.append(gguf.TokenType.USER_DEFINED)
     elif reverse_vocab[i] in added_vocab:
-        # NOTE: wouldn't we like to distinguish CONTROL tokens here?
         tokens.append(reverse_vocab[i])
-        toktypes.append(gguf.TokenType.USER_DEFINED)
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
     else:
         tokens.append(reverse_vocab[i])
         toktypes.append(gguf.TokenType.NORMAL)
diff --git a/convert-refact-hf-to-gguf.py b/convert-refact-hf-to-gguf.py
index 934f3852b2418..f0cfe84d81c8b 100755
--- a/convert-refact-hf-to-gguf.py
+++ b/convert-refact-hf-to-gguf.py
@@ -139,15 +139,24 @@ def parse_args() -> argparse.Namespace:
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size

+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}

 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0) # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)

 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

 special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens))
diff --git a/convert-starcoder-hf-to-gguf.py b/convert-starcoder-hf-to-gguf.py
index fe8815cbf6f47..a9bfed85e31ba 100755
--- a/convert-starcoder-hf-to-gguf.py
+++ b/convert-starcoder-hf-to-gguf.py
@@ -111,17 +111,25 @@ def parse_args() -> argparse.Namespace:
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size

+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}

 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0) # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)

 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
-
 special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
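For reference only (not part of the patch): the five hunks above repeat the same per-token classification. A minimal standalone sketch of that logic, assuming a Hugging Face fast tokenizer and the gguf Python package, could look like this; the helper name classify_vocab is illustrative.

# Illustrative sketch of the shared classification logic; classify_vocab is a
# hypothetical helper, not something the patch introduces.
from transformers import AutoTokenizer
import gguf

def classify_vocab(dir_model: str, vocab_size: int):
    tokenizer = AutoTokenizer.from_pretrained(dir_model)
    added_vocab = tokenizer.get_added_vocab()
    reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}

    tokens, toktypes = [], []
    for i in range(vocab_size):
        if i not in reverse_vocab:
            # id not used by the tokenizer: emit a pad placeholder
            tokens.append(f"[PAD{i}]")
            toktypes.append(gguf.TokenType.USER_DEFINED)
        elif reverse_vocab[i] in added_vocab:
            # added token: CONTROL if the tokenizer marks it special, else USER_DEFINED
            tokens.append(reverse_vocab[i])
            special = tokenizer.added_tokens_decoder[i].special
            toktypes.append(gguf.TokenType.CONTROL if special else gguf.TokenType.USER_DEFINED)
        else:
            # regular vocabulary entry
            tokens.append(reverse_vocab[i])
            toktypes.append(gguf.TokenType.NORMAL)
    return tokens, toktypes

With added tokens typed this way, dummy per-token scores are no longer written, which is why each converter also drops the gguf_writer.add_token_scores(scores) call.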