From 37301347767d555d0a66c043ce4ef6ead8e61c55 Mon Sep 17 00:00:00 2001
From: opparco
Date: Sun, 3 Sep 2023 19:18:09 +0900
Subject: [PATCH] llama : fix bpe tokenize from byte (#2889)

---
 llama.cpp | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 2b0cf30f6ec0d..c97c1462f6d14 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3366,9 +3366,15 @@ struct llm_tokenizer_bpe {
                     std::string byte_str(1, *j);
                     auto token_multibyte = vocab.token_to_id.find(byte_str);
                     if (token_multibyte == vocab.token_to_id.end()) {
-                        fprintf(stderr,"ERROR: byte not found in vocab: '%s'\n", byte_str.c_str());
+                        try {
+                            llama_token token_byte = llama_byte_to_token(vocab, *j);
+                            output.push_back(token_byte);
+                        } catch (const std::out_of_range & err) {
+                            fprintf(stderr,"ERROR: byte not found in vocab: '%s'\n", byte_str.c_str());
+                        }
+                    } else {
+                        output.push_back((*token_multibyte).second);
                     }
-                    output.push_back((*token_multibyte).second);
                 }
             } else {
                 output.push_back((*token).second);
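
For context on why the patch catches `std::out_of_range`: at the time of this change, `llama_byte_to_token()` resolves a raw byte by looking up its `<0xXX>` token in the vocab with `std::map::at()`, which throws when no such entry exists. The standalone sketch below illustrates that fallback-plus-catch pattern only; `byte_to_token` and the toy vocab are hypothetical names, not llama.cpp's actual code.

```cpp
// Illustrative sketch of the byte-fallback path this patch adds: try to map the
// raw byte to a "<0xXX>" token, and only report an error if that lookup throws.
#include <cstdint>
#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

using llama_token = int32_t;

// Hypothetical stand-in for llama_byte_to_token(): format the byte as "<0xXX>"
// and look it up; std::map::at() throws std::out_of_range when the token is absent.
static llama_token byte_to_token(const std::map<std::string, llama_token> & token_to_id, uint8_t ch) {
    char buf[7];
    snprintf(buf, sizeof(buf), "<0x%02X>", ch);
    return token_to_id.at(buf);
}

int main() {
    // Toy vocab: only byte 0xE3 has a byte token.
    const std::map<std::string, llama_token> token_to_id = { { "<0xE3>", 227 } };
    const uint8_t bytes[] = { 0xE3, 0x81 };

    std::vector<llama_token> output;
    for (uint8_t byte : bytes) {
        try {
            output.push_back(byte_to_token(token_to_id, byte)); // fallback succeeds for 0xE3
        } catch (const std::out_of_range &) {
            // 0x81 has no byte token in this toy vocab, so only then do we report an error,
            // mirroring the patched control flow in llm_tokenizer_bpe.
            fprintf(stderr, "ERROR: byte not found in vocab: 0x%02X\n", byte);
        }
    }
    return 0;
}
```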