Streamlining code and adding some more assertions
Important change: I'm classifying added tokens as control tokens now for BPE.
goerch committed Sep 19, 2023
1 parent a4e9448 commit 17ca832
Showing 5 changed files with 42 additions and 42 deletions.
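What the reclassification means in practice, pieced together from the diffs below: the restructured BPE branch of llama_token_to_piece_with_model only handles NORMAL and CONTROL tokens and asserts on anything else, so typing added tokens as CONTROL keeps them silently suppressed (as they already were) instead of tripping the new assertion. A minimal caller-side sketch, assuming `model` is a loaded BPE-vocab model and `added_token_id` is a hypothetical id taken from the tokenizer's added-tokens list:

#include "llama.h"

int piece_length_of_added_token(const struct llama_model * model, llama_token added_token_id) {
    char buf[64];
    // Added tokens are written as CONTROL by convert.py after this commit, and
    // the BPE branch of llama_token_to_piece_with_model emits nothing for
    // CONTROL tokens, so this returns 0. Under the previous USER_DEFINED
    // typing, the new switch would instead hit GGML_ASSERT(false).
    return llama_token_to_piece_with_model(model, added_token_id, buf, (int) sizeof(buf));
}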
convert-falcon-hf-to-gguf.py · 8 changes: 2 additions & 6 deletions
@@ -161,13 +161,9 @@ def parse_args() -> argparse.Namespace:
 byte_decoder = {v: k for k, v in byte_encoder.items()}
 
 for i in range(vocab_size):
-    text = reverse_vocab[i]
-    tokens.append(text)
+    tokens.append(reverse_vocab[i])
     scores.append(0.0) # dummy
-    if text in byte_decoder:
-        toktypes.append(gguf.TokenType.BYTE)
-    else:
-        toktypes.append(gguf.TokenType.NORMAL)
+    toktypes.append(gguf.TokenType.NORMAL)
 
 gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
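Why the BYTE/NORMAL distinction is dropped here: Falcon uses a BPE vocab, and in the restructured llama_token_to_piece_with_model below, the BPE branch only accepts NORMAL and CONTROL token types; a BYTE-typed token would now hit GGML_ASSERT(false). Byte-level content is instead recovered by llama_decode_text on NORMAL tokens. A toy model of that dispatch (names and enum are illustrative, not the library's):

#include <cassert>
#include <string>

enum ToyTokenType { NORMAL, CONTROL, BYTE };

std::string toy_bpe_piece(ToyTokenType type, const std::string & text) {
    switch (type) {
    case NORMAL:  return text; // real code runs llama_decode_text(text)
    case CONTROL: return "";   // control tokens produce no output
    default:      assert(false && "BYTE is no longer expected for BPE vocabs");
    }
    return "";
}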
convert.py · 10 changes: 2 additions & 8 deletions
@@ -343,19 +343,13 @@ def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
         byte_encoder = tokenization_gpt2.bytes_to_unicode()
         byte_decoder = {v: k for k, v in byte_encoder.items()}
 
-        score = 0.0
         for i, _ in enumerate(tokenizer):
-            text = reverse_vocab[i]
-            if text in byte_decoder:
-                toktype = gguf.TokenType.BYTE
-            else:
-                toktype = gguf.TokenType.NORMAL
-            yield text, score, toktype
+            yield reverse_vocab[i], 0.0, gguf.TokenType.NORMAL
 
     def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
         for text in self.added_tokens_list:
             score = -1000.0
-            yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED
+            yield text.encode("utf-8"), score, gguf.TokenType.CONTROL
 
     def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
         yield from self.bpe_tokens()
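For context, the TokenType written here is stored per token in the GGUF and read back on the C++ side. A reconstruction of the consuming predicate, assumed to mirror llama_is_byte_token and llama_is_user_defined_token shown in the llama.cpp hunks below (it is not part of this diff):

static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
    return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL;
}

This is why added tokens yielding gguf.TokenType.CONTROL end up in the silent control-token path of llama_token_to_piece_with_model.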
llama.cpp · 66 changes: 38 additions & 28 deletions
@@ -3884,6 +3884,10 @@ static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
     return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
 }
 
+static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id) {
+    return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED;
+}
+
 static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
     GGML_ASSERT(llama_is_byte_token(vocab, id));
     const auto& token_data = vocab.id_to_token.at(id);
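Note that nothing in this commit calls the new llama_is_user_defined_token yet. A hypothetical use, following the surrounding code's conventions (the helper name and its body are assumptions, not part of the commit):

static int token_piece_user_defined(const llama_vocab & vocab, llama_token id, char * buf, int length) {
    GGML_ASSERT(llama_is_user_defined_token(vocab, id));
    const std::string & text = vocab.id_to_token[id].text; // stored verbatim
    if (length < (int) text.length()) {
        return -(int) text.length(); // same "negative size needed" convention as below
    }
    memcpy(buf, text.c_str(), text.length());
    return (int) text.length();
}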
@@ -7224,47 +7228,53 @@ static std::string llama_decode_text(const std::string& text) {
 // does not write null-terminator to buf
 int llama_token_to_piece_with_model(const struct llama_model * model, llama_token token, char * buf, int length) {
     if (0 <= token && token < llama_model_n_vocab(model)) {
-        if (llama_is_normal_token(model->vocab, token)) {
-            std::string result = model->vocab.id_to_token[token].text;
-            if (llama_vocab_get_type(model->vocab) == LLAMA_VOCAB_TYPE_SPM) {
+        switch (llama_vocab_get_type(model->vocab)) {
+        case LLAMA_VOCAB_TYPE_SPM: {
+            if (llama_is_normal_token(model->vocab, token)) {
+                std::string result = model->vocab.id_to_token[token].text;
                 llama_unescape_whitespace(result);
-            } else if (llama_vocab_get_type(model->vocab) == LLAMA_VOCAB_TYPE_BPE) {
-                result = llama_decode_text(result);
-            } else {
-                GGML_ASSERT(false);
-            }
-            if (length < (int) result.length()) {
-                return -result.length();
-            }
-            memcpy(buf, result.c_str(), result.length());
-            return result.length();
-        } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
-            if (length < 3) {
-                return -3;
-            }
-            buf[0] = '\xe2';
-            buf[1] = '\x96';
-            buf[2] = '\x85';
-            return 3;
-        } else if (llama_is_control_token(model->vocab, token)) {
-            ;
-        } else if (llama_is_byte_token(model->vocab, token)) {
-            if (llama_vocab_get_type(model->vocab) == LLAMA_VOCAB_TYPE_SPM) {
+                if (length < (int) result.length()) {
+                    return -result.length();
+                }
+                memcpy(buf, result.c_str(), result.length());
+                return result.length();
+            } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
+                if (length < 3) {
+                    return -3;
+                }
+                memcpy(buf, "\xe2\x96\x85", 3);
+                return 3;
+            } else if (llama_is_control_token(model->vocab, token)) {
+                ;
+            } else if (llama_is_byte_token(model->vocab, token)) {
                 if (length < 1) {
                     return -1;
                 }
                 buf[0] = llama_token_to_byte(model->vocab, token);
                 return 1;
-            } else if (llama_vocab_get_type(model->vocab) == LLAMA_VOCAB_TYPE_BPE) {
-                std::string result = llama_decode_text(model->vocab.id_to_token[token].text);
-                if (length < (int)result.length()) {
+            } else {
+                GGML_ASSERT(false);
+            }
+            break;
+        }
+        case LLAMA_VOCAB_TYPE_BPE: {
+            if (llama_is_normal_token(model->vocab, token)) {
+                std::string result = model->vocab.id_to_token[token].text;
+                result = llama_decode_text(result);
+                if (length < (int) result.length()) {
                     return -result.length();
                 }
                 memcpy(buf, result.c_str(), result.length());
                 return result.length();
+            } else if (llama_is_control_token(model->vocab, token)) {
+                ;
             } else {
                 GGML_ASSERT(false);
             }
+            break;
         }
+        default:
+            GGML_ASSERT(false);
+        }
     }
     return 0;
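The length contract visible above: a return value of 0 or more is the number of bytes written, while a negative value is the negated size the piece needs. A caller-side sketch of the resulting resize-and-retry pattern (the wrapper name is ours; the wrapped function is from this diff):

#include <algorithm>
#include <string>
#include <vector>
#include "llama.h"

std::string token_to_piece(const struct llama_model * model, llama_token token) {
    std::vector<char> buf(8);
    int n = llama_token_to_piece_with_model(model, token, buf.data(), (int) buf.size());
    if (n < 0) {
        buf.resize(-n); // -n is the required size
        n = llama_token_to_piece_with_model(model, token, buf.data(), (int) buf.size());
    }
    return std::string(buf.data(), std::max(n, 0)); // control tokens yield 0 bytes
}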
Binary file modified models/ggml-vocab-aquila.gguf
Binary file modified models/ggml-vocab-falcon.gguf