Merge pull request #318 from clarismiranda/grammar-memo
Grammar memo
Nexesenex authored Oct 23, 2024
2 parents 3752217 + dc68a59 commit 85ea121
Showing 5 changed files with 144 additions and 41 deletions.
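
The commit memoizes grammar stack expansion: whenever a pushdown stack has to be advanced to the set of stacks that end at a terminal element, the result is stored in a cache keyed by the stack itself, so a stack that recurs is looked up instead of re-expanded. A minimal sketch of that compute-or-fetch pattern, using simplified stand-in types (Stack, Stacks, expand) rather than the llama.cpp definitions, and an ordered std::map in place of the unordered_map with the custom hash added in llama-grammar.h:

```cpp
#include <cstdio>
#include <map>
#include <vector>

// Stand-in types: a "stack" is a vector of ints here, and expanding it yields
// a set of successor stacks. In llama.cpp the elements are pointers into the
// grammar's rule table and the expansion is llama_grammar_advance_stack_memo_impl.
using Stack  = std::vector<int>;
using Stacks = std::vector<Stack>;

// Placeholder for the real (potentially expensive) expansion.
static Stacks expand(const Stack & stack) {
    return { stack }; // pretend the stack already ends at a terminal
}

// Compute-or-fetch: identical stacks are expanded only once.
static const Stacks & expand_memo(const Stack & stack, std::map<Stack, Stacks> & cache) {
    auto it = cache.find(stack);
    if (it != cache.end()) {
        return it->second;                                    // cache hit
    }
    return cache.emplace(stack, expand(stack)).first->second; // miss: compute and store
}

int main() {
    std::map<Stack, Stacks> cache;
    const Stack s = {1, 2, 3};
    expand_memo(s, cache);
    expand_memo(s, cache); // second call is a pure lookup
    printf("cache entries: %zu\n", cache.size()); // prints 1
}
```

The diffs below wire this pattern into llama_grammar_advance_stack_memo and keep the cache on the llama_grammar struct so it persists across accepted characters.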
11 changes: 4 additions & 7 deletions examples/gbnf-validator/gbnf-validator.cpp
@@ -11,19 +11,15 @@
static bool llama_grammar_validate(struct llama_grammar * grammar, const std::string & input_str, size_t & error_pos, std::string & error_msg) {
const auto cpts = unicode_cpts_from_utf8(input_str);

-const llama_grammar_rules & rules = llama_grammar_get_rules (grammar);
-llama_grammar_stacks & stacks_cur = llama_grammar_get_stacks(grammar);
+auto & stacks_cur = llama_grammar_get_stacks(grammar);

size_t pos = 0;
for (const auto & cpt : cpts) {
-const llama_grammar_stacks stacks_prev = llama_grammar_get_stacks(grammar); // copy
-
-llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur);
+llama_grammar_accept(grammar, cpt);

if (stacks_cur.empty()) {
error_pos = pos;
error_msg = "Unexpected character '" + unicode_cpt_to_utf8(cpt) + "'";
-stacks_cur = stacks_prev;
return false;
}
++pos;
@@ -82,7 +78,8 @@ int main(int argc, char** argv) {

llama_grammar * grammar = llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root");
if (grammar == nullptr) {
-throw std::runtime_error("Failed to initialize llama_grammar");
+fprintf(stdout, "Failed to initialize llama_grammar\n");
+return 1;
}
// Read the input file
std::string input_str;
136 changes: 117 additions & 19 deletions src/llama-grammar.cpp
@@ -682,6 +682,101 @@ static bool llama_grammar_match_partial_char(
return !is_positive_char;
}

// transforms a grammar pushdown stack into N possible stacks, all ending
// at a character range (terminal element)
// additionally memoizes the stack to its possible stacks by mapping
// < llama_grammar_stack, llama_grammar_stacks >

static void llama_grammar_advance_stack_memo(
const llama_grammar_rules & rules,
const llama_grammar_stack & stack,
llama_grammar_stacks & new_stacks,
llama_grammar_stacks_cache & stacks_cache);

static void llama_grammar_advance_stack_memo_impl(
const llama_grammar_rules & rules,
const llama_grammar_stack & stack,
llama_grammar_stacks & new_stacks,
llama_grammar_stacks_cache & stacks_cache) {
if (stack.empty()) {
if (std::find(new_stacks.begin(), new_stacks.end(), stack) == new_stacks.end()) {
new_stacks.emplace_back(stack);
}
return;
}

const llama_grammar_element * pos = stack.back();

switch (pos->type) {
case LLAMA_GRETYPE_RULE_REF: {
const size_t rule_id = static_cast<size_t>(pos->value);
const llama_grammar_element * subpos = rules[rule_id].data();
do {
// init new stack without the top (pos)
llama_grammar_stack new_stack(stack.begin(), stack.end() - 1);
if (!llama_grammar_is_end_of_sequence(pos + 1)) {
// if this rule ref is followed by another element, add that to stack
new_stack.push_back(pos + 1);
}
if (!llama_grammar_is_end_of_sequence(subpos)) {
// if alternate is nonempty, add to stack
new_stack.push_back(subpos);
}
llama_grammar_advance_stack_memo(rules, new_stack, new_stacks, stacks_cache);
while (!llama_grammar_is_end_of_sequence(subpos)) {
// scan to end of alternate def
subpos++;
}
if (subpos->type == LLAMA_GRETYPE_ALT) {
// there's another alternate def of this rule to process
subpos++;
} else {
break;
}
} while (true);
break;
}
case LLAMA_GRETYPE_CHAR:
case LLAMA_GRETYPE_CHAR_NOT:
case LLAMA_GRETYPE_CHAR_ANY:
if (std::find(new_stacks.begin(), new_stacks.end(), stack) == new_stacks.end()) {
// only add the stack if it's not a duplicate of one we already have
new_stacks.emplace_back(stack);
}
break;
default:
// end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
// (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
// those
GGML_ABORT("fatal error");
}
}

static void llama_grammar_advance_stack_memo(
const llama_grammar_rules & rules,
const llama_grammar_stack & stack,
llama_grammar_stacks & new_stacks,
llama_grammar_stacks_cache & stacks_cache) {

llama_grammar_stacks advanced_stacks;
// Check whether this stack has already been expanded and cached
auto it = stacks_cache.find(stack);
if (it != stacks_cache.end()) {
advanced_stacks = it->second;
} else {
// Advance stacks with memoization
llama_grammar_advance_stack_memo_impl(rules, stack, advanced_stacks, stacks_cache);
stacks_cache.insert(make_pair(stack, advanced_stacks));
}
// Add the advanced stacks to new_stacks avoiding duplicates
for (const auto & new_stack : advanced_stacks) {
if (std::find(new_stacks.begin(), new_stacks.end(), new_stack) == new_stacks.end()) {
new_stacks.emplace_back(new_stack);
}
}

}

// transforms a grammar pushdown stack into N possible stacks, all ending
// at a character range (terminal element)
static void llama_grammar_advance_stack(
@@ -822,15 +917,11 @@ llama_grammar_stacks & llama_grammar_get_stacks(struct llama_grammar * grammar)
return grammar->stacks;
}

-void llama_grammar_accept(
-const llama_grammar_rules & rules,
-const llama_grammar_stacks & stacks,
-const uint32_t chr,
-llama_grammar_stacks & stacks_new) {
-stacks_new.clear();
-stacks_new.reserve(stacks.size());
+void llama_grammar_accept(struct llama_grammar * grammar, uint32_t chr) {
+llama_grammar_stacks stacks_new;
+stacks_new.reserve(grammar->stacks.size());

-for (const auto & stack : stacks) {
+for (const auto & stack : grammar->stacks) {
if (stack.empty()) {
continue;
}
@@ -844,9 +935,11 @@ void llama_grammar_accept(
if (!llama_grammar_is_end_of_sequence(pos)) {
new_stack.push_back(pos);
}
-llama_grammar_advance_stack(rules, new_stack, stacks_new);
+llama_grammar_advance_stack_memo(grammar->rules, new_stack, stacks_new, grammar->stacks_cache);
}
}

grammar->stacks = std::move(stacks_new);
}

llama_grammar_candidates llama_grammar_reject_candidates_for_stack(
@@ -938,14 +1031,15 @@ struct llama_grammar * llama_grammar_init_impl(

// loop over alternates of start rule to build initial stacks
llama_grammar_stacks stacks;
llama_grammar_stacks_cache stacks_cache;
pos = vec_rules[start_rule_index].data();
do {
llama_grammar_stack stack;
if (!llama_grammar_is_end_of_sequence(pos)) {
// if alternate is nonempty, add to stack
stack.push_back(pos);
}
-llama_grammar_advance_stack(vec_rules, stack, stacks);
+llama_grammar_advance_stack_memo(vec_rules, stack, stacks, stacks_cache);
while (!llama_grammar_is_end_of_sequence(pos)) {
// scan to end of alternate def
pos++;
@@ -961,7 +1055,7 @@ struct llama_grammar * llama_grammar_init_impl(
// Important: vec_rules has to be moved here, not copied, because stacks contains
// pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
// then the pointers would be invalidated when the local vec_rules goes out of scope.
-return new llama_grammar { vocab, std::move(vec_rules), std::move(stacks), {}, };
+return new llama_grammar { vocab, std::move(vec_rules), std::move(stacks), std::move(stacks_cache), {}, };
}

struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root) {
@@ -1016,14 +1110,15 @@ struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab,

// loop over alternates of start rule to build initial stacks
llama_grammar_stacks stacks;
llama_grammar_stacks_cache stacks_cache;
pos = vec_rules[start_rule_index].data();
do {
llama_grammar_stack stack;
if (!llama_grammar_is_end_of_sequence(pos)) {
// if alternate is nonempty, add to stack
stack.push_back(pos);
}
-llama_grammar_advance_stack(vec_rules, stack, stacks);
+llama_grammar_advance_stack_memo(vec_rules, stack, stacks, stacks_cache);
while (!llama_grammar_is_end_of_sequence(pos)) {
// scan to end of alternate def
pos++;
@@ -1039,7 +1134,7 @@ struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab,
// Important: vec_rules has to be moved here, not copied, because stacks contains
// pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
// then the pointers would be invalidated when the local vec_rules goes out of scope.
-return new llama_grammar { vocab, std::move(vec_rules), std::move(stacks), {}, };
+return new llama_grammar { vocab, std::move(vec_rules), std::move(stacks), std::move(stacks_cache), {}, };
}

void llama_grammar_free_impl(struct llama_grammar * grammar) {
@@ -1051,15 +1146,21 @@ void llama_grammar_free_impl(struct llama_grammar * grammar) {
}

struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & grammar) {
-llama_grammar * result = new llama_grammar { grammar.vocab, grammar.rules, grammar.stacks, grammar.partial_utf8, };
+llama_grammar * result = new llama_grammar {
+grammar.vocab,
+grammar.rules,
+grammar.stacks,
+grammar.stacks_cache,
+grammar.partial_utf8,
+};

// redirect elements in stacks to point to new rules
for (size_t is = 0; is < result->stacks.size(); is++) {
for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
for (size_t ir0 = 0; ir0 < grammar.rules.size(); ir0++) {
for (size_t ir1 = 0; ir1 < grammar.rules[ir0].size(); ir1++) {
if (grammar.stacks[is][ie] == &grammar.rules[ir0][ir1]) {
result->stacks[is][ie] = &result->rules[ir0][ir1];
}
}
}
@@ -1126,11 +1227,8 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token
const auto decoded = decode_utf8(piece, grammar.partial_utf8);
const auto & code_points = decoded.first;

-llama_grammar_stacks stacks_new;
-
for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
-llama_grammar_accept(grammar.rules, grammar.stacks, *it, stacks_new);
-grammar.stacks = std::move(stacks_new);
+llama_grammar_accept(&grammar, *it);
}

grammar.partial_utf8 = decoded.second;
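
With the changes above, llama_grammar_accept takes the grammar and one code point and rewrites grammar->stacks in place, rather than receiving the rules, the previous stacks, and an output vector. A sketch of the resulting caller loop, modeled on the gbnf-validator and test changes in this commit (error reporting elided; accepts is an illustrative helper, not part of the API, and the internal headers are assumed to be on the include path as in the tests):

```cpp
#include <string>

#include "llama-grammar.h"
#include "unicode.h"

// Sketch: feed a UTF-8 string through a grammar using the updated API.
static bool accepts(llama_grammar * grammar, const std::string & input) {
    const auto cpts = unicode_cpts_from_utf8(input);

    // stacks_cur refers into the grammar; llama_grammar_accept updates it
    // in place for every code point that is consumed.
    auto & stacks_cur = llama_grammar_get_stacks(grammar);

    for (const auto & cpt : cpts) {
        llama_grammar_accept(grammar, cpt);
        if (stacks_cur.empty()) {
            return false; // no surviving stack: the character was rejected
        }
    }

    // accepted if at least one stack has been fully consumed
    for (const auto & stack : stacks_cur) {
        if (stack.empty()) {
            return true;
        }
    }
    return false;
}
```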
23 changes: 18 additions & 5 deletions src/llama-grammar.h
@@ -3,6 +3,7 @@
#include "llama-impl.h"

#include <map>
#include <unordered_map>

struct llama_vocab;

@@ -58,18 +59,27 @@ using llama_grammar_rules = std::vector<llama_grammar_rule>;
using llama_grammar_stacks = std::vector<llama_grammar_stack>;
using llama_grammar_candidates = std::vector<llama_grammar_candidate>;

struct VectorPointerHash {
size_t operator()(const llama_grammar_stack & v) const {
size_t seed = v.size();
for (const auto* ptr : v) {
seed ^= std::hash<const llama_grammar_element*>()(ptr) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
return seed;
}
};

using llama_grammar_stacks_cache = std::unordered_map<llama_grammar_stack, llama_grammar_stacks, VectorPointerHash>;

// TODO: remove, needed for tests atm
const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar * grammar);
llama_grammar_stacks & llama_grammar_get_stacks( struct llama_grammar * grammar);

// takes a set of possible pushdown stacks on a grammar, which are required to
// be positioned at a character range (see `llama_grammar_advance_stack`), and
// produces the N possible stacks if the given char is accepted at those
// positions
-void llama_grammar_accept(
-const llama_grammar_rules & rules,
-const llama_grammar_stacks & stacks,
-uint32_t chr,
-llama_grammar_stacks & stacks_new);
+void llama_grammar_accept(struct llama_grammar * grammar, uint32_t chr);

std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
const llama_grammar_rules & rules,
@@ -113,6 +123,9 @@ struct llama_grammar {
const llama_grammar_rules rules; // TODO: shared ptr
llama_grammar_stacks stacks;

// cache N possible stacks from a stack
llama_grammar_stacks_cache stacks_cache;

// buffer for partially generated UTF-8 sequence from accepted tokens
llama_partial_utf8 partial_utf8;
};
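
Because the cache key is a llama_grammar_stack, i.e. a std::vector of element pointers, the header supplies VectorPointerHash: a boost-style hash combine over the pointer values, seeded with the vector length. A standalone illustration of the same combiner, using const int * as stand-in element pointers rather than the llama.cpp types:

```cpp
#include <cstdio>
#include <unordered_map>
#include <vector>

// Same combiner as VectorPointerHash in llama-grammar.h, shown on plain data.
struct PtrVecHash {
    size_t operator()(const std::vector<const int *> & v) const {
        size_t seed = v.size();
        for (const auto * ptr : v) {
            seed ^= std::hash<const int *>()(ptr) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
        }
        return seed;
    }
};

int main() {
    const int rules[3] = {0, 1, 2};

    std::unordered_map<std::vector<const int *>, int, PtrVecHash> cache;
    const std::vector<const int *> a = {&rules[0], &rules[2]};
    const std::vector<const int *> b = {&rules[0], &rules[2]}; // same pointers -> equal key

    cache[a] = 42;
    printf("hit: %d\n", cache.count(b) ? cache.at(b) : -1); // prints "hit: 42"
}
```

Two vectors containing the same pointers in the same order hash and compare equal, which is what lets a recurring stack hit its earlier cache entry.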
9 changes: 3 additions & 6 deletions tests/test-grammar-integration.cpp
@@ -32,13 +32,10 @@ static bool test_build_grammar_fails(const std::string & grammar_str) {
static bool match_string(const std::string & input, llama_grammar * grammar) {
const auto cpts = unicode_cpts_from_utf8(input);

-const llama_grammar_rules & rules = llama_grammar_get_rules (grammar);
-llama_grammar_stacks & stacks_cur = llama_grammar_get_stacks(grammar);
+auto & stacks_cur = llama_grammar_get_stacks(grammar);

for (const auto & cpt : cpts) {
-const llama_grammar_stacks stacks_prev = llama_grammar_get_stacks(grammar); // copy
-
-llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur);
+llama_grammar_accept(grammar, cpt);

if (stacks_cur.empty()) {
// no stacks means that the grammar failed to match at this point
Expand All @@ -63,7 +60,7 @@ static void test(const std::string & test_desc, const std::string & grammar_str,
auto * grammar = build_grammar(grammar_str);

// Save the original grammar stacks so that we can reset after every new string we want to test
-const llama_grammar_stacks stacks_org = llama_grammar_get_stacks(grammar);
+const llama_grammar_stacks stacks_org = llama_grammar_get_stacks(grammar); // copy

llama_grammar_stacks & stacks_cur = llama_grammar_get_stacks(grammar);

6 changes: 2 additions & 4 deletions tests/test-llama-grammar.cpp
@@ -113,12 +113,10 @@ int main()
}
}

-llama_grammar * grammar = NULL;
std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());

-grammar = llama_grammar_init_impl(nullptr, grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
-if (grammar == nullptr)
-{
+llama_grammar * grammar = llama_grammar_init_impl(nullptr, grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
+if (grammar == nullptr) {
throw std::runtime_error("Failed to initialize llama_grammar");
}

