from_files -> from_file everywhere
- read_files -> read_file
- from_file pure rust impl in python bindings
- Fix some typing in python binding
- Added {BPE,WordLevel,WordPiece}.from_file tests.
Narsil committed Sep 24, 2020
1 parent 9672995 commit 36832bf
Showing 18 changed files with 130 additions and 71 deletions.
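At the user level the rename is mechanical: every `from_files`/`read_files` call becomes `from_file`/`read_file` with the same arguments. A minimal before/after sketch of the Python side, assuming hypothetical local `vocab.json` and `merges.txt` files:

```python
from tokenizers import ByteLevelBPETokenizer
from tokenizers.models import BPE

# Before this commit (old names):
# tokenizer = ByteLevelBPETokenizer.from_files("vocab.json", "merges.txt")
# vocab, merges = BPE.read_files("vocab.json", "merges.txt")

# After this commit (new names, same arguments):
tokenizer = ByteLevelBPETokenizer.from_file("vocab.json", "merges.txt")
vocab, merges = BPE.read_file("vocab.json", "merges.txt")
```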
18 changes: 9 additions & 9 deletions bindings/node/native/src/models.rs
@@ -156,14 +156,14 @@ pub fn bpe_init(mut cx: FunctionContext) -> JsResult<JsUndefined> {
Ok(cx.undefined())
}

/// bpe_from_files(vocab: String, merges: String, options: {
/// bpe_from_file(vocab: String, merges: String, options: {
/// cacheCapacity?: number,
/// dropout?: number,
/// unkToken?: String,
/// continuingSubwordPrefix?: String,
/// endOfWordSuffix?: String
/// }, callback)
pub fn bpe_from_files(mut cx: FunctionContext) -> JsResult<JsUndefined> {
pub fn bpe_from_file(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let (options, callback) = match cx.extract_opt::<BpeOptions>(2) {
// Options were there, and extracted
Ok(Some(options)) => (options, cx.argument::<JsFunction>(3)?),
@@ -174,7 +174,7 @@ pub fn bpe_from_files(mut cx: FunctionContext) -> JsResult<JsUndefined> {
};
let vocab = cx.extract::<String>(0)?;
let merges = cx.extract::<String>(1)?;
let mut builder = tk::models::bpe::BPE::from_files(&vocab, &merges);
let mut builder = tk::models::bpe::BPE::from_file(&vocab, &merges);

builder = options.apply_to_bpe_builder(builder);

@@ -242,12 +242,12 @@ pub fn wordpiece_init(mut cx: FunctionContext) -> JsResult<JsUndefined> {
Ok(cx.undefined())
}

/// wordpiece_from_files(vocab: String, options: {
/// wordpiece_from_file(vocab: String, options: {
/// unkToken?: String = "[UNK]",
/// maxInputCharsPerWord?: number = 100,
/// continuingSubwordPrefix?: "##",
/// }, callback)
pub fn wordpiece_from_files(mut cx: FunctionContext) -> JsResult<JsUndefined> {
pub fn wordpiece_from_file(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let (options, callback) = match cx.extract_opt::<WordPieceOptions>(1) {
// Options were there, and extracted
Ok(Some(options)) => (options, cx.argument::<JsFunction>(2)?),
@@ -257,7 +257,7 @@ pub fn wordpiece_from_files(mut cx: FunctionContext) -> JsResult<JsUndefined> {
Err(_) => (WordPieceOptions::default(), cx.argument::<JsFunction>(1)?),
};
let vocab = cx.extract::<String>(0)?;
let mut builder = tk::models::wordpiece::WordPiece::from_files(&vocab);
let mut builder = tk::models::wordpiece::WordPiece::from_file(&vocab);
builder = options.apply_to_wordpiece_builder(builder);
let task = WordPieceFromFilesTask::new(builder);
task.schedule(callback);
@@ -279,12 +279,12 @@ pub fn wordpiece_empty(mut cx: FunctionContext) -> JsResult<JsModel> {
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_BPE_init", prefix), bpe_init)?;
m.export_function(&format!("{}_BPE_from_files", prefix), bpe_from_files)?;
m.export_function(&format!("{}_BPE_from_file", prefix), bpe_from_file)?;
m.export_function(&format!("{}_BPE_empty", prefix), bpe_empty)?;
m.export_function(&format!("{}_WordPiece_init", prefix), wordpiece_init)?;
m.export_function(
&format!("{}_WordPiece_from_files", prefix),
wordpiece_from_files,
&format!("{}_WordPiece_from_file", prefix),
wordpiece_from_file,
)?;
m.export_function(&format!("{}_WordPiece_empty", prefix), wordpiece_empty)?;
Ok(())
bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py
@@ -78,8 +78,8 @@ def __init__(
super().__init__(tokenizer, parameters)

@staticmethod
def from_files(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_files(vocab_filename, merges_filename)
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return ByteLevelBPETokenizer(vocab, merges, **kwargs)

def train(
bindings/python/py_src/tokenizers/implementations/char_bpe.py
@@ -95,8 +95,8 @@ def __init__(
super().__init__(tokenizer, parameters)

@staticmethod
def from_files(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_files(vocab_filename, merges_filename)
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return CharBPETokenizer(vocab, merges, **kwargs)

def train(
bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py
@@ -48,7 +48,7 @@ def __init__(
super().__init__(tokenizer, parameters)

@staticmethod
def from_files(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_files(vocab_filename, merges_filename)
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return SentencePieceBPETokenizer(vocab, merges, **kwargs)

47 changes: 34 additions & 13 deletions bindings/python/py_src/tokenizers/models/__init__.pyi
@@ -29,7 +29,7 @@ class Model:
class BPE(Model):
"""BytePairEncoding model class
Instantiate a BPE Model from the given vocab and merges files.
Instantiate a BPE Model from the given vocab and merges.
Args:
vocab: ('`optional`) Dict[str, int]:
@@ -76,12 +76,19 @@ class BPE(Model):
):
pass
@staticmethod
def read_files(vocab_filename: str, merges_filename: str) -> Tuple[Vocab, Merges]:
def read_file(vocab_filename: str, merges_filename: str) -> Tuple[Vocab, Merges]:
pass
@staticmethod
def from_files(vocab_filename: str, merges_filename: str, **kwargs) -> BPE:
vocab, merges = BPE.read_files(vocab_filename, merges_filename)
return BPE(vocab, merges, **kwargs)
def from_file(vocab_filename: str, merges_filename: str, **kwargs) -> BPE:
"""
Convenient method to initialize a BPE from files
Roughly equivalent to
def from_file(vocab_filename, merges_filename, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return BPE(vocab, merges, **kwargs)
"""
pass

class WordPiece(Model):
"""WordPiece model class
@@ -107,12 +114,19 @@ class WordPiece(Model):
):
pass
@staticmethod
def read_file(vocab_filename: str) -> Tuple[Vocab]:
def read_file(vocab_filename: str) -> Vocab:
pass
@staticmethod
def from_files(vocab_filename: str, **kwargs) -> WordPiece:
vocab = WordPiece.read_files(vocab_filename)
return WordPiece(vocab, **kwargs)
def from_file(vocab_filename: str, **kwargs) -> WordPiece:
"""
Convenient method to initialize a WordPiece from file
Roughly equivalent to
def from_file(vocab_filename, **kwargs):
vocab = WordPiece.read_file(vocab_filename)
return WordPiece(vocab, **kwargs)
"""
pass

class WordLevel(Model):
"""
@@ -131,12 +145,19 @@
def __init__(self, vocab: Optional[Union[str, Dict[str, int]]], unk_token: Optional[str]):
pass
@staticmethod
def read_file(vocab_filename: str) -> Tuple[Vocab]:
def read_file(vocab_filename: str) -> Vocab:
pass
@staticmethod
def from_files(vocab_filename: str, **kwargs) -> WordLevel:
vocab = WordLevel.read_files(vocab_filename)
return WordLevel(vocab, **kwargs)
def from_file(vocab_filename: str, **kwargs) -> WordLevel:
"""
Convenient method to initialize a WordLevel from file
Roughly equivalent to
def from_file(vocab_filename, **kwargs):
vocab = WordLevel.read_file(vocab_filename)
return WordLevel(vocab, **kwargs)
"""
pass

class Unigram(Model):
"""UnigramEncoding model class
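As the updated stubs note, `from_file` is roughly `read_file` followed by the constructor, except that the model is now built entirely on the Rust side (see `bindings/python/src/models.rs` below). A sketch of the equivalence, again with hypothetical paths; `unk_token` stands in for any constructor kwarg:

```python
from tokenizers.models import BPE

# Two-step form: read vocab/merges into Python, then build the model.
vocab, merges = BPE.read_file("vocab.json", "merges.txt")
bpe_two_step = BPE(vocab, merges, unk_token="<unk>")

# One-step form: the pure-Rust from_file added by this commit.
bpe_one_step = BPE.from_file("vocab.json", "merges.txt", unk_token="<unk>")
```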
51 changes: 45 additions & 6 deletions bindings/python/src/models.rs
@@ -209,7 +209,7 @@ impl PyBPE {
(PyVocab::Filename(vocab_filename), PyMerges::Filename(merges_filename)) => {
deprecation_warning(
"0.9.0",
"BPE.__init__ will not create from files anymore, try `BPE.from_files` instead",
"BPE.__init__ will not create from files anymore, try `BPE.from_file` instead",
)?;
builder =
builder.files(vocab_filename.to_string(), merges_filename.to_string());
@@ -226,14 +226,35 @@ impl PyBPE {
}

#[staticmethod]
fn read_files(vocab_filename: &str, merges_filename: &str) -> PyResult<(Vocab, Merges)> {
BPE::read_files(vocab_filename, merges_filename).map_err(|e| {
fn read_file(vocab_filename: &str, merges_filename: &str) -> PyResult<(Vocab, Merges)> {
BPE::read_file(vocab_filename, merges_filename).map_err(|e| {
exceptions::PyValueError::new_err(format!(
"Error while reading vocab&merges files: {}",
e
))
})
}

#[staticmethod]
#[args(kwargs = "**")]
fn from_file(
py: Python,
vocab_filename: &str,
merges_filename: &str,
kwargs: Option<&PyDict>,
) -> PyResult<Py<Self>> {
let (vocab, merges) = BPE::read_file(vocab_filename, merges_filename).map_err(|e| {
exceptions::PyValueError::new_err(format!("Error while reading BPE files: {}", e))
})?;
Py::new(
py,
PyBPE::new(
Some(PyVocab::Vocab(vocab)),
Some(PyMerges::Merges(merges)),
kwargs,
)?,
)
}
}

/// WordPiece Model
@@ -300,10 +321,19 @@ impl PyWordPiece {

#[staticmethod]
fn read_file(vocab_filename: &str) -> PyResult<Vocab> {
WordPiece::read_files(vocab_filename).map_err(|e| {
WordPiece::read_file(vocab_filename).map_err(|e| {
exceptions::PyValueError::new_err(format!("Error while reading WordPiece file: {}", e))
})
}

#[staticmethod]
#[args(kwargs = "**")]
fn from_file(py: Python, vocab_filename: &str, kwargs: Option<&PyDict>) -> PyResult<Py<Self>> {
let vocab = WordPiece::read_file(vocab_filename).map_err(|e| {
exceptions::PyValueError::new_err(format!("Error while reading WordPiece file: {}", e))
})?;
Py::new(py, PyWordPiece::new(Some(PyVocab::Vocab(vocab)), kwargs)?)
}
}

#[pyclass(extends=PyModel, module = "tokenizers.models", name=WordLevel)]
@@ -344,7 +374,7 @@ impl PyWordLevel {
"0.9.0",
"WordLevel.__init__ will not create from files anymore, try `WordLevel.from_file` instead",
)?;
WordLevel::from_files(vocab_filename, unk_token).map_err(|e| {
WordLevel::from_file(vocab_filename, unk_token).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while loading WordLevel: {}",
e
@@ -364,10 +394,19 @@ impl PyWordLevel {

#[staticmethod]
fn read_file(vocab_filename: &str) -> PyResult<Vocab> {
WordLevel::read_files(vocab_filename).map_err(|e| {
WordLevel::read_file(vocab_filename).map_err(|e| {
exceptions::PyValueError::new_err(format!("Error while reading WordLevel file: {}", e))
})
}

#[staticmethod]
#[args(kwargs = "**")]
fn from_file(py: Python, vocab_filename: &str, kwargs: Option<&PyDict>) -> PyResult<Py<Self>> {
let vocab = WordLevel::read_file(vocab_filename).map_err(|e| {
exceptions::PyValueError::new_err(format!("Error while reading WordLevel file: {}", e))
})?;
Py::new(py, PyWordLevel::new(Some(PyVocab::Vocab(vocab)), kwargs)?)
}
}

#[pyclass(extends=PyModel, module = "tokenizers.models", name=Unigram)]
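Each new `#[staticmethod]` reads the vocabulary in Rust and forwards `kwargs` to the regular constructor, so keyword options behave exactly as they do in `__init__`. A minimal usage sketch with hypothetical vocab files in each model's expected format:

```python
from tokenizers.models import WordLevel, WordPiece

# kwargs are forwarded to the constructor, as in __init__.
wp = WordPiece.from_file("vocab.txt", unk_token="[UNK]")   # BERT-style, one token per line
wl = WordLevel.from_file("vocab.json", unk_token="<unk>")  # JSON word -> id map
```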
3 changes: 3 additions & 0 deletions bindings/python/tests/bindings/test_models.py
@@ -14,6 +14,7 @@ def test_instantiate(self, roberta_files):
vocab = {"a": 0, "b": 1, "ab": 2}
merges = {(0, 1): (0, 2)}
assert isinstance(BPE(vocab, merges), Model)
assert isinstance(BPE.from_file(roberta_files["vocab"], roberta_files["merges"]), BPE)
with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
BPE(vocab=vocab)
BPE(merges=merges)
@@ -42,6 +43,7 @@ def test_instantiate(self, bert_files):
vocab = {"a": 0, "b": 1, "ab": 2}
assert isinstance(WordPiece(vocab), Model)
assert isinstance(WordPiece(vocab), WordPiece)
assert isinstance(WordPiece.from_file(bert_files["vocab"]), WordPiece)
assert isinstance(pickle.loads(pickle.dumps(WordPiece(vocab))), WordPiece)

# Deprecated calls in 0.9
@@ -59,6 +61,7 @@ def test_instantiate(self, roberta_files):
vocab = {"a": 0, "b": 1, "ab": 2}
assert isinstance(WordLevel(vocab), Model)
assert isinstance(WordLevel(vocab), WordLevel)
assert isinstance(WordLevel.from_file(roberta_files["vocab"]), WordLevel)

# The WordLevel model expects a vocab.json using the same format as roberta
# so we can just try to load with this file
12 changes: 4 additions & 8 deletions bindings/python/tests/implementations/test_byte_level_bpe.py
@@ -6,9 +6,7 @@

class TestByteLevelBPE:
def test_basic_encode(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_files(
roberta_files["vocab"], roberta_files["merges"]
)
tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
output = tokenizer.encode("The quick brown fox jumps over the lazy dog")

assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
@@ -36,7 +34,7 @@ def test_basic_encode(self, roberta_files):
]

def test_add_prefix_space(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_files(
tokenizer = ByteLevelBPETokenizer.from_file(
roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True
)
output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
@@ -66,7 +64,7 @@ def test_add_prefix_space(self, roberta_files):
]

def test_lowerspace(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_files(
tokenizer = ByteLevelBPETokenizer.from_file(
roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True, lowercase=True,
)
output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")
@@ -85,8 +83,6 @@ def test_lowerspace(self, roberta_files):
]

def test_multiprocessing_with_parallelism(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_files(
roberta_files["vocab"], roberta_files["merges"]
)
tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
8 changes: 4 additions & 4 deletions bindings/python/tests/implementations/test_char_bpe.py
@@ -6,7 +6,7 @@

class TestBertWordPieceBPE:
def test_basic_encode(self, openai_files):
tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])

output = tokenizer.encode("My name is John", "pair")
assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
@@ -33,7 +33,7 @@ def test_basic_encode(self, openai_files):
assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1]

def test_lowercase(self, openai_files):
tokenizer = CharBPETokenizer.from_files(
tokenizer = CharBPETokenizer.from_file(
openai_files["vocab"], openai_files["merges"], lowercase=True
)
output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
@@ -43,13 +43,13 @@ def test_lowercase(self, openai_files):
assert output.type_ids == [0, 0, 0, 0, 1]

def test_decoding(self, openai_files):
tokenizer = CharBPETokenizer.from_files(
tokenizer = CharBPETokenizer.from_file(
openai_files["vocab"], openai_files["merges"], lowercase=True
)
decoded = tokenizer.decode(tokenizer.encode("my name is john").ids)
assert decoded == "my name is john"

def test_multiprocessing_with_parallelism(self, openai_files):
tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
2 changes: 1 addition & 1 deletion tokenizers/benches/bert_benchmark.rs
@@ -45,7 +45,7 @@ fn create_bert_tokenizer(wp: WordPiece) -> BertTokenizer {
}

pub fn bench_bert(c: &mut Criterion) {
let wp = WordPiece::from_files("data/bert-base-uncased-vocab.txt")
let wp = WordPiece::from_file("data/bert-base-uncased-vocab.txt")
.build()
.unwrap();
let tokenizer = create_bert_tokenizer(wp);
4 changes: 2 additions & 2 deletions tokenizers/benches/bpe_benchmark.rs
@@ -30,7 +30,7 @@ fn create_gpt2_tokenizer(bpe: BPE) -> Tokenizer {
}

fn bench_gpt2(c: &mut Criterion) {
let bpe = BPE::from_files("data/gpt2-vocab.json", "data/gpt2-merges.txt")
let bpe = BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt")
.build()
.unwrap();
let tokenizer = create_gpt2_tokenizer(bpe);
@@ -53,7 +53,7 @@ fn bench_gpt2(c: &mut Criterion) {
b.iter_custom(|iters| iter_bench_encode_batch(iters, tokenizer.deref(), &batches))
});

let bpe = BPE::from_files("data/gpt2-vocab.json", "data/gpt2-merges.txt")
let bpe = BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt")
.cache_capacity(0)
.build()
.unwrap();