From 75f7c15187ecad9be9d018810beaa546508944af Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?=
Date: Wed, 12 Jan 2022 13:38:52 +0100
Subject: [PATCH 1/3] Span/SpanGroup: wrap SpanC in shared_ptr (#9869)

* Span/SpanGroup: wrap SpanC in shared_ptr

When a Span that was retrieved from a SpanGroup was modified, these changes were not reflected in the SpanGroup because the underlying SpanC struct was copied. This change applies the solution proposed by @nrodnova, to wrap SpanC in a shared_ptr. This makes a SpanGroup and Spans derived from it share the same SpanC. So, changes made through a Span are visible in the SpanGroup as well.

Fixes #9556

* Test that a SpanGroup is modified through its Spans
* SpanGroup.push_back: remove nogil

Modifying std::vector is not thread-safe.

* C++ >= 11 does not allow const T in vector
* Add Span.span_c as a shorthand for Span.c.get

Since this method is cdef'ed, it is only visible from Cython, so we avoid using raw pointers in Python. Replace existing uses of span.c.get() to use this new method.

* Fix formatting
* Style fix: pointer types
* SpanGroup.to_bytes: reduce number of shared_ptr::get calls
* Mark SpanGroup modification test with issue

Co-authored-by: Sofie Van Landeghem
---
 spacy/pipeline/_parser_internals/ner.pyx | 28 +++---
 spacy/tests/doc/test_span.py | 14 ++-
 spacy/tokens/span.pxd | 11 ++-
 spacy/tokens/span.pyx | 110 +++++++++++++----
 spacy/tokens/span_group.pxd | 5 +-
 spacy/tokens/span_group.pyx | 21 +++--
 6 files changed, 115 insertions(+), 74 deletions(-)

diff --git a/spacy/pipeline/_parser_internals/ner.pyx b/spacy/pipeline/_parser_internals/ner.pyx index 3edeff19a01..87410de0f19 100644 --- a/spacy/pipeline/_parser_internals/ner.pyx +++ b/spacy/pipeline/_parser_internals/ner.pyx @@ -1,6 +1,8 @@ import os import random from libc.stdint cimport int32_t +from libcpp.memory cimport shared_ptr +from libcpp.vector cimport vector from cymem.cymem cimport Pool from collections import Counter @@ -42,9 +44,7 @@ MOVE_NAMES[OUT] = 'O' cdef struct GoldNERStateC: Transition* ner - SpanC* negs - int32_t length - int32_t nr_neg + vector[shared_ptr[SpanC]] negs cdef class BiluoGold: @@ -77,8 +77,6 @@ cdef GoldNERStateC create_gold_state( negs = [] assert example.x.length > 0 gs.ner = mem.alloc(example.x.length, sizeof(Transition)) - gs.negs = mem.alloc(len(negs), sizeof(SpanC)) - gs.nr_neg = len(negs) ner_ents, ner_tags = example.get_aligned_ents_and_ner() for i, ner_tag in enumerate(ner_tags): gs.ner[i] = moves.lookup_transition(ner_tag) @@ -92,8 +90,8 @@ cdef GoldNERStateC create_gold_state( # In order to handle negative samples, we need to maintain the full # (start, end, label) triple. If we break it down to the 'isnt B-LOC' # thing, we'll get blocked if there's an incorrect prefix. - for i, neg in enumerate(negs): - gs.negs[i] = neg.c + for neg in negs: + gs.negs.push_back(neg.c) return gs @@ -410,6 +408,8 @@ cdef class Begin: cdef int g_act = gold.ner[b0].move cdef attr_t g_tag = gold.ner[b0].label + cdef shared_ptr[SpanC] span + if g_act == MISSING: pass elif g_act == BEGIN: @@ -427,8 +427,8 @@ cdef class Begin: # be correct or not. However, we can at least tell whether we're # going to be opening an entity where there's only one possible # L.
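+ # gold.negs now stores shared_ptr[SpanC] entries, so the struct fields below are reached through span.get().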
- for span in gold.negs[:gold.nr_neg]: - if span.label == label and span.start == b0: + for span in gold.negs: + if span.get().label == label and span.get().start == b0: cost += 1 break return cost @@ -573,8 +573,9 @@ cdef class Last: # If we have negative-example entities, integrate them into the objective, # by marking actions that close an entity that we know is incorrect # as costly. - for span in gold.negs[:gold.nr_neg]: - if span.label == label and (span.end-1) == b0 and span.start == ent_start: + cdef shared_ptr[SpanC] span + for span in gold.negs: + if span.get().label == label and (span.get().end-1) == b0 and span.get().start == ent_start: cost += 1 break return cost @@ -638,8 +639,9 @@ cdef class Unit: # This is fairly straight-forward for U- entities, as we have a single # action cdef int b0 = s.B(0) - for span in gold.negs[:gold.nr_neg]: - if span.label == label and span.start == b0 and span.end == (b0+1): + cdef shared_ptr[SpanC] span + for span in gold.negs: + if span.get().label == label and span.get().start == b0 and span.get().end == (b0+1): cost += 1 break return cost diff --git a/spacy/tests/doc/test_span.py b/spacy/tests/doc/test_span.py index 10aba5b9434..0e7730b658a 100644 --- a/spacy/tests/doc/test_span.py +++ b/spacy/tests/doc/test_span.py @@ -4,7 +4,7 @@ from spacy.attrs import ORTH, LENGTH from spacy.lang.en import English -from spacy.tokens import Doc, Span, Token +from spacy.tokens import Doc, Span, SpanGroup, Token from spacy.vocab import Vocab from spacy.util import filter_spans from thinc.api import get_current_ops @@ -163,6 +163,18 @@ def test_char_span(doc, i_sent, i, j, text): assert span.text == text +@pytest.mark.issue(9556) +def test_modify_span_group(doc): + group = SpanGroup(doc, spans=doc.ents) + for span in group: + span.start = 0 + span.label = doc.vocab.strings["TEST"] + + # Span changes must be reflected in the span group + assert group[0].start == 0 + assert group[0].label == doc.vocab.strings["TEST"] + + def test_spans_sent_spans(doc): sents = list(doc.sents) assert sents[0].start == 0 diff --git a/spacy/tokens/span.pxd b/spacy/tokens/span.pxd index 78bee0a8cc3..85553068e65 100644 --- a/spacy/tokens/span.pxd +++ b/spacy/tokens/span.pxd @@ -1,3 +1,4 @@ +from libcpp.memory cimport shared_ptr cimport numpy as np from .doc cimport Doc @@ -7,19 +8,21 @@ from ..structs cimport SpanC cdef class Span: cdef readonly Doc doc - cdef SpanC c + cdef shared_ptr[SpanC] c cdef public _vector cdef public _vector_norm @staticmethod - cdef inline Span cinit(Doc doc, SpanC span): + cdef inline Span cinit(Doc doc, const shared_ptr[SpanC] &span): cdef Span self = Span.__new__( Span, doc, - start=span.start, - end=span.end + start=span.get().start, + end=span.get().end ) self.c = span return self cpdef np.ndarray to_array(self, object features) + + cdef SpanC* span_c(self) diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx index cd02cab3653..c8fb0d1a296 100644 --- a/spacy/tokens/span.pyx +++ b/spacy/tokens/span.pyx @@ -1,5 +1,6 @@ cimport numpy as np from libc.math cimport sqrt +from libcpp.memory cimport make_shared import numpy from thinc.api import get_array_module @@ -109,14 +110,14 @@ cdef class Span: end_char = start_char else: end_char = doc[end - 1].idx + len(doc[end - 1]) - self.c = SpanC( + self.c = make_shared[SpanC](SpanC( label=label, kb_id=kb_id, start=start, end=end, start_char=start_char, end_char=end_char, - ) + )) self._vector = vector self._vector_norm = vector_norm @@ -126,41 +127,46 @@ cdef class Span: return False else: return True + + 
cdef SpanC* span_c = self.span_c() + cdef SpanC* other_span_c = other.span_c() + # < if op == 0: - return self.c.start_char < other.c.start_char + return span_c.start_char < other_span_c.start_char # <= elif op == 1: - return self.c.start_char <= other.c.start_char + return span_c.start_char <= other_span_c.start_char # == elif op == 2: # Do the cheap comparisons first return ( - (self.c.start_char == other.c.start_char) and \ - (self.c.end_char == other.c.end_char) and \ - (self.c.label == other.c.label) and \ - (self.c.kb_id == other.c.kb_id) and \ + (span_c.start_char == other_span_c.start_char) and \ + (span_c.end_char == other_span_c.end_char) and \ + (span_c.label == other_span_c.label) and \ + (span_c.kb_id == other_span_c.kb_id) and \ (self.doc == other.doc) ) # != elif op == 3: # Do the cheap comparisons first return not ( - (self.c.start_char == other.c.start_char) and \ - (self.c.end_char == other.c.end_char) and \ - (self.c.label == other.c.label) and \ - (self.c.kb_id == other.c.kb_id) and \ + (span_c.start_char == other_span_c.start_char) and \ + (span_c.end_char == other_span_c.end_char) and \ + (span_c.label == other_span_c.label) and \ + (span_c.kb_id == other_span_c.kb_id) and \ (self.doc == other.doc) ) # > elif op == 4: - return self.c.start_char > other.c.start_char + return span_c.start_char > other_span_c.start_char # >= elif op == 5: - return self.c.start_char >= other.c.start_char + return span_c.start_char >= other_span_c.start_char def __hash__(self): - return hash((self.doc, self.c.start_char, self.c.end_char, self.c.label, self.c.kb_id)) + cdef SpanC* span_c = self.span_c() + return hash((self.doc, span_c.start_char, span_c.end_char, span_c.label, span_c.kb_id)) def __len__(self): """Get the number of tokens in the span. @@ -169,9 +175,10 @@ cdef class Span: DOCS: https://spacy.io/api/span#len """ - if self.c.end < self.c.start: + cdef SpanC* span_c = self.span_c() + if span_c.end < span_c.start: return 0 - return self.c.end - self.c.start + return span_c.end - span_c.start def __repr__(self): return self.text @@ -185,15 +192,16 @@ cdef class Span: DOCS: https://spacy.io/api/span#getitem """ + cdef SpanC* span_c = self.span_c() if isinstance(i, slice): start, end = normalize_slice(len(self), i.start, i.stop, i.step) return Span(self.doc, start + self.start, end + self.start) else: if i < 0: - token_i = self.c.end + i + token_i = span_c.end + i else: - token_i = self.c.start + i - if self.c.start <= token_i < self.c.end: + token_i = span_c.start + i + if span_c.start <= token_i < span_c.end: return self.doc[token_i] else: raise IndexError(Errors.E1002) @@ -205,7 +213,8 @@ cdef class Span: DOCS: https://spacy.io/api/span#iter """ - for i in range(self.c.start, self.c.end): + cdef SpanC* span_c = self.span_c() + for i in range(span_c.start, span_c.end): yield self.doc[i] def __reduce__(self): @@ -213,9 +222,10 @@ cdef class Span: @property def _(self): + cdef SpanC* span_c = self.span_c() """Custom extension attributes registered via `set_extension`.""" return Underscore(Underscore.span_extensions, self, - start=self.c.start_char, end=self.c.end_char) + start=span_c.start_char, end=span_c.end_char) def as_doc(self, *, bint copy_user_data=False, array_head=None, array=None): """Create a `Doc` object with a copy of the `Span`'s data. 
@@ -289,13 +299,14 @@ cdef class Span: cdef int length = len(array) cdef attr_t value cdef int i, head_col, ancestor_i + cdef SpanC* span_c = self.span_c() old_to_new_root = dict() if HEAD in attrs: head_col = attrs.index(HEAD) for i in range(length): # if the HEAD refers to a token outside this span, find a more appropriate ancestor token = self[i] - ancestor_i = token.head.i - self.c.start # span offset + ancestor_i = token.head.i - span_c.start # span offset if ancestor_i not in range(length): if DEP in attrs: array[i, attrs.index(DEP)] = dep @@ -303,7 +314,7 @@ cdef class Span: # try finding an ancestor within this span ancestors = token.ancestors for ancestor in ancestors: - ancestor_i = ancestor.i - self.c.start + ancestor_i = ancestor.i - span_c.start if ancestor_i in range(length): array[i, head_col] = ancestor_i - i @@ -332,7 +343,8 @@ cdef class Span: DOCS: https://spacy.io/api/span#get_lca_matrix """ - return numpy.asarray(_get_lca_matrix(self.doc, self.c.start, self.c.end)) + cdef SpanC* span_c = self.span_c() + return numpy.asarray(_get_lca_matrix(self.doc, span_c.start, span_c.end)) def similarity(self, other): """Make a semantic similarity estimate. The default estimate is cosine @@ -426,6 +438,9 @@ cdef class Span: else: raise ValueError(Errors.E030) + cdef SpanC* span_c(self): + return self.c.get() + @property def sents(self): """Obtain the sentences that contain this span. If the given span @@ -477,10 +492,13 @@ cdef class Span: DOCS: https://spacy.io/api/span#ents """ cdef Span ent + cdef SpanC* span_c = self.span_c() + cdef SpanC* ent_span_c ents = [] for ent in self.doc.ents: - if ent.c.start >= self.c.start: - if ent.c.end <= self.c.end: + ent_span_c = ent.span_c() + if ent_span_c.start >= span_c.start: + if ent_span_c.end <= span_c.end: ents.append(ent) else: break @@ -615,11 +633,12 @@ cdef class Span: # This should probably be called 'head', and the other one called # 'gov'. But we went with 'head' elsewhere, and now we're stuck =/ cdef int i + cdef SpanC* span_c = self.span_c() # First, we scan through the Span, and check whether there's a word # with head==0, i.e. a sentence root. If so, we can return it. The # longer the span, the more likely it contains a sentence root, and # in this case we return in linear time. - for i in range(self.c.start, self.c.end): + for i in range(span_c.start, span_c.end): if self.doc.c[i].head == 0: return self.doc[i] # If we don't have a sentence root, we do something that's not so @@ -630,15 +649,15 @@ cdef class Span: # think this should be okay. cdef int current_best = self.doc.length cdef int root = -1 - for i in range(self.c.start, self.c.end): - if self.c.start <= (i+self.doc.c[i].head) < self.c.end: + for i in range(span_c.start, span_c.end): + if span_c.start <= (i+self.doc.c[i].head) < span_c.end: continue words_to_root = _count_words_to_root(&self.doc.c[i], self.doc.length) if words_to_root < current_best: current_best = words_to_root root = i if root == -1: - return self.doc[self.c.start] + return self.doc[span_c.start] else: return self.doc[root] @@ -654,8 +673,9 @@ cdef class Span: the span. RETURNS (Span): The newly constructed object. 
""" - start_idx += self.c.start_char - end_idx += self.c.start_char + cdef SpanC* span_c = self.span_c() + start_idx += span_c.start_char + end_idx += span_c.start_char return self.doc.char_span(start_idx, end_idx, label=label, kb_id=kb_id, vector=vector) @property @@ -736,53 +756,53 @@ cdef class Span: property start: def __get__(self): - return self.c.start + return self.span_c().start def __set__(self, int start): if start < 0: raise IndexError("TODO") - self.c.start = start + self.span_c().start = start property end: def __get__(self): - return self.c.end + return self.span_c().end def __set__(self, int end): if end < 0: raise IndexError("TODO") - self.c.end = end + self.span_c().end = end property start_char: def __get__(self): - return self.c.start_char + return self.span_c().start_char def __set__(self, int start_char): if start_char < 0: raise IndexError("TODO") - self.c.start_char = start_char + self.span_c().start_char = start_char property end_char: def __get__(self): - return self.c.end_char + return self.span_c().end_char def __set__(self, int end_char): if end_char < 0: raise IndexError("TODO") - self.c.end_char = end_char + self.span_c().end_char = end_char property label: def __get__(self): - return self.c.label + return self.span_c().label def __set__(self, attr_t label): - self.c.label = label + self.span_c().label = label property kb_id: def __get__(self): - return self.c.kb_id + return self.span_c().kb_id def __set__(self, attr_t kb_id): - self.c.kb_id = kb_id + self.span_c().kb_id = kb_id property ent_id: """RETURNS (uint64): The entity ID.""" diff --git a/spacy/tokens/span_group.pxd b/spacy/tokens/span_group.pxd index 5074aa27546..6b817578a82 100644 --- a/spacy/tokens/span_group.pxd +++ b/spacy/tokens/span_group.pxd @@ -1,3 +1,4 @@ +from libcpp.memory cimport shared_ptr from libcpp.vector cimport vector from ..structs cimport SpanC @@ -5,6 +6,6 @@ cdef class SpanGroup: cdef public object _doc_ref cdef public str name cdef public dict attrs - cdef vector[SpanC] c + cdef vector[shared_ptr[SpanC]] c - cdef void push_back(self, SpanC span) nogil + cdef void push_back(self, const shared_ptr[SpanC] &span) diff --git a/spacy/tokens/span_group.pyx b/spacy/tokens/span_group.pyx index 6cfa7523789..c310da785ce 100644 --- a/spacy/tokens/span_group.pyx +++ b/spacy/tokens/span_group.pyx @@ -5,6 +5,7 @@ import srsly from spacy.errors import Errors from .span cimport Span from libc.stdint cimport uint64_t, uint32_t, int32_t +from libcpp.memory cimport make_shared cdef class SpanGroup: @@ -135,9 +136,11 @@ cdef class SpanGroup: DOCS: https://spacy.io/api/spangroup#to_bytes """ + cdef SpanC* span_c output = {"name": self.name, "attrs": self.attrs, "spans": []} for i in range(self.c.size()): span = self.c[i] + span_c = span.get() # The struct.pack here is probably overkill, but it might help if # you're saving tonnes of spans, and it doesn't really add any # complexity. 
We do take care to specify little-endian byte order @@ -149,13 +152,13 @@ cdef class SpanGroup: # l: int32_t output["spans"].append(struct.pack( ">QQQllll", - span.id, - span.kb_id, - span.label, - span.start, - span.end, - span.start_char, - span.end_char + span_c.id, + span_c.kb_id, + span_c.label, + span_c.start, + span_c.end, + span_c.start_char, + span_c.end_char )) return srsly.msgpack_dumps(output) @@ -182,8 +185,8 @@ cdef class SpanGroup: span.end = items[4] span.start_char = items[5] span.end_char = items[6] - self.c.push_back(span) + self.c.push_back(make_shared[SpanC](span)) return self - cdef void push_back(self, SpanC span) nogil: + cdef void push_back(self, const shared_ptr[SpanC] &span): self.c.push_back(span) From 0e71bd973fa39e9b39f2ff4e023a42cde2064ff7 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 15 Apr 2022 15:34:58 +0200 Subject: [PATCH 2/3] Return doc offsets in Matcher on spans (#10576) The returned match offsets were only adjusted for `as_spans`, not generally. Because the `on_match` callbacks are always applied to the doc, the `Matcher` matches on spans should consistently use the doc offsets. --- spacy/matcher/matcher.pyx | 7 ++++--- spacy/tests/matcher/test_matcher_api.py | 13 ++++++++++--- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index 745d7cf437b..863bd198ccb 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -252,6 +252,10 @@ cdef class Matcher: # non-overlapping ones this `match` can be either (start, end) or # (start, end, alignments) depending on `with_alignments=` option. for key, *match in matches: + # Adjust span matches to doc offsets + if isinstance(doclike, Span): + match[0] += doclike.start + match[1] += doclike.start span_filter = self._filter.get(key) if span_filter is not None: pairs = pairs_by_id.get(key, []) @@ -282,9 +286,6 @@ cdef class Matcher: if as_spans: final_results = [] for key, start, end, *_ in final_matches: - if isinstance(doclike, Span): - start += doclike.start - end += doclike.start final_results.append(Span(doc, start, end, label=key)) elif with_alignments: # convert alignments List[Dict[str, int]] --> List[int] diff --git a/spacy/tests/matcher/test_matcher_api.py b/spacy/tests/matcher/test_matcher_api.py index c02d65cdfe3..9d401382fbe 100644 --- a/spacy/tests/matcher/test_matcher_api.py +++ b/spacy/tests/matcher/test_matcher_api.py @@ -591,9 +591,16 @@ def test_matcher_span(matcher): doc = Doc(matcher.vocab, words=text.split()) span_js = doc[:3] span_java = doc[4:] - assert len(matcher(doc)) == 2 - assert len(matcher(span_js)) == 1 - assert len(matcher(span_java)) == 1 + doc_matches = matcher(doc) + span_js_matches = matcher(span_js) + span_java_matches = matcher(span_java) + assert len(doc_matches) == 2 + assert len(span_js_matches) == 1 + assert len(span_java_matches) == 1 + + # match offsets always refer to the doc + assert doc_matches[0] == span_js_matches[0] + assert doc_matches[1] == span_java_matches[0] def test_matcher_as_spans(matcher): From 3f76bc1eaec7c862b6a3a58dc16e4296d7db1faf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?= Date: Mon, 27 Jun 2022 16:43:48 +0200 Subject: [PATCH 3/3] Merge `master` into `v4` (#11034) * Add "Aim-spaCy" to spaCy Universe (#10943) * Add Aim-spaCy to spaCy universe * Update Aim thumbnail * Fix author links Co-authored-by: Paul O'Leary McCann * Auto-format code with black (#10945) Co-authored-by: explosion-bot * precomputable_biaffine: avoid concatenation (#10911) 
The `forward` of `precomputable_biaffine` performs matrix multiplication and then `vstack`s the result with padding. This creates a temporary array used for the output of matrix concatenation. This change avoids the temporary by pre-allocating an array that is large enough for the output of matrix multiplication plus padding and fills the array in-place. This gave me a small speedup (a bit over 100 WPS) on de_core_news_lg on M1 Max (after changing thinc-apple-ops to support in-place gemm as BLIS does).

* Add failing test: `test_matcher_extension_in_set_predicate` (#10948)
* vectors: remove use of float as row number (#10955)

The float -1 was returned rather than the integer -1 as the row for unknown keys. This doesn't introduce a real bug, since such floats cast (without issues) to int in the conversion to NumPy arrays. Still, it's nice to do the correct thing :).

* Update for CBlas changes in Thinc 8.1.0.dev2 (#10970)
* Workaround for Typer optional default values with Python calls (#10788)
* Workaround for Typer optional default values with Python calls: added test and workaround.
* @rmitsch Workaround for Typer optional default values with Python calls: reverting some black formatting changes. Co-authored-by: Sofie Van Landeghem
* @rmitsch Workaround for Typer optional default values with Python calls: removing return type hint. Co-authored-by: Sofie Van Landeghem
* Workaround for Typer optional default values with Python calls: fixed imports, added GitHub issue marker.
* Workaround for Typer optional default values with Python calls: removed forcing of default values for optional arguments in init_config_cli(). Added default values for init_config(). Synchronized default values for init_config_cli() and init_config().
* Workaround for Typer optional default values with Python calls: removed unused import.
* Workaround for Typer optional default values with Python calls: fixed usage of optimize in init_config_cli().
* Workaround for Typer optional default values with Python calls: remove output_file from InitDefaultValues.
* Workaround for Typer optional default values with Python calls: rename class for default init values.
* Workaround for Typer optional default values with Python calls: remove newline.
* remove introduced newlines
* Remove test_init_config_from_python_without_optional_args().
* remove leftover import
* reformat import
* remove duplicate

Co-authored-by: Sofie Van Landeghem

* Made _initialize_X() methods private. (#10978)
* Auto-format code with black (#10977)

Co-authored-by: explosion-bot

* account for NER labels with a hyphen in the name (#10960)
* account for NER labels with a hyphen in the name
* cleanup
* fix docstring
* add return type to helper method
* shorter method and few more occurrences
* use helper method across repo
* fix circular import
* partial revert to avoid circular import
* `enable` argument for spacy.load() (#10784)
* Enable flag on spacy.load: foundation for include, enable arguments.
* Enable flag on spacy.load: fixed tests.
* Enable flag on spacy.load: switched from pretrained model to empty model with added pipes for tests.
* Enable flag on spacy.load: switched to more consistent error on misspecification of component activity. Test refactoring. Added to default config.
* Enable flag on spacy.load: added support for fields not in pipeline.
* Enable flag on spacy.load: removed serialization fields from supported fields.
* Enable flag on spacy.load: removed 'enable' from config again.
* Enable flag on spacy.load: relaxed checks in _resolve_component_activation_status() to allow non-standard pipes.
* Enable flag on spacy.load: fixed relaxed checks for _resolve_component_activation_status() to allow non-standard pipes. Extended tests.
* Enable flag on spacy.load: comments w.r.t. resolution workarounds.
* Enable flag on spacy.load: remove include fields. Update website docs.
* Enable flag on spacy.load: updates w.r.t. changes in master.
* Implement Doc.from_json(): update docstrings. Co-authored-by: Adriane Boyd
* Implement Doc.from_json(): remove newline. Co-authored-by: Adriane Boyd
* Implement Doc.from_json(): change error message for E1038. Co-authored-by: Adriane Boyd
* Enable flag on spacy.load: wrapped docstring for _resolve_component_status() at 80 chars.
* Enable flag on spacy.load: changed examples for enable flag.
* Remove newline. Co-authored-by: Sofie Van Landeghem
* Fix docstring for Language._resolve_component_status().
* Rename E1038 to E1042. Co-authored-by: Adriane Boyd Co-authored-by: Sofie Van Landeghem
* add counts to verbose list of NER labels (#10957)
* Update linguistic-features.md (#10993)

Change link for downloading fasttext word vectors

* Use thinc-apple-ops>=0.1.0.dev0 with `apple` extras (#10904)
* Use thinc-apple-ops>=0.1.0.dev0 with `apple` extras

Also test with thinc-apple-ops that is at least 0.1.0.dev0.

* Check thinc-apple-ops on macOS with Python 3.10 Co-authored-by: Adriane Boyd
* Use `pip install --pre` for installing thinc-apple-ops in CI Co-authored-by: Adriane Boyd

Co-authored-by: Gor Arakelyan
Co-authored-by: Paul O'Leary McCann
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: explosion-bot
Co-authored-by: Madeesh Kannan
Co-authored-by: Raphael Mitsch
Co-authored-by: Sofie Van Landeghem
Co-authored-by: Adriane Boyd
Co-authored-by: Victoria <80417010+victorialslocum@users.noreply.github.com>
---
 .github/azure-steps.yml | 4 +-
 pyproject.toml | 2 +-
 requirements.txt | 2 +-
 setup.cfg | 6 +--
 spacy/__init__.py | 10 +++-
 spacy/cli/debug_data.py | 10 ++--
 spacy/cli/init_config.py | 37 +++++++++----
 spacy/errors.py | 2 +
 spacy/kb.pyx | 22 ++++----
 spacy/language.py | 50 +++++++++++++++++-
 spacy/ml/_precomputable_affine.py | 6 ++-
 spacy/ml/parser_model.pyx | 5 +-
 .../pipeline/_parser_internals/arc_eager.pyx | 3 +-
 spacy/pipeline/_parser_internals/ner.pyx | 3 +-
 spacy/pipeline/dep_parser.pyx | 3 +-
 spacy/pipeline/ner.pyx | 4 +-
 spacy/tests/matcher/test_matcher_api.py | 11 ++++
 spacy/tests/parser/test_ner.py | 22 ++++++--
 spacy/tests/parser/test_nonproj.py | 12 +++--
 spacy/tests/pipeline/test_pipe_methods.py | 52 ++++++++++++++++++-
 spacy/tests/util.py | 3 +-
 spacy/training/__init__.py | 1 +
 spacy/training/augment.py | 8 ++-
 spacy/training/example.pyx | 4 +-
 spacy/training/iob_utils.py | 10 +++-
 spacy/util.py | 37 +++++++++++--
 spacy/vectors.pyx | 2 +-
 website/docs/api/top-level.md | 1 +
 website/docs/usage/linguistic-features.md | 2 +-
 website/docs/usage/processing-pipelines.md | 12 +++++
 website/meta/universe.json | 23 ++++++++
 31 files changed, 300 insertions(+), 69 deletions(-)

diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index 80c88b0b862..d7233328ab5 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -111,7 +111,7 @@ steps: condition: eq(variables['python_version'], '3.8') - script: | - ${{ parameters.prefix }} python -m pip install thinc-apple-ops + ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops ${{ parameters.prefix }}
python -m pytest --pyargs spacy displayName: "Run CPU tests with thinc-apple-ops" - condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.9')) + condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10')) diff --git a/pyproject.toml b/pyproject.toml index 14e09e30fd9..4fea41be236 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = [ "cymem>=2.0.2,<2.1.0", "preshed>=3.0.2,<3.1.0", "murmurhash>=0.28.0,<1.1.0", - "thinc>=8.1.0.dev0,<8.2.0", + "thinc>=8.1.0.dev2,<8.2.0", "pathy", "numpy>=1.15.0", ] diff --git a/requirements.txt b/requirements.txt index b2929145eee..082ef152276 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ spacy-legacy>=3.0.9,<3.1.0 spacy-loggers>=1.0.0,<2.0.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 -thinc>=8.1.0.dev0,<8.2.0 +thinc>=8.1.0.dev2,<8.2.0 ml_datasets>=0.2.0,<0.3.0 murmurhash>=0.28.0,<1.1.0 wasabi>=0.9.1,<1.1.0 diff --git a/setup.cfg b/setup.cfg index c6036a8b3bd..d317847ba37 100644 --- a/setup.cfg +++ b/setup.cfg @@ -38,7 +38,7 @@ setup_requires = cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 murmurhash>=0.28.0,<1.1.0 - thinc>=8.1.0.dev0,<8.2.0 + thinc>=8.1.0.dev2,<8.2.0 install_requires = # Our libraries spacy-legacy>=3.0.9,<3.1.0 @@ -46,7 +46,7 @@ install_requires = murmurhash>=0.28.0,<1.1.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 - thinc>=8.1.0.dev0,<8.2.0 + thinc>=8.1.0.dev2,<8.2.0 wasabi>=0.9.1,<1.1.0 srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 @@ -104,7 +104,7 @@ cuda114 = cuda115 = cupy-cuda115>=5.0.0b4,<11.0.0 apple = - thinc-apple-ops>=0.0.4,<1.0.0 + thinc-apple-ops>=0.1.0.dev0,<1.0.0 # Language tokenizers with external dependencies ja = sudachipy>=0.5.2,!=0.6.1 diff --git a/spacy/__init__.py b/spacy/__init__.py index ca47edc94cc..069215fda77 100644 --- a/spacy/__init__.py +++ b/spacy/__init__.py @@ -32,6 +32,7 @@ def load( *, vocab: Union[Vocab, bool] = True, disable: Iterable[str] = util.SimpleFrozenList(), + enable: Iterable[str] = util.SimpleFrozenList(), exclude: Iterable[str] = util.SimpleFrozenList(), config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(), ) -> Language: @@ -42,6 +43,8 @@ def load( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. All other + pipes will be disabled (but can be enabled later using nlp.enable_pipe). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict @@ -49,7 +52,12 @@ def load( RETURNS (Language): The loaded nlp object. 
""" return util.load_model( - name, vocab=vocab, disable=disable, exclude=exclude, config=config + name, + vocab=vocab, + disable=disable, + enable=enable, + exclude=exclude, + config=config, ) diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py index 0061515c68d..bd05471b1d0 100644 --- a/spacy/cli/debug_data.py +++ b/spacy/cli/debug_data.py @@ -10,7 +10,7 @@ from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides from ._util import import_code, debug_cli -from ..training import Example +from ..training import Example, remove_bilu_prefix from ..training.initialize import get_sourced_components from ..schemas import ConfigSchemaTraining from ..pipeline._parser_internals import nonproj @@ -361,7 +361,7 @@ def debug_data( if label != "-" ] labels_with_counts = _format_labels(labels_with_counts, counts=True) - msg.text(f"Labels in train data: {_format_labels(labels)}", show=verbose) + msg.text(f"Labels in train data: {labels_with_counts}", show=verbose) missing_labels = model_labels - labels if missing_labels: msg.warn( @@ -758,9 +758,9 @@ def _compile_gold( # "Illegal" whitespace entity data["ws_ents"] += 1 if label.startswith(("B-", "U-")): - combined_label = label.split("-")[1] + combined_label = remove_bilu_prefix(label) data["ner"][combined_label] += 1 - if sent_starts[i] == True and label.startswith(("I-", "L-")): + if sent_starts[i] and label.startswith(("I-", "L-")): data["boundary_cross_ents"] += 1 elif label == "-": data["ner"]["-"] += 1 @@ -908,7 +908,7 @@ def _get_examples_without_label( for eg in data: if component == "ner": labels = [ - label.split("-")[1] + remove_bilu_prefix(label) for label in eg.get_aligned_ner() if label not in ("O", "-", None) ] diff --git a/spacy/cli/init_config.py b/spacy/cli/init_config.py index d4cd939c25f..b634caa4ce3 100644 --- a/spacy/cli/init_config.py +++ b/spacy/cli/init_config.py @@ -10,6 +10,7 @@ from .. import util from ..language import DEFAULT_CONFIG_PRETRAIN_PATH from ..schemas import RecommendationSchema +from ..util import SimpleFrozenList from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND from ._util import string_to_list, import_code @@ -24,16 +25,30 @@ class Optimizations(str, Enum): accuracy = "accuracy" +class InitValues: + """ + Default values for initialization. Dedicated class to allow synchronized default values for init_config_cli() and + init_config(), i.e. initialization calls via CLI respectively Python. + """ + + lang = "en" + pipeline = SimpleFrozenList(["tagger", "parser", "ner"]) + optimize = Optimizations.efficiency + gpu = False + pretraining = False + force_overwrite = False + + @init_cli.command("config") def init_config_cli( # fmt: off output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True), - lang: str = Opt("en", "--lang", "-l", help="Two-letter code of the language to use"), - pipeline: str = Opt("tagger,parser,ner", "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"), - optimize: Optimizations = Opt(Optimizations.efficiency.value, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."), - gpu: bool = Opt(False, "--gpu", "-G", help="Whether the model can run on GPU. 
This will impact the choice of architecture, pretrained weights and related hyperparameters."), - pretraining: bool = Opt(False, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"), - force_overwrite: bool = Opt(False, "--force", "-F", help="Force overwriting the output file"), + lang: str = Opt(InitValues.lang, "--lang", "-l", help="Two-letter code of the language to use"), + pipeline: str = Opt(",".join(InitValues.pipeline), "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"), + optimize: Optimizations = Opt(InitValues.optimize, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."), + gpu: bool = Opt(InitValues.gpu, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."), + pretraining: bool = Opt(InitValues.pretraining, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"), + force_overwrite: bool = Opt(InitValues.force_overwrite, "--force", "-F", help="Force overwriting the output file"), # fmt: on ): """ @@ -133,11 +148,11 @@ def fill_config( def init_config( *, - lang: str, - pipeline: List[str], - optimize: str, - gpu: bool, - pretraining: bool = False, + lang: str = InitValues.lang, + pipeline: List[str] = InitValues.pipeline, + optimize: str = InitValues.optimize, + gpu: bool = InitValues.gpu, + pretraining: bool = InitValues.pretraining, silent: bool = True, ) -> Config: msg = Printer(no_print=silent) diff --git a/spacy/errors.py b/spacy/errors.py index 384a6a4d24a..14010565bbd 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -932,6 +932,8 @@ class Errors(metaclass=ErrorsWithCodes): E1040 = ("Doc.from_json requires all tokens to have the same attributes. 
" "Some tokens do not contain annotation for: {partial_attrs}") E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}") + E1042 = ("Function was called with `{arg1}`={arg1_values} and " + "`{arg2}`={arg2_values} but these arguments are conflicting.") # Deprecated model shortcuts, only used in errors and warnings diff --git a/spacy/kb.pyx b/spacy/kb.pyx index 9a765c8e4ae..ae1983a8d10 100644 --- a/spacy/kb.pyx +++ b/spacy/kb.pyx @@ -93,14 +93,14 @@ cdef class KnowledgeBase: self.vocab = vocab self._create_empty_vectors(dummy_hash=self.vocab.strings[""]) - def initialize_entities(self, int64_t nr_entities): + def _initialize_entities(self, int64_t nr_entities): self._entry_index = PreshMap(nr_entities + 1) self._entries = entry_vec(nr_entities + 1) - def initialize_vectors(self, int64_t nr_entities): + def _initialize_vectors(self, int64_t nr_entities): self._vectors_table = float_matrix(nr_entities + 1) - def initialize_aliases(self, int64_t nr_aliases): + def _initialize_aliases(self, int64_t nr_aliases): self._alias_index = PreshMap(nr_aliases + 1) self._aliases_table = alias_vec(nr_aliases + 1) @@ -155,8 +155,8 @@ cdef class KnowledgeBase: raise ValueError(Errors.E140) nr_entities = len(set(entity_list)) - self.initialize_entities(nr_entities) - self.initialize_vectors(nr_entities) + self._initialize_entities(nr_entities) + self._initialize_vectors(nr_entities) i = 0 cdef KBEntryC entry @@ -388,9 +388,9 @@ cdef class KnowledgeBase: nr_entities = header[0] nr_aliases = header[1] entity_vector_length = header[2] - self.initialize_entities(nr_entities) - self.initialize_vectors(nr_entities) - self.initialize_aliases(nr_aliases) + self._initialize_entities(nr_entities) + self._initialize_vectors(nr_entities) + self._initialize_aliases(nr_aliases) self.entity_vector_length = entity_vector_length def deserialize_vectors(b): @@ -512,8 +512,8 @@ cdef class KnowledgeBase: cdef int64_t entity_vector_length reader.read_header(&nr_entities, &entity_vector_length) - self.initialize_entities(nr_entities) - self.initialize_vectors(nr_entities) + self._initialize_entities(nr_entities) + self._initialize_vectors(nr_entities) self.entity_vector_length = entity_vector_length # STEP 1: load entity vectors @@ -552,7 +552,7 @@ cdef class KnowledgeBase: # STEP 3: load aliases cdef int64_t nr_aliases reader.read_alias_length(&nr_aliases) - self.initialize_aliases(nr_aliases) + self._initialize_aliases(nr_aliases) cdef int64_t nr_candidates cdef vector[int64_t] entry_indices diff --git a/spacy/language.py b/spacy/language.py index 42847823fee..816bd6531ac 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1,4 +1,4 @@ -from typing import Iterator, Optional, Any, Dict, Callable, Iterable +from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection from typing import Union, Tuple, List, Set, Pattern, Sequence from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload @@ -1694,6 +1694,7 @@ def from_config( *, vocab: Union[Vocab, bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), meta: Dict[str, Any] = SimpleFrozenDict(), auto_fill: bool = True, @@ -1708,6 +1709,8 @@ def from_config( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. 
All other + pipes will be disabled (and can be enabled using `nlp.enable_pipe`). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. meta (Dict[str, Any]): Meta overrides for nlp.meta. @@ -1861,8 +1864,15 @@ def from_config( # Restore the original vocab after sourcing if necessary if vocab_b is not None: nlp.vocab.from_bytes(vocab_b) - disabled_pipes = [*config["nlp"]["disabled"], *disable] + + # Resolve disabled/enabled settings. + disabled_pipes = cls._resolve_component_status( + [*config["nlp"]["disabled"], *disable], + [*config["nlp"].get("enabled", []), *enable], + config["nlp"]["pipeline"], + ) nlp._disabled = set(p for p in disabled_pipes if p not in exclude) + nlp.batch_size = config["nlp"]["batch_size"] nlp.config = filled if auto_fill else config if after_pipeline_creation is not None: @@ -2014,6 +2024,42 @@ def to_disk( serializers["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude) util.to_disk(path, serializers, exclude) + @staticmethod + def _resolve_component_status( + disable: Iterable[str], enable: Iterable[str], pipe_names: Collection[str] + ) -> Tuple[str, ...]: + """Derives whether (1) `disable` and `enable` values are consistent and (2) + resolves those to a single set of disabled components. Raises an error in + case of inconsistency. + + disable (Iterable[str]): Names of components or serialization fields to disable. + enable (Iterable[str]): Names of pipeline components to enable. + pipe_names (Iterable[str]): Names of all pipeline components. + + RETURNS (Tuple[str, ...]): Names of components to exclude from pipeline w.r.t. + specified includes and excludes. + """ + + if disable is not None and isinstance(disable, str): + disable = [disable] + to_disable = disable + + if enable: + to_disable = [ + pipe_name for pipe_name in pipe_names if pipe_name not in enable + ] + if disable and disable != to_disable: + raise ValueError( + Errors.E1042.format( + arg1="enable", + arg2="disable", + arg1_values=enable, + arg2_values=disable, + ) + ) + + return tuple(to_disable) + def from_disk( self, path: Union[str, Path], diff --git a/spacy/ml/_precomputable_affine.py b/spacy/ml/_precomputable_affine.py index b99de2d2b19..7a25e757450 100644 --- a/spacy/ml/_precomputable_affine.py +++ b/spacy/ml/_precomputable_affine.py @@ -22,9 +22,11 @@ def forward(model, X, is_train): nP = model.get_dim("nP") nI = model.get_dim("nI") W = model.get_param("W") - Yf = model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True) + # Preallocate array for layer output, including padding. 
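+ # (Row 0 is reserved for the padding vector, which is filled in below; the gemm output is written in-place into rows 1..n via out=Yf[1:], replacing the old vstack.)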
+ Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False) + model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True, out=Yf[1:]) Yf = Yf.reshape((Yf.shape[0], nF, nO, nP)) - Yf = model.ops.xp.vstack((model.get_param("pad"), Yf)) + Yf[0] = model.get_param("pad") def backward(dY_ids): # This backprop is particularly tricky, because we get back a different diff --git a/spacy/ml/parser_model.pyx b/spacy/ml/parser_model.pyx index 57f933b07cb..e045dc3b775 100644 --- a/spacy/ml/parser_model.pyx +++ b/spacy/ml/parser_model.pyx @@ -4,6 +4,7 @@ from libc.math cimport exp from libc.string cimport memset, memcpy from libc.stdlib cimport calloc, free, realloc from thinc.backends.linalg cimport Vec, VecVec +from thinc.backends.cblas cimport saxpy, sgemm import numpy import numpy.random @@ -112,7 +113,7 @@ cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, memcpy(A.scores, A.hiddens, n.states * n.classes * sizeof(float)) else: # Compute hidden-to-output - cblas.sgemm()(False, True, n.states, n.classes, n.hiddens, + sgemm(cblas)(False, True, n.states, n.classes, n.hiddens, 1.0, A.hiddens, n.hiddens, W.hidden_weights, n.hiddens, 0.0, A.scores, n.classes) @@ -147,7 +148,7 @@ cdef void sum_state_features(CBlas cblas, float* output, else: idx = token_ids[f] * id_stride + f*O feature = &cached[idx] - cblas.saxpy()(O, one, feature, 1, &output[b*O], 1) + saxpy(cblas)(O, one, feature, 1, &output[b*O], 1) token_ids += F diff --git a/spacy/pipeline/_parser_internals/arc_eager.pyx b/spacy/pipeline/_parser_internals/arc_eager.pyx index d60f1c3e664..257b5ef8a66 100644 --- a/spacy/pipeline/_parser_internals/arc_eager.pyx +++ b/spacy/pipeline/_parser_internals/arc_eager.pyx @@ -10,6 +10,7 @@ from ...strings cimport hash_string from ...structs cimport TokenC from ...tokens.doc cimport Doc, set_children_from_heads from ...tokens.token cimport MISSING_DEP +from ...training import split_bilu_label from ...training.example cimport Example from .stateclass cimport StateClass from ._state cimport StateC, ArcC @@ -687,7 +688,7 @@ cdef class ArcEager(TransitionSystem): return self.c[name_or_id] name = name_or_id if '-' in name: - move_str, label_str = name.split('-', 1) + move_str, label_str = split_bilu_label(name) label = self.strings[label_str] else: move_str = name diff --git a/spacy/pipeline/_parser_internals/ner.pyx b/spacy/pipeline/_parser_internals/ner.pyx index 87410de0f19..cc196d85abe 100644 --- a/spacy/pipeline/_parser_internals/ner.pyx +++ b/spacy/pipeline/_parser_internals/ner.pyx @@ -15,6 +15,7 @@ from ...typedefs cimport weight_t, attr_t from ...lexeme cimport Lexeme from ...attrs cimport IS_SPACE from ...structs cimport TokenC, SpanC +from ...training import split_bilu_label from ...training.example cimport Example from .stateclass cimport StateClass from ._state cimport StateC @@ -180,7 +181,7 @@ cdef class BiluoPushDown(TransitionSystem): if name == '-' or name == '' or name is None: return Transition(clas=0, move=MISSING, label=0, score=0) elif '-' in name: - move_str, label_str = name.split('-', 1) + move_str, label_str = split_bilu_label(name) # Deprecated, hacky way to denote 'not this entity' if label_str.startswith('!'): raise ValueError(Errors.E869.format(label=name)) diff --git a/spacy/pipeline/dep_parser.pyx b/spacy/pipeline/dep_parser.pyx index 50c57ee5b72..e5f6861580d 100644 --- a/spacy/pipeline/dep_parser.pyx +++ b/spacy/pipeline/dep_parser.pyx @@ -12,6 +12,7 @@ from ..language import Language from ._parser_internals import nonproj from ._parser_internals.nonproj 
import DELIMITER from ..scorer import Scorer +from ..training import remove_bilu_prefix from ..util import registry @@ -314,7 +315,7 @@ cdef class DependencyParser(Parser): # Get the labels from the model by looking at the available moves for move in self.move_names: if "-" in move: - label = move.split("-")[1] + label = remove_bilu_prefix(move) if DELIMITER in label: label = label.split(DELIMITER)[1] labels.add(label) diff --git a/spacy/pipeline/ner.pyx b/spacy/pipeline/ner.pyx index 4835a8c4bee..25f48c9f857 100644 --- a/spacy/pipeline/ner.pyx +++ b/spacy/pipeline/ner.pyx @@ -6,10 +6,10 @@ from thinc.api import Model, Config from ._parser_internals.transition_system import TransitionSystem from .transition_parser cimport Parser from ._parser_internals.ner cimport BiluoPushDown - from ..language import Language from ..scorer import get_ner_prf, PRFScore from ..util import registry +from ..training import remove_bilu_prefix default_model_config = """ @@ -242,7 +242,7 @@ cdef class EntityRecognizer(Parser): def labels(self): # Get the labels from the model by looking at the available moves, e.g. # B-PERSON, I-PERSON, L-PERSON, U-PERSON - labels = set(move.split("-")[1] for move in self.move_names + labels = set(remove_bilu_prefix(move) for move in self.move_names if move[0] in ("B", "I", "L", "U")) return tuple(sorted(labels)) diff --git a/spacy/tests/matcher/test_matcher_api.py b/spacy/tests/matcher/test_matcher_api.py index 82abe09141b..dc698ae30d3 100644 --- a/spacy/tests/matcher/test_matcher_api.py +++ b/spacy/tests/matcher/test_matcher_api.py @@ -476,6 +476,17 @@ def test_matcher_extension_set_membership(en_vocab): assert len(matches) == 0 +@pytest.mark.xfail(reason="IN predicate must handle sequence values in extensions") +def test_matcher_extension_in_set_predicate(en_vocab): + matcher = Matcher(en_vocab) + Token.set_extension("ext", default=[]) + pattern = [{"_": {"ext": {"IN": ["A", "C"]}}}] + matcher.add("M", [pattern]) + doc = Doc(en_vocab, words=["a", "b", "c"]) + doc[0]._.ext = ["A", "B"] + assert len(matcher(doc)) == 1 + + def test_matcher_basic_check(en_vocab): matcher = Matcher(en_vocab) # Potential mistake: pass in pattern instead of list of patterns diff --git a/spacy/tests/parser/test_ner.py b/spacy/tests/parser/test_ner.py index b3b29d1f927..53bb2d55432 100644 --- a/spacy/tests/parser/test_ner.py +++ b/spacy/tests/parser/test_ner.py @@ -10,7 +10,7 @@ from spacy.language import Language from spacy.lookups import Lookups from spacy.pipeline._parser_internals.ner import BiluoPushDown -from spacy.training import Example, iob_to_biluo +from spacy.training import Example, iob_to_biluo, split_bilu_label from spacy.tokens import Doc, Span from spacy.vocab import Vocab import logging @@ -110,6 +110,9 @@ def test_issue2385(): # maintain support for iob2 format tags3 = ("B-PERSON", "I-PERSON", "B-PERSON") assert iob_to_biluo(tags3) == ["B-PERSON", "L-PERSON", "U-PERSON"] + # ensure it works with hyphens in the name + tags4 = ("B-MULTI-PERSON", "I-MULTI-PERSON", "B-MULTI-PERSON") + assert iob_to_biluo(tags4) == ["B-MULTI-PERSON", "L-MULTI-PERSON", "U-MULTI-PERSON"] @pytest.mark.issue(2800) @@ -154,6 +157,19 @@ def test_issue3209(): assert ner2.move_names == move_names +def test_labels_from_BILUO(): + """Test that labels are inferred correctly when there's a - in label. 
+ """ + nlp = English() + ner = nlp.add_pipe("ner") + ner.add_label("LARGE-ANIMAL") + nlp.initialize() + move_names = ["O", "B-LARGE-ANIMAL", "I-LARGE-ANIMAL", "L-LARGE-ANIMAL", "U-LARGE-ANIMAL"] + labels = {"LARGE-ANIMAL"} + assert ner.move_names == move_names + assert set(ner.labels) == labels + + @pytest.mark.issue(4267) def test_issue4267(): """Test that running an entity_ruler after ner gives consistent results""" @@ -298,7 +314,7 @@ def test_oracle_moves_missing_B(en_vocab): elif tag == "O": moves.add_action(move_types.index("O"), "") else: - action, label = tag.split("-") + action, label = split_bilu_label(tag) moves.add_action(move_types.index("B"), label) moves.add_action(move_types.index("I"), label) moves.add_action(move_types.index("L"), label) @@ -324,7 +340,7 @@ def test_oracle_moves_whitespace(en_vocab): elif tag == "O": moves.add_action(move_types.index("O"), "") else: - action, label = tag.split("-") + action, label = split_bilu_label(tag) moves.add_action(move_types.index(action), label) moves.get_oracle_sequence(example) diff --git a/spacy/tests/parser/test_nonproj.py b/spacy/tests/parser/test_nonproj.py index b420c300f70..051d0ef0c05 100644 --- a/spacy/tests/parser/test_nonproj.py +++ b/spacy/tests/parser/test_nonproj.py @@ -49,7 +49,9 @@ def test_parser_contains_cycle(tree, cyclic_tree, partial_tree, multirooted_tree assert contains_cycle(multirooted_tree) is None -def test_parser_is_nonproj_arc(cyclic_tree, nonproj_tree, partial_tree, multirooted_tree): +def test_parser_is_nonproj_arc( + cyclic_tree, nonproj_tree, partial_tree, multirooted_tree +): assert is_nonproj_arc(0, nonproj_tree) is False assert is_nonproj_arc(1, nonproj_tree) is False assert is_nonproj_arc(2, nonproj_tree) is False @@ -62,7 +64,9 @@ def test_parser_is_nonproj_arc(cyclic_tree, nonproj_tree, partial_tree, multiroo assert is_nonproj_arc(7, partial_tree) is False assert is_nonproj_arc(17, multirooted_tree) is False assert is_nonproj_arc(16, multirooted_tree) is True - with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'): + with pytest.raises( + ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]" + ): is_nonproj_arc(6, cyclic_tree) @@ -73,7 +77,9 @@ def test_parser_is_nonproj_tree( assert is_nonproj_tree(nonproj_tree) is True assert is_nonproj_tree(partial_tree) is False assert is_nonproj_tree(multirooted_tree) is True - with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'): + with pytest.raises( + ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]" + ): is_nonproj_tree(cyclic_tree) diff --git a/spacy/tests/pipeline/test_pipe_methods.py b/spacy/tests/pipeline/test_pipe_methods.py index 4b8fb8ebcf1..6f00a1cd97a 100644 --- a/spacy/tests/pipeline/test_pipe_methods.py +++ b/spacy/tests/pipeline/test_pipe_methods.py @@ -4,13 +4,14 @@ import pytest from thinc.api import get_current_ops +import spacy from spacy.lang.en import English from spacy.lang.en.syntax_iterators import noun_chunks from spacy.language import Language from spacy.pipeline import TrainablePipe from spacy.tokens import Doc from spacy.training import Example -from spacy.util import SimpleFrozenList, get_arg_names +from spacy.util import SimpleFrozenList, get_arg_names, make_tempdir from spacy.vocab import Vocab @@ -602,3 +603,52 @@ def component(doc): assert results[component] == "".join(eg.predicted.text for eg in examples) for component in components - set(components_to_annotate): assert 
results[component] == "" + + +def test_load_disable_enable() -> None: + """ + Tests spacy.load() with dis-/enabling components. + """ + + base_nlp = English() + for pipe in ("sentencizer", "tagger", "parser"): + base_nlp.add_pipe(pipe) + + with make_tempdir() as tmp_dir: + base_nlp.to_disk(tmp_dir) + to_disable = ["parser", "tagger"] + to_enable = ["tagger", "parser"] + + # Setting only `disable`. + nlp = spacy.load(tmp_dir, disable=to_disable) + assert all([comp_name in nlp.disabled for comp_name in to_disable]) + + # Setting only `enable`. + nlp = spacy.load(tmp_dir, enable=to_enable) + assert all( + [ + (comp_name in nlp.disabled) is (comp_name not in to_enable) + for comp_name in nlp.component_names + ] + ) + + # Testing consistent enable/disable combination. + nlp = spacy.load( + tmp_dir, + enable=to_enable, + disable=[ + comp_name + for comp_name in nlp.component_names + if comp_name not in to_enable + ], + ) + assert all( + [ + (comp_name in nlp.disabled) is (comp_name not in to_enable) + for comp_name in nlp.component_names + ] + ) + + # Inconsistent enable/disable combination. + with pytest.raises(ValueError): + spacy.load(tmp_dir, enable=to_enable, disable=["parser"]) diff --git a/spacy/tests/util.py b/spacy/tests/util.py index 365ea4349d1..d5f3c39ff36 100644 --- a/spacy/tests/util.py +++ b/spacy/tests/util.py @@ -5,6 +5,7 @@ from spacy.tokens import Doc from spacy.vocab import Vocab from spacy.util import make_tempdir # noqa: F401 +from spacy.training import split_bilu_label from thinc.api import get_current_ops @@ -40,7 +41,7 @@ def apply_transition_sequence(parser, doc, sequence): desired state.""" for action_name in sequence: if "-" in action_name: - move, label = action_name.split("-") + move, label = split_bilu_label(action_name) parser.add_label(label) with parser.step_through(doc) as stepwise: for transition in sequence: diff --git a/spacy/training/__init__.py b/spacy/training/__init__.py index a4feb01f4f6..71d1fa775fd 100644 --- a/spacy/training/__init__.py +++ b/spacy/training/__init__.py @@ -5,6 +5,7 @@ from .iob_utils import iob_to_biluo, biluo_to_iob # noqa: F401 from .iob_utils import offsets_to_biluo_tags, biluo_tags_to_offsets # noqa: F401 from .iob_utils import biluo_tags_to_spans, tags_to_entities # noqa: F401 +from .iob_utils import split_bilu_label, remove_bilu_prefix # noqa: F401 from .gold_io import docs_to_json, read_json_file # noqa: F401 from .batchers import minibatch_by_padded_size, minibatch_by_words # noqa: F401 from .loggers import console_logger # noqa: F401 diff --git a/spacy/training/augment.py b/spacy/training/augment.py index 59a39c7ee71..55d780ba4bb 100644 --- a/spacy/training/augment.py +++ b/spacy/training/augment.py @@ -3,10 +3,10 @@ import random import itertools from functools import partial -from pydantic import BaseModel, StrictStr from ..util import registry from .example import Example +from .iob_utils import split_bilu_label if TYPE_CHECKING: from ..language import Language # noqa: F401 @@ -278,10 +278,8 @@ def make_whitespace_variant( ent_prev = doc_dict["entities"][position - 1] ent_next = doc_dict["entities"][position] if "-" in ent_prev and "-" in ent_next: - ent_iob_prev = ent_prev.split("-")[0] - ent_type_prev = ent_prev.split("-", 1)[1] - ent_iob_next = ent_next.split("-")[0] - ent_type_next = ent_next.split("-", 1)[1] + ent_iob_prev, ent_type_prev = split_bilu_label(ent_prev) + ent_iob_next, ent_type_next = split_bilu_label(ent_next) if ( ent_iob_prev in ("B", "I") and ent_iob_next in ("I", "L") diff --git 
a/spacy/training/example.pyx b/spacy/training/example.pyx index 3035388a643..045f0b48342 100644 --- a/spacy/training/example.pyx +++ b/spacy/training/example.pyx @@ -9,7 +9,7 @@ from ..tokens.span import Span from ..attrs import IDS from .alignment import Alignment from .iob_utils import biluo_to_iob, offsets_to_biluo_tags, doc_to_biluo_tags -from .iob_utils import biluo_tags_to_spans +from .iob_utils import biluo_tags_to_spans, remove_bilu_prefix from ..errors import Errors, Warnings from ..pipeline._parser_internals import nonproj from ..tokens.token cimport MISSING_DEP @@ -519,7 +519,7 @@ def _parse_ner_tags(biluo_or_offsets, vocab, words, spaces): else: ent_iobs.append(iob_tag.split("-")[0]) if iob_tag.startswith("I") or iob_tag.startswith("B"): - ent_types.append(iob_tag.split("-", 1)[1]) + ent_types.append(remove_bilu_prefix(iob_tag)) else: ent_types.append("") return ent_iobs, ent_types diff --git a/spacy/training/iob_utils.py b/spacy/training/iob_utils.py index 64492c2bc4f..61f83a1c3bd 100644 --- a/spacy/training/iob_utils.py +++ b/spacy/training/iob_utils.py @@ -1,4 +1,4 @@ -from typing import List, Dict, Tuple, Iterable, Union, Iterator +from typing import List, Dict, Tuple, Iterable, Union, Iterator, cast import warnings from ..errors import Errors, Warnings @@ -218,6 +218,14 @@ def tags_to_entities(tags: Iterable[str]) -> List[Tuple[str, int, int]]: return entities +def split_bilu_label(label: str) -> Tuple[str, str]: + return cast(Tuple[str, str], label.split("-", 1)) + + +def remove_bilu_prefix(label: str) -> str: + return label.split("-", 1)[1] + + # Fallbacks to make backwards-compat easier offsets_from_biluo_tags = biluo_tags_to_offsets spans_from_biluo_tags = biluo_tags_to_spans diff --git a/spacy/util.py b/spacy/util.py index 0111c839eb3..9b871b87ba5 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -1,6 +1,6 @@ from typing import List, Mapping, NoReturn, Union, Dict, Any, Set, cast from typing import Optional, Iterable, Callable, Tuple, Type -from typing import Iterator, Type, Pattern, Generator, TYPE_CHECKING +from typing import Iterator, Pattern, Generator, TYPE_CHECKING from types import ModuleType import os import importlib @@ -12,7 +12,6 @@ from thinc.api import ConfigValidationError, Model import functools import itertools -import numpy.random import numpy import srsly import catalogue @@ -400,6 +399,7 @@ def load_model( *, vocab: Union["Vocab", bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": @@ -409,11 +409,19 @@ def load_model( vocab (Vocab / True): Optional vocab to pass in on initialization. If True, a new Vocab object will be created. disable (Iterable[str]): Names of pipeline components to disable. + enable (Iterable[str]): Names of pipeline components to enable. All others will be disabled. + exclude (Iterable[str]): Names of pipeline components to exclude. config (Dict[str, Any] / Config): Config overrides as nested dict or dict keyed by section values in dot notation. RETURNS (Language): The loaded nlp object. 
""" - kwargs = {"vocab": vocab, "disable": disable, "exclude": exclude, "config": config} + kwargs = { + "vocab": vocab, + "disable": disable, + "enable": enable, + "exclude": exclude, + "config": config, + } if isinstance(name, str): # name or string path if name.startswith("blank:"): # shortcut for blank model return get_lang_class(name.replace("blank:", ""))() @@ -433,6 +441,7 @@ def load_model_from_package( *, vocab: Union["Vocab", bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": @@ -444,6 +453,8 @@ def load_model_from_package( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. All other + pipes will be disabled (and can be enabled using `nlp.enable_pipe`). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict @@ -451,7 +462,7 @@ def load_model_from_package( RETURNS (Language): The loaded nlp object. """ cls = importlib.import_module(name) - return cls.load(vocab=vocab, disable=disable, exclude=exclude, config=config) # type: ignore[attr-defined] + return cls.load(vocab=vocab, disable=disable, enable=enable, exclude=exclude, config=config) # type: ignore[attr-defined] def load_model_from_path( @@ -460,6 +471,7 @@ def load_model_from_path( meta: Optional[Dict[str, Any]] = None, vocab: Union["Vocab", bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": @@ -473,6 +485,8 @@ def load_model_from_path( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. All other + pipes will be disabled (and can be enabled using `nlp.enable_pipe`). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict @@ -487,7 +501,12 @@ def load_model_from_path( overrides = dict_to_dot(config) config = load_config(config_path, overrides=overrides) nlp = load_model_from_config( - config, vocab=vocab, disable=disable, exclude=exclude, meta=meta + config, + vocab=vocab, + disable=disable, + enable=enable, + exclude=exclude, + meta=meta, ) return nlp.from_disk(model_path, exclude=exclude, overrides=overrides) @@ -498,6 +517,7 @@ def load_model_from_config( meta: Dict[str, Any] = SimpleFrozenDict(), vocab: Union["Vocab", bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), auto_fill: bool = False, validate: bool = True, @@ -512,6 +532,8 @@ def load_model_from_config( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. 
diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx
index bcba9d03f5c..93f6818eeef 100644
--- a/spacy/vectors.pyx
+++ b/spacy/vectors.pyx
@@ -339,7 +339,7 @@ cdef class Vectors:
             return self.key2row.get(key, -1)
         elif keys is not None:
             keys = [get_string_id(key) for key in keys]
-            rows = [self.key2row.get(key, -1.) for key in keys]
+            rows = [self.key2row.get(key, -1) for key in keys]
             return xp.asarray(rows, dtype="i")
         else:
             row2key = {row: key for key, row in self.key2row.items()}
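The one-character `vectors.pyx` fix replaces the float default `-1.` with the integer `-1`, so a lookup miss no longer injects a float into an otherwise integral row list before the `dtype="i"` conversion. A toy illustration of the intended behaviour (the dict below is a stand-in for the internal `key2row` table, not the real implementation):

```python
import numpy

# Stand-in for Vectors.key2row: maps hash keys to row indices.
key2row = {1001: 0, 1002: 1}

# Missing keys fall back to the integer sentinel -1, keeping the list
# homogeneous; the old float default (-1.) mixed -1.0 into integer rows.
rows = [key2row.get(key, -1) for key in (1001, 9999)]
assert numpy.asarray(rows, dtype="i").tolist() == [0, -1]
```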
diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md
index 889c6437c9a..c96c571e97f 100644
--- a/website/docs/api/top-level.md
+++ b/website/docs/api/top-level.md
@@ -51,6 +51,7 @@ specified separately using the new `exclude` keyword argument.
 | _keyword-only_ |  |
 | `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
 | `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ |
+| `enable` | Names of pipeline components to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~List[str]~~ |
 | `exclude` <Tag variant="new">3</Tag> | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
 | `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
 | **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |
diff --git a/website/docs/usage/linguistic-features.md b/website/docs/usage/linguistic-features.md
index c547ec0bc72..9dae6f2ee20 100644
--- a/website/docs/usage/linguistic-features.md
+++ b/website/docs/usage/linguistic-features.md
@@ -1899,7 +1899,7 @@ access to some nice Latin vectors. You can then pass the directory path to
 > ```
 
 ```cli
-$ wget https://s3-us-west-1.amazonaws.com/fasttext-vectors/word-vectors-v2/cc.la.300.vec.gz
+$ wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.la.300.vec.gz
 $ python -m spacy init vectors en cc.la.300.vec.gz /tmp/la_vectors_wiki_lg
 ```
diff --git a/website/docs/usage/processing-pipelines.md b/website/docs/usage/processing-pipelines.md
index 4f75b5193a9..bd28810aec3 100644
--- a/website/docs/usage/processing-pipelines.md
+++ b/website/docs/usage/processing-pipelines.md
@@ -362,6 +362,18 @@ nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser"])
 nlp.enable_pipe("tagger")
 ```
 
+In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is
+set, all components except for those in `enable` are disabled.
+
+```python
+# Load the complete pipeline, but disable all components except tok2vec and tagger
+nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])
+# Has the same effect, since "ner" is already outside the enabled set
+nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"], disable=["ner"])
+# Raises an error, because the enabled and disabled sets conflict
+nlp = spacy.load("en_core_web_sm", enable=["ner"], disable=["ner"])
+```
+
 As of v3.0, the `disable` keyword argument specifies components to load but
diff --git a/website/meta/universe.json b/website/meta/universe.json
index b7f340f5296..9b644adf498 100644
--- a/website/meta/universe.json
+++ b/website/meta/universe.json
@@ -1,5 +1,28 @@
 {
     "resources": [
+        {
+            "id": "aim-spacy",
+            "title": "Aim-spaCy",
+            "slogan": "Aim-spaCy is an Aim-based spaCy experiment tracker.",
+            "description": "Aim-spaCy helps to easily collect, store and explore training logs for spaCy, including hyper-parameters, metrics and displaCy visualizations.",
+            "github": "aimhubio/aim-spacy",
+            "pip": "aim-spacy",
+            "code_example": [
+                "https://github.com/aimhubio/aim-spacy/tree/master/examples"
+            ],
+            "code_language": "python",
+            "url": "https://aimstack.io/spacy",
+            "thumb": "https://user-images.githubusercontent.com/13848158/172912427-ee9327ea-3cd8-47fa-8427-6c0d36cd831f.png",
+            "image": "https://user-images.githubusercontent.com/13848158/136364717-0939222c-55b6-44f0-ad32-d9ab749546e4.png",
+            "author": "AimStack",
+            "author_links": {
+                "twitter": "aimstackio",
+                "github": "aimhubio",
+                "website": "https://aimstack.io"
+            },
+            "category": ["visualizers"],
+            "tags": ["experiment-tracking", "visualization"]
+        },
     {
         "id": "spacy-report",
         "title": "spacy-report",