
Update parso to 0.2.1 #1841

Merged: 4 commits, Jun 5, 2018
1 change: 1 addition & 0 deletions news/2 Fixes/1721.md
@@ -0,0 +1 @@
Fix for IntelliSense failing when using the new `Outline` feature.
1 change: 1 addition & 0 deletions news/3 Code Health/1833.md
@@ -0,0 +1 @@
Update `parso` package to 0.2.1.
2 changes: 1 addition & 1 deletion pythonFiles/parso/__init__.py
@@ -43,7 +43,7 @@
from parso.utils import split_lines, python_bytes_to_unicode


__version__ = '0.2.0'
__version__ = '0.2.1'


def parse(code=None, **kwargs):
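
For orientation, a minimal sketch (not part of this PR) of checking the vendored version and using the top-level parse() wrapper shown above:

    import parso

    # The vendored copy under pythonFiles/parso should now report 0.2.1.
    print(parso.__version__)

    # parse() is a convenience wrapper around load_grammar(...).parse(...).
    module = parso.parse('x = 1\n')
    print(module.children)  # statement nodes followed by an EndMarker
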
7 changes: 4 additions & 3 deletions pythonFiles/parso/grammar.py
@@ -20,7 +20,7 @@ class Grammar(object):
"""
:py:func:`parso.load_grammar` returns instances of this class.

Creating custom grammars by calling this is not supported, yet.
Creating custom non-Python grammars by calling this is not supported yet.
"""
#:param text: A BNF representation of your grammar.
_error_normalizer_config = None
@@ -219,12 +219,13 @@ def load_grammar(**kwargs):
version.

:param str version: A python version string, e.g. ``version='3.3'``.
:param str path: A path to a grammar file
"""
def load_grammar(language='python', version=None):
def load_grammar(language='python', version=None, path=None):
if language == 'python':
version_info = parse_version_string(version)

file = os.path.join(
file = path or os.path.join(
'python',
'grammar%s%s.txt' % (version_info.major, version_info.minor)
)
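
A hedged sketch of the new path keyword in use; the version string and grammar-file path below are illustrative, not taken from this PR:

    import parso

    # Load the bundled grammar for a given Python version.
    grammar = parso.load_grammar(version='3.6')

    # With this change a grammar file can also be supplied directly;
    # the path here is hypothetical.
    custom = parso.load_grammar(path='/tmp/custom_grammar.txt')
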
5 changes: 3 additions & 2 deletions pythonFiles/parso/python/diff.py
@@ -490,6 +490,9 @@ def _copy_nodes(self, tos, nodes, until_line, line_offset):

new_tos = tos
for node in nodes:
if node.start_pos[0] > until_line:
break

if node.type == 'endmarker':
# We basically removed the endmarker, but we are not allowed to
# remove the newline at the end of the line, otherwise it's
@@ -501,8 +504,6 @@ def _copy_nodes(self, tos, nodes, until_line, line_offset):
# Endmarkers just distort all the checks below. Remove them.
break

if node.start_pos[0] > until_line:
break
# TODO this check might take a bit of time for large files. We
# might want to change this to do more intelligent guessing or
# binary search.
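
This reordering lives in parso's diff (incremental) parser, which copies unchanged nodes up to until_line when a file is re-parsed after an edit; checking the line bound before the endmarker handling keeps nodes past the copy boundary from being processed at all. A rough sketch of exercising the diff parser through the public diff_cache keyword, assuming it behaves as in parso's documented API (the file path is hypothetical):

    import parso

    grammar = parso.load_grammar()

    # First parse fills the in-memory cache for this path.
    grammar.parse('def f():\n    return 1\n',
                  path='example.py', diff_cache=True)

    # Re-parsing an edited version routes through the diff parser,
    # which copies unchanged nodes up to the changed line.
    grammar.parse('def f():\n    return 2\n',
                  path='example.py', diff_cache=True)
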
176 changes: 0 additions & 176 deletions pythonFiles/parso/python/issue_list.txt

This file was deleted.

21 changes: 14 additions & 7 deletions pythonFiles/parso/python/tokenize.py
@@ -28,7 +28,8 @@

TokenCollection = namedtuple(
'TokenCollection',
'pseudo_token single_quoted triple_quoted endpats fstring_pattern_map always_break_tokens',
'pseudo_token single_quoted triple_quoted endpats whitespace '
'fstring_pattern_map always_break_tokens',
)

BOM_UTF8_STRING = BOM_UTF8.decode('utf-8')
@@ -114,6 +115,7 @@ def _create_token_collection(version_info):
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
whitespace = _compile(Whitespace)
Comment = r'#[^\r\n]*'
Name = r'\w+'

@@ -225,7 +227,7 @@ def _create_token_collection(version_info):
pseudo_token_compiled = _compile(PseudoToken)
return TokenCollection(
pseudo_token_compiled, single_quoted, triple_quoted, endpats,
fstring_pattern_map, ALWAYS_BREAK_TOKENS
whitespace, fstring_pattern_map, ALWAYS_BREAK_TOKENS
)


@@ -244,7 +246,7 @@ def _get_type_name(self, exact=True):
return tok_name[self.type]

def __repr__(self):
return ('TokenInfo(type=%s, string=%r, start=%r, prefix=%r)' %
return ('TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)' %
self._replace(type=self._get_type_name()))


@@ -354,7 +356,8 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):
token. This idea comes from lib2to3. The prefix contains all information
that is irrelevant for the parser like newlines in parentheses or comments.
"""
pseudo_token, single_quoted, triple_quoted, endpats, fstring_pattern_map, always_break_tokens, = \
pseudo_token, single_quoted, triple_quoted, endpats, whitespace, \
fstring_pattern_map, always_break_tokens, = \
_get_token_collection(version_info)
paren_level = 0 # count parentheses
indents = [0]
@@ -435,10 +438,14 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)):

pseudomatch = pseudo_token.match(line, pos)
if not pseudomatch: # scan for tokens
txt = line[pos:]
if txt.endswith('\n'):
if line.endswith('\n'):
new_line = True
yield PythonToken(ERRORTOKEN, txt, (lnum, pos), additional_prefix)
match = whitespace.match(line, pos)
pos = match.end()
yield PythonToken(
ERRORTOKEN, line[pos:], (lnum, pos),
additional_prefix + match.group(0)
)
additional_prefix = ''
break

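The net effect: when no token pattern matches, leading whitespace is now consumed by the newly compiled whitespace regex and attached to the error token's prefix instead of its string value. A small sketch, assuming (as in CPython's tokenizer) that '$' falls outside the pseudo-token pattern:

    from parso.python.tokenize import tokenize
    from parso.utils import parse_version_string

    # '$' produces an ERRORTOKEN; with this change the space before it
    # lands in the token's prefix rather than in its value.
    for token in tokenize('1 $ 2\n', parse_version_string('3.6')):
        print(token)
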
8 changes: 5 additions & 3 deletions pythonFiles/parso/python/tree.py
@@ -60,7 +60,6 @@
_IMPORTS = set(['import_name', 'import_from'])



class DocstringMixin(object):
__slots__ = ()

@@ -133,7 +132,6 @@ def get_start_pos_of_prefix(self):
return previous_leaf.end_pos



class _LeafWithoutNewlines(PythonLeaf):
"""
Simply here to optimize performance.
@@ -166,6 +164,10 @@ class EndMarker(_LeafWithoutNewlines):
__slots__ = ()
type = 'endmarker'

@utf8_repr
def __repr__(self):
return "<%s: prefix=%s>" % (type(self).__name__, repr(self.prefix))


class Newline(PythonLeaf):
"""Contains NEWLINE and ENDMARKER tokens."""
@@ -235,7 +237,6 @@ def get_definition(self, import_name_always=False):
return None



class Literal(PythonLeaf):
__slots__ = ()

@@ -653,6 +654,7 @@ def annotation(self):
except IndexError:
return None


class Lambda(Function):
"""
Lambdas are basically trimmed functions, so give it the same interface.