Skip to content

Commit

Permalink
Merge pull request #335 from koordinates/fix-highlighting
Browse files Browse the repository at this point in the history
Fix JSON syntax highlighting
  • Loading branch information
olsen232 authored Dec 16, 2020
2 parents d50a1ce + b2620ec commit 714078d
Show file tree
Hide file tree
Showing 3 changed files with 61 additions and 18 deletions.
45 changes: 27 additions & 18 deletions sno/output_util.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
import datetime
import json
import re
import shutil
import sys
import textwrap
import types

import pygments
from pygments.lexers import JsonLexer
from pygments.lexer import ExtendedRegexLexer, LexerContext

from .wkt_lexer import WKTLexer

Expand Down Expand Up @@ -68,7 +68,7 @@ def format_json_for_output(output, fp, json_style="pretty"):
Adds syntax highlighting if appropriate.
"""
if json_style == "pretty" and fp == sys.stdout and fp.isatty():
if json_style == "pretty" and can_output_colour(fp):
# Add syntax highlighting
dumped = json.dumps(output, **JSON_PARAMS[json_style])
return pygments.highlight(
Expand All @@ -79,14 +79,18 @@ def format_json_for_output(output, fp, json_style="pretty"):
return json.dumps(output, **JSON_PARAMS[json_style]) + "\n"


def can_output_colour(fp):
    """Return True when *fp* is the real stdout attached to a terminal, i.e. ANSI colour output is appropriate."""
    if fp != sys.stdout:
        return False
    return fp.isatty()


def format_wkt_for_output(output, fp=None, syntax_highlight=True):
"""
Formats WKT whitespace for readability.
Adds syntax highlighting if fp is a terminal and syntax_highlight=True.
Doesn't print the formatted WKT to fp, just returns it.
"""
token_iter = WKTLexer().get_tokens(output, pretty_print=True)
if syntax_highlight and fp == sys.stdout and fp.isatty():
if syntax_highlight and can_output_colour(fp):
return pygments.format(token_iter, get_terminal_formatter())
else:
token_value = (value for token_type, value in token_iter)
Expand Down Expand Up @@ -126,13 +130,24 @@ def wrap_text_to_terminal(text, indent=""):
return "".join(f"{indent}{line}\n" for line in lines)


class ExtendedJsonLexer(JsonLexer, ExtendedRegexLexer):
def _buffer_json_keys(chunk_generator):
"""
Inherits patterns from JsonLexer and get_tokens_unprocessed function from ExtendedRegexLexer.
get_tokens_unprocessed enables the lexer to lex incomplete chunks of json.
We can do chunk-by-chunk JSON highlighting, but only if we buffer everything that might be a key, so that:
{"key": value} can be treated differently to ["value", "value", "value", ...]
"""

pass
buf = None
for chunk in chunk_generator:
if buf is not None:
yield buf + chunk
buf = None
elif re.search(r"""["']\s*$""", chunk):
buf = chunk
else:
yield chunk

if buf is not None:
yield buf


def dump_json_output(output, output_path, json_style="pretty"):
Expand All @@ -141,21 +156,15 @@ def dump_json_output(output, output_path, json_style="pretty"):
"""
fp = resolve_output_path(output_path)

highlit = json_style == "pretty" and fp == sys.stdout and fp.isatty()
highlit = json_style == "pretty" and can_output_colour(fp)
json_encoder = ExtendedJsonEncoder(**JSON_PARAMS[json_style])
if highlit:
ex_json_lexer = ExtendedJsonLexer()
# The LexerContext stores the state of the lexer after each call to get_tokens_unprocessed
lexer_context = LexerContext("", 0)

for chunk in json_encoder.iterencode(output):
lexer_context.text = chunk
lexer_context.pos = 0
lexer_context.end = len(chunk)
json_lexer = JsonLexer()
for chunk in _buffer_json_keys(json_encoder.iterencode(output)):
token_generator = (
(token_type, value)
for (index, token_type, value) in ex_json_lexer.get_tokens_unprocessed(
context=lexer_context
for (index, token_type, value) in json_lexer.get_tokens_unprocessed(
chunk
)
)
fp.write(pygments.format(token_generator, get_terminal_formatter()))
Expand Down
12 changes: 12 additions & 0 deletions tests/test_diff.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import html5lib
import pytest

import sno
from sno.diff_structs import Delta, DeltaDiff
from sno.geometry import hex_wkb_to_ogr
from sno.repo import SnoRepo
Expand Down Expand Up @@ -1500,6 +1501,17 @@ def test_show_json_format(data_archive_readonly, cli_runner):
assert '"sno.diff/v1+hexwkb": {"' in r.stdout


def test_show_json_coloured(data_archive_readonly, cli_runner, monkeypatch):
    """`sno show -o json` with colour forced on should still emit parseable JSON output."""

    # PEP 8 (E731): use a def rather than assigning a lambda to a name.
    def always_output_colour(fp):
        return True

    monkeypatch.setattr(sno.output_util, "can_output_colour", always_output_colour)

    with data_archive_readonly("points"):
        # "-o" was previously written as f"-o" - a pointless f-string with no placeholders.
        r = cli_runner.invoke(["show", "-o", "json", "--json-style=pretty", "HEAD"])
        assert r.exit_code == 0, r.stderr
        # No asserts about colour codes - that would be system specific. Just a basic check:
        assert '"sno.diff/v1+hexwkb"' in r.stdout


@pytest.mark.parametrize(*V1_OR_V2)
def test_create_patch(repo_version, data_archive_readonly, cli_runner):
"""
Expand Down
22 changes: 22 additions & 0 deletions tests/test_meta.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import json
import pytest

import sno

EXPECTED_GCG_JSON = {
"column_name": "geom",
"geometry_type_name": "POINT",
Expand Down Expand Up @@ -137,3 +139,23 @@ def test_meta_get_ref(data_archive, cli_runner):
assert json.loads(r.stdout) == {
"nz_pa_points_topo_150k": {"title": "NZ Pa Points (Topo, 1:50k)"}
}


def test_meta_get_coloured(data_archive, cli_runner, monkeypatch):
    """`sno meta get -o json` with colour forced on should still emit parseable JSON output."""

    # PEP 8 (E731): use a def rather than assigning a lambda to a name.
    def always_output_colour(fp):
        return True

    monkeypatch.setattr(sno.output_util, "can_output_colour", always_output_colour)

    with data_archive("points2"):
        r = cli_runner.invoke(
            [
                "meta",
                "get",
                "--ref=HEAD^",
                "nz_pa_points_topo_150k",
                "-o",
                "json",
            ]
        )
        assert r.exit_code == 0, r.stderr
        # No asserts about colour codes - that would be system specific. Just a basic check:
        assert "nz_pa_points_topo_150k" in r.stdout

0 comments on commit 714078d

Please sign in to comment.