diff --git a/pycco/main.py b/pycco/main.py
index 6a3415e..416178d 100644
--- a/pycco/main.py
+++ b/pycco/main.py
@@ -244,7 +244,8 @@ def highlight(sections, language, preserve_paths=True, outdir=None):
     output = output.replace(highlight_start, "").replace(highlight_end, "")
     fragments = re.split(language["divider_html"], output)
     for i, section in enumerate(sections):
-        section["code_html"] = highlight_start + shift(fragments, "") + highlight_end
+        section["code_html"] = highlight_start + \
+            shift(fragments, "") + highlight_end
         try:
             docs_text = unicode(section["docs_text"])
         except UnicodeError:
@@ -280,7 +281,8 @@ def generate_html(source, sections, preserve_paths=True, outdir=None):
         csspath = path.relpath(path.join(outdir, "pycco.css"),
                                path.split(dest)[0])
     for sect in sections:
-        sect["code_html"] = re.sub(r"\{\{", r"__DOUBLE_OPEN_STACHE__", sect["code_html"])
+        sect["code_html"] = re.sub(
+            r"\{\{", r"__DOUBLE_OPEN_STACHE__", sect["code_html"])

     rendered = pycco_template({
         "title": title,
@@ -364,7 +366,8 @@

     # The mirror of `divider_text` that we expect Pygments to return. We can split
     # on this to recover the original sections.
-    l["divider_html"] = re.compile(r'\n*' + l["symbol"] + 'DIVIDER\n*')
+    l["divider_html"] = re.compile(
+        r'\n*' + l["symbol"] + 'DIVIDER\n*')

     # Get the Pygments Lexer for this language.
     l["lexer"] = lexers.get_lexer_by_name(l["name"])
@@ -438,7 +441,8 @@ def remove_control_chars(s):
     # Sanitization regexp copied from
     # http://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
    from pycco.compat import pycco_unichr
-    control_chars = ''.join(map(pycco_unichr, list(range(0, 32)) + list(range(127, 160))))
+    control_chars = ''.join(
+        map(pycco_unichr, list(range(0, 32)) + list(range(127, 160))))
     control_char_re = re.compile(u'[{}]'.format(re.escape(control_chars)))
     return control_char_re.sub('', s)

@@ -461,6 +465,23 @@ def ensure_directory(directory):
 highlight_end = "</pre></div>"


+def _flatten_sources(sources):
+    """
+    This function will iterate through the list of sources and if a directory
+    is encountered it will walk the tree for any files
+    """
+    _sources = []
+
+    for source in sources:
+        if os.path.isdir(source):
+            for dirpath, _, filenames in os.walk(source):
+                _sources.extend([os.path.join(dirpath, f) for f in filenames])
+        else:
+            _sources.append(source)
+
+    return _sources
+
+
 def process(sources, preserve_paths=True, outdir=None, language=None, encoding="utf8", index=False):
     """For each source file passed as argument, generate the documentation."""

@@ -469,7 +490,7 @@

     # Make a copy of sources given on the command line. `main()` needs the
     # original list when monitoring for changed files.
-    sources = sorted(sources)
+    sources = sorted(_flatten_sources(sources))

     # Proceed to generating the documentation.
     if sources:
diff --git a/tests/test_pycco.py b/tests/test_pycco.py
index 02ef008..6db8265 100644
--- a/tests/test_pycco.py
+++ b/tests/test_pycco.py
@@ -33,7 +33,8 @@ def test_shift(fragments, default):
 @given(text(), booleans(), text(min_size=1))
 @example("/foo", True, "0")
 def test_destination(filepath, preserve_paths, outdir):
-    dest = p.destination(filepath, preserve_paths=preserve_paths, outdir=outdir)
+    dest = p.destination(
+        filepath, preserve_paths=preserve_paths, outdir=outdir)
     assert dest.startswith(outdir)
     assert dest.endswith(".html")

@@ -65,12 +66,14 @@ def test_comment_with_only_cross_ref():
     source = '''# ==Link Target==\n\ndef test_link():\n    """[[testing.py#link-target]]"""\n    pass'''
     sections = p.parse(source, PYTHON)
     p.highlight(sections, PYTHON, outdir=tempfile.gettempdir())
-    assert sections[1]['docs_html'] == '<p><a href="testing.html#link-target">testing.py</a></p>'
+    assert sections[1][
+        'docs_html'] == '<p><a href="testing.html#link-target">testing.py</a></p>'


 @given(text(), text())
 def test_get_language_specify_language(source, code):
-    assert p.get_language(source, code, language="python") == p.languages['.py']
+    assert p.get_language(
+        source, code, language="python") == p.languages['.py']

     with pytest.raises(ValueError):
         p.get_language(source, code, language="non-existent")
@@ -99,7 +102,8 @@

 @given(text(max_size=64))
 def test_ensure_directory(dir_name):
-    tempdir = os.path.join(tempfile.gettempdir(), str(int(time.time())), dir_name)
+    tempdir = os.path.join(tempfile.gettempdir(),
+                           str(int(time.time())), dir_name)

     # Use sanitization from function, but only for housekeeping. We
     # pass in the unsanitized string to the function.
@@ -161,3 +165,26 @@ def test_generate_index(path_lists, outdir_list):
     file_paths = [os.path.join(*path_list) for path_list in path_lists]
     outdir = os.path.join(*outdir_list)
     generate_index.generate_index(file_paths, outdir=outdir)
+
+
+def test_flatten_sources(tmpdir):
+    sources = [str(tmpdir)]
+    expected_sources = []
+
+    # Setup the base dir
+    td = tmpdir.join("test.py")
+    td.write("#!/bin/env python")
+    expected_sources.append(str(td))
+
+    # Make some more directories, each with a file present
+    for d in ["foo", "bar", "buzz"]:
+        dd = tmpdir.mkdir(d)
+        dummy_file = dd.join("test.py")
+        dummy_file.write("#!/bin/env python")
+        expected_sources.append(str(dummy_file))
+
+    # Get the flattened version of the base directory
+    flattened = p._flatten_sources(sources)
+
+    # Make sure that the lists are the same
+    assert sorted(expected_sources) == sorted(flattened)