Update dev manual scripts for python3 #439

Open · wants to merge 5 commits into base: loader
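The changes below port the dev-manual generation scripts from Python 2 to Python 3. As a quick orientation, here is a minimal, self-contained sketch of the conversion patterns that recur throughout the diff; the values and paths in it are invented for illustration and do not come from the scripts themselves.

# Python 2 -> Python 3 patterns applied throughout this pull request (illustrative sketch).
import subprocess

# string.rstrip(s) is gone in Python 3; call the str method instead.
path = "lib/ps/doc.md \n"   # hypothetical path
clean = path.rstrip()

# filter() returns a lazy iterator in Python 3; wrap it in list() before indexing or reusing it.
parts = list(filter(lambda e: e != "", clean.split("/")))

# subprocess.check_output() returns bytes in Python 3; decode before doing string work.
out = subprocess.check_output(["echo", "hello"])
if isinstance(out, bytes):
    out = out.decode("utf-8")

# dict.has_key() was removed; membership tests use the `in` operator.
d = {"doc": "..."}
has_doc = "doc" in d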
195 changes: 130 additions & 65 deletions doc/dev_manual/resources/gen_blob_docs.py
@@ -5,74 +5,97 @@
import glob
import subprocess
import re
import string
import tempfile
import os
import copy

dep_script = "../../src/components/cidl/calculate_dependencies.py"
comp_base = "../../src/components/"
components = comp_base + "implementation/*/*/doc.md"
libraries = comp_base + "lib/*/doc.md"
interfaces = comp_base + "interface/*/doc.md"


def readfile(p):
with open(p) as f:
return f.read()
return ""


def remove_trailing_doc(wildcard_path):
ps = glob.glob(wildcard_path)
removed = []
for p in ps:
# remove the "doc.md" from the path
s = re.split("/", string.rstrip(p))
s = re.split("/", p.rstrip())
pathified = "/".join(s[0:-1]) + "/"
removed.append(pathified)
return removed


def filter_out_skel(p):
s = re.split("/", string.rstrip(p))
s = filter(lambda e: e != '', s)
return not(s[-1] == "skel" or s[-2] == "skel") and not(s[-2] == "archives")
s = re.split("/", p.rstrip())
s = list(filter(lambda e: e != "", s))
return not (s[-1] == "skel" or s[-2] == "skel") and not (s[-2] == "archives")

cs = filter(filter_out_skel, remove_trailing_doc(components))
ls = filter(filter_out_skel, remove_trailing_doc(libraries))
ifs = filter(filter_out_skel, remove_trailing_doc(interfaces))


def unique(l):
return list(set(l))


# listify the output
def clean_dep_output(lst):
return filter(lambda d: d != '', unique(re.split(" ", string.rstrip(lst))))
if isinstance(lst, bytes):
lst = lst.decode("utf-8")
return filter(lambda d: d != "", unique(re.split(" ", lst.rstrip())))


def gather_deps(path):
libdeps = clean_dep_output(subprocess.check_output(['python', dep_script, path, comp_base, 'shallowlibdeps']))
ifdeps = clean_dep_output(subprocess.check_output(['python', dep_script, path, comp_base, 'shallowifdeps']))
ifexps = clean_dep_output(subprocess.check_output(['python', dep_script, path, comp_base, 'shallowifexps']))
libdeps = clean_dep_output(
subprocess.check_output(
["python", dep_script, path, comp_base, "shallowlibdeps"]
)
)
ifdeps = clean_dep_output(
subprocess.check_output(
["python", dep_script, path, comp_base, "shallowifdeps"]
)
)
ifexps = clean_dep_output(
subprocess.check_output(
["python", dep_script, path, comp_base, "shallowifexps"]
)
)
return (libdeps, ifdeps, ifexps)


def comp_name(p):
s = re.split("/", string.rstrip(p))
s = filter(lambda e: e != '', s)
s = re.split("/", p.rstrip())
s = list(filter(lambda e: e != "", s))
return s[-2] + "." + s[-1]


def lib_or_if_name(p):
s = re.split("/", string.rstrip(p))
s = filter(lambda e: e != '', s)
s = re.split("/", p.rstrip())
s = list(filter(lambda e: e != "", s))
return s[-1]


compdeps = {}
libdeps = {}
ifdeps = {}

for c in cs:
compdeps[comp_name(c)] = { "deps": gather_deps(c), "type": "component", "path":c }
compdeps[comp_name(c)] = {"deps": gather_deps(c), "type": "component", "path": c}
for l in ls:
libdeps[lib_or_if_name(l)] = { "deps": gather_deps(l), "type": "library", "path":l }
libdeps[lib_or_if_name(l)] = {"deps": gather_deps(l), "type": "library", "path": l}
for i in ifs:
ifdeps[lib_or_if_name(i)] = { "deps": gather_deps(i), "type": "interface", "path":i }
ifdeps[lib_or_if_name(i)] = {"deps": gather_deps(i), "type": "interface", "path": i}

header = """digraph blob_dependencies {
/* label = "Component Software Dependencies" ; */
@@ -88,14 +111,34 @@ def lib_or_if_name(p):
"""
footer = "}"


def gen_node(n, typestr):
fontattr = "fontname=\"Sans serif\""
if typestr == "component":
return "\"" + n + "\" [shape=hexagon,style=filled,fillcolor=lightblue," + fontattr + "] ;\n"
fontattr = 'fontname="Sans serif"'
if typestr == "component":
return (
'"'
+ n
+ '" [shape=hexagon,style=filled,fillcolor=lightblue,'
+ fontattr
+ "] ;\n"
)
elif typestr == "library":
return "\"" + n + "\" [shape=oval,style=filled,fillcolor=gray82," + fontattr + "] ;\n"
return (
'"'
+ n
+ '" [shape=oval,style=filled,fillcolor=gray82,'
+ fontattr
+ "] ;\n"
)
elif typestr == "interface":
return "\"" + n + "\" [shape=rectangle,style=filled,fillcolor=lightsteelblue," + fontattr + "] ;\n"
return (
'"'
+ n
+ '" [shape=rectangle,style=filled,fillcolor=lightsteelblue,'
+ fontattr
+ "] ;\n"
)


def gen_graphviz(blobs):
for (b, d) in blobs.items():
@@ -107,25 +150,37 @@ def gen_graphviz(blobs):
(libs, ifdeps, ifexps) = d["deps"]
for l in libs:
nodes.append(gen_node(l, "library"))
edges += "\"" + b + "\" -> \"" + l + "\" ;\n"
edges += '"' + b + '" -> "' + l + '" ;\n'
for ifd in ifdeps:
nodes.append(gen_node(ifd, "interface"))
edges += "\"" + b + "\" -> \"" + ifd + "\" ;\n"
edges += '"' + b + '" -> "' + ifd + '" ;\n'
for ife in ifexps:
nodes.append(gen_node(ife + "\n(export)", "interface"))
edges += "\"" + ife + "\n(export)\" -> \"" + b + "\" [fontname=\"Sans serif\",style=dashed] ;\n"

(gffd, gfpath) = tempfile.mkstemp(suffix=".gf", prefix="cos_docgen_" + b);
nodes_str = "".join(unique(nodes)) # rely on unique to simplify this logic
os.write(gffd, header + nodes_str + edges + footer)
d["gf"] = gfpath;
d["gffd"] = gffd;

(pdffd, pdfpath) = tempfile.mkstemp(suffix=".pdf", prefix="cos_docgen_" + b.replace(".", "_"));
subprocess.call(['dot', '-Tpdf', gfpath], stdout=pdffd)
d["pdf"] = pdfpath
edges += (
'"'
+ ife
+ '\n(export)" -> "'
+ b
+ '" [fontname="Sans serif",style=dashed] ;\n'
)

(gffd, gfpath) = tempfile.mkstemp(suffix=".gf", prefix="cos_docgen_" + b)
nodes_str = "".join(unique(nodes)) # rely on unique to simplify this logic
content = header + nodes_str + edges + footer
if isinstance(content, str):
content = content.encode("utf-8")
os.write(gffd, content)
d["gf"] = gfpath
d["gffd"] = gffd

(pdffd, pdfpath) = tempfile.mkstemp(
suffix=".pdf", prefix="cos_docgen_" + b.replace(".", "_")
)
subprocess.call(["dot", "-Tpdf", gfpath], stdout=pdffd)
d["pdf"] = pdfpath
d["pdffd"] = pdffd


gen_graphviz(compdeps)
gen_graphviz(libdeps)
gen_graphviz(ifdeps)
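For reference, a small standalone sketch (component and library names invented) of the Graphviz source that gen_node() and the edge strings above assemble before gen_graphviz() pipes it through dot -Tpdf:

# Hypothetical names; shapes and colors mirror gen_node() above.
b = "capmgr.simple"   # a component (hexagon)
dep = "kernel"        # a library it depends on (oval)
dot_src = (
    "digraph blob_dependencies {\n"
    + '"' + b + '" [shape=hexagon,style=filled,fillcolor=lightblue,fontname="Sans serif"] ;\n'
    + '"' + dep + '" [shape=oval,style=filled,fillcolor=gray82,fontname="Sans serif"] ;\n'
    + '"' + b + '" -> "' + dep + '" ;\n'
    + "}"
)
print(dot_src)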
@@ -152,7 +207,7 @@ def gen_graphviz(blobs):
#
# This assumes and leverages the style prescribed by the CSG.

library_headers = comp_base + "lib/*/*.h"
interface_headers = comp_base + "interface/*/*.h"
### Auto-gen the documentation for the interfaces and libraries
def gen_doc(header):
@@ -166,8 +221,8 @@ def gen_doc(header):
state = "scanning"

def de_comment(line):
l = re.sub(r'^ \*', r'', line) # get rid of the leading " *"
if len(l) > 0 and l[0] == ' ':
l = re.sub(r"^ \*", r"", line) # get rid of the leading " *"
if len(l) > 0 and l[0] == " ":
return l[1:]
return l

@@ -182,13 +237,13 @@ def commit_proto(proto, comment):
in_comment = False
for l in re.split("\n", header):
# First, filter out # directives and comments
if re.match('^#.*', l) != None:
continue # skip #endif/#define etc...
c_single = re.match(r'(.*)//.*', l) # remove text following //
if re.match("^#.*", l) != None:
continue # skip #endif/#define etc...
c_single = re.match(r"(.*)//.*", l) # remove text following //
if c_single != None:
l = c_single.group(0)
c_start = re.match(r'(.*)/\*[^\*].*', l) # remove all after /*
c_end = re.match(r'.*\*/(.*)', l) # leave only code after */
c_start = re.match(r"(.*)/\*[^\*].*", l) # remove all after /*
c_end = re.match(r".*\*/(.*)", l) # leave only code after */
if not in_comment and c_start != None:
in_comment = True
l = c_start.group(0)
@@ -210,13 +265,13 @@ def commit_proto(proto, comment):
comment = ""
state = "header comment"
elif state == "comment":
if (l == " */"):
if l == " */":
state = "prototypes"
else:
l = de_comment(l)
comment += l + "\n"
elif state == "header comment":
if (l == " */"):
if l == " */":
state = "scanning"
else:
l = de_comment(l)
@@ -229,7 +284,7 @@ def commit_proto(proto, comment):
comment = ""
state = "comment"
elif p != None:
proto += p.group(0) + "\n"
elif l == "{":
proto += "{ ... "
state = "function body"
@@ -243,23 +298,24 @@ def commit_proto(proto, comment):
elif state == "function body":
if l == "}":
proto += "}\n"
state = "prototypes"

doc += commit_proto(proto,comment)
doc += commit_proto(proto, comment)

return doc_header + header_comment + doc


for (b, d) in ifdeps.items():
path = d["path"] + b + ".h"
path = d["path"] + b + ".h"
contents = readfile(path)
doc = gen_doc(contents)
d["doc"] = doc;
doc = gen_doc(contents)
d["doc"] = doc

for (b, d) in libdeps.items():
path = d["path"] + b + ".h"
path = d["path"] + b + ".h"
contents = readfile(path)
doc = gen_doc(contents)
d["doc"] = doc;
doc = gen_doc(contents)
d["doc"] = doc

# At this point, we have all of the pdfs generated for the component
# dependencies, and need to simply generate the markdown that links to
@@ -269,17 +325,26 @@ def gen_md(header, blobs):
output = "\n" + copy.copy(header) + "\n\n"
for (b, d) in blobs.items():
output += readfile(d["path"] + "doc.md")
output += "\n### Dependencies and Exports\n\n![Exports and dependencies for " + b + ". Teal hexagons are *component* implementations, slate rectangles are *interfaces*, and gray ellipses are *libraries*. Dotted lines denote an *export* relation, and solid lines denote a *dependency*.](" + d["pdf"] + ")\n\n"
if d.has_key("doc"):
output += (
"\n### Dependencies and Exports\n\n![Exports and dependencies for "
+ b
+ ". Teal hexagons are *component* implementations, slate rectangles are *interfaces*, and gray ellipses are *libraries*. Dotted lines denote an *export* relation, and solid lines denote a *dependency*.]("
+ d["pdf"]
+ ")\n\n"
)
if "doc" in d:
output += d["doc"] + "\n\n"
return output

comphead=readfile("./resources/component_doc.md")
ifhead =readfile("./resources/interface_doc.md")
libhead =readfile("./resources/lib_doc.md")

comphead = readfile("./resources/component_doc.md")
ifhead = readfile("./resources/interface_doc.md")
libhead = readfile("./resources/lib_doc.md")

md = gen_md(comphead, compdeps) + gen_md(ifhead, ifdeps) + gen_md(libhead, libdeps)
(mdfd, mdpath) = tempfile.mkstemp(suffix=".md", prefix="cos_docgen_per-blob");
if isinstance(md, str):
md = md.encode("utf-8")
(mdfd, mdpath) = tempfile.mkstemp(suffix=".md", prefix="cos_docgen_per-blob")
os.write(mdfd, md)

print(mdpath)
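The isinstance()/encode() guards added in this file reflect Python 3's bytes/str split: os.write() only accepts bytes, while the assembled Graphviz and markdown text are str. A minimal standalone sketch of the same pattern, with hypothetical content and temp-file prefix:

import os
import tempfile

content = "# generated markdown\n"     # hypothetical document text (str)
if isinstance(content, str):           # same guard the script uses
    content = content.encode("utf-8")  # os.write() requires bytes in Python 3
(fd, path) = tempfile.mkstemp(suffix=".md", prefix="example_docgen_")
os.write(fd, content)
os.close(fd)
print(path)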
15 changes: 9 additions & 6 deletions doc/dev_manual/resources/gen_toc.py
@@ -2,18 +2,21 @@

import glob
import re
import string

chapters = sorted(glob.glob("[0-9][0-9]_*.md"))
version = glob.glob("composite_dev_manual_v*.pdf")[0]

header = "# Composite Developer Manual\n\nPlease see the [pdf](./" + version + ") for a nicely rendered version of this manual.\n\n## Table of Contents\n\n"
header = (
"# Composite Developer Manual\n\nPlease see the [pdf](./"
+ version
+ ") for a nicely rendered version of this manual.\n\n## Table of Contents\n\n"
)

toc = ""
for c in chapters:
underscores = re.split("\.", string.rstrip(c))[0]
word_list = re.split("_", underscores)[1:]
words = " ".join(word_list)
underscores = re.split("\.", c.rstrip())[0]
word_list = re.split("_", underscores)[1:]
words = " ".join(word_list)
toc += "- [" + words.capitalize() + "](./" + c + ")\n"

print(header + toc)
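For context, a short sketch of how the loop above turns one chapter filename into a table-of-contents entry; the filename is invented for illustration:

import re

c = "03_getting_started.md"                        # hypothetical chapter file
underscores = re.split(r"\.", c.rstrip())[0]       # "03_getting_started"
words = " ".join(re.split("_", underscores)[1:])   # "getting started"
print("- [" + words.capitalize() + "](./" + c + ")")
# prints: - [Getting started](./03_getting_started.md)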