Skip to content

Commit

Permalink
scripts: cogeno: edts: add extended DTS database module
Browse files Browse the repository at this point in the history
The Extended Device Tree Specification database collates
device tree (dts) information with information taken from
the device tree bindings.

The EDTS database may be loaded from a json file, stored
to a json file or extracted from the DTS files and the
bindings yaml files.

The database is integrated into cogeno as a module.

The commit integrates database development done in zephyrproject-rtos#9876
which was based on zephyrproject-rtos#6762.

Major differences/ improvements to zephyrproject-rtos#9876 are:
- the database now has an own extraction function that
  can be used instead of e.g. extract_dts_includes.
  The extraction function follows the design of
  the extract_dts_includes script and the additions that
  were done in zephyrproject-rtos#9876. It is restructured and several globals
  are now classes and objects. All functionality of
  extract_dts_includes related to the generation of defines
is not part of the database extract function. Its sole
  purpose is to fill the database directly from the compiled
  DTS file.
- the database got its own directory 'edtsdb' to structure
  all files related to the database.
- The EDTSDevice class from zephyrproject-rtos#9876 was enhanced to allow
  devices to access the database they are taken from.

Major compatibility issues compared to zephyrproject-rtos#9876:
- The consumer, provider API and the internal structure
  of the database is copied from zephyrproject-rtos#9876.
  - API should be fully compatible.
  - Extraction of children was replaced as it broke the
    concept of the devices struct as a list of devices.
    The functions device.get_children() and
    device.get_parent() may be used to access the
    parent<->child relation.
- The EDTSDevice class is copied from zephyrproject-rtos#9876.
  - The device API should be compatible except for
    - the constructor which now needs the EDTS database and
    - the unique id feature. To get a unique id the
      device.get_name() function can be used instead.

Signed-off-by lines from zephyrproject-rtos#9876 added to attribute the
changes done there and copied.

Signed-off-by: Erwan Gouriou <erwan.gouriou@linaro.org>
Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
Signed-off-by: Bobby Noelte <b0661n0e17e@gmail.com>
  • Loading branch information
b0661 committed Feb 22, 2019
1 parent 8d17c4c commit a584005
Show file tree
Hide file tree
Showing 38 changed files with 4,741 additions and 0 deletions.
7 changes: 7 additions & 0 deletions scripts/cogeno/cogeno/modules/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#
# Copyright (c) 2017 Bobby Noelte
#
# SPDX-License-Identifier: Apache-2.0
#

# Empty to allow all modules to be imported
104 changes: 104 additions & 0 deletions scripts/cogeno/cogeno/modules/edtsdatabase.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
#!/usr/bin/env python3
#
# Copyright (c) 2018 Bobby Noelte
# Copyright (c) 2018 Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
#

import sys
import argparse
from pathlib import Path
from pprint import pprint

##
# Make relative import work also with __main__
if __package__ is None or __package__ == '':
# use current directory visibility
from edtsdb.database import EDTSDb
else:
# use current package visibility
from .edtsdb.database import EDTSDb


##
# @brief Extended DTS database
#
class EDTSDatabase(EDTSDb):
    """Extended Device Tree Specification database with a CLI.

    Wraps the EDTSDb consumer/provider API and adds command line
    handling so the database can be loaded from a JSON file, extracted
    from a compiled DTS file plus bindings directories, saved to JSON,
    exported to a C header, or printed for inspection.
    """

    @staticmethod
    def is_valid_directory(parser, arg):
        """Validate that *arg* names an existing directory.

        :param parser: argparse.ArgumentParser used to report errors
        :param arg: directory path from the command line
        :return: the (resolved, if possible) directory path as a string
        """
        try:
            path = Path(arg).resolve()
        except (OSError, RuntimeError):
            # resolve() may fail e.g. on dangling symbolic links; fall
            # back to the unresolved path for the existence check.
            path = Path(arg)
        if not path.is_dir():
            # parser.error() exits the program.
            parser.error('The directory {} does not exist!'.format(path))
        else:
            # Directory exists so return the directory
            return str(path)

    @staticmethod
    def is_valid_file(parser, arg):
        """Validate that *arg* names an existing file.

        :param parser: argparse.ArgumentParser used to report errors
        :param arg: file path from the command line
        :return: the (resolved, if possible) file path as a string
        """
        try:
            path = Path(arg).resolve()
        except (OSError, RuntimeError):
            path = Path(arg)
        if not path.is_file():
            # parser.error() exits the program.
            parser.error('The file {} does not exist!'.format(path))
        else:
            # File exists so return the file
            return str(path)

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def callable_main(self, args):
        """Run the database command line interface.

        :param args: command line arguments (without the program name)
        :return: 0 on success (errors exit via the argument parser)
        """
        self._parser = argparse.ArgumentParser(
            description='Extended Device Tree Specification Database.')
        self._parser.add_argument('-l', '--load', nargs=1, metavar='FILE',
            dest='load_file', action='store',
            type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
            help='Load the input from FILE.')
        # FIX: --save and --export-header name OUTPUT files which may not
        # exist yet, so they must not be validated with is_valid_file.
        self._parser.add_argument('-s', '--save', nargs=1, metavar='FILE',
            dest='save_file', action='store',
            help='Save the database to Json FILE.')
        self._parser.add_argument('-i', '--export-header', nargs=1,
            metavar='FILE',
            dest='export_header', action='store',
            help='Export the database to header FILE.')
        self._parser.add_argument('-e', '--extract', nargs=1, metavar='FILE',
            dest='extract_file', action='store',
            type=lambda x: EDTSDatabase.is_valid_file(self._parser, x),
            help='Extract the database from dts FILE.')
        self._parser.add_argument('-b', '--bindings', nargs='+', metavar='DIR',
            dest='bindings_dirs', action='store',
            type=lambda x: EDTSDatabase.is_valid_directory(self._parser, x),
            help='Use bindings from bindings DIR for extraction.' +
                 ' We allow multiple')
        self._parser.add_argument('-p', '--print',
            dest='print_it', action='store_true',
            help='Print EDTS database content.')

        args = self._parser.parse_args(args)

        # Order matters: load and/or extract first, then save/export/print.
        if args.load_file is not None:
            self.load(args.load_file[0])
        if args.extract_file is not None:
            self.extract(args.extract_file[0], args.bindings_dirs)
        if args.save_file is not None:
            self.save(args.save_file[0])
        if args.export_header is not None:
            self.export_header(args.export_header[0])
        if args.print_it:
            pprint(self._edts)

        return 0

def main():
    """Script entry point: run the EDTS database CLI on sys.argv."""
    database = EDTSDatabase()
    database.callable_main(sys.argv[1:])

if __name__ == '__main__':
main()

7 changes: 7 additions & 0 deletions scripts/cogeno/cogeno/modules/edtsdb/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#
# Copyright (c) 2017 Bobby Noelte
#
# SPDX-License-Identifier: Apache-2.0
#

# Empty to allow all modules to be imported
208 changes: 208 additions & 0 deletions scripts/cogeno/cogeno/modules/edtsdb/binder.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,208 @@
#!/usr/bin/env python3
#
# Copyright (c) 2017, Linaro Limited
# Copyright (c) 2018, Bobby Noelte
#
# SPDX-License-Identifier: Apache-2.0
#

import os, fnmatch
import re
import yaml
from collections.abc import Mapping

from pathlib import Path

class Binder(yaml.Loader):
    """YAML loader for device tree binding files.

    Extends ``yaml.Loader`` with the custom ``!include`` and ``!import``
    tags used by binding files, detects circular inclusion, and provides
    class methods to collect binding files for a set of compatibles and
    to collapse their ``inherits`` sections into a single dict.
    """

    ##
    # List of all yaml files available for yaml loaders
    # of this class. Must be preset before the first
    # load operation.
    _files = []

    ##
    # Files that are already included.
    # Must be reset on the load of every new binding.
    _included = []

    @classmethod
    def _merge_binding_dicts(cls, parent, fname, dct, merge_dct):
        # from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9

        """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
        updating only top-level keys, _merge_binding_dicts recurses down into dicts nested
        to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
        ``dct``.
        :param parent: parent tuple key
        :param fname: yaml file being processed
        :param dct: dict onto which the merge is executed
        :param merge_dct: dct merged into dct
        :return: None
        """
        for k, v in merge_dct.items():
            if (k in dct and isinstance(dct[k], dict)
                    and isinstance(merge_dct[k], Mapping)):
                Binder._merge_binding_dicts(k, fname, dct[k], merge_dct[k])
            else:
                if k in dct and dct[k] != merge_dct[k]:
                    # type, title, description, version of inherited node
                    # are overwritten by intention.
                    if k == 'type':
                        # collect types into a list instead of overwriting
                        if not isinstance(dct['type'], list):
                            dct['type'] = [dct['type'], ]
                        if isinstance(merge_dct['type'], list):
                            dct['type'].extend(merge_dct['type'])
                        else:
                            dct['type'].append(merge_dct['type'])
                        continue
                    elif k in ("title", "version", "description"):
                        # do not warn
                        pass
                    elif (k == "category") and (merge_dct[k] == "required") \
                            and (dct[k] == "optional"):
                        # do not warn - tightening optional to required is ok
                        pass
                    else:
                        print("binder.py: {}('{}') merge of property '{}': "
                              "'{}' overwrites '{}'."
                              .format(fname, parent, k, merge_dct[k], dct[k]))
                dct[k] = merge_dct[k]

    @classmethod
    def _traverse_inherited(cls, fname, node):
        """ Recursive overload procedure inside ``node``
        ``inherits`` section is searched for and used as node base when found.
        Base values are then overloaded by node values
        and some consistency checks are done.
        :param fname: initial yaml file being processed
        :param node: binding dict to collapse
        :return: node with all 'inherits' sections merged in
        """

        # do some consistency checks. Especially id is needed for further
        # processing. title must be first to check.
        if 'title' not in node:
            # If 'title' is missing, make fault finding more easy.
            # Give a hint what node we are looking at.
            # FIX: the original passed a '{}' placeholder without calling
            # str.format(), printing the literal braces.
            print("binder.py: {} node without 'title' -".format(fname), node)
        for prop in ('title', 'version', 'description'):
            if prop not in node:
                node[prop] = "<unknown {}>".format(prop)
                print("binder.py: {} WARNING:".format(fname),
                      "'{}' property missing in".format(prop),
                      "'{}' binding. Using '{}'.".format(node['title'],
                                                         node[prop]))

        # warn if we have an 'id' field
        if 'id' in node:
            print("binder.py: {} WARNING: id field set".format(fname),
                  "in '{}', should be removed.".format(node['title']))

        if 'inherits' in node:
            if isinstance(node['inherits'], list):
                inherits_list = node['inherits']
            else:
                inherits_list = [node['inherits'], ]
            node.pop('inherits')
            for inherits in inherits_list:
                # collapse nested inheritance first
                if 'inherits' in inherits:
                    inherits = cls._traverse_inherited(fname, inherits)
                # the inherited node is the base; the current node's
                # values win on conflict
                cls._merge_binding_dicts(None, fname, inherits, node)
                node = inherits
        return node

    ##
    # @brief Get bindings for given compatibles.
    #
    # @param compatibles dict mapping nodes to compatible string(s)
    # @param bindings_paths directories to search for binding files
    # @return dictionary of bindings found, keyed by compatible
    @classmethod
    def bindings(cls, compatibles, bindings_paths):
        # find unique set of compatibles across all active nodes
        s = set()
        for k, v in compatibles.items():
            if isinstance(v, list):
                for item in v:
                    s.add(item)
            else:
                s.add(v)

        # scan YAML files and find the ones we are interested in
        # We add our own bindings directory first (highest priority)
        # We only allow one binding file with the same name
        bindings_paths.insert(0, Path(Path(__file__).resolve().parent,
                                      'bindings'))
        cls._files = []
        binding_files = []
        for path in bindings_paths:
            for root, dirnames, filenames in os.walk(str(path)):
                for filename in fnmatch.filter(filenames, '*.yaml'):
                    if not filename in binding_files:
                        binding_files.append(filename)
                        cls._files.append(os.path.join(root, filename))

        bindings_list = {}
        file_load_list = set()
        for file in cls._files:
            # FIX: use a context manager so the scan handle is closed
            # (the original leaked it via 'for line in open(...)').
            with open(file, 'r', encoding='utf-8') as scan_f:
                for line in scan_f:
                    # raw string avoids the invalid '\s' escape warning
                    if re.search(r'^\s+constraint:*', line):
                        c = line.split(':')[1].strip()
                        c = c.strip('"')
                        if c in s:
                            if file not in file_load_list:
                                file_load_list.add(file)
                                with open(file, 'r', encoding='utf-8') as yf:
                                    cls._included = []
                                    # collapse the bindings inherited
                                    # information before return
                                    bindings_list[c] = cls._traverse_inherited(
                                        file, yaml.load(yf, cls))

        return bindings_list

    def __init__(self, stream):
        # Track the real path of every opened file to detect cycles.
        filepath = os.path.realpath(stream.name)
        if filepath in self._included:
            print("Error:: circular inclusion for file name '{}'".
                  format(stream.name))
            raise yaml.constructor.ConstructorError
        self._included.append(filepath)
        super(Binder, self).__init__(stream)
        Binder.add_constructor('!include', Binder._include)
        Binder.add_constructor('!import', Binder._include)

    def _include(self, node):
        """Constructor for the '!include'/'!import' tags."""
        if isinstance(node, yaml.ScalarNode):
            return self._extract_file(self.construct_scalar(node))

        elif isinstance(node, yaml.SequenceNode):
            result = []
            for filename in self.construct_sequence(node):
                result.append(self._extract_file(filename))
            return result

        elif isinstance(node, yaml.MappingNode):
            result = {}
            # FIX: dict.iteritems() does not exist in Python 3 - this
            # branch raised AttributeError; use items() instead.
            for k, v in self.construct_mapping(node).items():
                result[k] = self._extract_file(v)
            return result

        else:
            print("Error: unrecognised node type in !include statement")
            raise yaml.constructor.ConstructorError

    def _extract_file(self, filename):
        """Load the binding file *filename* found in the known file list."""
        filepaths = [filepath for filepath in self._files
                     if os.path.basename(filepath) == filename]
        if len(filepaths) == 0:
            print("Error: unknown file name '{}' in !include statement".
                  format(filename))
            raise yaml.constructor.ConstructorError
        elif len(filepaths) > 1:
            # multiple candidates for filename
            print("Warning: multiple candidates for file name "
                  "'{}' in !include statement - using first of".
                  format(filename), filepaths)
        with open(filepaths[0], 'r', encoding='utf-8') as f:
            return yaml.load(f, Binder)
Loading

0 comments on commit a584005

Please sign in to comment.