diff --git a/tuf/client/__init__.py b/tuf/client/__init__.py
deleted file mode 100755
index e69de29bb2..0000000000
diff --git a/tuf/client/fetcher.py b/tuf/client/fetcher.py
deleted file mode 100644
index 8768bdd4b9..0000000000
--- a/tuf/client/fetcher.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2021, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""Provides an interface for network IO abstraction.
-"""
-
-# Imports
-import abc
-
-# Classes
-class FetcherInterface():
-    """Defines an interface for abstract network download.
-
-    By providing a concrete implementation of the abstract interface,
-    users of the framework can plug-in their preferred/customized
-    network stack.
-    """
-
-    __metaclass__ = abc.ABCMeta
-
-    @abc.abstractmethod
-    def fetch(self, url, required_length):
-        """Fetches the contents of HTTP/HTTPS url from a remote server.
-
-        Ensures the length of the downloaded data is up to 'required_length'.
-
-        Arguments:
-          url: A URL string that represents a file location.
-          required_length: An integer value representing the file length in bytes.
-
-        Raises:
-          tuf.exceptions.SlowRetrievalError: A timeout occurs while receiving data.
-          tuf.exceptions.FetcherHTTPError: An HTTP error code is received.
-
-        Returns:
-          A bytes iterator
-        """
-        raise NotImplementedError # pragma: no cover
diff --git a/tuf/client/updater.py b/tuf/client/updater.py
deleted file mode 100755
index 9d08e4d020..0000000000
--- a/tuf/client/updater.py
+++ /dev/null
@@ -1,3071 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  updater.py
-
-<Author>
-  Geremy Condra
-  Vladimir Diaz <vladimir.v.diaz@gmail.com>
-
-<Started>
-  July 2012.  Based on a previous version of this module. (VLAD)
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  'updater.py' is intended to be the only TUF module that software update
-  systems need to utilize.  It provides a single class representing an
-  updater that includes methods to download, install, and verify
-  metadata/target files in a secure manner.  Importing 'updater.py' and
-  instantiating its main class is all that is required by the client prior
-  to a TUF update request.  The importation and instantiation steps allow
-  TUF to load all of the required metadata files and set the repository mirror
-  information.
-
-  An overview of the update process:
-
-  1. The software update system instructs TUF to check for updates.
-
-  2. TUF downloads and verifies timestamp.json.
-
-  3. If timestamp.json indicates that snapshot.json has changed, TUF downloads
-  and verifies snapshot.json.
-
-  4. TUF determines which metadata files listed in snapshot.json differ from
-  those described in the last snapshot.json that TUF has seen.  If root.json
-  has changed, the update process starts over using the new root.json.
-
-  5. TUF provides the software update system with a list of available files
-  according to targets.json.
-
-  6. The software update system instructs TUF to download a specific target
-  file.
-
-  7. TUF downloads and verifies the file and then makes the file available to
-  the software update system.
-
-<Example Client>
-
-  # The client first imports the 'updater.py' module, the only module the
-  # client is required to import.  The client will utilize a single class
-  # from this module.
-  from tuf.client.updater import Updater
-
-  # The only other module the client interacts with is 'tuf.settings'.  The
-  # client accesses this module solely to set the repository directory.
- # This directory will hold the files downloaded from a remote repository. - from tuf import settings - settings.repositories_directory = 'local-repository' - - # Next, the client creates a dictionary object containing the repository - # mirrors. The client may download content from any one of these mirrors. - # In the example below, a single mirror named 'mirror1' is defined. The - # mirror is located at 'http://localhost:8001', and all of the metadata - # and targets files can be found in the 'metadata' and 'targets' directory, - # respectively. If the client wishes to only download target files from - # specific directories on the mirror, the 'confined_target_dirs' field - # should be set. In this example, the client hasn't set confined_target_dirs, - # which is interpreted as no confinement. - # In other words, the client can download - # targets from any directory or subdirectories. If the client had chosen - # 'targets1/', they would have been confined to the '/targets/targets1/' - # directory on the 'http://localhost:8001' mirror. - repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001', - 'metadata_path': 'metadata', - 'targets_path': 'targets'}} - - # The updater may now be instantiated. The Updater class of 'updater.py' - # is called with two arguments. The first argument assigns a name to this - # particular updater and the second argument the repository mirrors defined - # above. - updater = Updater('updater', repository_mirrors) - - # The client next calls the refresh() method to ensure it has the latest - # copies of the metadata files. - updater.refresh() - - # get_one_valid_targetinfo() updates role metadata when required. In other - # words, if the client doesn't possess the metadata that lists 'LICENSE.txt', - # get_one_valid_targetinfo() will try to fetch / update it. - target = updater.get_one_valid_targetinfo('LICENSE.txt') - - # Determine if 'target' has changed since the client's last refresh(). A - # target is considered updated if it does not exist in - # 'destination_directory' (current directory) or the target located there has - # changed. - destination_directory = '.' - updated_target = updater.updated_targets([target], destination_directory) - - for target in updated_target: - updater.download_target(target, destination_directory) - # Client code here may also reference target information (including - # 'custom') by directly accessing the dictionary entries of the target. - # The 'custom' entry is additional file information explicitly set by the - # remote repository. 
- target_path = target['filepath'] - target_length = target['fileinfo']['length'] - target_hashes = target['fileinfo']['hashes'] - target_custom_data = target['fileinfo']['custom'] -""" - -import errno -import logging -import os -import shutil -import time -import fnmatch -import copy -import warnings -import io -from urllib import parse - -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import hash as sslib_hash -from securesystemslib import keys as sslib_keys -from securesystemslib import util as sslib_util - -import tuf -from tuf import download -from tuf import exceptions -from tuf import formats -from tuf import keydb -from tuf import log # pylint: disable=unused-import -from tuf import mirrors -from tuf import roledb -from tuf import settings -from tuf import sig -from tuf import requests_fetcher - -# The Timestamp role does not have signed metadata about it; otherwise we -# would need an infinite regress of metadata. Therefore, we use some -# default, but sane, upper file length for its metadata. -DEFAULT_TIMESTAMP_UPPERLENGTH = settings.DEFAULT_TIMESTAMP_REQUIRED_LENGTH - -# The Root role may be updated without knowing its version number if -# top-level metadata cannot be safely downloaded (e.g., keys may have been -# revoked, thus requiring a new Root file that includes the updated keys) -# and 'unsafely_update_root_if_necessary' is True. -# We use some default, but sane, upper file length for its metadata. -DEFAULT_ROOT_UPPERLENGTH = settings.DEFAULT_ROOT_REQUIRED_LENGTH - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - - -class MultiRepoUpdater(object): - """ - - Provide a way for clients to request a target file from multiple - repositories. Which repositories to query is determined by the map - file (i.e,. map.json). - - See TAP 4 for more information on the map file and how to request updates - from multiple repositories. TAP 4 describes how users may specify that a - particular threshold of repositories be used for some targets, while a - different threshold of repositories be used for others. - - - map_file: - The path of the map file. The map file is needed to determine which - repositories to query given a target file. - - - securesystemslib.exceptions.FormatError, if the map file is improperly - formatted. - - tuf.exceptions.Error, if the map file cannot be loaded. - - - None. - - - None. - """ - - def __init__(self, map_file): - # Is 'map_file' a path? If not, raise - # 'securesystemslib.exceptions.FormatError'. The actual content of the map - # file is validated later on in this method. - sslib_formats.PATH_SCHEMA.check_match(map_file) - - # A dictionary mapping repositories to TUF updaters. - self.repository_names_to_updaters = {} - - try: - # The map file dictionary that associates targets with repositories. - self.map_file = sslib_util.load_json_file(map_file) - - except (sslib_exceptions.Error) as e: - raise exceptions.Error('Cannot load the map file: ' + str(e)) - - # Raise securesystemslib.exceptions.FormatError if the map file is - # improperly formatted. 
- formats.MAPFILE_SCHEMA.check_match(self.map_file) - - # Save the "repositories" entry of the map file, with the following - # example format: - # - # "repositories": { - # "Django": ["https://djangoproject.com/"], - # "PyPI": ["https://pypi.python.org/"] - # } - self.repository_names_to_mirrors = self.map_file['repositories'] - - - - def get_valid_targetinfo(self, target_filename, match_custom_field=True): - """ - - Get valid targetinfo, if any, for the given 'target_filename'. The map - file controls the targetinfo returned (see TAP 4). Return a dict of the - form {updater1: targetinfo, updater2: targetinfo, ...}, where the dict - keys are updater objects, and the dict values the matching targetinfo for - 'target_filename'. - - - target_filename: - The relative path of the target file to update. - - match_custom_field: - Boolean that indicates whether the optional custom field in targetinfo - should match across the targetinfo provided by the threshold of - repositories. - - - tuf.exceptions.FormatError, if the argument is improperly formatted. - - tuf.exceptions.Error, if the required local metadata directory or the - Root file does not exist. - - tuf.exceptions.UnknownTargetError, if the repositories in the map file do - not agree on the target, or none of them have signed for the target. - - - None. - - - A dict of the form: {updater1: targetinfo, updater2: targetinfo, ...}. - The targetinfo (conformant with tuf.formats.TARGETINFO_SCHEMA) is for - 'target_filename'. - """ - - # Is the argument properly formatted? If not, raise - # 'tuf.exceptions.FormatError'. - formats.RELPATH_SCHEMA.check_match(target_filename) - - # TAP 4 requires that the following attributes be present in mappings: - # "paths", "repositories", "terminating", and "threshold". - formats.MAPPING_SCHEMA.check_match(self.map_file['mapping']) - - # Set the top-level directory containing the metadata for each repository. - repositories_directory = settings.repositories_directory - - # Verify that the required local directories exist for each repository. - self._verify_metadata_directories(repositories_directory) - - # Iterate mappings. - # [{"paths": [], "repositories": [], "terminating": Boolean, "threshold": - # NUM}, ...] - for mapping in self.map_file['mapping']: - - logger.debug('Interrogating mappings..' + repr(mapping)) - if not self._target_matches_path_pattern( - target_filename, mapping['paths']): - # The mapping is irrelevant to the target file. Try the next one, if - # any. - continue - - # The mapping is relevant to the target... - else: - # Do the repositories in the mapping provide a threshold of matching - # targetinfo? - valid_targetinfo = self._matching_targetinfo(target_filename, - mapping, match_custom_field) - - if valid_targetinfo: - return valid_targetinfo - - else: - # If we are here, it means either (1) the mapping is irrelevant to - # the target, (2) the targets were missing from all repositories in - # this mapping, or (3) the targets on all repositories did not match. - # Whatever the case may be, are we allowed to continue to the next - # mapping? Let's check the terminating entry! - if not mapping['terminating']: - logger.debug('The mapping was irrelevant to the target, and' - ' "terminating" was set to False. 
Trying the next mapping...') - continue - - else: - raise exceptions.UnknownTargetError('The repositories in the' - ' mapping do not agree on the target, or none of them have' - ' signed for the target, and "terminating" was set to True.') - - # If we are here, it means either there were no mappings, or none of the - # mappings provided the target. - logger.debug('Did not find valid targetinfo for ' + repr(target_filename)) - raise exceptions.UnknownTargetError('The repositories in the map' - ' file do not agree on the target, or none of them have signed' - ' for the target.') - - - - - - def _verify_metadata_directories(self, repositories_directory): - # Iterate 'self.repository_names_to_mirrors' and verify that the expected - # local files and directories exist. TAP 4 requires a separate local - # directory for each repository. - for repository_name in self.repository_names_to_mirrors: - - logger.debug('Interrogating repository: ' + repr(repository_name)) - # Each repository must cache its metadata in a separate location. - repository_directory = os.path.join(repositories_directory, - repository_name) - - if not os.path.isdir(repository_directory): - raise exceptions.Error('The metadata directory' - ' for ' + repr(repository_name) + ' must exist' - ' at ' + repr(repository_directory)) - - else: - logger.debug('Found local directory for ' + repr(repository_name)) - - # The latest known root metadata file must also exist on disk. - root_file = os.path.join( - repository_directory, 'metadata', 'current', 'root.json') - - if not os.path.isfile(root_file): - raise exceptions.Error( - 'The Root file must exist at ' + repr(root_file)) - - else: - logger.debug('Found local Root file at ' + repr(root_file)) - - - - - - def _matching_targetinfo( - self, target_filename, mapping, match_custom_field=True): - valid_targetinfo = {} - - # Retrieve the targetinfo from each repository using the underlying - # Updater() instance. - for repository_name in mapping['repositories']: - logger.debug('Retrieving targetinfo for ' + repr(target_filename) + - ' from repository...') - - try: - targetinfo, updater = self._update_from_repository( - repository_name, target_filename) - - except (exceptions.UnknownTargetError, exceptions.Error): - continue - - valid_targetinfo[updater] = targetinfo - - matching_targetinfo = {} - logger.debug('Verifying that a threshold of targetinfo are equal...') - - # Iterate 'valid_targetinfo', looking for a threshold number of matches - # for 'targetinfo'. The first targetinfo to reach the required threshold - # is returned. For example, suppose the following list of targetinfo and - # a threshold of 2: - # [A, B, C, B, A, C] - # In this case, targetinfo B is returned. - for valid_updater, compared_targetinfo in valid_targetinfo.items(): - - if not self._targetinfo_match( - targetinfo, compared_targetinfo, match_custom_field): - continue - - else: - - matching_targetinfo[valid_updater] = targetinfo - - if not len(matching_targetinfo) >= mapping['threshold']: - continue - - else: - logger.debug('Found a threshold of matching targetinfo!') - # We now have a targetinfo (that matches across a threshold of - # repositories as instructed by the map file), along with the - # updaters that sign for it. 
- logger.debug( - 'Returning updaters for targetinfo: ' + repr(targetinfo)) - - return matching_targetinfo - - return None - - - - - - def _targetinfo_match(self, targetinfo1, targetinfo2, match_custom_field=True): - if match_custom_field: - return (targetinfo1 == targetinfo2) - - else: - targetinfo1_without_custom = copy.deepcopy(targetinfo1) - targetinfo2_without_custom = copy.deepcopy(targetinfo2) - targetinfo1_without_custom['fileinfo'].pop('custom', None) - targetinfo2_without_custom['fileinfo'].pop('custom', None) - - return (targetinfo1_without_custom == targetinfo2_without_custom) - - - - - - def _target_matches_path_pattern(self, target_filename, path_patterns): - for path_pattern in path_patterns: - logger.debug('Interrogating pattern ' + repr(path_pattern) + 'for' - ' target: ' + repr(target_filename)) - - # Example: "foo.tgz" should match with "/*.tgz". Make sure to strip any - # leading path separators so that a match is made if a repo maintainer - # uses a leading separator with a delegated glob pattern, but a client - # doesn't include one when a target file is requested. - if fnmatch.fnmatch(target_filename.lstrip(os.sep), path_pattern.lstrip(os.sep)): - logger.debug('Found a match for ' + repr(target_filename)) - return True - - else: - logger.debug('Continue searching for relevant paths.') - continue - - # If we are here, then none of the paths are relevant to the target. - logger.debug('None of the paths are relevant.') - return False - - - - - - - def get_updater(self, repository_name): - """ - - Get the updater instance corresponding to 'repository_name'. - - - repository_name: - The name of the repository as it appears in the map file. For example, - "Django" and "PyPI" in the "repositories" entry of the map file. - - "repositories": { - "Django": ["https://djangoproject.com/"], - "PyPI": ["https://pypi.python.org/"] - } - - - tuf.exceptions.FormatError, if any of the arguments are improperly - formatted. - - - None. - - - Returns the Updater() instance for 'repository_name'. If the instance - does not exist, return None. - """ - - # Are the arguments properly formatted? If not, raise - # 'tuf.exceptions.FormatError'. - formats.NAME_SCHEMA.check_match(repository_name) - - updater = self.repository_names_to_updaters.get(repository_name) - - if not updater: - - if repository_name not in self.repository_names_to_mirrors: - return None - - else: - # Create repository mirrors object needed by the - # tuf.client.updater.Updater(). Each 'repository_name' can have more - # than one mirror. - repo_mirrors = {} - - for url in self.repository_names_to_mirrors[repository_name]: - repo_mirrors[url] = { - 'url_prefix': url, - 'metadata_path': 'metadata', - 'targets_path': 'targets'} - - try: - # NOTE: State (e.g., keys) should NOT be shared across different - # updater instances. - logger.debug('Adding updater for ' + repr(repository_name)) - updater = Updater(repository_name, repo_mirrors) - - except Exception: - return None - - else: - self.repository_names_to_updaters[repository_name] = updater - - else: - logger.debug('Found an updater for ' + repr(repository_name)) - - # Ensure the updater's metadata is the latest before returning it. - updater.refresh() - return updater - - - - - - def _update_from_repository(self, repository_name, target_filename): - - updater = self.get_updater(repository_name) - - if not updater: - raise exceptions.Error( - 'Cannot load updater for ' + repr(repository_name)) - - else: - # Get one valid target info from the Updater object. 
- # 'tuf.exceptions.UnknownTargetError' raised by get_one_valid_targetinfo - # if a valid target cannot be found. - return updater.get_one_valid_targetinfo(target_filename), updater - - - - - -class Updater(object): - """ - - Provide a class that can download target files securely. The updater - keeps track of currently and previously trusted metadata, target files - available to the client, target file attributes such as file size and - hashes, key and role information, metadata signatures, and the ability - to determine when the download of a file should be permitted. - - - self.metadata: - Dictionary holding the currently and previously trusted metadata. - - Example: {'current': {'root': ROOT_SCHEMA, - 'targets':TARGETS_SCHEMA, ...}, - 'previous': {'root': ROOT_SCHEMA, - 'targets':TARGETS_SCHEMA, ...}} - - self.metadata_directory: - The directory where trusted metadata is stored. - - self.versioninfo: - A cache of version numbers for the roles available on the repository. - - Example: {'targets.json': {'version': 128}, ...} - - self.mirrors: - The repository mirrors from which metadata and targets are available. - Conformant to 'tuf.formats.MIRRORDICT_SCHEMA'. - - self.repository_name: - The name of the updater instance. - - - refresh(): - This method downloads, verifies, and loads metadata for the top-level - roles in a specific order (i.e., root -> timestamp -> snapshot -> targets) - The expiration time for downloaded metadata is also verified. - - The metadata for delegated roles are not refreshed by this method, but by - the method that returns targetinfo (i.e., get_one_valid_targetinfo()). - The refresh() method should be called by the client before any target - requests. - - get_one_valid_targetinfo(file_path): - Returns the target information for a specific file identified by its file - path. This target method also downloads the metadata of updated targets. - - updated_targets(targets, destination_directory): - After the client has retrieved the target information for those targets - they are interested in updating, they would call this method to determine - which targets have changed from those saved locally on disk. All the - targets that have changed are returns in a list. From this list, they - can request a download by calling 'download_target()'. - - download_target(target, destination_directory): - This method performs the actual download of the specified target. The - file is saved to the 'destination_directory' argument. - - remove_obsolete_targets(destination_directory): - Any files located in 'destination_directory' that were previously - served by the repository but have since been removed, can be deleted - from disk by the client by calling this method. - - Note: The methods listed above are public and intended for the software - updater integrating TUF with this module. All other methods that may begin - with a single leading underscore are non-public and only used internally. - updater.py is not subclassed in TUF, nor is it designed to be subclassed, - so double leading underscores is not used. - http://www.python.org/dev/peps/pep-0008/#method-names-and-instance-variables - """ - - def __init__(self, repository_name, repository_mirrors, fetcher=None): - """ - - Constructor. Instantiating an updater object causes all the metadata - files for the top-level roles to be read from disk, including the key and - role information for the delegated targets of 'targets'. The actual - metadata for delegated roles is not loaded in __init__. 
The metadata for
-      these delegated roles, including nested delegated roles, are loaded,
-      updated, and saved to the 'self.metadata' store, as needed, by
-      get_one_valid_targetinfo().
-
-      The initial set of metadata files are provided by the software update
-      system utilizing TUF.
-
-      In order to use an updater, the following directories must already
-      exist locally:
-
-        {tuf.settings.repositories_directory}/{repository_name}/metadata/current
-        {tuf.settings.repositories_directory}/{repository_name}/metadata/previous
-
-      and, at a minimum, the root metadata file must exist:
-
-        {tuf.settings.repositories_directory}/{repository_name}/metadata/current/root.json
-
-    <Arguments>
-      repository_name:
-        The name of the repository.
-
-      repository_mirrors:
-        A dictionary holding repository mirror information, conformant to
-        'tuf.formats.MIRRORDICT_SCHEMA'.  This dictionary holds
-        information such as the directory containing the metadata and target
-        files, the server's URL prefix, and the target content directories the
-        client should be confined to.
-
-        repository_mirrors = {'mirror1': {'url_prefix': 'http://localhost:8001',
-                                          'metadata_path': 'metadata',
-                                          'targets_path': 'targets',
-                                          'confined_target_dirs': ['']}}
-
-      fetcher:
-        A concrete 'FetcherInterface' implementation.  Performs the network
-        related download operations.  If an external implementation is not
-        provided, tuf.fetcher.RequestsFetcher is used.
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError:
-        If the arguments are improperly formatted.
-
-      tuf.exceptions.RepositoryError:
-        If there is an error with the updater's repository files, such
-        as a missing 'root.json' file.
-
-    <Side Effects>
-      The metadata files (e.g., 'root.json', 'targets.json') for the
-      top-level roles are read from disk and stored in dictionaries.  In
-      addition, the key and roledb modules are populated with
-      'repository_name' entries.
-
-    <Returns>
-      None.
-    """
-
-    # Do the arguments have the correct format?
-    # These checks ensure the arguments have the appropriate
-    # number of objects and object types and that all dict
-    # keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    sslib_formats.NAME_SCHEMA.check_match(repository_name)
-    formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)
-
-    # Save the validated arguments.
-    self.repository_name = repository_name
-    self.mirrors = repository_mirrors
-
-    # Initialize Updater with an externally provided 'fetcher' implementing
-    # the network download.  By default tuf.fetcher.RequestsFetcher is used.
-    if fetcher is None:
-      self.fetcher = requests_fetcher.RequestsFetcher()
-    else:
-      self.fetcher = fetcher
-
-    # Store the trusted metadata read from disk.
-    self.metadata = {}
-
-    # Store the currently trusted/verified metadata.
-    self.metadata['current'] = {}
-
-    # Store the previously trusted/verified metadata.
-    self.metadata['previous'] = {}
-
-    # Store the version numbers of roles available on the repository.  The dict
-    # keys are paths, and the dict values versioninfo data.  This information
-    # can help determine whether a metadata file has changed and needs to be
-    # re-downloaded.
-    self.versioninfo = {}
-
-    # Store the file information of the root and snapshot roles.  The dict keys
-    # are paths, the dict values fileinfo data.  This information can help
-    # determine whether a metadata file has changed and so needs to be
-    # re-downloaded.
-    self.fileinfo = {}
-
-    # Store the location of the client's metadata directory.
- self.metadata_directory = {} - - # Store the 'consistent_snapshot' of the Root role. This setting - # determines if metadata and target files downloaded from remote - # repositories include the digest. - self.consistent_snapshot = False - - # Ensure the repository metadata directory has been set. - if settings.repositories_directory is None: - raise exceptions.RepositoryError('The TUF update client' - ' module must specify the directory containing the local repository' - ' files. "tuf.settings.repositories_directory" MUST be set.') - - # Set the path for the current set of metadata files. - repositories_directory = settings.repositories_directory - repository_directory = os.path.join(repositories_directory, self.repository_name) - - # raise MissingLocalRepository if the repo does not exist at all. - if not os.path.exists(repository_directory): - raise exceptions.MissingLocalRepositoryError('Local repository ' + - repr(repository_directory) + ' does not exist.') - - current_path = os.path.join(repository_directory, 'metadata', 'current') - - # Ensure the current path is valid/exists before saving it. - if not os.path.exists(current_path): - raise exceptions.RepositoryError('Missing' - ' ' + repr(current_path) + '. This path must exist and, at a minimum,' - ' contain the Root metadata file.') - - self.metadata_directory['current'] = current_path - - # Set the path for the previous set of metadata files. - previous_path = os.path.join(repository_directory, 'metadata', 'previous') - - # Ensure the previous path is valid/exists. - if not os.path.exists(previous_path): - raise exceptions.RepositoryError('Missing ' + repr(previous_path) + '.' - ' This path MUST exist.') - - self.metadata_directory['previous'] = previous_path - - # Load current and previous metadata. - for metadata_set in ['current', 'previous']: - for metadata_role in roledb.TOP_LEVEL_ROLES: - self._load_metadata_from_file(metadata_set, metadata_role) - - # Raise an exception if the repository is missing the required 'root' - # metadata. - if 'root' not in self.metadata['current']: - raise exceptions.RepositoryError('No root of trust!' - ' Could not find the "root.json" file.') - - - - - - def __str__(self): - """ - The string representation of an Updater object. - """ - - return self.repository_name - - - @staticmethod - def _get_local_filename(rolename: str) -> str: - """Return safe local filename for roles metadata - - Use URL encoding to prevent issues with path separators and - with forbidden characters in Windows filesystems""" - return parse.quote(rolename, '') + '.json' - - - def _load_metadata_from_file(self, metadata_set, metadata_role): - """ - - Non-public method that loads current or previous metadata if there is a - local file. If the expected file belonging to 'metadata_role' (e.g., - 'root.json') cannot be loaded, raise an exception. The extracted metadata - object loaded from file is saved to the metadata store (i.e., - self.metadata). - - - metadata_set: - The string 'current' or 'previous', depending on whether one wants to - load the currently or previously trusted metadata file. - - metadata_role: - The name of the metadata. This is a role name and should - not end in '.json'. Examples: 'root', 'targets', 'unclaimed'. - - - securesystemslib.exceptions.FormatError: - If the role object loaded for 'metadata_role' is improperly formatted. - - securesystemslib.exceptions.Error: - If there was an error importing a delegated role of 'metadata_role' - or the 'metadata_set' is not one currently supported. 
-    <Side Effects>
-      If the metadata is loaded successfully, it is saved to the metadata
-      store.  If 'metadata_role' is 'root', the role and key databases
-      are reloaded.  If 'metadata_role' is a target metadata, all its
-      delegated roles are refreshed.
-
-    <Returns>
-      None.
-    """
-
-    # Ensure we have a valid metadata set.
-    if metadata_set not in ['current', 'previous']:
-      raise sslib_exceptions.Error(
-          'Invalid metadata set: ' + repr(metadata_set))
-
-    # Save and construct the full metadata path.
-    metadata_directory = self.metadata_directory[metadata_set]
-    metadata_filename = self._get_local_filename(metadata_role)
-    metadata_filepath = os.path.join(metadata_directory, metadata_filename)
-
-    # Ensure the metadata path is valid/exists, else ignore the call.
-    if os.path.exists(metadata_filepath):
-      # Load the file.  The loaded object should conform to
-      # 'tuf.formats.SIGNABLE_SCHEMA'.
-      try:
-        metadata_signable = sslib_util.load_json_file(
-            metadata_filepath)
-
-      # Although the metadata file may exist locally, it may not
-      # be a valid json file.  On the next refresh cycle, it will be
-      # updated as required.  If Root cannot be loaded from disk
-      # successfully, an exception should be raised by the caller.
-      except sslib_exceptions.Error:
-        return
-
-      formats.check_signable_object_format(metadata_signable)
-
-      # Extract the 'signed' role object from 'metadata_signable'.
-      metadata_object = metadata_signable['signed']
-
-      # Save the metadata object to the metadata store.
-      self.metadata[metadata_set][metadata_role] = metadata_object
-
-      # If 'metadata_role' is 'root' or targets metadata, the key and role
-      # databases must be rebuilt.  If 'root', ensure
-      # self.consistent_snapshot is updated.
-      if metadata_set == 'current':
-        if metadata_role == 'root':
-          self._rebuild_key_and_role_db()
-          self.consistent_snapshot = metadata_object['consistent_snapshot']
-
-        elif metadata_object['_type'] == 'targets':
-          # TODO: Should we also remove the keys of the delegated roles?
-          self._import_delegations(metadata_role)
-
-
-
-
-
-  def _rebuild_key_and_role_db(self):
-    """
-    <Purpose>
-      Non-public method that rebuilds the key and role databases from the
-      currently trusted 'root' metadata object extracted from 'root.json'.
-      This private method is called when a new/updated 'root' metadata file is
-      loaded or when updater.refresh() is called.  This method will only store
-      the role information of the top-level roles (i.e., 'root', 'targets',
-      'snapshot', 'timestamp').
-
-    <Arguments>
-      None.
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError:
-        If the 'root' metadata is improperly formatted.
-
-      securesystemslib.exceptions.Error:
-        If there is an error loading a role contained in the 'root'
-        metadata.
-
-    <Side Effects>
-      The key and role databases are reloaded for the top-level roles.
-
-    <Returns>
-      None.
-    """
-
-    # Clobbering this means all delegated metadata files are rendered outdated
-    # and will need to be reloaded.  However, reloading the delegated metadata
-    # files is avoided here because fetching target information with
-    # get_one_valid_targetinfo() always causes a refresh of these files.  The
-    # metadata files for delegated roles are also not loaded when the
-    # repository is first instantiated.  Due to this setup, reloading delegated
-    # roles is not required here.
- keydb.create_keydb_from_root_metadata(self.metadata['current']['root'], - self.repository_name) - - roledb.create_roledb_from_root_metadata(self.metadata['current']['root'], - self.repository_name) - - - - - - def _import_delegations(self, parent_role): - """ - - Non-public method that imports all the roles delegated by 'parent_role'. - - - parent_role: - The role whose delegations will be imported. - - - securesystemslib.exceptions.FormatError: - If a key attribute of a delegated role's signing key is - improperly formatted. - - securesystemslib.exceptions.Error: - If the signing key of a delegated role cannot not be loaded. - - - The key and role databases are modified to include the newly loaded roles - delegated by 'parent_role'. - - - None. - """ - - current_parent_metadata = self.metadata['current'][parent_role] - - if 'delegations' not in current_parent_metadata: - return - - # This could be quite slow with a large number of delegations. - keys_info = current_parent_metadata['delegations'].get('keys', {}) - roles_info = current_parent_metadata['delegations'].get('roles', []) - - logger.debug('Adding roles delegated from ' + repr(parent_role) + '.') - - # Iterate the keys of the delegated roles of 'parent_role' and load them. - for keyid, keyinfo in keys_info.items(): - if keyinfo['keytype'] in ['rsa', 'ed25519', 'ecdsa', 'ecdsa-sha2-nistp256']: - - # We specify the keyid to ensure that it's the correct keyid - # for the key. - try: - key, _ = sslib_keys.format_metadata_to_key(keyinfo, keyid) - - keydb.add_key(key, repository_name=self.repository_name) - - except exceptions.KeyAlreadyExistsError: - pass - - except (sslib_exceptions.FormatError, sslib_exceptions.Error): - logger.warning('Invalid key: ' + repr(keyid) + '. Aborting role ' + - 'delegation for parent role \'' + parent_role + '\'.') - raise - - else: - logger.warning('Invalid key type for ' + repr(keyid) + '.') - continue - - # Add the roles to the role database. - for roleinfo in roles_info: - try: - # NOTE: roledb.add_role will take care of the case where rolename - # is None. - rolename = roleinfo.get('name') - logger.debug('Adding delegated role: ' + str(rolename) + '.') - roledb.add_role(rolename, roleinfo, self.repository_name) - - except exceptions.RoleAlreadyExistsError: - logger.warning('Role already exists: ' + rolename) - - except Exception: - logger.warning('Failed to add delegated role: ' + repr(rolename) + '.') - raise - - - - - - def refresh(self, unsafely_update_root_if_necessary=True): - """ - - Update the latest copies of the metadata for the top-level roles. The - update request process follows a specific order to ensure the metadata - files are securely updated: - root (if necessary) -> timestamp -> snapshot -> targets. - - Delegated metadata is not refreshed by this method. After this method is - called, the use of get_one_valid_targetinfo() will update delegated - metadata, when required. Calling refresh() ensures that top-level - metadata is up-to-date, so that the target methods can refer to the - latest available content. Thus, refresh() should always be called by the - client before any requests of target file information. - - The expiration time for downloaded metadata is also verified, including - local metadata that the repository claims is up to date. - - If the refresh fails for any reason, then unless - 'unsafely_update_root_if_necessary' is set, refresh will be retried once - after first attempting to update the root metadata file. 
Only after this - check will the exceptions listed here potentially be raised. - - - unsafely_update_root_if_necessary: - Boolean that indicates whether to unsafely update the Root metadata if - any of the top-level metadata cannot be downloaded successfully. The - Root role is unsafely updated if its current version number is unknown. - - - tuf.exceptions.NoWorkingMirrorError: - If the metadata for any of the top-level roles cannot be updated. - - tuf.exceptions.ExpiredMetadataError: - If any of the top-level metadata is expired and no new version was - found. - - - Updates the metadata files of the top-level roles with the latest - information. - - - None. - """ - - # Do the arguments have the correct format? - # This check ensures the arguments have the appropriate - # number of objects and object types, and that all dict - # keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fail. - sslib_formats.BOOLEAN_SCHEMA.check_match( - unsafely_update_root_if_necessary) - - # Update the top-level metadata. The _update_metadata_if_changed() and - # _update_metadata() calls below do NOT perform an update if there - # is insufficient trusted signatures for the specified metadata. - # Raise 'tuf.exceptions.NoWorkingMirrorError' if an update fails. - root_metadata = self.metadata['current']['root'] - - try: - self._ensure_not_expired(root_metadata, 'root') - - except exceptions.ExpiredMetadataError: - # Raise 'tuf.exceptions.NoWorkingMirrorError' if a valid (not - # expired, properly signed, and valid metadata) 'root.json' cannot be - # installed. - if unsafely_update_root_if_necessary: - logger.info('Expired Root metadata was loaded from disk.' - ' Try to update it now.' ) - - # The caller explicitly requested not to unsafely fetch an expired Root. - else: - logger.info('An expired Root metadata was loaded and must be updated.') - raise - - # Update the root metadata and verify it by building a chain of trusted root - # keys from the current trusted root metadata file - self._update_root_metadata(root_metadata) - - # Ensure that the role and key information of the top-level roles is the - # latest. We do this whether or not Root needed to be updated, in order to - # ensure that, e.g., the entries in roledb for top-level roles are - # populated with expected keyid info so that roles can be validated. In - # certain circumstances, top-level metadata might be missing because it was - # marked obsolete and deleted after a failed attempt, and thus we should - # refresh them here as a protective measure. See Issue #736. - self._rebuild_key_and_role_db() - self.consistent_snapshot = \ - self.metadata['current']['root']['consistent_snapshot'] - - # Use default but sane information for timestamp metadata, and do not - # require strict checks on its required length. - self._update_metadata('timestamp', DEFAULT_TIMESTAMP_UPPERLENGTH) - - self._update_metadata_if_changed('snapshot', - referenced_metadata='timestamp') - self._update_metadata_if_changed('targets') - - - - def _update_root_metadata(self, current_root_metadata): - """ - - The root file must be signed by the current root threshold and keys as - well as the previous root threshold and keys. 
The update process for root
-      files means that each intermediate root file must be downloaded, to build
-      a chain of trusted root keys from keys already trusted by the client:
-
-      1.root -> 2.root -> 3.root
-
-      3.root must be signed by the threshold and keys of 2.root, and 2.root
-      must be signed by the threshold and keys of 1.root.
-
-    <Arguments>
-      current_root_metadata:
-        The currently held version of root.
-
-    <Side Effects>
-      Updates the root metadata files with the latest information.
-
-    <Returns>
-      None.
-    """
-
-    def neither_403_nor_404(mirror_error):
-      if isinstance(mirror_error, tuf.exceptions.FetcherHTTPError):
-        if mirror_error.status_code in {403, 404}:
-          return False
-      return True
-
-    # Temporarily set consistent snapshot.  Will be updated to whatever is set
-    # in the latest root.json after running through the intermediates with
-    # _update_metadata().
-    self.consistent_snapshot = True
-
-    # Following the spec, try downloading the N+1th root for a certain maximum
-    # number of times.
-    lower_bound = current_root_metadata['version'] + 1
-    upper_bound = lower_bound + settings.MAX_NUMBER_ROOT_ROTATIONS
-
-    # Try downloading the next root.
-    for next_version in range(lower_bound, upper_bound):
-      try:
-        # Thoroughly verify it.
-        self._update_metadata('root', DEFAULT_ROOT_UPPERLENGTH,
-            version=next_version)
-      # When we run into HTTP 403/404 error from ALL mirrors, break out of
-      # loop, because the next root metadata file is most likely missing.
-      except exceptions.NoWorkingMirrorError as exception:
-        for mirror_error in exception.mirror_errors.values():
-          # Otherwise, reraise the error, because it is not a simple HTTP
-          # error.
-          if neither_403_nor_404(mirror_error):
-            logger.info('Misc error for root version ' + str(next_version))
-            raise
-          else:
-            logger.debug('HTTP error for root version ' + str(next_version))
-
-        # If we are here, then we ran into only 403 / 404 errors, which are
-        # good reasons to suspect that the next root metadata file does not
-        # exist.
-        break
-
-    # Ensure that the role and key information of the top-level roles is the
-    # latest.  We do this whether or not Root needed to be updated, in order
-    # to ensure that, e.g., the entries in roledb for top-level roles are
-    # populated with expected keyid info so that roles can be validated.  In
-    # certain circumstances, top-level metadata might be missing because it
-    # was marked obsolete and deleted after a failed attempt, and thus we
-    # should refresh them here as a protective measure.  See Issue #736.
-    self._rebuild_key_and_role_db()
-
-    # Set our consistent snapshot property to what the latest root has said.
-    self.consistent_snapshot = \
-        self.metadata['current']['root']['consistent_snapshot']
-
-
-
-  def _check_hashes(self, file_object, trusted_hashes):
-    """
-    <Purpose>
-      Non-public method that verifies multiple secure hashes of 'file_object'.
-
-    <Arguments>
-      file_object:
-        A file object.
-
-      trusted_hashes:
-        A dictionary with hash-algorithm names as keys and hashes as dict values.
-        The hashes should be in the hexdigest format.  Should be conformant to
-        'securesystemslib.formats.HASHDICT_SCHEMA'.
-
-    <Exceptions>
-      securesystemslib.exceptions.BadHashError, if the hashes don't match.
-
-    <Side Effects>
-      Hash digest object is created using the 'securesystemslib.hash' module.
-      Position within file_object is changed.
-
-    <Returns>
-      None.
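-
-    <Example>
-      A minimal sketch of the hash check this method performs, assuming a
-      small in-memory file object.  The value in 'trusted_hashes' below is
-      illustrative (it is the SHA-256 hexdigest of b'hello world'):
-
-        import io
-        from securesystemslib import hash as sslib_hash
-
-        file_object = io.BytesIO(b'hello world')
-        trusted_hashes = {'sha256':
-            'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'}
-
-        # Recompute each digest and compare it against the trusted value.
-        for algorithm, trusted_hash in trusted_hashes.items():
-          digest_object = sslib_hash.digest_fileobject(file_object, algorithm)
-          assert digest_object.hexdigest() == trusted_hash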
- """ - - # Verify each hash, raise an exception if any hash fails to verify - for algorithm, trusted_hash in trusted_hashes.items(): - digest_object = sslib_hash.digest_fileobject(file_object, - algorithm) - computed_hash = digest_object.hexdigest() - - if trusted_hash != computed_hash: - raise sslib_exceptions.BadHashError(trusted_hash, - computed_hash) - - else: - logger.info('Verified ' + algorithm + ' hash: ' + trusted_hash) - - - - - - def _check_file_length(self, file_object, trusted_file_length): - """ - - Non-public method that ensures the length of 'file_object' is strictly - equal to 'trusted_file_length'. This is a deliberately redundant - implementation designed to complement - download._check_downloaded_length(). - - - file_object: - A file object. - - trusted_file_length: - A non-negative integer that is the trusted length of the file. - - - tuf.exceptions.DownloadLengthMismatchError, if the lengths do not match. - - - Reads the contents of 'file_object' and logs a message if 'file_object' - matches the trusted length. - Position within file_object is changed. - - - None. - """ - - file_object.seek(0, io.SEEK_END) - observed_length = file_object.tell() - - # Return and log a message if the length 'file_object' is equal to - # 'trusted_file_length', otherwise raise an exception. A hard check - # ensures that a downloaded file strictly matches a known, or trusted, - # file length. - if observed_length != trusted_file_length: - raise exceptions.DownloadLengthMismatchError(trusted_file_length, - observed_length) - - else: - logger.debug('Observed length (' + str(observed_length) +\ - ') == trusted length (' + str(trusted_file_length) + ')') - - - - - - def _get_target_file(self, target_filepath, file_length, file_hashes, - prefix_filename_with_hash): - """ - - Non-public method that safely (i.e., the file length and hash are - strictly equal to the trusted) downloads a target file up to a certain - length, and checks its hashes thereafter. - - - target_filepath: - The target filepath (relative to the repository targets directory) - obtained from TUF targets metadata. - - file_length: - The expected compressed length of the target file. If the file is not - compressed, then it will simply be its uncompressed length. - - file_hashes: - The expected hashes of the target file. - - prefix_filename_with_hash: - Whether to prefix the targets file names with their hash when using - consistent snapshot. - This should be set to False when the served target filenames are not - prefixed with hashes (in this case the server uses other means - to ensure snapshot consistency). - - - tuf.exceptions.NoWorkingMirrorError: - The target could not be fetched. This is raised only when all known - mirrors failed to provide a valid copy of the desired target file. - - - The target file is downloaded from all known repository mirrors in the - worst case. If a valid copy of the target file is found, it is stored in - a temporary file and returned. - - - A file object containing the target. - """ - - if self.consistent_snapshot and prefix_filename_with_hash: - # Note: values() does not return a list in Python 3. Use list() - # on values() for Python 2+3 compatibility. - target_digest = list(file_hashes.values()).pop() - dirname, basename = os.path.split(target_filepath) - target_filepath = os.path.join(dirname, target_digest + '.' 
+ basename)
-
-    file_mirrors = mirrors.get_list_of_mirrors('target', target_filepath,
-        self.mirrors)
-
-    # file_mirror (URL): error (Exception)
-    file_mirror_errors = {}
-    file_object = None
-
-    for file_mirror in file_mirrors:
-      try:
-        file_object = download.safe_download(file_mirror,
-            file_length, self.fetcher)
-
-        # Verify 'file_object' against the expected length and hashes.
-        self._check_file_length(file_object, file_length)
-        self._check_hashes(file_object, file_hashes)
-        # If the file verifies, we don't need to try more mirrors
-        return file_object
-
-      except Exception as exception:
-        # Remember the error from this mirror, close tempfile if one was opened
-        logger.debug('Update failed from ' + file_mirror + '.')
-        file_mirror_errors[file_mirror] = exception
-        if file_object is not None:
-          file_object.close()
-          file_object = None
-
-    logger.debug('Failed to update ' + repr(target_filepath) + ' from'
-        ' all mirrors: ' + repr(file_mirror_errors))
-    raise exceptions.NoWorkingMirrorError(file_mirror_errors)
-
-
-
-
-
-  def _verify_root_self_signed(self, signable):
-    """
-    Verify the root metadata in signable is signed by a threshold of keys,
-    where the threshold and valid keys are defined by itself.
-    """
-    threshold = signable['signed']['roles']['root']['threshold']
-    keyids = signable['signed']['roles']['root']['keyids']
-    keys = signable['signed']['keys']
-    signatures = signable['signatures']
-    signed = sslib_formats.encode_canonical(
-        signable['signed']).encode('utf-8')
-    verified_sig_keyids = set()
-
-    for signature in signatures:
-      keyid = signature['keyid']
-
-      # At this point we are verifying that the root metadata is signed by a
-      # threshold of keys listed in the current root role, therefore skip
-      # keys with a keyid that is not listed in the current root role.
-      if keyid not in keyids:
-        continue
-
-      key = keys[keyid]
-      # The ANYKEY_SCHEMA check in verify_signature expects the keydict to
-      # include a keyid
-      key['keyid'] = keyid
-      valid_sig = sslib_keys.verify_signature(key, signature, signed)
-
-      if valid_sig:
-        verified_sig_keyids.add(keyid)
-
-    if len(verified_sig_keyids) >= threshold:
-      return True
-    return False
-
-
-
-
-
-  def _verify_metadata_file(self, metadata_file_object,
-      metadata_role):
-    """
-    <Purpose>
-      Non-public method that verifies a metadata file.  An exception is
-      raised if 'metadata_file_object' is invalid.  There is no
-      return value.
-
-    <Arguments>
-      metadata_file_object:
-        A file object containing the metadata file.
-
-      metadata_role:
-        The role name of the metadata (e.g., 'root', 'targets',
-        'unclaimed').
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError:
-        In case the metadata file is valid JSON, but not valid TUF metadata.
-
-      tuf.exceptions.InvalidMetadataJSONError:
-        In case the metadata file is not valid JSON.
-
-      tuf.exceptions.ReplayedMetadataError:
-        In case the downloaded metadata file is older than the current one.
-
-      tuf.exceptions.RepositoryError:
-        In case the repository is somehow inconsistent; e.g. a parent has not
-        delegated to a child (contrary to expectations).
-
-      tuf.SignatureError:
-        In case the metadata file does not have a valid signature.
-
-    <Side Effects>
-      The content of 'metadata_file_object' is read and loaded, the current
-      position within the file is changed.
-
-    <Returns>
-      None.
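-
-    <Example>
-      A rough sketch of the self-signature threshold rule that root metadata
-      must additionally satisfy (see _verify_root_self_signed); the keyids,
-      threshold, and verification outcome below are illustrative only:
-
-        keyids = ['keyid_a', 'keyid_b', 'keyid_c']  # listed in the root role
-        threshold = 2                               # required by the root role
-
-        # Suppose signature verification succeeded for these two keyids:
-        verified_sig_keyids = {'keyid_a', 'keyid_c'}
-
-        # Only keyids listed in the root role count towards the threshold.
-        assert len(verified_sig_keyids & set(keyids)) >= threshold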
- """ - - metadata_file_object.seek(0) - metadata = metadata_file_object.read().decode('utf-8') - - try: - metadata_signable = sslib_util.load_json_string(metadata) - - except Exception as exception: - raise exceptions.InvalidMetadataJSONError(exception) - - else: - # Ensure the loaded 'metadata_signable' is properly formatted. Raise - # 'securesystemslib.exceptions.FormatError' if not. - formats.check_signable_object_format(metadata_signable) - - # Is 'metadata_signable' expired? - self._ensure_not_expired(metadata_signable['signed'], metadata_role) - - # We previously verified version numbers in this function, but have since - # moved version number verification to the functions that retrieve - # metadata. - - # Verify the signature on the downloaded metadata object. - valid = sig.verify(metadata_signable, metadata_role, - self.repository_name) - - if not valid: - raise sslib_exceptions.BadSignatureError(metadata_role) - - # For root metadata, verify the downloaded root metadata object with the - # new threshold of new signatures contained within the downloaded root - # metadata object - # NOTE: we perform the checks on root metadata here because this enables - # us to perform the check before the tempfile is persisted. Furthermore, - # by checking here we can easily perform the check for each download - # mirror. Whereas if we check after _verify_metadata_file we may be - # persisting invalid files and we cannot try copies of the file from other - # mirrors. - if valid and metadata_role == 'root': - valid = self._verify_root_self_signed(metadata_signable) - if not valid: - raise sslib_exceptions.BadSignatureError(metadata_role) - - - - - - def _get_metadata_file(self, metadata_role, remote_filename, - upperbound_filelength, expected_version): - """ - - Non-public method that tries downloading, up to a certain length, a - metadata file from a list of known mirrors. As soon as the first valid - copy of the file is found, the downloaded file is returned and the - remaining mirrors are skipped. - - - metadata_role: - The role name of the metadata (e.g., 'root', 'targets', 'unclaimed'). - - remote_filename: - The relative file path (on the remove repository) of 'metadata_role'. - - upperbound_filelength: - The expected length, or upper bound, of the metadata file to be - downloaded. - - expected_version: - The expected and required version number of the 'metadata_role' file - downloaded. 'expected_version' is an integer. - - - tuf.exceptions.NoWorkingMirrorError: - The metadata could not be fetched. This is raised only when all known - mirrors failed to provide a valid copy of the desired metadata file. - - - The file is downloaded from all known repository mirrors in the worst - case. If a valid copy of the file is found, it is stored in a temporary - file and returned. - - - A file object containing the metadata. - """ - - file_mirrors = mirrors.get_list_of_mirrors('meta', remote_filename, - self.mirrors) - - # file_mirror (URL): error (Exception) - file_mirror_errors = {} - file_object = None - - for file_mirror in file_mirrors: - try: - file_object = download.unsafe_download(file_mirror, - upperbound_filelength, self.fetcher) - file_object.seek(0) - - # Verify 'file_object' according to the callable function. - # 'file_object' is also verified if decompressed above (i.e., the - # uncompressed version). - metadata_signable = \ - sslib_util.load_json_string(file_object.read().decode('utf-8')) - - # Determine if the specification version number is supported. 
It is - # assumed that "spec_version" is in (major.minor.fix) format, (for - # example: "1.4.3") and that releases with the same major version - # number maintain backwards compatibility. Consequently, if the major - # version number of new metadata equals our expected major version - # number, the new metadata is safe to parse. - try: - metadata_spec_version = metadata_signable['signed']['spec_version'] - metadata_spec_version_split = metadata_spec_version.split('.') - metadata_spec_major_version = int(metadata_spec_version_split[0]) - metadata_spec_minor_version = int(metadata_spec_version_split[1]) - - code_spec_version_split = tuf.SPECIFICATION_VERSION.split('.') - code_spec_major_version = int(code_spec_version_split[0]) - code_spec_minor_version = int(code_spec_version_split[1]) - - if metadata_spec_major_version != code_spec_major_version: - raise exceptions.UnsupportedSpecificationError( - 'Downloaded metadata that specifies an unsupported ' - 'spec_version. This code supports major version number: ' + - repr(code_spec_major_version) + '; however, the obtained ' - 'metadata lists version number: ' + str(metadata_spec_version)) - - #report to user if minor versions do not match, continue with update - if metadata_spec_minor_version != code_spec_minor_version: - logger.info("Downloaded metadata that specifies a different minor " + - "spec_version. This code has version " + - str(tuf.SPECIFICATION_VERSION) + - " and the metadata lists version number " + - str(metadata_spec_version) + - ". The update will continue as the major versions match.") - - except (ValueError, TypeError) as error: - raise sslib_exceptions.FormatError('Improperly' - ' formatted spec_version, which must be in major.minor.fix format') from error - - # If the version number is unspecified, ensure that the version number - # downloaded is greater than the currently trusted version number for - # 'metadata_role'. - version_downloaded = metadata_signable['signed']['version'] - - if expected_version is not None: - # Verify that the downloaded version matches the version expected by - # the caller. - if version_downloaded != expected_version: - raise exceptions.BadVersionNumberError('Downloaded' - ' version number: ' + repr(version_downloaded) + '. Version' - ' number MUST be: ' + repr(expected_version)) - - # The caller does not know which version to download. Verify that the - # downloaded version is at least greater than the one locally - # available. - else: - # Verify that the version number of the locally stored - # 'timestamp.json', if available, is less than what was downloaded. - # Otherwise, accept the new timestamp with version number - # 'version_downloaded'. - - try: - current_version = \ - self.metadata['current'][metadata_role]['version'] - - if version_downloaded < current_version: - raise exceptions.ReplayedMetadataError(metadata_role, - version_downloaded, current_version) - - except KeyError: - logger.info(metadata_role + ' not available locally.') - - self._verify_metadata_file(file_object, metadata_role) - - except Exception as exception: - # Remember the error from this mirror, and "reset" the target file. 
- logger.debug('Update failed from ' + file_mirror + '.') - file_mirror_errors[file_mirror] = exception - if file_object: - file_object.close() - file_object = None - - else: - break - - if file_object: - return file_object - - else: - logger.debug('Failed to update ' + repr(remote_filename) + ' from all' - ' mirrors: ' + repr(file_mirror_errors)) - raise exceptions.NoWorkingMirrorError(file_mirror_errors) - - - - - - def _update_metadata(self, metadata_role, upperbound_filelength, version=None): - """ - - Non-public method that downloads, verifies, and 'installs' the metadata - belonging to 'metadata_role'. Calling this method implies that the - 'metadata_role' on the repository is newer than the client's, and thus - needs to be re-downloaded. The current and previous metadata stores are - updated if the newly downloaded metadata is successfully downloaded and - verified. This method also assumes that the store of top-level metadata - is the latest and exists. - - - metadata_role: - The name of the metadata. This is a role name and should not end - in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'. - - upperbound_filelength: - The expected length, or upper bound, of the metadata file to be - downloaded. - - version: - The expected and required version number of the 'metadata_role' file - downloaded. 'expected_version' is an integer. - - - tuf.exceptions.NoWorkingMirrorError: - The metadata cannot be updated. This is not specific to a single - failure but rather indicates that all possible ways to update the - metadata have been tried and failed. - - - The metadata file belonging to 'metadata_role' is downloaded from a - repository mirror. If the metadata is valid, it is stored in the - metadata store. - - - None. - """ - - # Attempt a file download from each mirror until the file is downloaded and - # verified. If the signature of the downloaded file is valid, proceed, - # otherwise log a warning and try the next mirror. 'metadata_file_object' - # is the file-like object returned by 'download.py'. 'metadata_signable' - # is the object extracted from 'metadata_file_object'. Metadata saved to - # files are regarded as 'signable' objects, conformant to - # 'tuf.formats.SIGNABLE_SCHEMA'. - # - # Some metadata (presently timestamp) will be downloaded "unsafely", in the - # sense that we can only estimate its true length and know nothing about - # its version. This is because not all metadata will have other metadata - # for it; otherwise we will have an infinite regress of metadata signing - # for each other. In this case, we will download the metadata up to the - # best length we can get for it, not request a specific version, but - # perform the rest of the checks (e.g., signature verification). - - # Construct the metadata filename as expected by the download/mirror - # modules. Local filename is quoted to protect against names like"../file". - - remote_filename = metadata_role + '.json' - local_filename = self._get_local_filename(metadata_role) - filename_version = '' - - if self.consistent_snapshot and version: - filename_version = version - dirname, basename = os.path.split(remote_filename) - remote_filename = os.path.join( - dirname, str(filename_version) + '.' + basename) - - metadata_file_object = \ - self._get_metadata_file(metadata_role, remote_filename, - upperbound_filelength, version) - - # The metadata has been verified. Move the metadata file into place. - # First, move the 'current' metadata file to the 'previous' directory - # if it exists. 
- current_filepath = os.path.join(self.metadata_directory['current'], - local_filename) - current_filepath = os.path.abspath(current_filepath) - sslib_util.ensure_parent_dir(current_filepath) - - previous_filepath = os.path.join(self.metadata_directory['previous'], - local_filename) - previous_filepath = os.path.abspath(previous_filepath) - - if os.path.exists(current_filepath): - # Previous metadata might not exist, say when delegations are added. - sslib_util.ensure_parent_dir(previous_filepath) - shutil.move(current_filepath, previous_filepath) - - # Next, move the verified updated metadata file to the 'current' directory. - metadata_file_object.seek(0) - metadata_signable = \ - sslib_util.load_json_string(metadata_file_object.read().decode('utf-8')) - - sslib_util.persist_temp_file(metadata_file_object, current_filepath) - - # Extract the metadata object so we can store it to the metadata store. - # 'current_metadata_object' set to 'None' if there is not an object - # stored for 'metadata_role'. - updated_metadata_object = metadata_signable['signed'] - current_metadata_object = self.metadata['current'].get(metadata_role) - - # Finally, update the metadata and fileinfo stores, and rebuild the - # key and role info for the top-level roles if 'metadata_role' is root. - # Rebuilding the key and role info is required if the newly-installed - # root metadata has revoked keys or updated any top-level role information. - logger.debug('Updated ' + repr(current_filepath) + '.') - self.metadata['previous'][metadata_role] = current_metadata_object - self.metadata['current'][metadata_role] = updated_metadata_object - self._update_versioninfo(remote_filename) - - - - - - def _update_metadata_if_changed(self, metadata_role, - referenced_metadata='snapshot'): - """ - - Non-public method that updates the metadata for 'metadata_role' if it has - changed. All top-level roles other than the 'timestamp' and 'root' - roles are updated by this method. The 'timestamp' role is always - downloaded from a mirror without first checking if it has been updated; - it is updated in refresh() by calling _update_metadata('timestamp'). - The 'root' role is always updated first and verified based on the trusted - root metadata file the client already has a copy of; it is updated in - refresh() by calling _update_root_metadata(). - This method is also called for delegated role metadata, which are - referenced by 'snapshot'. - - If the metadata needs to be updated but an update cannot be obtained, - this method will delete the file. - - Due to the way in which metadata files are updated, it is expected that - 'referenced_metadata' is not out of date and trusted. The refresh() - method updates the top-level roles in 'root -> timestamp -> snapshot -> - targets' order. For delegated metadata, the parent role is - updated before the delegated role. Taking into account that - 'referenced_metadata' is updated and verified before 'metadata_role', - this method determines if 'metadata_role' has changed by checking - the 'meta' field of the newly updated 'referenced_metadata'. - - - metadata_role: - The name of the metadata. This is a role name and should not end - in '.json'. Examples: 'root', 'targets', 'unclaimed'. - - referenced_metadata: - This is the metadata that provides the role information for - 'metadata_role'. For the top-level roles, the 'snapshot' role - is the referenced metadata for the 'root', and 'targets' roles. - The 'timestamp' metadata is always downloaded regardless. 
In - other words, it is updated by calling _update_metadata('timestamp') - and not by this method. The referenced metadata for 'snapshot' - is 'timestamp'. See refresh(). - - - tuf.exceptions.ExpiredMetadataError: - If local metadata is expired and newer metadata is not available. - - tuf.exceptions.NoWorkingMirrorError: - If 'metadata_role' could not be downloaded after determining that it - had changed. - - tuf.exceptions.RepositoryError: - If the referenced metadata is missing. - - - If it is determined that 'metadata_role' has been updated, the metadata - store (i.e., self.metadata) is updated with the new metadata and the - affected stores modified (i.e., the previous metadata store is updated). - If the metadata is 'targets' or a delegated targets role, the role - database is updated with the new information, including its delegated - roles. - - - None. - """ - - metadata_filename = metadata_role + '.json' - expected_versioninfo = None - - # Ensure the referenced metadata has been loaded. The 'root' role may be - # updated without having 'snapshot' available. - if referenced_metadata not in self.metadata['current']: - raise exceptions.RepositoryError('Cannot update' - ' ' + repr(metadata_role) + ' because ' + referenced_metadata + ' is' - ' missing.') - - # The referenced metadata has been loaded. Extract the new versioninfo for - # 'metadata_role' from it. - else: - logger.debug(repr(metadata_role) + ' referenced in ' + - repr(referenced_metadata)+ '. ' + repr(metadata_role) + - ' may be updated.') - - # Simply return if the metadata for 'metadata_role' has not been updated, - # according to the uncompressed metadata provided by the referenced - # metadata. The metadata is considered updated if its version number is - # strictly greater than its currently trusted version number. - expected_versioninfo = self.metadata['current'][referenced_metadata] \ - ['meta'][metadata_filename] - - if not self._versioninfo_has_been_updated(metadata_filename, - expected_versioninfo): - logger.info(repr(metadata_filename) + ' up-to-date.') - - # Since we have not downloaded a new version of this metadata, we should - # check to see if our local version is stale and notify the user if so. - # This raises tuf.exceptions.ExpiredMetadataError if the metadata we have - # is expired. Resolves issue #322. - self._ensure_not_expired(self.metadata['current'][metadata_role], - metadata_role) - - # TODO: If metadata role is snapshot, we should verify that snapshot's - # hash matches what's listed in timestamp.json per step 3.1 of the - # detailed workflows in the specification - - return - - logger.debug('Metadata ' + repr(metadata_filename) + ' has changed.') - - # The file lengths of metadata are unknown, only their version numbers are - # known. Set an upper limit for the length of the downloaded file for each - # expected role. Note: The Timestamp role is not updated via this - # function. - if metadata_role == 'snapshot': - upperbound_filelength = settings.DEFAULT_SNAPSHOT_REQUIRED_LENGTH - - # The metadata is considered Targets (or delegated Targets metadata). - else: - upperbound_filelength = settings.DEFAULT_TARGETS_REQUIRED_LENGTH - - try: - self._update_metadata(metadata_role, upperbound_filelength, - expected_versioninfo['version']) - - except Exception: - # The current metadata we have is not current but we couldn't get new - # metadata. We shouldn't use the old metadata anymore. 
This will get rid
-        # of in-memory knowledge of the role and delegated roles, but will
-        # leave delegated metadata files as current files on disk.
-        #
-        # TODO: Should we get rid of the delegated metadata files? We shouldn't
-        # need to, but we need to check the trust implications of the current
-        # implementation.
-        self._delete_metadata(metadata_role)
-        logger.warning('Metadata for ' + repr(metadata_role) + ' cannot'
-            ' be updated.')
-        raise
-
-      else:
-        # We need to import the delegated roles of 'metadata_role', since its
-        # list of delegations might have changed from what was previously
-        # loaded.
-        # TODO: Should we remove the keys of the delegated roles?
-        self._import_delegations(metadata_role)
-
-
-
-
-  def _versioninfo_has_been_updated(self, metadata_filename, new_versioninfo):
-    """
-    <Purpose>
-      Non-public method that determines whether the current versioninfo of
-      'metadata_filename' is less than 'new_versioninfo' (i.e., the version
-      number has been incremented). The 'new_versioninfo' argument should be
-      extracted from the latest copy of the metadata that references
-      'metadata_filename'. Example: 'root.json' would be referenced by
-      'snapshot.json'.
-
-      'new_versioninfo' should only be 'None' if this is for updating
-      'root.json' without having 'snapshot.json' available.
-
-    <Arguments>
-      metadata_filename:
-        The metadata filename for the role. For the 'root' role,
-        'metadata_filename' would be 'root.json'.
-
-      new_versioninfo:
-        A dict object representing the new file information for
-        'metadata_filename'. 'new_versioninfo' may be 'None' when
-        updating 'root' without having 'snapshot' available. This
-        dict conforms to 'tuf.formats.VERSIONINFO_SCHEMA' and has
-        the form:
-
-        {'version': 288}
-
-    <Exceptions>
-      None.
-
-    <Side Effects>
-      If there is no versioninfo currently loaded for 'metadata_filename', try
-      to load it.
-
-    <Returns>
-      Boolean. True if the versioninfo has changed, False otherwise.
-    """
-
-    # If there is no versioninfo currently stored for 'metadata_filename',
-    # try to load the file, calculate the versioninfo, and store it.
-    if metadata_filename not in self.versioninfo:
-      self._update_versioninfo(metadata_filename)
-
-    # Return true if there is no versioninfo for 'metadata_filename'.
-    # 'metadata_filename' is not in the 'self.versioninfo' store
-    # and it doesn't exist in the 'current' metadata location.
-    if self.versioninfo[metadata_filename] is None:
-      return True
-
-    current_versioninfo = self.versioninfo[metadata_filename]
-
-    logger.debug('New version for ' + repr(metadata_filename) +
-        ': ' + repr(new_versioninfo['version']) + '. Old version: ' +
-        repr(current_versioninfo['version']))
-
-    if new_versioninfo['version'] > current_versioninfo['version']:
-      return True
-
-    else:
-      return False
-
-
-
-
-  def _update_versioninfo(self, metadata_filename):
-    """
-    <Purpose>
-      Non-public method that updates the 'self.versioninfo' entry for the
-      metadata belonging to 'metadata_filename'. If the current metadata for
-      'metadata_filename' cannot be loaded, set its 'versioninfo' to 'None' to
-      signal that it is not in 'self.versioninfo' AND it also doesn't exist
-      locally.
-
-    <Arguments>
-      metadata_filename:
-        The metadata filename for the role. For the 'root' role,
-        'metadata_filename' would be 'root.json'.
-
-    <Exceptions>
-      None.
-
-    <Side Effects>
-      The version number of 'metadata_filename' is calculated and stored in its
-      corresponding entry in 'self.versioninfo'.
-
-    <Returns>
-      None.
-    """
-
-    # In case we delayed loading the metadata and didn't do it in
-    # __init__ (such as with delegated metadata), then get the version
-    # info now.
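
# --- Editor's illustrative sketch (not part of the deleted module). The
# decision made by _versioninfo_has_been_updated() reduces to a strict
# greater-than comparison of 'version' fields, with a missing local
# versioninfo always forcing an update:
def versioninfo_has_been_updated(current_versioninfo, new_versioninfo):
  if current_versioninfo is None:
    return True
  return new_versioninfo['version'] > current_versioninfo['version']

assert versioninfo_has_been_updated(None, {'version': 288})
assert versioninfo_has_been_updated({'version': 287}, {'version': 288})
assert not versioninfo_has_been_updated({'version': 288}, {'version': 288})
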
- - # 'metadata_filename' is the key from meta dictionary: build the - # corresponding local filepath like _get_local_filename() - local_filename = parse.quote(metadata_filename, "") - current_filepath = os.path.join(self.metadata_directory['current'], - local_filename) - - # If the path is invalid, simply return and leave versioninfo unset. - if not os.path.exists(current_filepath): - self.versioninfo[metadata_filename] = None - return - - # Extract the version information from the trusted snapshot role and save - # it to the 'self.versioninfo' store. - if metadata_filename == 'timestamp.json': - trusted_versioninfo = \ - self.metadata['current']['timestamp']['version'] - - # When updating snapshot.json, the client either (1) has a copy of - # snapshot.json, or (2) is in the process of obtaining it by first - # downloading timestamp.json. Note: Clients are allowed to have only - # root.json initially, and perform a refresh of top-level metadata to - # obtain the remaining roles. - elif metadata_filename == 'snapshot.json': - - # Verify the version number of the currently trusted snapshot.json in - # snapshot.json itself. Checking the version number specified in - # timestamp.json may be greater than the version specified in the - # client's copy of snapshot.json. - try: - timestamp_version_number = self.metadata['current']['snapshot']['version'] - trusted_versioninfo = formats.make_versioninfo( - timestamp_version_number) - - except KeyError: - trusted_versioninfo = \ - self.metadata['current']['timestamp']['meta']['snapshot.json'] - - else: - - try: - # The metadata file names in 'self.metadata' exclude the role - # extension. Strip the '.json' extension when checking if - # 'metadata_filename' currently exists. - targets_version_number = \ - self.metadata['current'][metadata_filename[:-len('.json')]]['version'] - trusted_versioninfo = \ - formats.make_versioninfo(targets_version_number) - - except KeyError: - trusted_versioninfo = \ - self.metadata['current']['snapshot']['meta'][metadata_filename] - - self.versioninfo[metadata_filename] = trusted_versioninfo - - - - - def _move_current_to_previous(self, metadata_role): - """ - - Non-public method that moves the current metadata file for 'metadata_role' - to the previous directory. - - - metadata_role: - The name of the metadata. This is a role name and should not end - in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'. - - - None. - - - The metadata file for 'metadata_role' is removed from 'current' - and moved to the 'previous' directory. - - - None. - """ - - # Get the 'current' and 'previous' full file paths for 'metadata_role' - metadata_filepath = self._get_local_filename(metadata_role) - previous_filepath = os.path.join(self.metadata_directory['previous'], - metadata_filepath) - current_filepath = os.path.join(self.metadata_directory['current'], - metadata_filepath) - - # Remove the previous path if it exists. - if os.path.exists(previous_filepath): - os.remove(previous_filepath) - - # Move the current path to the previous path. - if os.path.exists(current_filepath): - sslib_util.ensure_parent_dir(previous_filepath) - os.rename(current_filepath, previous_filepath) - - - - - - def _delete_metadata(self, metadata_role): - """ - - Non-public method that removes all (current) knowledge of 'metadata_role'. - The metadata belonging to 'metadata_role' is removed from the current - 'self.metadata' store and from the role database. The 'root.json' role - file is never removed. - - - metadata_role: - The name of the metadata. 
This is a role name and should not end
-        in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
-
-    <Exceptions>
-      None.
-
-    <Side Effects>
-      The role database is modified and the metadata for 'metadata_role'
-      removed from the 'self.metadata' store.
-
-    <Returns>
-      None.
-    """
-
-    # The root metadata role is never deleted without a replacement.
-    if metadata_role == 'root':
-      return
-
-    # Get rid of the current metadata file.
-    self._move_current_to_previous(metadata_role)
-
-    # Remove knowledge of the role.
-    if metadata_role in self.metadata['current']:
-      del self.metadata['current'][metadata_role]
-    roledb.remove_role(metadata_role, self.repository_name)
-
-
-
-
-  def _ensure_not_expired(self, metadata_object, metadata_rolename):
-    """
-    <Purpose>
-      Non-public method that raises an exception if the current specified
-      metadata has expired.
-
-    <Arguments>
-      metadata_object:
-        The metadata to check for expiry, a 'tuf.formats.ANYROLE_SCHEMA'
-        object.
-
-      metadata_rolename:
-        The name of the metadata. This is a role name and should not end
-        in '.json'. Examples: 'root', 'targets', 'targets/linux/x86'.
-
-    <Exceptions>
-      tuf.exceptions.ExpiredMetadataError:
-        If 'metadata_rolename' has expired.
-      securesystemslib.exceptions.FormatError:
-        If the expiration cannot be parsed correctly.
-
-    <Side Effects>
-      None.
-
-    <Returns>
-      None.
-    """
-
-    # Extract the expiration time. Convert it to a unix timestamp and compare
-    # it against the current time.time() (also in Unix/POSIX time format,
-    # although with microseconds attached.)
-    expires_datetime = formats.expiry_string_to_datetime(
-        metadata_object['expires'])
-    expires_timestamp = formats.datetime_to_unix_timestamp(expires_datetime)
-
-    current_time = int(time.time())
-    if expires_timestamp <= current_time:
-      message = 'Metadata ' + repr(metadata_rolename) + ' expired on ' + \
-          expires_datetime.ctime() + ' (UTC).'
-      raise exceptions.ExpiredMetadataError(message)
-
-
-
-
-  def all_targets(self):
-    """
-    <Purpose>
-
-      NOTE: This function is deprecated. Its behavior with regard to which
-      delegating Targets roles are trusted to determine how to validate a
-      delegated Targets role is NOT WELL DEFINED. Please transition to use of
-      get_one_valid_targetinfo()!
-
-      Get a list of the target information for all the trusted targets on the
-      repository. This list also includes all the targets of delegated roles.
-      Targets of the list returned are ordered according to the trusted order
-      of the delegated roles, where parent roles come before children. The
-      list conforms to 'tuf.formats.TARGETINFOS_SCHEMA' and has the form:
-
-      [{'filepath': 'a/b/c.txt',
-        'fileinfo': {'length': 13323,
-                     'hashes': {'sha256': dbfac345..}}
-       ...]
-
-    <Arguments>
-      None.
-
-    <Exceptions>
-      tuf.exceptions.RepositoryError:
-        If the metadata for the 'targets' role is missing from
-        the 'snapshot' metadata.
-
-      tuf.exceptions.UnknownRoleError:
-        If one of the roles could not be found in the role database.
-
-    <Side Effects>
-      The metadata for target roles is updated and stored.
-
-    <Returns>
-      A list of targets, conformant to
-      'tuf.formats.TARGETINFOS_SCHEMA'.
-    """
-
-    warnings.warn(
-        'Support for all_targets() will be removed in a future release.'
-        ' get_one_valid_targetinfo() should be used instead.',
-        DeprecationWarning)
-
-    # Load the most up-to-date targets of the 'targets' role and all
-    # delegated roles.
-    self._refresh_targets_metadata(refresh_all_delegated_roles=True)
-
-    # Fetch the targets for the 'targets' role.
-    all_targets = self._targets_of_role('targets', skip_refresh=True)
-
-    # Fetch the targets of the delegated roles. get_rolenames returns
-    # all roles available on the repository.
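
# --- Editor's illustrative sketch (not part of the deleted module). The
# expiry test in _ensure_not_expired() above boils down to comparing the
# metadata's 'expires' timestamp (ISO 8601, UTC) against current POSIX time;
# a self-contained version using only the standard library:
import calendar
import time

def is_expired(expires, now=None):
  expires_struct = time.strptime(expires, '%Y-%m-%dT%H:%M:%SZ')
  expires_timestamp = calendar.timegm(expires_struct)  # UTC -> POSIX seconds.
  current_time = int(time.time()) if now is None else now
  return expires_timestamp <= current_time

assert is_expired('2020-01-01T00:00:00Z', now=1700000000)
assert not is_expired('2030-01-01T00:00:00Z', now=1700000000)
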
- delegated_targets = [] - for role in roledb.get_rolenames(self.repository_name): - if role in roledb.TOP_LEVEL_ROLES: - continue - - else: - delegated_targets.extend(self._targets_of_role(role, skip_refresh=True)) - - all_targets.extend(delegated_targets) - - return all_targets - - - - - - def _refresh_targets_metadata(self, rolename='targets', - refresh_all_delegated_roles=False): - """ - - Non-public method that refreshes the targets metadata of 'rolename'. If - 'refresh_all_delegated_roles' is True, include all the delegations that - follow 'rolename'. The metadata for the 'targets' role is updated in - refresh() by the _update_metadata_if_changed('targets') call, not here. - Delegated roles are not loaded when the repository is first initialized. - They are loaded from disk, updated if they have changed, and stored to - the 'self.metadata' store by this method. This method is called by - get_one_valid_targetinfo(). - - - rolename: - This is a delegated role name and should not end in '.json'. Example: - 'unclaimed'. - - refresh_all_delegated_roles: - Boolean indicating if all the delegated roles available in the - repository (via snapshot.json) should be refreshed. - - - tuf.exceptions.ExpiredMetadataError: - If local metadata is expired and newer metadata is not available. - - tuf.exceptions.RepositoryError: - If the metadata file for the 'targets' role is missing from the - 'snapshot' metadata. - - - The metadata for the delegated roles are loaded and updated if they - have changed. Delegated metadata is removed from the role database if - it has expired. - - - None. - """ - - roles_to_update = [] - - if rolename + '.json' in self.metadata['current']['snapshot']['meta']: - roles_to_update.append(rolename) - - if refresh_all_delegated_roles: - - for role in self.metadata['current']['snapshot']['meta'].keys(): - # snapshot.json keeps track of root.json, targets.json, and delegated - # roles (e.g., django.json, unclaimed.json). Remove the 'targets' role - # because it gets updated when the targets.json file is updated in - # _update_metadata_if_changed('targets') and root. - if role.endswith('.json'): - role = role[:-len('.json')] - if role not in ['root', 'targets', rolename]: - roles_to_update.append(role) - - else: - continue - - # If there is nothing to refresh, we are done. - if not roles_to_update: - return - - logger.debug('Roles to update: ' + repr(roles_to_update) + '.') - - # Iterate 'roles_to_update', and load and update its metadata file if it - # has changed. - for rolename in roles_to_update: - self._load_metadata_from_file('previous', rolename) - self._load_metadata_from_file('current', rolename) - - self._update_metadata_if_changed(rolename) - - - - - - def _targets_of_role(self, rolename, targets=None, skip_refresh=False): - """ - - Non-public method that returns the target information of all the targets - of 'rolename'. The returned information is a list conformant to - 'tuf.formats.TARGETINFOS_SCHEMA', and has the form: - - [{'filepath': 'a/b/c.txt', - 'fileinfo': {'length': 13323, - 'hashes': {'sha256': dbfac345..}} - ...] - - - rolename: - This is a role name and should not end in '.json'. Examples: 'targets', - 'unclaimed'. - - targets: - A list of targets containing target information, conformant to - 'tuf.formats.TARGETINFOS_SCHEMA'. - - skip_refresh: - A boolean indicating if the target metadata for 'rolename' - should be refreshed. - - - tuf.exceptions.UnknownRoleError: - If 'rolename' is not found in the role database. 
-
-    <Side Effects>
-      The metadata for 'rolename' is refreshed if 'skip_refresh' is False.
-
-    <Returns>
-      A list of dict objects containing the target information of all the
-      targets of 'rolename'. Conformant to
-      'tuf.formats.TARGETINFOS_SCHEMA'.
-    """
-
-    if targets is None:
-      targets = []
-
-    targets_of_role = list(targets)
-    logger.debug('Getting targets of role: ' + repr(rolename) + '.')
-
-    if not roledb.role_exists(rolename, self.repository_name):
-      raise exceptions.UnknownRoleError(rolename)
-
-    # We do not need to worry about the target paths being trusted because
-    # this is enforced before any new metadata is accepted.
-    if not skip_refresh:
-      self._refresh_targets_metadata(rolename)
-
-    # Do we have metadata for 'rolename'?
-    if rolename not in self.metadata['current']:
-      logger.debug('No metadata for ' + repr(rolename) + '.'
-          ' Unable to determine targets.')
-      return []
-
-    # Get the targets specified by the role itself. Default to an empty dict
-    # so that .items() is safe if the role lists no 'targets' field.
-    for filepath, fileinfo in self.metadata['current'][rolename].get(
-        'targets', {}).items():
-      new_target = {}
-      new_target['filepath'] = filepath
-      new_target['fileinfo'] = fileinfo
-
-      targets_of_role.append(new_target)
-
-    return targets_of_role
-
-
-
-
-  def targets_of_role(self, rolename='targets'):
-    """
-    <Purpose>
-
-      NOTE: This function is deprecated. Use with rolename 'targets' is secure
-      and the behavior well-defined, but use with any delegated targets role is
-      not. Please transition use for delegated targets roles to
-      get_one_valid_targetinfo(). More information is below.
-
-      Return a list of trusted targets directly specified by 'rolename'.
-      The returned information is a list conformant to
-      'tuf.formats.TARGETINFOS_SCHEMA', and has the form:
-
-      [{'filepath': 'a/b/c.txt',
-        'fileinfo': {'length': 13323,
-                     'hashes': {'sha256': dbfac345..}}
-       ...]
-
-      The metadata of 'rolename' is updated if out of date, including the
-      metadata of its parent roles (i.e., the minimum roles needed to set the
-      chain of trust).
-
-    <Arguments>
-      rolename:
-        The name of the role whose list of targets is wanted.
-        The name of the role should start with 'targets'.
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError:
-        If 'rolename' is improperly formatted.
-
-      tuf.exceptions.RepositoryError:
-        If the metadata of 'rolename' cannot be updated.
-
-      tuf.exceptions.UnknownRoleError:
-        If 'rolename' is not found in the role database.
-
-    <Side Effects>
-      The metadata of updated delegated roles are downloaded and stored.
-
-    <Returns>
-      A list of targets, conformant to
-      'tuf.formats.TARGETINFOS_SCHEMA'.
-    """
-
-    warnings.warn(
-        'Support for targets_of_role() will be removed in a future release.'
-        ' get_one_valid_targetinfo() should be used instead.',
-        DeprecationWarning)
-
-    # Does 'rolename' have the correct format?
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    formats.RELPATH_SCHEMA.check_match(rolename)
-
-    # If we've been given a delegated targets role, we don't know how to
-    # validate it without knowing what the delegating role is -- there could
-    # be several roles that delegate to the given role. Behavior of this
-    # function for roles other than Targets is not well defined as a result.
-    # This function is deprecated, but:
-    #  - Usage of this function or a future successor makes sense when the
-    #    role of interest is Targets, since we always know exactly how to
-    #    validate Targets (We use root.).
-    #  - Until it's removed (hopefully soon), we'll try to provide what it has
-    #    always provided. To do this, we fetch and "validate" all delegated
-    #    roles listed by snapshot.
For delegated roles only, the order of the - # validation impacts the security of the validation -- the most- - # recently-validated role delegating to a role you are currently - # validating determines the expected keyids and threshold of the role - # you are currently validating. That is NOT GOOD. Again, please switch - # to get_one_valid_targetinfo, which is well-defined and secure. - if rolename != 'targets': - self._refresh_targets_metadata(refresh_all_delegated_roles=True) - - - if not roledb.role_exists(rolename, self.repository_name): - raise exceptions.UnknownRoleError(rolename) - - return self._targets_of_role(rolename, skip_refresh=True) - - - - - - def get_one_valid_targetinfo(self, target_filepath): - """ - - Return the target information for 'target_filepath', and update its - corresponding metadata, if necessary. 'target_filepath' must match - exactly as it appears in metadata, and should not contain URL encoding - escapes. - - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - - tuf.exceptions.ExpiredMetadataError: - If local metadata is expired and newer metadata is not available. - - securesystemslib.exceptions.FormatError: - If 'target_filepath' is improperly formatted. - - tuf.exceptions.UnknownTargetError: - If 'target_filepath' was not found. - - Any other unforeseen runtime exception. - - - The metadata for updated delegated roles are downloaded and stored. - - - The target information for 'target_filepath', conformant to - 'tuf.formats.TARGETINFO_SCHEMA'. - """ - - # Does 'target_filepath' have the correct format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.RELPATH_SCHEMA.check_match(target_filepath) - - target_filepath = target_filepath.replace('\\', '/') - - if target_filepath.startswith('/'): - raise exceptions.FormatError('The requested target file cannot' - ' contain a leading path separator: ' + repr(target_filepath)) - - # Get target by looking at roles in order of priority tags. - target = self._preorder_depth_first_walk(target_filepath) - - # Raise an exception if the target information could not be retrieved. - if target is None: - raise exceptions.UnknownTargetError(repr(target_filepath) + ' not' - ' found.') - - # Otherwise, return the found target. - else: - return target - - - - - - def _preorder_depth_first_walk(self, target_filepath): - """ - - Non-public method that interrogates the tree of target delegations in - order of appearance (which implicitly order trustworthiness), and returns - the matching target found in the most trusted role. - - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - - tuf.exceptions.ExpiredMetadataError: - If local metadata is expired and newer metadata is not available. - - securesystemslib.exceptions.FormatError: - If 'target_filepath' is improperly formatted. - - tuf.exceptions.RepositoryError: - If 'target_filepath' is not found. - - - The metadata for updated delegated roles are downloaded and stored. - - - The target information for 'target_filepath', conformant to - 'tuf.formats.TARGETINFO_SCHEMA'. - """ - - target = None - current_metadata = self.metadata['current'] - role_names = ['targets'] - visited_role_names = set() - number_of_delegations = settings.MAX_NUMBER_OF_DELEGATIONS - - # Ensure the client has the most up-to-date version of 'targets.json'. 
- # Raise 'tuf.exceptions.NoWorkingMirrorError' if the changed metadata - # cannot be successfully downloaded and 'tuf.exceptions.RepositoryError' if - # the referenced metadata is missing. Target methods such as this one are - # called after the top-level metadata have been refreshed (i.e., - # updater.refresh()). - self._update_metadata_if_changed('targets') - - # Preorder depth-first traversal of the graph of target delegations. - while target is None and number_of_delegations > 0 and len(role_names) > 0: - - # Pop the role name from the top of the stack. - role_name = role_names.pop(-1) - - # Skip any visited current role to prevent cycles. - if role_name in visited_role_names: - logger.debug('Skipping visited current role ' + repr(role_name)) - continue - - # The metadata for 'role_name' must be downloaded/updated before its - # targets, delegations, and child roles can be inspected. - # self.metadata['current'][role_name] is currently missing. - # _refresh_targets_metadata() does not refresh 'targets.json', it - # expects _update_metadata_if_changed() to have already refreshed it, - # which this function has checked above. - self._refresh_targets_metadata(role_name, - refresh_all_delegated_roles=False) - - role_metadata = current_metadata[role_name] - targets = role_metadata['targets'] - delegations = role_metadata.get('delegations', {}) - child_roles = delegations.get('roles', []) - target = self._get_target_from_targets_role(role_name, targets, - target_filepath) - # After preorder check, add current role to set of visited roles. - visited_role_names.add(role_name) - - # And also decrement number of visited roles. - number_of_delegations -= 1 - - if target is None: - - child_roles_to_visit = [] - # NOTE: This may be a slow operation if there are many delegated roles. - for child_role in child_roles: - child_role_name = self._visit_child_role(child_role, target_filepath) - if child_role['terminating'] and child_role_name is not None: - logger.debug('Adding child role ' + repr(child_role_name)) - logger.debug('Not backtracking to other roles.') - role_names = [] - child_roles_to_visit.append(child_role_name) - break - - elif child_role_name is None: - logger.debug('Skipping child role ' + repr(child_role_name)) - - else: - logger.debug('Adding child role ' + repr(child_role_name)) - child_roles_to_visit.append(child_role_name) - - # Push 'child_roles_to_visit' in reverse order of appearance onto - # 'role_names'. Roles are popped from the end of the 'role_names' - # list. - child_roles_to_visit.reverse() - role_names.extend(child_roles_to_visit) - - else: - logger.debug('Found target in current role ' + repr(role_name)) - - if target is None and number_of_delegations == 0 and len(role_names) > 0: - logger.debug(repr(len(role_names)) + ' roles left to visit, ' + - 'but allowed to visit at most ' + - repr(settings.MAX_NUMBER_OF_DELEGATIONS) + ' delegations.') - - return target - - - - - - def _get_target_from_targets_role(self, role_name, targets, target_filepath): - """ - - Non-public method that determines whether the targets role with the given - 'role_name' has the target with the name 'target_filepath'. - - - role_name: - The name of the targets role that we are inspecting. - - targets: - The targets of the Targets role with the name 'role_name'. - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - - None. - - - None. 
- - - The target information for 'target_filepath', conformant to - 'tuf.formats.TARGETINFO_SCHEMA'. - """ - - # Does the current role name have our target? - logger.debug('Asking role ' + repr(role_name) + ' about' - ' target ' + repr(target_filepath)) - - target = targets.get(target_filepath) - - if target: - logger.debug('Found target ' + target_filepath + ' in role ' + role_name) - return {'filepath': target_filepath, 'fileinfo': target} - - else: - logger.debug( - 'Target file ' + target_filepath + ' not found in role ' + role_name) - return None - - - - - - def _visit_child_role(self, child_role, target_filepath): - """ - - Non-public method that determines whether the given 'target_filepath' - is an allowed path of 'child_role'. - - Ensure that we explore only delegated roles trusted with the target. The - metadata for 'child_role' should have been refreshed prior to this point, - however, the paths/targets that 'child_role' signs for have not been - verified (as intended). The paths/targets that 'child_role' is allowed - to specify in its metadata depends on the delegating role, and thus is - left to the caller to verify. We verify here that 'target_filepath' - is an allowed path according to the delegated 'child_role'. - - TODO: Should the TUF spec restrict the repository to one particular - algorithm? Should we allow the repository to specify in the role - dictionary the algorithm used for these generated hashed paths? - - - child_role: - The delegation targets role object of 'child_role', containing its - paths, path_hash_prefixes, keys, and so on. - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - - None. - - - None. - - - If 'child_role' has been delegated the target with the name - 'target_filepath', then we return the role name of 'child_role'. - - Otherwise, we return None. - """ - - child_role_name = child_role['name'] - child_role_paths = child_role.get('paths') - child_role_path_hash_prefixes = child_role.get('path_hash_prefixes') - - if child_role_path_hash_prefixes is not None: - target_filepath_hash = self._get_target_hash(target_filepath) - for child_role_path_hash_prefix in child_role_path_hash_prefixes: - if target_filepath_hash.startswith(child_role_path_hash_prefix): - return child_role_name - - else: - continue - - elif child_role_paths is not None: - # Is 'child_role_name' allowed to sign for 'target_filepath'? - for child_role_path in child_role_paths: - # A child role path may be an explicit path or glob pattern (Unix - # shell-style wildcards). The child role 'child_role_name' is returned - # if 'target_filepath' is equal to or matches 'child_role_path'. - # Explicit filepaths are also considered matches. A repo maintainer - # might delegate a glob pattern with a leading path separator, while - # the client requests a matching target without a leading path - # separator - make sure to strip any leading path separators so that a - # match is made. Example: "foo.tgz" should match with "/*.tgz". 
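
# --- Editor's illustrative sketch (not part of the deleted module). On a
# POSIX system, stripping the leading separator makes 'foo.tgz' match a
# delegated glob of '/*.tgz', as described above:
import fnmatch
import os

assert fnmatch.fnmatch('foo.tgz'.lstrip(os.sep), '/*.tgz'.lstrip(os.sep))
assert not fnmatch.fnmatch('foo.txt'.lstrip(os.sep), '/*.tgz'.lstrip(os.sep))
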
- if fnmatch.fnmatch(target_filepath.lstrip(os.sep), child_role_path.lstrip(os.sep)): - logger.debug('Child role ' + repr(child_role_name) + ' is allowed to' - ' sign for ' + repr(target_filepath)) - - return child_role_name - - else: - logger.debug( - 'The given target path ' + repr(target_filepath) + ' does not' - ' match the trusted path or glob pattern: ' + repr(child_role_path)) - continue - - else: - # 'role_name' should have been validated when it was downloaded. - # The 'paths' or 'path_hash_prefixes' fields should not be missing, - # so we raise a format error here in case they are both missing. - raise sslib_exceptions.FormatError(repr(child_role_name) + ' ' - 'has neither a "paths" nor "path_hash_prefixes". At least' - ' one of these attributes must be present.') - - return None - - - - def _get_target_hash(self, target_filepath, hash_function='sha256'): - """ - - Non-public method that computes the hash of 'target_filepath'. This is - useful in conjunction with the "path_hash_prefixes" attribute in a - delegated targets role, which tells us which paths it is implicitly - responsible for. - - - target_filepath: - The path to the target file on the repository. This will be relative to - the 'targets' (or equivalent) directory on a given mirror. - - hash_function: - The algorithm used by the repository to generate the hashes of the - target filepaths. The repository may optionally organize targets into - hashed bins to ease target delegations and role metadata management. - The use of consistent hashing allows for a uniform distribution of - targets into bins. - - - None. - - - None. - - - The hash of 'target_filepath'. - """ - - # Calculate the hash of the filepath to determine which bin to find the - # target. The client currently assumes the repository (i.e., repository - # tool) uses 'hash_function' to generate hashes and UTF-8. - digest_object = sslib_hash.digest(hash_function) - encoded_target_filepath = target_filepath.encode('utf-8') - digest_object.update(encoded_target_filepath) - target_filepath_hash = digest_object.hexdigest() - - return target_filepath_hash - - - - - - def remove_obsolete_targets(self, destination_directory): - """ - - Remove any files that are in 'previous' but not 'current'. This makes it - so if you remove a file from a repository, it actually goes away. The - targets for the 'targets' role and all delegated roles are checked. - - - destination_directory: - The directory containing the target files tracked by TUF. - - - securesystemslib.exceptions.FormatError: - If 'destination_directory' is improperly formatted. - - tuf.exceptions.RepositoryError: - If an error occurred removing any files. - - - Target files are removed from disk. - - - None. - """ - - # Does 'destination_directory' have the correct format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(destination_directory) - - # Iterate the rolenames and verify whether the 'previous' directory - # contains a target no longer found in 'current'. - for role in roledb.get_rolenames(self.repository_name): - if role.startswith('targets'): - if role in self.metadata['previous'] and self.metadata['previous'][role] != None: - for target in self.metadata['previous'][role]['targets']: - if target not in self.metadata['current'][role]['targets']: - # 'target' is only in 'previous', so remove it. - logger.warning('Removing obsolete file: ' + repr(target) + '.') - - # Remove the file if it hasn't been removed already. 
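
# --- Editor's illustrative sketch (not part of the deleted module, which
# routes hashing through securesystemslib). _get_target_hash() amounts to a
# SHA-256 hex digest of the UTF-8 encoded path; a path_hash_prefixes check is
# then a simple startswith() test against that digest:
import hashlib

def get_target_hash(target_filepath):
  return hashlib.sha256(target_filepath.encode('utf-8')).hexdigest()

# A delegation listing path_hash_prefixes ['8f'] is responsible for every
# target whose path hash begins with '8f'.
print(get_target_hash('a/b/c.txt').startswith('8f'))
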
-            destination = \
-                os.path.join(destination_directory, target.lstrip(os.sep))
-            try:
-              os.remove(destination)
-
-            except OSError as e:
-              # If 'destination' was already removed, just log it.
-              if e.errno == errno.ENOENT:
-                logger.info('File ' + repr(destination) + ' was already'
-                    ' removed.')
-
-              else:
-                logger.warning('Failed to remove obsolete target: ' + str(e))
-
-          else:
-            logger.debug('Skipping: ' + repr(target) + '. It is still'
-                ' a current target.')
-      else:
-        logger.debug('Skipping: ' + repr(role) + '. Not in the previous'
-            ' metadata')
-
-
-
-
-  def updated_targets(self, targets, destination_directory):
-    """
-    <Purpose>
-      Checks files in the provided directory against the provided file
-      metadata.
-
-      Filters the provided target info, returning a subset: only the metadata
-      for targets for which the target file either does not exist in the
-      provided directory, or for which the target file in the provided
-      directory does not match the provided metadata.
-
-      A principal use of this function is to determine which target files need
-      to be downloaded. If the caller first uses get_one_valid_targetinfo()
-      calls to obtain up-to-date, valid metadata for targets, the caller can
-      then call updated_targets() to determine if that metadata does not match
-      what exists already on disk (in the provided directory). The returned
-      values can then be used in download_file() calls to update the files
-      that didn't exist or didn't match.
-
-      The returned information is a list conformant to
-      'tuf.formats.TARGETINFOS_SCHEMA' and has the form:
-
-      [{'filepath': 'a/b/c.txt',
-        'fileinfo': {'length': 13323,
-                     'hashes': {'sha256': dbfac345..}}
-       ...]
-
-    <Arguments>
-      targets:
-        Metadata about the expected state of target files, against which local
-        files will be checked. This should be a list of target info
-        dictionaries; i.e. 'targets' must be conformant to
-        tuf.formats.TARGETINFOS_SCHEMA.
-
-      destination_directory:
-        The directory containing the target files.
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError:
-        If the arguments are improperly formatted.
-
-    <Side Effects>
-      The files in 'targets' are read and their hashes computed.
-
-    <Returns>
-      A list of target info dictionaries. The list conforms to
-      'tuf.formats.TARGETINFOS_SCHEMA'.
-      This is a strict subset of the argument 'targets'.
-    """
-
-    # Do the arguments have the correct format?
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    formats.TARGETINFOS_SCHEMA.check_match(targets)
-    sslib_formats.PATH_SCHEMA.check_match(destination_directory)
-
-    # Keep track of the target objects and filepaths of updated targets.
-    # Return 'updated_targets' and use 'updated_targetpaths' to avoid
-    # duplicates.
-    updated_targets = []
-    updated_targetpaths = []
-
-    for target in targets:
-      # Prepend 'destination_directory' to the target's relative filepath (as
-      # stored in metadata.) Verify the hash of 'target_filepath' against
-      # each hash listed for its fileinfo. Note: join() discards
-      # 'destination_directory' if 'filepath' contains a leading path separator
-      # (i.e., is treated as an absolute path).
-      filepath = target['filepath']
-      if filepath[0] == '/':
-        filepath = filepath[1:]
-      target_filepath = os.path.join(destination_directory, filepath)
-
-      if target_filepath in updated_targetpaths:
-        continue
-
-      # Try each of the algorithm/digest combos for a mismatch. We break
-      # as soon as we find a mismatch.
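
# --- Editor's illustrative sketch (not part of the deleted module, which
# uses securesystemslib's digest helpers). The per-target check that follows
# reduces to: a target is 'updated' if the local copy is missing or any
# trusted digest disagrees with the local file's digest:
import hashlib
import os

def target_needs_update(target_filepath, trusted_hashes):
  if not os.path.exists(target_filepath):
    return True
  with open(target_filepath, 'rb') as file_object:
    data = file_object.read()
  for algorithm, digest in trusted_hashes.items():
    if hashlib.new(algorithm, data).hexdigest() != digest:
      return True
  return False
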
-      for algorithm, digest in target['fileinfo']['hashes'].items():
-        digest_object = None
-        try:
-          digest_object = sslib_hash.digest_filename(target_filepath,
-              algorithm=algorithm)
-
-        # This exception would occur if the target does not exist locally.
-        except sslib_exceptions.StorageError:
-          updated_targets.append(target)
-          updated_targetpaths.append(target_filepath)
-          break
-
-        # The file does exist locally, check if its hash differs.
-        if digest_object.hexdigest() != digest:
-          updated_targets.append(target)
-          updated_targetpaths.append(target_filepath)
-          break
-
-    return updated_targets
-
-
-
-
-  def download_target(self, target, destination_directory,
-      prefix_filename_with_hash=True):
-    """
-    <Purpose>
-      Download 'target' and verify it is trusted.
-
-      This will only store the file at 'destination_directory' if the
-      downloaded file matches the description of the file in the trusted
-      metadata.
-
-    <Arguments>
-      target:
-        The target to be downloaded. Conformant to
-        'tuf.formats.TARGETINFO_SCHEMA'.
-
-      destination_directory:
-        The directory to save the downloaded target file.
-
-      prefix_filename_with_hash:
-        Whether to prefix the targets file names with their hash when using
-        consistent snapshot.
-        This should be set to False when the served target filenames are not
-        prefixed with hashes (in this case the server uses other means
-        to ensure snapshot consistency).
-        Default is True.
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError:
-        If 'target' is not properly formatted.
-
-      tuf.exceptions.NoWorkingMirrorError:
-        If a target could not be downloaded from any of the mirrors.
-
-      Although expected to be rare, there might be OSError exceptions (except
-      errno.EEXIST) raised when creating the destination directory (if it
-      doesn't exist).
-
-    <Side Effects>
-      A target file is saved to the local system.
-
-    <Returns>
-      None.
-    """
-
-    # Do the arguments have the correct format?
-    # This check ensures the arguments have the appropriate
-    # number of objects and object types, and that all dict
-    # keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
-    formats.TARGETINFO_SCHEMA.check_match(target)
-    sslib_formats.PATH_SCHEMA.check_match(destination_directory)
-
-    # Extract the target file information.
-    target_filepath = target['filepath']
-    trusted_length = target['fileinfo']['length']
-    trusted_hashes = target['fileinfo']['hashes']
-
-    # Build the absolute 'destination' file path.
-    # Note: join() discards 'destination_directory' if 'target_path' contains
-    # a leading path separator (i.e., is treated as an absolute path).
-    destination = os.path.join(destination_directory,
-        target_filepath.lstrip(os.sep))
-    destination = os.path.abspath(destination)
-    target_dirpath = os.path.dirname(destination)
-
-    # When attempting to create the leaf directory of 'target_dirpath', ignore
-    # any exceptions raised if the root directory already exists. All other
-    # exceptions potentially thrown by os.makedirs() are re-raised.
-    # Note: os.makedirs can raise OSError if the leaf directory already exists
-    # or cannot be created.
-    try:
-      os.makedirs(target_dirpath)
-
-    except OSError as e:
-      if e.errno == errno.EEXIST:
-        pass
-
-      else:
-        raise
-
-    # '_get_target_file()' checks every mirror and returns the first target
-    # that passes verification.
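
# --- Editor's illustrative sketch (hypothetical paths; not part of the
# deleted module). The lstrip(os.sep) above prevents an 'absolute' target
# path from escaping the destination directory, because os.path.join()
# discards everything before an absolute component:
import os

destination_directory = '/tmp/client'  # hypothetical
target_filepath = '/a/b/c.txt'
destination = os.path.join(
    destination_directory, target_filepath.lstrip(os.sep))
print(os.path.abspath(destination))  # '/tmp/client/a/b/c.txt' on POSIX
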
- target_file_object = self._get_target_file(target_filepath, trusted_length, - trusted_hashes, prefix_filename_with_hash) - - sslib_util.persist_temp_file(target_file_object, destination) diff --git a/tuf/developer_tool.py b/tuf/developer_tool.py deleted file mode 100755 index 82d936c072..0000000000 --- a/tuf/developer_tool.py +++ /dev/null @@ -1,1023 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - developer_tool.py - - - Santiago Torres - Zane Fisher - - Based on the work done for 'repository_tool.py' by Vladimir Diaz. - - - January 22, 2014. - - - See LICENCE-MIT OR LICENCE for licensing information. - - - See 'tuf/README-developer-tools.md' for a complete guide on using - 'developer_tool.py'. -""" - -import os -import errno -import logging -import shutil -import tempfile -import json - - -import securesystemslib # pylint: disable=unused-import -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import storage as sslib_storage -from securesystemslib import util as sslib_util - -from tuf import exceptions -from tuf import formats -from tuf import keydb -from tuf import log # pylint: disable=unused-import -from tuf import repository_lib as repo_lib -from tuf import roledb -from tuf import sig - -from tuf.repository_tool import Targets -from tuf.repository_lib import _check_role_keys -from tuf.repository_lib import _metadata_is_partially_loaded - - -# Copy API -# pylint: disable=unused-import - -# Copy generic repository API functions to be used via `developer_tool` -from tuf.repository_lib import ( - generate_targets_metadata, - create_tuf_client_directory, - disable_console_log_messages) - -# Copy key-related API functions to be used via `developer_tool` -from tuf.repository_lib import ( - import_rsa_privatekey_from_file) - -from securesystemslib.keys import ( - format_keyval_to_metadata, - format_metadata_to_key) - -from securesystemslib.interface import ( - generate_and_write_rsa_keypair, - generate_and_write_rsa_keypair_with_prompt, - generate_and_write_unencrypted_rsa_keypair, - generate_and_write_ecdsa_keypair, - generate_and_write_ecdsa_keypair_with_prompt, - generate_and_write_unencrypted_ecdsa_keypair, - generate_and_write_ed25519_keypair, - generate_and_write_ed25519_keypair_with_prompt, - generate_and_write_unencrypted_ed25519_keypair, - import_rsa_publickey_from_file, - import_ed25519_publickey_from_file, - import_ed25519_privatekey_from_file) - - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -# The extension of TUF metadata. -from tuf.repository_lib import METADATA_EXTENSION as METADATA_EXTENSION - -# Project configuration filename. This file is intended to hold all of the -# supporting information about the project that's not contained in a usual -# TUF metadata file. 'project.cfg' consists of the following fields: -# -# targets_location: the location of the targets folder. -# -# prefix: the directory location to prepend to the metadata so it -# matches the metadata signed in the repository. -# -# metadata_location: the location of the metadata files. -# -# threshold: the threshold for this project object, it is fixed to -# one in the current version. -# -# public_keys: a list of the public keys used to verify the metadata -# in this project. 
-# -# layout_type: a field describing the directory layout: -# -# repo-like: matches the layout of the repository tool. -# the targets and metadata folders are -# located under a common directory for the -# project. -# -# flat: the targets directory and the -# metadata directory are located in different -# paths. -# -# project_name: The name of the current project, this value is used to -# match the resulting filename with the one in upstream. -PROJECT_FILENAME = 'project.cfg' - -# The targets and metadata directory names. Metadata files are written -# to the staged metadata directory instead of the "live" one. -from tuf.repository_tool import METADATA_DIRECTORY_NAME -from tuf.repository_tool import TARGETS_DIRECTORY_NAME - - -class Project(Targets): - """ - - Simplify the publishing process of third-party projects by handling all of - the bookkeeping, signature handling, and integrity checks of delegated TUF - metadata. 'repository_tool.py' is responsible for publishing and - maintaining metadata of the top-level roles, and 'developer_tool.py' is - used by projects that have been delegated responsibility for a delegated - projects role. Metadata created by this module may then be added to other - metadata available in a TUF repository. - - Project() is the representation of a project's metadata file(s), with the - ability to modify this data in an OOP manner. Project owners do not have to - manually verify that metadata files are properly formatted or that they - contain valid data. - - - project_name: - The name of the metadata file as it should be named in the upstream - repository. - - metadata_directory: - The metadata sub-directory contains the metadata file(s) of this project, - including any of its delegated roles. - - targets_directory: - The targets sub-directory contains the project's target files that are - downloaded by clients and are referenced in its metadata. The hashes and - file lengths are listed in Metadata files so that they are securely - downloaded. Metadata files are similarly referenced in the top-level - metadata. - - file_prefix: - The path string that will be prepended to the generated metadata - (e.g., targets/foo -> targets/prefix/foo) so that it matches the actual - targets location in the upstream repository. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - - Creates a project Targets role object, with the same object attributes of - the top-level targets role. - - - None. - """ - - def __init__(self, project_name, metadata_directory, targets_directory, - file_prefix, repository_name='default'): - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly - # formatted. 
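
# --- Editor's illustrative sketch (not part of the deleted module). The
# schema checks below raise securesystemslib.exceptions.FormatError on a
# mismatch and return None on success, which is why they are called bare:
from securesystemslib import exceptions as sslib_exceptions
from securesystemslib import formats as sslib_formats

try:
  sslib_formats.PATH_SCHEMA.check_match(42)  # Not a string: rejected.
except sslib_exceptions.FormatError as error:
  print('rejected:', error)
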
-    sslib_formats.NAME_SCHEMA.check_match(project_name)
-    sslib_formats.PATH_SCHEMA.check_match(metadata_directory)
-    sslib_formats.PATH_SCHEMA.check_match(targets_directory)
-    sslib_formats.ANY_STRING_SCHEMA.check_match(file_prefix)
-    sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-    self.metadata_directory = metadata_directory
-    self.targets_directory = targets_directory
-    self.project_name = project_name
-    self.prefix = file_prefix
-    self.repository_name = repository_name
-
-    # Layout type defaults to "flat" unless explicitly specified in
-    # create_new_project().
-    self.layout_type = 'flat'
-
-    # Set the top-level Targets object. Set the rolename to be the project's
-    # name.
-    super(Project, self).__init__(self.targets_directory, project_name)
-
-
-
-
-  def write(self, write_partial=False):
-    """
-    <Purpose>
-      Write all the JSON Metadata objects to their corresponding files.
-      write() raises an exception if any of the role metadata to be written to
-      disk is invalid, such as an insufficient threshold of signatures,
-      missing private keys, etc.
-
-    <Arguments>
-      write_partial:
-        A boolean indicating whether partial metadata should be written to
-        disk. Partial metadata may be written to allow multiple maintainers
-        to independently sign and update role metadata. write() raises an
-        exception if a metadata role cannot be written due to not having
-        enough signatures.
-
-    <Exceptions>
-      securesystemslib.exceptions.Error, if any of the project roles do not
-      have a minimum threshold of signatures.
-
-    <Side Effects>
-      Creates metadata files in the project's metadata directory.
-
-    <Returns>
-      None.
-    """
-
-    # Does 'write_partial' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if any are improperly
-    # formatted.
-    sslib_formats.BOOLEAN_SCHEMA.check_match(write_partial)
-
-    # At this point the keydb and roledb stores must be fully
-    # populated, otherwise write() throws a 'tuf.Repository' exception if
-    # any of the project roles are missing signatures, keys, etc.
-
-    # Write the metadata files of all the delegated roles of the project.
-    delegated_rolenames = roledb.get_delegated_rolenames(self.project_name,
-        self.repository_name)
-
-    for delegated_rolename in delegated_rolenames:
-      delegated_filename = os.path.join(self.metadata_directory,
-          delegated_rolename + METADATA_EXTENSION)
-
-      # Ensure the parent directories of 'delegated_filename' exist, otherwise
-      # an IO exception is raised if 'delegated_filename' is written to a
-      # sub-directory.
-      sslib_util.ensure_parent_dir(delegated_filename)
-
-      _generate_and_write_metadata(delegated_rolename, delegated_filename,
-          write_partial, self.targets_directory, prefix=self.prefix,
-          repository_name=self.repository_name)
-
-
-    # Generate the 'project_name' metadata file.
-    targets_filename = self.project_name + METADATA_EXTENSION
-    targets_filename = os.path.join(self.metadata_directory, targets_filename)
-    junk, targets_filename = _generate_and_write_metadata(self.project_name,
-        targets_filename, write_partial, self.targets_directory,
-        prefix=self.prefix, repository_name=self.repository_name)
-
-    # Save configuration information that is not stored in the project's
-    # metadata.
-    _save_project_configuration(self.metadata_directory,
-        self.targets_directory, self.keys, self.prefix, self.threshold,
-        self.layout_type, self.project_name)
-
-
-
-
-  def add_verification_key(self, key, expires=None):
-    """
-    <Purpose>
-      Functions as a thin wrapper around the project's Targets method of the
-      same name. This wrapper exists only for usability purposes.
-
-    <Arguments>
-      key:
-        The role key to be added, conformant to
-        'securesystemslib.formats.ANYKEY_SCHEMA'. Adding a public key to a
-        role means that its corresponding private key must generate and add
-        its signature to the role.
-
-    <Exceptions>
-      securesystemslib.exceptions.FormatError, if the 'key' argument is
-      improperly formatted.
-
-      securesystemslib.exceptions.Error, if the project already contains a
-      key.
-
-    <Side Effects>
-      The role's entries in 'keydb' and 'roledb' are updated.
-
-    <Returns>
-      None.
-    """
-
-    # Verify that this role does not already contain a key. The parent project
-    # role is restricted to one key. Any of its delegated roles may have
-    # more than one key.
-    # TODO: Add condition check for the requirement stated above.
-    if len(self.keys) > 0:
-      raise sslib_exceptions.Error("This project already contains a key.")
-
-    super(Project, self).add_verification_key(key, expires)
-
-
-
-
-  def status(self):
-    """
-    <Purpose>
-      Determine the status of the project, including its delegated roles.
-      status() checks whether each role provides sufficient public keys and
-      signatures, and whether a valid metadata file would be generated if
-      write() were called. Metadata files are temporarily written to check
-      that proper metadata files are written, where file hashes and lengths
-      are calculated and referenced by the project. status() does not do a
-      simple check for the number of threshold keys and signatures.
-
-    <Arguments>
-      None.
-
-    <Exceptions>
-      securesystemslib.exceptions.Error, if the project, or any of its
-      delegated roles, do not have a minimum threshold of signatures.
-
-    <Side Effects>
-      Generates and writes temporary metadata files.
-
-    <Returns>
-      None.
-    """
-
-    temp_project_directory = None
-
-    try:
-      temp_project_directory = tempfile.mkdtemp()
-
-      metadata_directory = os.path.join(temp_project_directory, 'metadata')
-      targets_directory = self.targets_directory
-
-      os.makedirs(metadata_directory)
-
-      # TODO: We should do the schema check.
-      filenames = {}
-      filenames['targets'] = os.path.join(metadata_directory,
-          self.project_name)
-
-      # Delegated roles.
- delegated_roles = roledb.get_delegated_rolenames(self.project_name, - self.repository_name) - insufficient_keys = [] - insufficient_signatures = [] - - for delegated_role in delegated_roles: - try: - _check_role_keys(delegated_role, self.repository_name) - - except exceptions.InsufficientKeysError: - insufficient_keys.append(delegated_role) - continue - - try: - signable = _generate_and_write_metadata(delegated_role, - filenames['targets'], False, targets_directory, False, - repository_name=self.repository_name) - self._log_status(delegated_role, signable[0], self.repository_name) - - except sslib_exceptions.Error: - insufficient_signatures.append(delegated_role) - - if len(insufficient_keys): - message = 'Delegated roles with insufficient keys: ' +\ - repr(insufficient_keys) - logger.info(message) - return - - if len(insufficient_signatures): - message = 'Delegated roles with insufficient signatures: ' +\ - repr(insufficient_signatures) - logger.info(message) - return - - # Targets role. - try: - _check_role_keys(self.rolename, self.repository_name) - - except exceptions.InsufficientKeysError as e: - logger.info(str(e)) - return - - try: - signable, junk = _generate_and_write_metadata(self.project_name, - filenames['targets'], False, targets_directory, metadata_directory, - self.repository_name) - self._log_status(self.project_name, signable, self.repository_name) - - except exceptions.UnsignedMetadataError as e: - # This error is raised if the metadata has insufficient signatures to - # meet the threshold. - self._log_status(self.project_name, e.signable, self.repository_name) - return - - finally: - shutil.rmtree(temp_project_directory, ignore_errors=True) - - - - - - def _log_status(self, rolename, signable, repository_name): - """ - Non-public function prints the number of (good/threshold) signatures of - 'rolename'. - """ - - status = sig.get_signature_status(signable, rolename, repository_name) - - message = repr(rolename) + ' role contains ' +\ - repr(len(status['good_sigs'])) + ' / ' + repr(status['threshold']) +\ - ' signatures.' - logger.info(message) - - - - - -def _generate_and_write_metadata(rolename, metadata_filename, write_partial, - targets_directory, prefix='', repository_name='default'): - """ - Non-public function that can generate and write the metadata of the - specified 'rolename'. It also increments version numbers if: - - 1. write_partial==True and the metadata is the first to be written. - - 2. write_partial=False (i.e., write()), the metadata was not loaded as - partially written, and a write_partial is not needed. - """ - - metadata = None - - # Retrieve the roleinfo of 'rolename' to extract the needed metadata - # attributes, such as version number, expiration, etc. - roleinfo = roledb.get_roleinfo(rolename, repository_name) - - metadata = generate_targets_metadata(targets_directory, roleinfo['paths'], - roleinfo['version'], roleinfo['expires'], roleinfo['delegations'], - False) - - # Prepend the prefix to the project's filepath to avoid signature errors in - # upstream. 
-  for element in list(metadata['targets']):
-    junk, relative_target = os.path.split(element)
-    prefixed_path = os.path.join(prefix, relative_target)
-    metadata['targets'][prefixed_path] = metadata['targets'][element]
-    if prefix != '':
-      del metadata['targets'][element]
-
-  signable = repo_lib.sign_metadata(metadata, roleinfo['signing_keyids'],
-      metadata_filename, repository_name)
-
-  # Check if the version number of 'rolename' may be automatically
-  # incremented, depending on whether partial metadata is loaded or whether
-  # the metadata is written with write() / write_partial().
-  # Increment the version number if this is the first partial write.
-  if write_partial:
-    temp_signable = repo_lib.sign_metadata(metadata, [], metadata_filename,
-        repository_name)
-    temp_signable['signatures'].extend(roleinfo['signatures'])
-    status = sig.get_signature_status(temp_signable, rolename,
-        repository_name)
-    if len(status['good_sigs']) == 0:
-      metadata['version'] = metadata['version'] + 1
-      signable = repo_lib.sign_metadata(metadata, roleinfo['signing_keyids'],
-          metadata_filename, repository_name)
-
-  # non-partial write()
-  else:
-    if sig.verify(signable, rolename, repository_name):
-      metadata['version'] = metadata['version'] + 1
-      signable = repo_lib.sign_metadata(metadata, roleinfo['signing_keyids'],
-          metadata_filename, repository_name)
-
-  # Write the metadata to file if it contains a threshold of signatures.
-  signable['signatures'].extend(roleinfo['signatures'])
-
-  if sig.verify(signable, rolename, repository_name) or write_partial:
-    repo_lib._remove_invalid_and_duplicate_signatures(signable,
-        repository_name)
-    storage_backend = sslib_storage.FilesystemBackend()
-    filename = repo_lib.write_metadata_file(signable, metadata_filename,
-        metadata['version'], False, storage_backend)
-
-  # 'signable' contains an invalid threshold of signatures.
-  else:
-    message = 'Not enough signatures for ' + repr(metadata_filename)
-    raise sslib_exceptions.Error(message, signable)
-
-  return signable, filename
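To make the prefix handling concrete, here is a standalone sketch of the same two transformations: the re-keying performed by the loop above before signing, and the inverse stripping done by _strip_prefix_from_targets_metadata() further down. The target path and fileinfo are hypothetical:

    import os

    prefix = 'targets/unclaimed'
    targets = {'django/setup.py': {'length': 1024, 'hashes': {'sha256': 'abcd'}}}

    # Prepend: keep only the basename and re-root it under the upstream prefix.
    for element in list(targets):
        _, relative_target = os.path.split(element)
        targets[os.path.join(prefix, relative_target)] = targets[element]
        if prefix != '':
            del targets[element]
    print(list(targets))  # ['targets/unclaimed/setup.py'] (on POSIX)

    # Strip: os.path.relpath() removes the prefix again for local use.
    unprefixed = {os.path.relpath(path, prefix): info
                  for path, info in targets.items()}
    print(list(unprefixed))  # ['setup.py']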
-
-
-
-
-def create_new_project(project_name, metadata_directory,
-    location_in_repository='', targets_directory=None, key=None,
-    repository_name='default'):
-  """
-
-    Create a new project object, instantiate barebones metadata for the
-    targets, and return a blank project object.  On disk,
-    create_new_project() only creates the directories needed to hold the
-    metadata and targets files.  The returned project object can be directly
-    modified to meet the designer's criteria and then written using the
-    method project.write().
-
-    The provided project name is the one added to the resulting metadata
-    file, i.e., the name the project should have upstream.
-
-
-    project_name:
-      The name of the project as it should be called upstream.  For example,
-      targets/unclaimed/django should have its project_name set to "django".
-
-    metadata_directory:
-      The directory that will eventually hold the metadata and target files
-      of the project.
-
-    location_in_repository:
-      An optional argument holding the "prefix", i.e., the expected location
-      of the project files in the "upstream" repository.  This value is only
-      used to sign metadata in a way that matches the future location of the
-      files.  For example, targets/unclaimed/django should have its
-      location_in_repository set to "targets/unclaimed".
-
-    targets_directory:
-      An optional argument pointing the targets directory somewhere other
-      than the metadata directory if, for example, a project structure
-      already exists and the user does not want to move it.
-
-    key:
-      The public key to verify the project's metadata.  Projects can only
-      handle one key with a threshold of one.  If a project were to modify
-      its key, the old key should be removed and the new one added.
-
-    repository_name:
-      The name of the repository.  If not supplied, 'rolename' is added to
-      the 'default' repository.
-
-
-    securesystemslib.exceptions.FormatError, if the arguments are improperly
-    formatted or if the public key is not a valid one (when it is not None).
-
-    OSError, if the filepaths provided do not have write permissions.
-
-
-    The 'metadata_directory' and 'targets_directory' directories are created
-    if they do not exist.
-
-
-    A 'tuf.developer_tool.Project' object.
-  """
-
-  # Does 'metadata_directory' have the correct format?
-  # Ensure the arguments have the appropriate number of objects and object
-  # types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.PATH_SCHEMA.check_match(metadata_directory)
-
-  # Do the same for the location in the repo and the project name; we must
-  # ensure they are valid pathnames.
-  sslib_formats.NAME_SCHEMA.check_match(project_name)
-  sslib_formats.ANY_STRING_SCHEMA.check_match(location_in_repository)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # Do the same for the targets directory, but first determine the layout
-  # the user needs.  With the "flat" layout the project.cfg file is saved in
-  # the metadata directory itself; with the "repo-like" layout a new
-  # metadata folder is created inside the tree, to separate targets and
-  # metadata, and the project.cfg file is saved one level above it.
-  layout_type = 'flat'
-  if targets_directory is None:
-    targets_directory = os.path.join(metadata_directory,
-        TARGETS_DIRECTORY_NAME)
-    metadata_directory = \
-        os.path.join(metadata_directory, METADATA_DIRECTORY_NAME)
-    layout_type = 'repo-like'
-
-  if targets_directory is not None:
-    sslib_formats.PATH_SCHEMA.check_match(targets_directory)
-
-  if key is not None:
-    sslib_formats.KEY_SCHEMA.check_match(key)
-
-  # Set the metadata and targets directories.  These directories are created
-  # if they do not exist.
-  metadata_directory = os.path.abspath(metadata_directory)
-  targets_directory = os.path.abspath(targets_directory)
-
-  # Try to create the metadata directory that will hold all of the metadata
-  # files, such as 'root.txt' and 'release.txt'.
-  try:
-    message = 'Creating ' + repr(metadata_directory)
-    logger.info(message)
-    os.makedirs(metadata_directory)
-
-  # 'OSError' raised if the leaf directory already exists or cannot be
-  # created.  Check for the case where 'repository_directory' has already
-  # been created.
-  except OSError as e:
-    if e.errno == errno.EEXIST:
-      # Should check if we have write permissions here.
-      pass
-
-    # Testing of non-errno.EEXIST exceptions has been verified on all
-    # supported OSs.  An unexpected exception (the '/' directory exists,
-    # rather than a disallowed path) is possible on Travis, so the
-    # '#pragma: no branch' below is included to prevent coverage failure.
-    else: #pragma: no branch
-      raise
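Before the targets directory is created below, it may help to see the two layouts from the caller's side. A sketch against the legacy API, with hypothetical directory names:

    from tuf.developer_tool import create_new_project

    # "repo-like": metadata/ and targets/ are created under 'demo-project',
    # and project.cfg is written next to them.
    repo_like = create_new_project('django', 'demo-project',
        location_in_repository='targets/unclaimed')

    # "flat": 'demo-metadata' holds the metadata itself, the targets
    # directory is supplied explicitly, and project.cfg is written into the
    # metadata directory.
    flat = create_new_project('django', 'demo-metadata',
        location_in_repository='targets/unclaimed',
        targets_directory='demo-targets')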
-
-  # Try to create the targets directory that will hold all of the target
-  # files.
-  try:
-    message = 'Creating ' + repr(targets_directory)
-    logger.info(message)
-    os.mkdir(targets_directory)
-
-  except OSError as e:
-    if e.errno == errno.EEXIST:
-      pass
-
-    else:
-      raise
-
-  # Create the bare bones project object, where the project role contains
-  # default values (e.g., threshold of 1, expires 1 year into the future,
-  # etc.)
-  project = Project(project_name, metadata_directory, targets_directory,
-      location_in_repository, repository_name)
-
-  # Add 'key' to the project.
-  # TODO: Add check for expected number of keys for the project (must be 1)
-  # and its delegated roles (may be greater than one.)
-  if key is not None:
-    project.add_verification_key(key)
-
-  # Save the layout information.
-  project.layout_type = layout_type
-
-  return project
-
-
-
-
-
-
-def _save_project_configuration(metadata_directory, targets_directory,
-    public_keys, prefix, threshold, layout_type, project_name):
-  """
-
-    Persist the project's information to a file.  The saved project
-    information can later be loaded with Project.load_project().
-
-
-    metadata_directory:
-      Where the project's metadata is located.
-
-    targets_directory:
-      The location of the target files for this project.
-
-    public_keys:
-      A list containing the public keys for the project role.
-
-    prefix:
-      The project's prefix (if any).
-
-    threshold:
-      The threshold value for the project role.
-
-    layout_type:
-      The layout type being used by the project: "flat" stands for separated
-      targets and metadata directories, while "repo-like" emulates the
-      layout used by the repository tools.
-
-    project_name:
-      The name given to the project.  This sets the metadata filename so it
-      matches the one stored upstream.
-
-
-    securesystemslib.exceptions.FormatError, if any of the arguments are
-    malformed.
-
-    OSError, if the metadata_directory/project.cfg file exists and is not
-    writable.
-
-
-    A 'project.cfg' configuration file is created or overwritten.
-
-
-    None.
-  """
-
-  # Schema check for the arguments.
-  sslib_formats.PATH_SCHEMA.check_match(metadata_directory)
-  sslib_formats.PATH_SCHEMA.check_match(prefix)
-  sslib_formats.PATH_SCHEMA.check_match(targets_directory)
-  formats.RELPATH_SCHEMA.check_match(project_name)
-
-  cfg_file_directory = metadata_directory
-
-  # If the layout type is 'repo-like', the .cfg file should be saved one
-  # directory above the metadata directory.
-  if layout_type == 'repo-like':
-    cfg_file_directory = os.path.dirname(metadata_directory)
-    junk, targets_directory = os.path.split(targets_directory)
-
-    junk, metadata_directory = os.path.split(metadata_directory)
-
-  # Build the path of the configuration file.
-  project_filename = os.path.join(cfg_file_directory, PROJECT_FILENAME)
-
-  # Build the fields of the configuration file.
-  project_config = {}
-  project_config['prefix'] = prefix
-  project_config['public_keys'] = {}
-  project_config['metadata_location'] = metadata_directory
-  project_config['targets_location'] = targets_directory
-  project_config['threshold'] = threshold
-  project_config['layout_type'] = layout_type
-  project_config['project_name'] = project_name
-
-  # Build a dictionary containing the actual keys.
-  for key in public_keys:
-    key_info = keydb.get_key(key)
-    key_metadata = format_keyval_to_metadata(key_info['keytype'],
-        key_info['scheme'], key_info['keyval'])
-    project_config['public_keys'][key] = key_metadata
-
-  # Save the actual file.
-  with open(project_filename, 'wt', encoding='utf8') as fp:
-    json.dump(project_config, fp)
-
-
-
-
-
-def load_project(project_directory, prefix='', new_targets_location=None,
-    repository_name='default'):
-  """
-
-    Return a Project object initialized with the contents of the metadata
-    files loaded from 'project_directory'.
-
-
-    project_directory:
-      The path to the project's metadata and configuration file.
-
-    prefix:
-      The new prefix for the metadata, if any.  The project is loaded using
-      the prefix saved in its configuration file, and this new prefix
-      replaces it once loading is complete.
-
-    new_targets_location:
-      For flat project configurations, the project owner might want to
-      reload the project with a new location for the target files.  This
-      overwrites the previous path used to search for the target files.
-
-    repository_name:
-      The name of the repository.  If not supplied, 'rolename' is added to
-      the 'default' repository.
-
-
-    securesystemslib.exceptions.FormatError, if 'project_directory' or any
-    of the metadata files are improperly formatted.
-
-
-    All the metadata files found in the project are loaded and their
-    contents stored in a libtuf.Repository object.
-
-
-    A tuf.developer_tool.Project object.
-  """
-
-  # Does 'project_directory' have the correct format?
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.PATH_SCHEMA.check_match(project_directory)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # Do the same for the prefix.
-  sslib_formats.ANY_STRING_SCHEMA.check_match(prefix)
-
-  # Clear the role and key databases since we are loading in a new project.
-  roledb.clear_roledb(clear_all=True)
-  keydb.clear_keydb(clear_all=True)
-
-  # Locate metadata filepaths and targets filepath.
-  project_directory = os.path.abspath(project_directory)
-
-  # Load the cfg file and the project.
-  config_filename = os.path.join(project_directory, PROJECT_FILENAME)
-
-  project_configuration = sslib_util.load_json_file(config_filename)
-  formats.PROJECT_CFG_SCHEMA.check_match(project_configuration)
-
-  targets_directory = os.path.join(project_directory,
-      project_configuration['targets_location'])
-
-  if project_configuration['layout_type'] == 'flat':
-    project_directory, junk = os.path.split(project_directory)
-    targets_directory = project_configuration['targets_location']
-
-    if new_targets_location is not None:
-      targets_directory = new_targets_location
-
-  metadata_directory = os.path.join(project_directory,
-      project_configuration['metadata_location'])
-
-  new_prefix = None
-
-  if prefix != '':
-    new_prefix = prefix
-
-  prefix = project_configuration['prefix']
-
-  # Load the project's filename.
-  project_name = project_configuration['project_name']
-  project_filename = project_name + METADATA_EXTENSION
-
-  # Create a blank project on the target directory.
-  project = Project(project_name, metadata_directory, targets_directory,
-      prefix, repository_name)
-
-  project.threshold = project_configuration['threshold']
-  project.prefix = project_configuration['prefix']
-  project.layout_type = project_configuration['layout_type']
-
-  # Traverse the public keys and add them to the project.
-  keydict = project_configuration['public_keys']
-
-  for keyid in keydict:
-    key, junk = format_metadata_to_key(keydict[keyid])
-    project.add_verification_key(key)
-
-  # Load the project's metadata.
- targets_metadata_path = os.path.join(project_directory, metadata_directory, - project_filename) - signable = sslib_util.load_json_file(targets_metadata_path) - try: - formats.check_signable_object_format(signable) - except exceptions.UnsignedMetadataError: - # Downgrade the error to a warning because a use case exists where - # metadata may be generated unsigned on one machine and signed on another. - logger.warning('Unsigned metadata object: ' + repr(signable)) - targets_metadata = signable['signed'] - - # Remove the prefix from the metadata. - targets_metadata = _strip_prefix_from_targets_metadata(targets_metadata, - prefix) - for signature in signable['signatures']: - project.add_signature(signature) - - # Update roledb.py containing the loaded project attributes. - roleinfo = roledb.get_roleinfo(project_name, repository_name) - roleinfo['signatures'].extend(signable['signatures']) - roleinfo['version'] = targets_metadata['version'] - roleinfo['paths'] = targets_metadata['targets'] - roleinfo['delegations'] = targets_metadata['delegations'] - roleinfo['partial_loaded'] = False - - # Check if the loaded metadata was partially written and update the - # flag in 'roledb.py'. - if _metadata_is_partially_loaded(project_name, signable, - repository_name=repository_name): - roleinfo['partial_loaded'] = True - - roledb.update_roleinfo(project_name, roleinfo, mark_role_as_dirty=False, - repository_name=repository_name) - - for key_metadata in targets_metadata['delegations']['keys'].values(): - key_object, junk = format_metadata_to_key(key_metadata) - keydb.add_key(key_object, repository_name=repository_name) - - for role in targets_metadata['delegations']['roles']: - rolename = role['name'] - roleinfo = {'name': role['name'], 'keyids': role['keyids'], - 'threshold': role['threshold'], - 'signing_keyids': [], 'signatures': [], 'partial_loaded':False, - 'delegations': {'keys':{}, 'roles':[]} - } - roledb.add_role(rolename, roleinfo, repository_name=repository_name) - - # Load the delegated metadata and generate their fileinfo. - targets_objects = {} - loaded_metadata = [project_name] - targets_objects[project_name] = project - metadata_directory = os.path.join(project_directory, metadata_directory) - - if os.path.exists(metadata_directory) and \ - os.path.isdir(metadata_directory): - for metadata_role in os.listdir(metadata_directory): - metadata_path = os.path.join(metadata_directory, metadata_role) - metadata_name = \ - metadata_path[len(metadata_directory):].lstrip(os.path.sep) - - # Strip the extension. The roledb does not include an appended '.json' - # extension for each role. - if metadata_name.endswith(METADATA_EXTENSION): - extension_length = len(METADATA_EXTENSION) - metadata_name = metadata_name[:-extension_length] - - else: - continue - - if metadata_name in loaded_metadata: - continue - - signable = None - signable = sslib_util.load_json_file(metadata_path) - - # Strip the prefix from the local working copy, it will be added again - # when the targets metadata is written to disk. 
- metadata_object = signable['signed'] - metadata_object = _strip_prefix_from_targets_metadata(metadata_object, - prefix) - - roleinfo = roledb.get_roleinfo(metadata_name, repository_name) - roleinfo['signatures'].extend(signable['signatures']) - roleinfo['version'] = metadata_object['version'] - roleinfo['expires'] = metadata_object['expires'] - roleinfo['paths'] = {} - - for filepath, fileinfo in metadata_object['targets'].items(): - roleinfo['paths'].update({filepath: fileinfo.get('custom', {})}) - roleinfo['delegations'] = metadata_object['delegations'] - roleinfo['partial_loaded'] = False - - # If the metadata was partially loaded, update the roleinfo flag. - if _metadata_is_partially_loaded(metadata_name, signable, - repository_name=repository_name): - roleinfo['partial_loaded'] = True - - - roledb.update_roleinfo(metadata_name, roleinfo, - mark_role_as_dirty=False, repository_name=repository_name) - - # Append to list of elements to avoid reloading repeated metadata. - loaded_metadata.append(metadata_name) - - # Generate the Targets objects of the delegated roles. - new_targets_object = Targets(targets_directory, metadata_name, roleinfo, - repository_name=repository_name) - targets_object = targets_objects[project_name] - - targets_object._delegated_roles[metadata_name] = new_targets_object - - # Add the keys specified in the delegations field of the Targets role. - for key_metadata in metadata_object['delegations']['keys'].values(): - key_object, junk = format_metadata_to_key(key_metadata) - - try: - keydb.add_key(key_object, repository_name=repository_name) - - except exceptions.KeyAlreadyExistsError: - pass - - for role in metadata_object['delegations']['roles']: - rolename = role['name'] - roleinfo = {'name': role['name'], 'keyids': role['keyids'], - 'threshold': role['threshold'], - 'signing_keyids': [], 'signatures': [], - 'partial_loaded': False, - 'delegations': {'keys': {}, - 'roles': []}} - roledb.add_role(rolename, roleinfo, repository_name=repository_name) - - if new_prefix: - project.prefix = new_prefix - - return project - - - - - -def _strip_prefix_from_targets_metadata(targets_metadata, prefix): - """ - Non-public method that removes the prefix from each of the target paths in - 'targets_metadata' so they can be used again in compliance with the local - copies. The prefix is needed in metadata to match the layout of the remote - repository. - """ - - unprefixed_targets_metadata = {} - - for targets in targets_metadata['targets'].keys(): - unprefixed_target = os.path.relpath(targets, prefix) - unprefixed_targets_metadata[unprefixed_target] = \ - targets_metadata['targets'][targets] - targets_metadata['targets'] = unprefixed_targets_metadata - - return targets_metadata - - - - - -if __name__ == '__main__': - # The interactive sessions of the documentation strings can - # be tested by running 'developer_tool.py' as a standalone module: - # $ python3 developer_tool.py - import doctest - doctest.testmod() diff --git a/tuf/download.py b/tuf/download.py deleted file mode 100755 index af12af614b..0000000000 --- a/tuf/download.py +++ /dev/null @@ -1,314 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - download.py - - - February 21, 2012. Based on previous version by Geremy Condra. - - - Konstantin Andrianov - Vladimir Diaz - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Download metadata and target files and check their validity. 
The hash and
-  length of a downloaded file have to match the hash and length supplied by
-  the metadata of that file.
-"""
-
-import logging
-import timeit
-import tempfile
-from urllib import parse
-
-import securesystemslib # pylint: disable=unused-import
-from securesystemslib import formats as sslib_formats
-
-from tuf import exceptions
-from tuf import formats
-from tuf import settings
-
-# See 'log.py' to learn how logging is handled in TUF.
-logger = logging.getLogger(__name__)
-
-
-def safe_download(url, required_length, fetcher):
-  """
-
-    Given the 'url' and 'required_length' of the desired file, open a
-    connection to 'url', download it, and return the contents of the file.
-    Also ensure the length of the downloaded file matches 'required_length'
-    exactly.  download.unsafe_download() may be called if an upper download
-    limit is preferred.
-
-
-    url:
-      A URL string that represents the location of the file.
-
-    required_length:
-      An integer value representing the length of the file.  This is an
-      exact limit.
-
-    fetcher:
-      An object implementing FetcherInterface that performs the network IO
-      operations.
-
-
-    A file object is created on disk to store the contents of 'url'.
-
-
-    tuf.exceptions.DownloadLengthMismatchError, if there was a mismatch of
-    observed vs expected lengths while downloading the file.
-
-    securesystemslib.exceptions.FormatError, if any of the arguments are
-    improperly formatted.
-
-    Any other unforeseen runtime exception.
-
-
-    A file object that points to the contents of 'url'.
-  """
-
-  # Do all of the arguments have the appropriate format?
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.URL_SCHEMA.check_match(url)
-  formats.LENGTH_SCHEMA.check_match(required_length)
-
-  return _download_file(url, required_length, fetcher,
-      STRICT_REQUIRED_LENGTH=True)
-
-
-
-
-
-def unsafe_download(url, required_length, fetcher):
-  """
-
-    Given the 'url' and 'required_length' of the desired file, open a
-    connection to 'url', download it, and return the contents of the file.
-    Also ensure the length of the downloaded file is up to
-    'required_length', and no larger.  download.safe_download() may be
-    called if an exact download limit is preferred.
-
-
-    url:
-      A URL string that represents the location of the file.
-
-    required_length:
-      An integer value representing the length of the file.  This is an
-      upper limit.
-
-    fetcher:
-      An object implementing FetcherInterface that performs the network IO
-      operations.
-
-
-    A file object is created on disk to store the contents of 'url'.
-
-
-    tuf.exceptions.DownloadLengthMismatchError, if there was a mismatch of
-    observed vs expected lengths while downloading the file.
-
-    securesystemslib.exceptions.FormatError, if any of the arguments are
-    improperly formatted.
-
-    Any other unforeseen runtime exception.
-
-
-    A file object that points to the contents of 'url'.
-  """
-
-  # Do all of the arguments have the appropriate format?
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.URL_SCHEMA.check_match(url)
-  formats.LENGTH_SCHEMA.check_match(required_length)
-
-  return _download_file(url, required_length, fetcher,
-      STRICT_REQUIRED_LENGTH=False)
-
-
-
-
-
-def _download_file(url, required_length, fetcher, STRICT_REQUIRED_LENGTH=True):
-  """
-
-    Given the url and length of the desired file, this function opens a
-    connection to 'url' and downloads the file while ensuring its length
-    matches 'required_length' if 'STRICT_REQUIRED_LENGTH' is True.  (If
-    False, the file's length is not checked and a slow retrieval exception
-    is raised if the download rate falls below the acceptable rate.)
-
-
-    url:
-      A URL string that represents the location of the file.
-
-    required_length:
-      An integer value representing the length of the file.
-
-    STRICT_REQUIRED_LENGTH:
-      A Boolean indicator used to signal whether we should perform strict
-      checking of required_length.  True by default.  We explicitly set this
-      to False when we know that we want to turn this off for downloading
-      the timestamp metadata, which has no signed required_length.
-
-
-    A file object is created on disk to store the contents of 'url'.
-
-
-    tuf.exceptions.DownloadLengthMismatchError, if there was a mismatch of
-    observed vs expected lengths while downloading the file.
-
-    securesystemslib.exceptions.FormatError, if any of the arguments are
-    improperly formatted.
-
-    Any other unforeseen runtime exception.
-
-
-    A file object that points to the contents of 'url'.
-  """
-  # 'url.replace('\\', '/')' is needed for compatibility with Windows-based
-  # systems, because they might use back-slashes in place of
-  # forward-slashes.  This converts it to the common format.  unquote()
-  # replaces %xx escapes in a url with their single-character equivalent.
-  # A back-slash may be encoded as %5c in the url, which should also be
-  # replaced with a forward slash.
-  url = parse.unquote(url).replace('\\', '/')
-  logger.info('Downloading: ' + repr(url))
-
-  # This is the temporary file that we will return to contain the contents
-  # of the downloaded file.
-  temp_file = tempfile.TemporaryFile()
-
-  average_download_speed = 0
-  number_of_bytes_received = 0
-
-  try:
-    chunks = fetcher.fetch(url, required_length)
-    start_time = timeit.default_timer()
-    for chunk in chunks:
-
-      stop_time = timeit.default_timer()
-      temp_file.write(chunk)
-
-      # Measure the average download speed.
-      number_of_bytes_received += len(chunk)
-      seconds_spent_receiving = stop_time - start_time
-      average_download_speed = \
-          number_of_bytes_received / seconds_spent_receiving
-
-      if average_download_speed < settings.MIN_AVERAGE_DOWNLOAD_SPEED:
-        logger.debug('The average download speed dropped below the minimum'
-            ' average download speed set in settings.  Stopping the'
-            ' download!')
-        break
-
-      else:
-        logger.debug('The average download speed has not dipped below the'
-            ' minimum average download speed set in settings.')
-
-    # Does the total number of downloaded bytes match the required length?
-    _check_downloaded_length(number_of_bytes_received, required_length,
-        STRICT_REQUIRED_LENGTH=STRICT_REQUIRED_LENGTH,
-        average_download_speed=average_download_speed)
-
-  except Exception:
-    # Close 'temp_file'.  Any written data is lost.
-    temp_file.close()
-    logger.debug('Could not download URL: ' + repr(url))
-    raise
-
-  else:
-    return temp_file
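The fetcher plug-in point used by _download_file() is easiest to see with a toy in-memory implementation. A sketch, assuming the legacy tuf.download module removed in this diff; the URL is hypothetical and never contacted:

    from tuf import download

    class InMemoryFetcher:
        """Toy stand-in for a FetcherInterface implementation."""

        def __init__(self, data):
            self._data = data

        def fetch(self, url, required_length):
            # Yield the payload in small chunks, as a network fetcher would.
            for i in range(0, len(self._data), 4):
                yield self._data[i:i + 4]

    payload = b'hello world'
    fetcher = InMemoryFetcher(payload)

    # Exact-length download: the returned temporary file holds the payload.
    temp_file = download.safe_download('http://localhost/demo',
        len(payload), fetcher)
    temp_file.seek(0)
    assert temp_file.read() == payload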
-
-
-
-
-def _check_downloaded_length(total_downloaded, required_length,
-    STRICT_REQUIRED_LENGTH=True, average_download_speed=None):
-  """
-
-    A helper function which checks whether the total number of downloaded
-    bytes matches our expectation.
-
-
-    total_downloaded:
-      The total number of bytes supposedly downloaded for the file in
-      question.
-
-    required_length:
-      The total number of bytes expected of the file as seen from its
-      metadata.  The Timestamp role is always downloaded without a known
-      file length, and so is the Root role when the client cannot download
-      any of the required top-level roles.  In both cases, 'required_length'
-      is actually an upper limit on the length of the downloaded file.
-
-    STRICT_REQUIRED_LENGTH:
-      A Boolean indicator used to signal whether we should perform strict
-      checking of required_length.  True by default.  We explicitly set this
-      to False when we know that we want to turn this off for downloading
-      the timestamp metadata, which has no signed required_length.
-
-    average_download_speed:
-      The average download speed for the downloaded file.
-
-
-    None.
-
-
-    tuf.exceptions.DownloadLengthMismatchError, if STRICT_REQUIRED_LENGTH is
-    True and total_downloaded is not equal to required_length.
-
-    tuf.exceptions.SlowRetrievalError, if the download was slower than the
-    acceptable download speed (as set in tuf.settings).
-
-
-    None.
-  """
-
-  if total_downloaded == required_length:
-    logger.info('Downloaded ' + str(total_downloaded) + ' bytes out of the'
-        ' expected ' + str(required_length) + ' bytes.')
-
-  else:
-    difference_in_bytes = abs(total_downloaded - required_length)
-
-    # What we downloaded is not equal to the required length, but did we ask
-    # for strict checking of required length?
-    if STRICT_REQUIRED_LENGTH:
-      logger.info('Downloaded ' + str(total_downloaded) + ' bytes, but'
-          ' expected ' + str(required_length) + ' bytes.  There is a'
-          ' difference of ' + str(difference_in_bytes) + ' bytes.')
-
-      # If the average download speed is below a certain threshold, we flag
-      # this as a possible slow-retrieval attack.
-      logger.debug('Average download speed: ' +
-          repr(average_download_speed))
-      logger.debug('Minimum average download speed: ' +
-          repr(settings.MIN_AVERAGE_DOWNLOAD_SPEED))
-
-      if average_download_speed < settings.MIN_AVERAGE_DOWNLOAD_SPEED:
-        raise exceptions.SlowRetrievalError(average_download_speed)
-
-      else:
-        logger.debug('Good average download speed: ' +
-            repr(average_download_speed) + ' bytes per second')
-
-      raise exceptions.DownloadLengthMismatchError(required_length,
-          total_downloaded)
-
-    else:
-      # We specifically disabled strict checking of required length, but we
-      # will log a warning anyway.  This is useful when we wish to download
-      # the Timestamp or Root metadata, for which we have no signed
-      # metadata; so, we must guess a reasonable required_length for it.
- if average_download_speed < settings.MIN_AVERAGE_DOWNLOAD_SPEED: - raise exceptions.SlowRetrievalError(average_download_speed) - - else: - logger.debug('Good average download speed: ' + - repr(average_download_speed) + ' bytes per second') - - logger.info('Downloaded ' + str(total_downloaded) + ' bytes out of an' - ' upper limit of ' + str(required_length) + ' bytes.') diff --git a/tuf/exceptions.py b/tuf/exceptions.py deleted file mode 100755 index 8ebc92c7d1..0000000000 --- a/tuf/exceptions.py +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - exceptions.py - - - Vladimir Diaz - - - January 10, 2017 - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Define TUF Exceptions. - The names chosen for TUF Exception classes should end in 'Error' except where - there is a good reason not to, and provide that reason in those cases. -""" - -from urllib import parse - -from typing import Any, Dict, Optional - -import logging -logger = logging.getLogger(__name__) - - -class Error(Exception): - """Indicate a generic error.""" - - -class UnsupportedSpecificationError(Error): - """ - Metadata received claims to conform to a version of the specification that is - not supported by this client. - """ - -class FormatError(Error): - """Indicate an error while validating an object's format.""" - - -class InvalidMetadataJSONError(FormatError): - """Indicate that a metadata file is not valid JSON.""" - - def __init__(self, exception: BaseException): - super(InvalidMetadataJSONError, self).__init__() - - # Store the original exception. - self.exception = exception - - def __str__(self) -> str: - return repr(self) - - def __repr__(self) -> str: - # Show the original exception. 
-    return self.__class__.__name__ + ' : wraps error: ' + repr(self.exception)
-
-  # # Directly instance-reproducing:
-  # return self.__class__.__name__ + '(' + repr(self.exception) + ')'
-
-
-class UnsupportedAlgorithmError(Error):
-  """Indicate an error while trying to identify a user-specified algorithm."""
-
-class LengthOrHashMismatchError(Error):
-  """Indicate an error while checking the length and hash values of an
-  object."""
-
-class RepositoryError(Error):
-  """Indicate an error with a repository's state, such as a missing file."""
-
-class BadHashError(RepositoryError):
-  """Indicate an error while checking the value of a hash object."""
-
-  def __init__(self, expected_hash: str, observed_hash: str):
-    super(BadHashError, self).__init__()
-
-    self.expected_hash = expected_hash
-    self.observed_hash = observed_hash
-
-  def __str__(self) -> str:
-    return (
-        'Observed hash (' + repr(self.observed_hash) + ') != expected hash (' +
-        repr(self.expected_hash) + ')')
-
-  def __repr__(self) -> str:
-    return self.__class__.__name__ + ' : ' + str(self)
-
-  # # Directly instance-reproducing:
-  # return (
-  #     self.__class__.__name__ + '(' + repr(self.expected_hash) + ', ' +
-  #     repr(self.observed_hash) + ')')
-
-
-class BadPasswordError(Error):
-  """Indicate an error after encountering an invalid password."""
-
-
-class UnknownKeyError(Error):
-  """Indicate an error while verifying key-like objects (e.g., keyids)."""
-
-
-class BadVersionNumberError(RepositoryError):
-  """Indicate an error for metadata that contains an invalid version number."""
-
-
-class MissingLocalRepositoryError(RepositoryError):
-  """Raised when a local repository could not be found."""
-
-
-class InsufficientKeysError(Error):
-  """Indicate that a metadata role lacks a threshold of public or private
-  keys."""
-
-
-class ForbiddenTargetError(RepositoryError):
-  """Indicate that a role signed for a target that it was not delegated to."""
-
-
-class ExpiredMetadataError(RepositoryError):
-  """Indicate that a TUF Metadata file has expired."""
-
-
-class ReplayedMetadataError(RepositoryError):
-  """Indicate that some metadata has been replayed to the client."""
-
-  def __init__(self, metadata_role: str, downloaded_version: int, current_version: int):
-    super(ReplayedMetadataError, self).__init__()
-
-    self.metadata_role = metadata_role
-    self.downloaded_version = downloaded_version
-    self.current_version = current_version
-
-  def __str__(self) -> str:
-    return (
-        'Downloaded ' + repr(self.metadata_role) + ' is older (' +
-        repr(self.downloaded_version) + ') than the version currently '
-        'installed (' + repr(self.current_version) + ').')
-
-  def __repr__(self) -> str:
-    return self.__class__.__name__ + ' : ' + str(self)
-
-
-class CryptoError(Error):
-  """Indicate any cryptography-related errors."""
-
-
-class BadSignatureError(CryptoError):
-  """Indicate that some metadata file has a bad signature."""
-
-  def __init__(self, metadata_role_name: str):
-    super(BadSignatureError, self).__init__()
-
-    self.metadata_role_name = metadata_role_name
-
-  def __str__(self) -> str:
-    return repr(self.metadata_role_name) + ' metadata has a bad signature.'
-
-  def __repr__(self) -> str:
-    return self.__class__.__name__ + ' : ' + str(self)
-
-  # # Directly instance-reproducing:
-  # return (
-  #     self.__class__.__name__ + '(' + repr(self.metadata_role_name) + ')')
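The __str__/__repr__ conventions used by these classes can be exercised directly; a short sketch with hypothetical hash values, against the legacy tuf.exceptions module removed in this diff:

    from tuf import exceptions

    e = exceptions.BadHashError('aaaa', 'bbbb')
    print(str(e))   # Observed hash ('bbbb') != expected hash ('aaaa')
    print(repr(e))  # BadHashError : Observed hash ('bbbb') != expected hash ('aaaa')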
-
-
-class UnknownMethodError(CryptoError):
-  """Indicate that a user-specified cryptographic method is unknown."""
-
-
-class UnsupportedLibraryError(Error):
-  """Indicate that a supported library could not be located or imported."""
-
-
-class DownloadError(Error):
-  """Indicate an error occurred while attempting to download a file."""
-
-
-class DownloadLengthMismatchError(DownloadError):
-  """Indicate that a mismatch of lengths was seen while downloading a file."""
-
-  def __init__(self, expected_length: int, observed_length: int):
-    super(DownloadLengthMismatchError, self).__init__()
-
-    self.expected_length = expected_length #bytes
-    self.observed_length = observed_length #bytes
-
-  def __str__(self) -> str:
-    return (
-        'Observed length (' + repr(self.observed_length) +
-        ') < expected length (' + repr(self.expected_length) + ').')
-
-  def __repr__(self) -> str:
-    return self.__class__.__name__ + ' : ' + str(self)
-
-  # # Directly instance-reproducing:
-  # return (
-  #     self.__class__.__name__ + '(' + repr(self.expected_length) + ', ' +
-  #     repr(self.observed_length) + ')')
-
-
-
-class SlowRetrievalError(DownloadError):
-  """Indicate that downloading a file took an unreasonably long time."""
-
-  def __init__(self, average_download_speed: Optional[int] = None):
-    super(SlowRetrievalError, self).__init__()
-
-    self.__average_download_speed = average_download_speed #bytes/second
-
-  def __str__(self) -> str:
-    msg = 'Download was too slow.'
-    if self.__average_download_speed is not None:
-      msg = ('Download was too slow. Average speed: ' +
-          repr(self.__average_download_speed) + ' bytes per second.')
-
-    return msg
-
-  def __repr__(self) -> str:
-    return self.__class__.__name__ + ' : ' + str(self)
-
-  # # Directly instance-reproducing:
-  # return (
-  #     self.__class__.__name__ + '(' +
-  #     repr(self.__average_download_speed) + ')')
-
-
-class KeyAlreadyExistsError(Error):
-  """Indicate that a key already exists and cannot be added."""
-
-
-class RoleAlreadyExistsError(Error):
-  """Indicate that a role already exists and cannot be added."""
-
-
-class UnknownRoleError(Error):
-  """Indicate an error trying to locate or identify a specified TUF role."""
-
-
-class UnknownTargetError(Error):
-  """Indicate an error trying to locate or identify a specified target."""
-
-
-class InvalidNameError(Error):
-  """Indicate an error while trying to validate any type of named object."""
-
-
-class UnsignedMetadataError(RepositoryError):
-  """Indicate a metadata object with an insufficient threshold of
-  signatures."""
-
-  # signable is not used but kept in method signature for backwards compat
-  def __init__(self, message: str, signable: Any = None):
-    super(UnsignedMetadataError, self).__init__()
-
-    self.exception_message = message
-    self.signable = signable
-
-  def __str__(self) -> str:
-    return self.exception_message
-
-  def __repr__(self) -> str:
-    return self.__class__.__name__ + ' : ' + str(self)
-
-  # # Directly instance-reproducing:
-  # return (
-  #     self.__class__.__name__ + '(' + repr(self.exception_message) + ', ' +
-  #     repr(self.signable) + ')')
-
-
-class NoWorkingMirrorError(Error):
-  """
-  An updater will throw this exception in case it could not download a
-  metadata or target file.
-  A dictionary of Exception instances indexed by every mirror URL will also
-  be provided.
- """ - - def __init__(self, mirror_errors: Dict[str, BaseException]): - super(NoWorkingMirrorError, self).__init__() - - # Dictionary of URL strings to Exception instances - self.mirror_errors = mirror_errors - - def __str__(self) -> str: - all_errors = 'No working mirror was found:' - - for mirror_url, mirror_error in self.mirror_errors.items(): - try: - # http://docs.python.org/2/library/urlparse.html#urlparse.urlparse - mirror_url_tokens = parse.urlparse(mirror_url) - - except Exception: - logger.exception('Failed to parse mirror URL: ' + repr(mirror_url)) - mirror_netloc = mirror_url - - else: - mirror_netloc = mirror_url_tokens.netloc - - all_errors += '\n ' + repr(mirror_netloc) + ': ' + repr(mirror_error) - - return all_errors - - def __repr__(self) -> str: - return self.__class__.__name__ + ' : ' + str(self) - - # # Directly instance-reproducing: - # return ( - # self.__class__.__name__ + '(' + repr(self.mirror_errors) + ')') - - - -class NotFoundError(Error): - """If a required configuration or resource is not found.""" - - -class URLMatchesNoPatternError(Error): - """If a URL does not match a user-specified regular expression.""" - -class URLParsingError(Error): - """If we are unable to parse a URL -- for example, if a hostname element - cannot be isoalted.""" - -class InvalidConfigurationError(Error): - """If a configuration object does not match the expected format.""" - -class FetcherHTTPError(Exception): - """ - Returned by FetcherInterface implementations for HTTP errors. - - Args: - message (str): The HTTP error messsage - status_code (int): The HTTP status code - """ - def __init__(self, message: str, status_code: int): - super(FetcherHTTPError, self).__init__(message) - self.status_code = status_code diff --git a/tuf/formats.py b/tuf/formats.py deleted file mode 100755 index ca304ca9e4..0000000000 --- a/tuf/formats.py +++ /dev/null @@ -1,1009 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - formats.py - - - Geremy Condra - Vladimir Diaz - - - Refactored April 30, 2012. -vladimir.v.diaz - - - See LICENSE-MIT OR LICENSE for licensing information. - - - A central location for all format-related checking of TUF objects. - Some crypto-related formats may also be defined in securesystemslib. - Note: 'formats.py' depends heavily on 'schema.py', so the 'schema.py' - module should be read and understood before tackling this module. - - 'formats.py' can be broken down into two sections. (1) Schemas and object - matching. (2) Functions that help produce or verify TUF objects. - - The first section deals with schemas and object matching based on format. - There are two ways of checking the format of objects. The first method - raises a 'securesystemslib.exceptions.FormatError' exception if the match - fails and the other returns a Boolean result. - - tuf.formats..check_match(object) - tuf.formats..matches(object) - - Example: - - rsa_key = {'keytype': 'rsa' - 'keyid': 34892fc465ac76bc3232fab - 'keyval': {'public': 'public_key', - 'private': 'private_key'} - - securesystemslib.formats.RSAKEY_SCHEMA.check_match(rsa_key) - securesystemslib.formats.RSAKEY_SCHEMA.matches(rsa_key) - - In this example, if a dict key or dict value is missing or incorrect, - the match fails. There are numerous variations of object checking - provided by 'formats.py' and 'schema.py'. - - The second section contains miscellaneous functions related to the format of - TUF objects. 
-  Example:
-
-  signable_object = make_signable(unsigned_object)
-"""
-
-import binascii
-import calendar
-import datetime
-import time
-import copy
-
-from securesystemslib import exceptions as sslib_exceptions
-from securesystemslib import formats as sslib_formats
-from securesystemslib import schema as SCHEMA
-
-import tuf
-from tuf import exceptions
-
-# As per TUF spec 1.0.0 the spec version field must follow the Semantic
-# Versioning 2.0.0 (semver) format.  The regex pattern is provided by semver.
-# https://semver.org/spec/v2.0.0.html#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
-SEMVER_2_0_0_SCHEMA = SCHEMA.RegularExpression(
-    r'(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)'
-    r'(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)'
-    r'(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?'
-    r'(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?'
-)
-SPECIFICATION_VERSION_SCHEMA = SCHEMA.OneOf([
-    # However, temporarily allow "1.0" for backwards-compatibility in
-    # tuf-0.12.PATCH.
-    SCHEMA.String("1.0"),
-    SEMVER_2_0_0_SCHEMA
-])
-
-# A datetime in 'YYYY-MM-DDTHH:MM:SSZ' ISO 8601 format.  The "Z" zone
-# designator for the zero UTC offset is always used (i.e., a numerical
-# offset is not supported.)  Example: '2015-10-21T13:20:00Z'.  Note: This is
-# a simple format check, and an ISO 8601 string should be fully verified
-# when it is parsed.
-ISO8601_DATETIME_SCHEMA = SCHEMA.RegularExpression(
-    r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z')
-
-# An integer representing the numbered version of a metadata file.
-# Must be 1, or greater.
-METADATAVERSION_SCHEMA = SCHEMA.Integer(lo=0)
-
-# A relative file path (e.g., 'metadata/root/').
-RELPATH_SCHEMA = SCHEMA.AnyString()
-RELPATHS_SCHEMA = SCHEMA.ListOf(RELPATH_SCHEMA)
-
-VERSIONINFO_SCHEMA = SCHEMA.Object(
-    object_name = 'VERSIONINFO_SCHEMA',
-    version = METADATAVERSION_SCHEMA)
-
-# A string representing a role's name.
-ROLENAME_SCHEMA = SCHEMA.AnyString()
-
-# A role's threshold value (i.e., the minimum number
-# of signatures required to sign a metadata file).
-# Must be 1 or greater.
-THRESHOLD_SCHEMA = SCHEMA.Integer(lo=1)
-
-# A hexadecimal value in '23432df87ab..' format.
-HEX_SCHEMA = SCHEMA.RegularExpression(r'[a-fA-F0-9]+')
-
-# A path hash prefix is a hexadecimal string.
-PATH_HASH_PREFIX_SCHEMA = HEX_SCHEMA
-
-# A list of path hash prefixes.
-PATH_HASH_PREFIXES_SCHEMA = SCHEMA.ListOf(PATH_HASH_PREFIX_SCHEMA)
-
-# Role object in {'keyids': [keyids..], 'name': 'ABC', 'threshold': 1,
-# 'paths':[filepaths..]} format.
-# TODO: This is not a role.  In further #660-related PRs, fix it, similar to
-# the way I did in Uptane's TUF fork.
-ROLE_SCHEMA = SCHEMA.Object(
-    object_name = 'ROLE_SCHEMA',
-    name = SCHEMA.Optional(ROLENAME_SCHEMA),
-    keyids = sslib_formats.KEYIDS_SCHEMA,
-    threshold = THRESHOLD_SCHEMA,
-    terminating = SCHEMA.Optional(sslib_formats.BOOLEAN_SCHEMA),
-    paths = SCHEMA.Optional(RELPATHS_SCHEMA),
-    path_hash_prefixes = SCHEMA.Optional(PATH_HASH_PREFIXES_SCHEMA))
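The check_match()/matches() distinction described in the module docstring applies to every schema defined here. A minimal sketch on a deliberately simplified version pattern (not the full semver expression above):

    from securesystemslib import exceptions as sslib_exceptions
    from securesystemslib import schema as SCHEMA

    version_schema = SCHEMA.RegularExpression(r'\d+\.\d+\.\d+')

    print(version_schema.matches('1.0.0'))  # True: returns a Boolean.

    try:
        version_schema.check_match('not-a-version')  # Raises on mismatch.
    except sslib_exceptions.FormatError as e:
        print(e)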
-
-# A dict of roles where the dict keys are role names and the dict values
-# hold the role data/information.
-ROLEDICT_SCHEMA = SCHEMA.DictOf(
-    key_schema = ROLENAME_SCHEMA,
-    value_schema = ROLE_SCHEMA)
-
-# A dictionary of ROLEDICT, where dictionary keys can be repository names,
-# and dictionary values contain information for each role available on the
-# repository (i.e., the repository named by the dictionary key).
-ROLEDICTDB_SCHEMA = SCHEMA.DictOf(
-    key_schema = sslib_formats.NAME_SCHEMA,
-    value_schema = ROLEDICT_SCHEMA)
-
-# Command argument list, as used by the CLI tool.
-# Example: {'keytype': 'ed25519', 'expires': 365}
-COMMAND_SCHEMA = SCHEMA.DictOf(
-    key_schema = sslib_formats.NAME_SCHEMA,
-    value_schema = SCHEMA.Any())
-
-# A dictionary holding version information.
-VERSION_SCHEMA = SCHEMA.Object(
-    object_name = 'VERSION_SCHEMA',
-    major = SCHEMA.Integer(lo=0),
-    minor = SCHEMA.Integer(lo=0),
-    fix = SCHEMA.Integer(lo=0))
-
-# A value that is either True or False, on or off, etc.
-BOOLEAN_SCHEMA = SCHEMA.Boolean()
-
-# A hexadecimal value in '23432df87ab..' format.
-HASH_SCHEMA = SCHEMA.RegularExpression(r'[a-fA-F0-9]+')
-
-# A key identifier (e.g., a hexadecimal value identifying an RSA key).
-KEYID_SCHEMA = HASH_SCHEMA
-
-# A list of KEYID_SCHEMA.
-KEYIDS_SCHEMA = SCHEMA.ListOf(KEYID_SCHEMA)
-
-# The actual values of a key, as opposed to metadata such as a key type and
-# key identifier ('rsa', 233df889cb).  For RSA keys, the key value is a pair
-# of public and private keys in PEM format stored as strings.
-KEYVAL_SCHEMA = SCHEMA.Object(
-    object_name = 'KEYVAL_SCHEMA',
-    public = SCHEMA.AnyString(),
-    private = SCHEMA.Optional(SCHEMA.AnyString()))
-
-# A generic TUF key.  All TUF keys should be saved to metadata files in this
-# format.
-KEY_SCHEMA = SCHEMA.Object(
-    object_name = 'KEY_SCHEMA',
-    keytype = SCHEMA.AnyString(),
-    keyval = KEYVAL_SCHEMA,
-    expires = SCHEMA.Optional(ISO8601_DATETIME_SCHEMA))
-
-# A dict where the dict keys hold a keyid and the dict values a key object.
-KEYDICT_SCHEMA = SCHEMA.DictOf(
-    key_schema = KEYID_SCHEMA,
-    value_schema = KEY_SCHEMA)
-
-# The format used by the key database to store keys.  The dict keys hold a
-# key identifier and the dict values any object.  The key database should
-# store key objects in the values (e.g., 'RSAKEY_SCHEMA', 'DSAKEY_SCHEMA').
-KEYDB_SCHEMA = SCHEMA.DictOf(
-    key_schema = KEYID_SCHEMA,
-    value_schema = SCHEMA.Any())
-
-# A schema holding the result of checking the signatures of a particular
-# 'SIGNABLE_SCHEMA' role.
-# For example, how many of the signatures for the 'Targets' role are valid?
-# This SCHEMA holds this information.  See 'sig.py' for more information.
-SIGNATURESTATUS_SCHEMA = SCHEMA.Object(
-    object_name = 'SIGNATURESTATUS_SCHEMA',
-    threshold = SCHEMA.Integer(),
-    good_sigs = KEYIDS_SCHEMA,
-    bad_sigs = KEYIDS_SCHEMA,
-    unknown_sigs = KEYIDS_SCHEMA,
-    untrusted_sigs = KEYIDS_SCHEMA)
-
-# An integer representing length.  Must be 0, or greater.
-LENGTH_SCHEMA = SCHEMA.Integer(lo=0)
-
-# A dict in {'sha256': '23432df87ab..', 'sha512': '34324abc34df..', ...}
-# format.
-HASHDICT_SCHEMA = SCHEMA.DictOf(
-    key_schema = SCHEMA.AnyString(),
-    value_schema = HASH_SCHEMA)
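A hypothetical key object that satisfies KEY_SCHEMA and, keyed by a keyid, KEYDICT_SCHEMA; a quick sketch against the legacy module removed in this diff:

    from tuf import formats

    key = {'keytype': 'ed25519',
           'keyval': {'public': 'beefcafe'}}  # hypothetical value

    formats.KEY_SCHEMA.check_match(key)                # No exception.
    formats.KEYDICT_SCHEMA.check_match({'aa11': key})  # Keyed by a hex keyid.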
-
-# Information about target files, like file length and file hash(es).  This
-# schema allows the storage of multiple hashes for the same file (e.g.,
-# sha256 and sha512 may be computed for the same file and stored).
-TARGETS_FILEINFO_SCHEMA = SCHEMA.Object(
-    object_name = 'TARGETS_FILEINFO_SCHEMA',
-    length = LENGTH_SCHEMA,
-    hashes = HASHDICT_SCHEMA,
-    custom = SCHEMA.Optional(SCHEMA.Object()))
-
-# Information about snapshot and timestamp files.  This schema allows for
-# optional length and hashes, but version is mandatory.
-METADATA_FILEINFO_SCHEMA = SCHEMA.Object(
-    object_name = 'METADATA_FILEINFO_SCHEMA',
-    length = SCHEMA.Optional(LENGTH_SCHEMA),
-    hashes = SCHEMA.Optional(HASHDICT_SCHEMA),
-    version = METADATAVERSION_SCHEMA)
-
-# A dict holding the version or file information for a particular metadata
-# role.  The dict keys hold the relative file paths, and the dict values the
-# corresponding version numbers and/or file information.
-FILEINFODICT_SCHEMA = SCHEMA.DictOf(
-    key_schema = RELPATH_SCHEMA,
-    value_schema = SCHEMA.OneOf([VERSIONINFO_SCHEMA,
-        METADATA_FILEINFO_SCHEMA]))
-
-# A dict holding the information for a particular target / file.  The dict
-# keys hold the relative file paths, and the dict values the corresponding
-# file information.
-FILEDICT_SCHEMA = SCHEMA.DictOf(
-    key_schema = RELPATH_SCHEMA,
-    value_schema = TARGETS_FILEINFO_SCHEMA)
-
-# A dict holding a target info.
-TARGETINFO_SCHEMA = SCHEMA.Object(
-    object_name = 'TARGETINFO_SCHEMA',
-    filepath = RELPATH_SCHEMA,
-    fileinfo = TARGETS_FILEINFO_SCHEMA)
-
-# A list of TARGETINFO_SCHEMA.
-TARGETINFOS_SCHEMA = SCHEMA.ListOf(TARGETINFO_SCHEMA)
-
-# A string representing a named object.
-NAME_SCHEMA = SCHEMA.AnyString()
-
-# A dict of repository names to mirrors.
-REPO_NAMES_TO_MIRRORS_SCHEMA = SCHEMA.DictOf(
-    key_schema = NAME_SCHEMA,
-    value_schema = SCHEMA.ListOf(sslib_formats.URL_SCHEMA))
-
-# An object containing the map file's "mapping" attribute.
-MAPPING_SCHEMA = SCHEMA.ListOf(SCHEMA.Object(
-    paths = RELPATHS_SCHEMA,
-    repositories = SCHEMA.ListOf(NAME_SCHEMA),
-    terminating = BOOLEAN_SCHEMA,
-    threshold = THRESHOLD_SCHEMA))
-
-# A dict containing the map file (named 'map.json', by default).  The format
-# of the map file is covered in TAP 4: Multiple repository consensus on
-# entrusted targets.
-MAPFILE_SCHEMA = SCHEMA.Object(
-    repositories = REPO_NAMES_TO_MIRRORS_SCHEMA,
-    mapping = MAPPING_SCHEMA)
-
-# Like ROLEDICT_SCHEMA, except that ROLE_SCHEMA instances are stored in
-# order.
-ROLELIST_SCHEMA = SCHEMA.ListOf(ROLE_SCHEMA)
-
-# The delegated roles of a Targets role (a parent).
-DELEGATIONS_SCHEMA = SCHEMA.Object(
-    keys = KEYDICT_SCHEMA,
-    roles = ROLELIST_SCHEMA)
-
-# The number of hashed bins, or the number of delegated roles.  See
-# delegate_hashed_bins() in 'repository_tool.py' for an example.  Note:
-# Tools may require further restrictions on the number of bins, such
-# as requiring them to be a power of 2.
-NUMBINS_SCHEMA = SCHEMA.Integer(lo=1)
-
-# The fileinfo format of targets specified in the repository and
-# developer tools.  The fields match those of TARGETS_FILEINFO_SCHEMA,
-# except that all fields are optional.
-CUSTOM_SCHEMA = SCHEMA.DictOf( - key_schema = SCHEMA.AnyString(), - value_schema = SCHEMA.Any() -) -LOOSE_TARGETS_FILEINFO_SCHEMA = SCHEMA.Object( - object_name = "LOOSE_TARGETS_FILEINFO_SCHEMA", - length = SCHEMA.Optional(LENGTH_SCHEMA), - hashes = SCHEMA.Optional(HASHDICT_SCHEMA), - version = SCHEMA.Optional(METADATAVERSION_SCHEMA), - custom = SCHEMA.Optional(SCHEMA.Object()) -) - -PATH_FILEINFO_SCHEMA = SCHEMA.DictOf( - key_schema = RELPATH_SCHEMA, - value_schema = LOOSE_TARGETS_FILEINFO_SCHEMA) - -# TUF roledb -ROLEDB_SCHEMA = SCHEMA.Object( - object_name = 'ROLEDB_SCHEMA', - keyids = SCHEMA.Optional(KEYIDS_SCHEMA), - signing_keyids = SCHEMA.Optional(KEYIDS_SCHEMA), - previous_keyids = SCHEMA.Optional(KEYIDS_SCHEMA), - threshold = SCHEMA.Optional(THRESHOLD_SCHEMA), - previous_threshold = SCHEMA.Optional(THRESHOLD_SCHEMA), - version = SCHEMA.Optional(METADATAVERSION_SCHEMA), - expires = SCHEMA.Optional(ISO8601_DATETIME_SCHEMA), - signatures = SCHEMA.Optional(sslib_formats.SIGNATURES_SCHEMA), - paths = SCHEMA.Optional(SCHEMA.OneOf([RELPATHS_SCHEMA, PATH_FILEINFO_SCHEMA])), - path_hash_prefixes = SCHEMA.Optional(PATH_HASH_PREFIXES_SCHEMA), - delegations = SCHEMA.Optional(DELEGATIONS_SCHEMA), - partial_loaded = SCHEMA.Optional(BOOLEAN_SCHEMA)) - -# A signable object. Holds the signing role and its associated signatures. -SIGNABLE_SCHEMA = SCHEMA.Object( - object_name = 'SIGNABLE_SCHEMA', - signed = SCHEMA.Any(), - signatures = SCHEMA.ListOf(sslib_formats.SIGNATURE_SCHEMA)) - -# Root role: indicates root keys and top-level roles. -ROOT_SCHEMA = SCHEMA.Object( - object_name = 'ROOT_SCHEMA', - _type = SCHEMA.String('root'), - spec_version = SPECIFICATION_VERSION_SCHEMA, - version = METADATAVERSION_SCHEMA, - consistent_snapshot = BOOLEAN_SCHEMA, - expires = ISO8601_DATETIME_SCHEMA, - keys = KEYDICT_SCHEMA, - roles = ROLEDICT_SCHEMA) - -# Targets role: Indicates targets and delegates target paths to other roles. -TARGETS_SCHEMA = SCHEMA.Object( - object_name = 'TARGETS_SCHEMA', - _type = SCHEMA.String('targets'), - spec_version = SPECIFICATION_VERSION_SCHEMA, - version = METADATAVERSION_SCHEMA, - expires = ISO8601_DATETIME_SCHEMA, - targets = FILEDICT_SCHEMA, - delegations = SCHEMA.Optional(DELEGATIONS_SCHEMA)) - -# Snapshot role: indicates the latest versions of all metadata (except -# timestamp). -SNAPSHOT_SCHEMA = SCHEMA.Object( - object_name = 'SNAPSHOT_SCHEMA', - _type = SCHEMA.String('snapshot'), - version = METADATAVERSION_SCHEMA, - expires = sslib_formats.ISO8601_DATETIME_SCHEMA, - spec_version = SPECIFICATION_VERSION_SCHEMA, - meta = FILEINFODICT_SCHEMA) - -# Timestamp role: indicates the latest version of the snapshot file. 
-TIMESTAMP_SCHEMA = SCHEMA.Object(
-    object_name = 'TIMESTAMP_SCHEMA',
-    _type = SCHEMA.String('timestamp'),
-    spec_version = SPECIFICATION_VERSION_SCHEMA,
-    version = METADATAVERSION_SCHEMA,
-    expires = sslib_formats.ISO8601_DATETIME_SCHEMA,
-    meta = FILEINFODICT_SCHEMA)
-
-
-# project.cfg file: stores information about the project in a json dictionary
-PROJECT_CFG_SCHEMA = SCHEMA.Object(
-    object_name = 'PROJECT_CFG_SCHEMA',
-    project_name = SCHEMA.AnyString(),
-    layout_type = SCHEMA.OneOf([SCHEMA.String('repo-like'),
-        SCHEMA.String('flat')]),
-    targets_location = sslib_formats.PATH_SCHEMA,
-    metadata_location = sslib_formats.PATH_SCHEMA,
-    prefix = sslib_formats.PATH_SCHEMA,
-    public_keys = sslib_formats.KEYDICT_SCHEMA,
-    threshold = SCHEMA.Integer(lo = 0, hi = 2)
-    )
-
-# A schema containing information a repository mirror may require,
-# such as a URL, the path of the metadata directory, etc.
-MIRROR_SCHEMA = SCHEMA.Object(
-    object_name = 'MIRROR_SCHEMA',
-    url_prefix = sslib_formats.URL_SCHEMA,
-    metadata_path = SCHEMA.Optional(RELPATH_SCHEMA),
-    targets_path = SCHEMA.Optional(RELPATH_SCHEMA),
-    confined_target_dirs = SCHEMA.Optional(RELPATHS_SCHEMA),
-    custom = SCHEMA.Optional(SCHEMA.Object()))
-
-# A dictionary of mirrors where the dict keys hold the mirror's name and
-# the dict values the mirror's data (i.e., 'MIRROR_SCHEMA').
-# The repository class of 'updater.py' accepts dictionaries
-# of this type provided by the TUF client.
-MIRRORDICT_SCHEMA = SCHEMA.DictOf(
-    key_schema = SCHEMA.AnyString(),
-    value_schema = MIRROR_SCHEMA)
-
-# A Mirrorlist: indicates all the live mirrors, and what documents they
-# serve.
-MIRRORLIST_SCHEMA = SCHEMA.Object(
-    object_name = 'MIRRORLIST_SCHEMA',
-    _type = SCHEMA.String('mirrors'),
-    version = METADATAVERSION_SCHEMA,
-    expires = sslib_formats.ISO8601_DATETIME_SCHEMA,
-    mirrors = SCHEMA.ListOf(MIRROR_SCHEMA))
-
-# Any of the role schemas (e.g., TIMESTAMP_SCHEMA, SNAPSHOT_SCHEMA, etc.)
-ANYROLE_SCHEMA = SCHEMA.OneOf([ROOT_SCHEMA, TARGETS_SCHEMA, SNAPSHOT_SCHEMA,
-    TIMESTAMP_SCHEMA, MIRROR_SCHEMA])
-
-# The format of the resulting "scp config dict" after extraction from the
-# push configuration file (i.e., push.cfg).  In the case of a config file
-# utilizing the scp transfer module, it must contain the 'general' and 'scp'
-# sections, where 'general' must contain 'transfer_module' and
-# 'metadata_path' entries, and 'scp' the 'host', 'user', 'identity_file',
-# and 'remote_directory' entries.
-SCPCONFIG_SCHEMA = SCHEMA.Object(
-    object_name = 'SCPCONFIG_SCHEMA',
-    general = SCHEMA.Object(
-        object_name = '[general]',
-        transfer_module = SCHEMA.String('scp'),
-        metadata_path = sslib_formats.PATH_SCHEMA,
-        targets_directory = sslib_formats.PATH_SCHEMA),
-    scp=SCHEMA.Object(
-        object_name = '[scp]',
-        host = sslib_formats.URL_SCHEMA,
-        user = sslib_formats.NAME_SCHEMA,
-        identity_file = sslib_formats.PATH_SCHEMA,
-        remote_directory = sslib_formats.PATH_SCHEMA))
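Pulling several of the schemas above together, here is a minimal, hypothetical dict that satisfies TARGETS_SCHEMA: a semver spec_version, an ISO 8601 expiry, and a FILEDICT-style targets entry. A sketch against the legacy module removed in this diff:

    from tuf import formats

    targets_metadata = {
        '_type': 'targets',
        'spec_version': '1.0.0',
        'version': 1,
        'expires': '2030-01-01T00:00:00Z',
        'targets': {
            'setup.py': {'length': 1024, 'hashes': {'sha256': 'abcd'}},
        },
    }

    formats.TARGETS_SCHEMA.check_match(targets_metadata)  # No exception.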
-RECEIVECONFIG_SCHEMA = SCHEMA.Object(
-  object_name = 'RECEIVECONFIG_SCHEMA',
-  general = SCHEMA.Object(
-    object_name = '[general]',
-    pushroots = SCHEMA.ListOf(sslib_formats.PATH_SCHEMA),
-    repository_directory = sslib_formats.PATH_SCHEMA,
-    metadata_directory = sslib_formats.PATH_SCHEMA,
-    targets_directory = sslib_formats.PATH_SCHEMA,
-    backup_directory = sslib_formats.PATH_SCHEMA))
-
-
-
-def make_signable(role_schema):
-  """
-
-    Return the role metadata 'role_schema' in 'SIGNABLE_SCHEMA' format.
-    'role_schema' is added to the 'signed' key, and an empty list is
-    initialized to the 'signatures' key.  The caller adds signatures
-    to this second field.
-    Note: check_signable_object_format() should be called after
-    make_signable(), and after signatures have been added, to ensure the
-    final signable object has a valid format (i.e., a signable containing
-    a supported role metadata).
-
-
-    role_schema:
-      A role schema dict (e.g., 'ROOT_SCHEMA', 'SNAPSHOT_SCHEMA').
-
-
-    None.
-
-
-    None.
-
-
-    A dict in 'SIGNABLE_SCHEMA' format.
-  """
-
-  if not isinstance(role_schema, dict) or 'signed' not in role_schema:
-    return { 'signed' : role_schema, 'signatures' : [] }
-
-  else:
-    return role_schema
-
-
-
-
-
-
-def build_dict_conforming_to_schema(schema, **kwargs):
-  """
-
-    Given a schema.Object object (for example, TIMESTAMP_SCHEMA from this
-    module) and a set of keyword arguments, create a dictionary that conforms
-    to the given schema, using the keyword arguments to define the elements of
-    the new dict.
-
-    Checks the result to make sure that it conforms to the given schema,
-    raising an error if not.
-
-
-    schema
-      A schema.Object, like TIMESTAMP_SCHEMA, TARGETS_FILEINFO_SCHEMA,
-      securesystemslib.formats.SIGNATURE_SCHEMA, etc.
-
-    **kwargs
-      A keyword argument for each element of the schema.  Optional arguments
-      may be included or skipped, but all required arguments must be included.
-
-      For example, for TIMESTAMP_SCHEMA, a call might look like:
-        build_dict_conforming_to_schema(
-            TIMESTAMP_SCHEMA,
-            _type='timestamp',
-            spec_version='1.0.0',
-            version=1,
-            expires='2020-01-01T00:00:00Z',
-            meta={...})
-      Some arguments will be filled in automatically if omitted: '_type' and
-      'spec_version'.
-
-
-    A dictionary conforming to the given schema.  Adds certain required fields
-    if they are missing and can be deduced from the schema.  The data returned
-    is a deep copy.
-
-
-    securesystemslib.exceptions.FormatError
-      if the provided data does not match the schema when assembled.
-
-
-    None.  In particular, the provided values are not modified, and the
-    returned dictionary does not include references to them.
-
-  """
-
-  # Check the schema argument type (must provide check_match and _required).
-  if not isinstance(schema, SCHEMA.Object):
-    raise ValueError(
-        'The first argument must be a schema.Object instance, but is not. '
-        'Given schema: ' + repr(schema))
-
-  # Make a copy of the provided fields so that the caller's provided values
-  # do not change when the returned values are changed.
-  dictionary = copy.deepcopy(kwargs)
-
-
-  # Automatically provide certain schema properties if they are not already
-  # provided and are required in objects of this schema class.
-  # This includes:
-  #     _type:        the schema's expected '_type' string
-  #     spec_version: SPECIFICATION_VERSION_SCHEMA
-  #
-  # (Please note that _required is slightly misleading, as it includes both
-  # required and optional elements. It should probably be called _components.)
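-  # For illustration only (hypothetical contents): for TIMESTAMP_SCHEMA,
-  # 'schema._required' would hold (key, schema) pairs such as
-  # ('_type', SCHEMA.String('timestamp')) and
-  # ('spec_version', SPECIFICATION_VERSION_SCHEMA); the loop below walks
-  # these pairs to fill in the missing fields.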
- # - for key, element_type in schema._required: #pylint: disable=protected-access - - if key in dictionary: - # If the field has been provided, proceed normally. - continue - - elif isinstance(element_type, SCHEMA.Optional): - # If the field has NOT been provided but IS optional, proceed without it. - continue - - else: - # If the field has not been provided and is required, check to see if - # the field is one of the fields we automatically fill. - - # Currently, the list is limited to ['_type', 'spec_version']. - - if key == '_type' and isinstance(element_type, SCHEMA.String): - # A SCHEMA.String stores its expected value in _string, so use that. - dictionary[key] = element_type._string #pylint: disable=protected-access - - elif (key == 'spec_version' and - element_type == SPECIFICATION_VERSION_SCHEMA): - # If not provided, use the specification version in tuf/__init__.py - dictionary[key] = tuf.SPECIFICATION_VERSION - - - # If what we produce does not match the provided schema, raise a FormatError. - schema.check_match(dictionary) - - return dictionary - - - - - -# A dict holding the recognized schemas for the top-level roles. -SCHEMAS_BY_TYPE = { - 'root' : ROOT_SCHEMA, - 'targets' : TARGETS_SCHEMA, - 'snapshot' : SNAPSHOT_SCHEMA, - 'timestamp' : TIMESTAMP_SCHEMA, - 'mirrors' : MIRRORLIST_SCHEMA} - - - - -def expiry_string_to_datetime(expires): - """ - - Convert an expiry string to a datetime object. - - expires: - The expiry date-time string in the ISO8601 format that is defined - in securesystemslib.ISO8601_DATETIME_SCHEMA. E.g. '2038-01-19T03:14:08Z' - - securesystemslib.exceptions.FormatError, if 'expires' cannot be - parsed correctly. - - None. - - A datetime object representing the expiry time. - """ - - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expires) - - try: - return datetime.datetime.strptime(expires, "%Y-%m-%dT%H:%M:%SZ") - except ValueError as error: - raise sslib_exceptions.FormatError( - 'Failed to parse ' + repr(expires) + ' as an expiry time') from error - - - - -def datetime_to_unix_timestamp(datetime_object): - """ - - Convert 'datetime_object' (in datetime.datetime()) format) to a Unix/POSIX - timestamp. For example, Python's time.time() returns a Unix timestamp, and - includes the number of microseconds. 'datetime_object' is converted to UTC. - - >>> datetime_object = datetime.datetime(1985, 10, 26, 1, 22) - >>> timestamp = datetime_to_unix_timestamp(datetime_object) - >>> timestamp - 499137720 - - - datetime_object: - The datetime.datetime() object to convert to a Unix timestamp. - - - securesystemslib.exceptions.FormatError, if 'datetime_object' is not a - datetime.datetime() object. - - - None. - - - A unix (posix) timestamp (e.g., 499137660). - """ - - # Is 'datetime_object' a datetime.datetime() object? - # Raise 'securesystemslib.exceptions.FormatError' if not. - if not isinstance(datetime_object, datetime.datetime): - message = repr(datetime_object) + ' is not a datetime.datetime() object.' - raise sslib_exceptions.FormatError(message) - - unix_timestamp = calendar.timegm(datetime_object.timetuple()) - - return unix_timestamp - - - - - -def unix_timestamp_to_datetime(unix_timestamp): - """ - - Convert 'unix_timestamp' (i.e., POSIX time, in UNIX_TIMESTAMP_SCHEMA format) - to a datetime.datetime() object. 'unix_timestamp' is the number of seconds - since the epoch (January 1, 1970.) 
- - >>> datetime_object = unix_timestamp_to_datetime(1445455680) - >>> datetime_object - datetime.datetime(2015, 10, 21, 19, 28) - - - unix_timestamp: - An integer representing the time (e.g., 1445455680). Conformant to - 'securesystemslib.formats.UNIX_TIMESTAMP_SCHEMA'. - - - securesystemslib.exceptions.FormatError, if 'unix_timestamp' is improperly - formatted. - - - None. - - - A datetime.datetime() object corresponding to 'unix_timestamp'. - """ - - # Is 'unix_timestamp' properly formatted? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.UNIX_TIMESTAMP_SCHEMA.check_match(unix_timestamp) - - # Convert 'unix_timestamp' to a 'time.struct_time', in UTC. The Daylight - # Savings Time (DST) flag is set to zero. datetime.fromtimestamp() is not - # used because it returns a local datetime. - struct_time = time.gmtime(unix_timestamp) - - # Extract the (year, month, day, hour, minutes, seconds) arguments for the - # datetime object to be returned. - datetime_object = datetime.datetime(*struct_time[:6]) - - return datetime_object - - - -def format_base64(data): - """ - - Return the base64 encoding of 'data' with whitespace and '=' signs omitted. - - - data: - Binary or buffer of data to convert. - - - securesystemslib.exceptions.FormatError, if the base64 encoding fails or the - argument is invalid. - - - None. - - - A base64-encoded string. - """ - - try: - return binascii.b2a_base64(data).decode('utf-8').rstrip('=\n ') - - except (TypeError, binascii.Error) as e: - raise sslib_exceptions.FormatError('Invalid base64' - ' encoding: ' + str(e)) - - - - -def parse_base64(base64_string): - """ - - Parse a base64 encoding with whitespace and '=' signs omitted. - - - base64_string: - A string holding a base64 value. - - - securesystemslib.exceptions.FormatError, if 'base64_string' cannot be parsed - due to an invalid base64 encoding. - - - None. - - - A byte string representing the parsed based64 encoding of - 'base64_string'. - """ - - if not isinstance(base64_string, str): - message = 'Invalid argument: '+repr(base64_string) - raise sslib_exceptions.FormatError(message) - - extra = len(base64_string) % 4 - if extra: - padding = '=' * (4 - extra) - base64_string = base64_string + padding - - try: - return binascii.a2b_base64(base64_string.encode('utf-8')) - - except (TypeError, binascii.Error) as e: - raise sslib_exceptions.FormatError('Invalid base64' - ' encoding: ' + str(e)) - - - -def make_targets_fileinfo(length, hashes, custom=None): - """ - - Create a dictionary conformant to 'TARGETS_FILEINFO_SCHEMA'. - This dict describes a target file. - - - length: - An integer representing the size of the file. - - hashes: - A dict of hashes in 'HASHDICT_SCHEMA' format, which has the form: - {'sha256': 123df8a9b12, 'sha512': 324324dfc121, ...} - - custom: - An optional object providing additional information about the file. - - - securesystemslib.exceptions.FormatError, if the 'TARGETS_FILEINFO_SCHEMA' to be - returned does not have the correct format. - - - A dictionary conformant to 'TARGETS_FILEINFO_SCHEMA', representing the file - information of a target file. - """ - - fileinfo = {'length' : length, 'hashes' : hashes} - - if custom is not None: - fileinfo['custom'] = custom - - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - TARGETS_FILEINFO_SCHEMA.check_match(fileinfo) - - return fileinfo - - - -def make_metadata_fileinfo(version, length=None, hashes=None): - """ - - Create a dictionary conformant to 'METADATA_FILEINFO_SCHEMA'. 
- This dict describes one of the metadata files used for timestamp and - snapshot roles. - - - version: - An integer representing the version of the file. - - length: - An optional integer representing the size of the file. - - hashes: - An optional dict of hashes in 'HASHDICT_SCHEMA' format, which has the form: - {'sha256': 123df8a9b12, 'sha512': 324324dfc121, ...} - - - - securesystemslib.exceptions.FormatError, if the 'METADATA_FILEINFO_SCHEMA' to be - returned does not have the correct format. - - - A dictionary conformant to 'METADATA_FILEINFO_SCHEMA', representing the file - information of a metadata file. - """ - - fileinfo = {'version' : version} - - if length: - fileinfo['length'] = length - - if hashes: - fileinfo['hashes'] = hashes - - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - METADATA_FILEINFO_SCHEMA.check_match(fileinfo) - - return fileinfo - - - -def make_versioninfo(version_number): - """ - - Create a dictionary conformant to 'VERSIONINFO_SCHEMA'. This dict - describes both metadata and target files. - - - version_number: - An integer representing the version of a particular metadata role. - The dictionary returned by this function is expected to be included - in Snapshot metadata. - - - securesystemslib.exceptions.FormatError, if the dict to be returned does not - have the correct format (i.e., VERSIONINFO_SCHEMA). - - - None. - - - A dictionary conformant to 'VERSIONINFO_SCHEMA', containing the version - information of a metadata role. - """ - - versioninfo = {'version': version_number} - - # Raise 'securesystemslib.exceptions.FormatError' if 'versioninfo' is - # improperly formatted. - VERSIONINFO_SCHEMA.check_match(versioninfo) - - return versioninfo - - - - - -def expected_meta_rolename(meta_rolename): - """ - - Ensure 'meta_rolename' is properly formatted. - 'targets' is returned as 'Targets'. - 'targets role1' is returned as 'Targets Role1'. - - The words in the string (i.e., separated by whitespace) - are capitalized. - - - meta_rolename: - A string representing the rolename. - E.g., 'root', 'targets'. - - - securesystemslib.exceptions.FormatError, if 'meta_rolename' is improperly - formatted. - - - None. - - - A string (e.g., 'Root', 'Targets'). - """ - - # Does 'meta_rolename' have the correct type? - # This check ensures 'meta_rolename' conforms to - # 'securesystemslib.formats.NAME_SCHEMA'. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.NAME_SCHEMA.check_match(meta_rolename) - - return meta_rolename.lower() - - - -def check_signable_object_format(signable): - """ - - Ensure 'signable' is properly formatted, conformant to - 'SIGNABLE_SCHEMA'. Return the signing role on - success. Note: The 'signed' field of a 'SIGNABLE_SCHEMA' is checked - against securesystemslib.schema.Any(). The 'signed' field, however, should - actually hold one of the supported role schemas (e.g., 'ROOT_SCHEMA', - 'TARGETS_SCHEMA'). The role schemas all differ in their format, so this - function determines exactly which schema is listed in the 'signed' field. - - - signable: - The signable object compared against 'SIGNABLE.SCHEMA'. - - - securesystemslib.exceptions.FormatError, if 'signable' does not have the - correct format. - - tuf.exceptions.UnsignedMetadataError, if 'signable' does not have any - signatures - - - None. - - - A string representing the signing role (e.g., 'root', 'targets'). - The role string is returned with characters all lower case. - """ - - # Does 'signable' have the correct type? 
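-  # For illustration only, a signable envelope has roughly this shape
-  # (values hypothetical):
-  #   {'signed': {'_type': 'root', ...},
-  #    'signatures': [{'keyid': '1a2b...', 'sig': '3c4d...'}]}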
- # This check ensures 'signable' conforms to - # 'SIGNABLE_SCHEMA'. - SIGNABLE_SCHEMA.check_match(signable) - - try: - role_type = signable['signed']['_type'] - - except (KeyError, TypeError) as error: - raise sslib_exceptions.FormatError('Untyped signable object.') from error - - try: - schema = SCHEMAS_BY_TYPE[role_type] - - except KeyError as error: - raise sslib_exceptions.FormatError('Unrecognized type ' - + repr(role_type)) from error - - if not signable['signatures']: - raise exceptions.UnsignedMetadataError('Signable object of type ' + - repr(role_type) + ' has no signatures ', signable) - - # 'securesystemslib.exceptions.FormatError' raised if 'signable' does not - # have a properly formatted role schema. - schema.check_match(signable['signed']) - - return role_type.lower() - - - -if __name__ == '__main__': - # The interactive sessions of the documentation strings can - # be tested by running formats.py as a standalone module. - # python3 -B formats.py - import doctest - doctest.testmod() diff --git a/tuf/keydb.py b/tuf/keydb.py deleted file mode 100755 index e06571b06f..0000000000 --- a/tuf/keydb.py +++ /dev/null @@ -1,440 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - keydb.py - - - Vladimir Diaz - - - March 21, 2012. Based on a previous version of this module by Geremy Condra. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Represent a collection of keys and their organization. This module ensures - the layout of the collection remain consistent and easily verifiable. - Provided are functions to add and delete keys from the database, retrieve a - single key, and assemble a collection from keys stored in TUF 'Root' Metadata. - The Update Framework process maintains a set of role info for multiple - repositories. - - RSA keys are currently supported and a collection of keys is organized as a - dictionary indexed by key ID. Key IDs are used as identifiers for keys - (e.g., RSA key). They are the hexadecimal representations of the hash of key - objects (specifically, the key object containing only the public key). See - 'rsa_key.py' and the '_get_keyid()' function to learn precisely how keyids - are generated. One may get the keyid of a key object by simply accessing the - dictionary's 'keyid' key (i.e., rsakey['keyid']). -""" - -import logging -import copy - -import securesystemslib # pylint: disable=unused-import -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import keys as sslib_keys - -from tuf import exceptions -from tuf import formats - -# List of strings representing the key types supported by TUF. -_SUPPORTED_KEY_TYPES = ['rsa', 'ed25519', 'ecdsa', 'ecdsa-sha2-nistp256'] - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -# The key database. -_keydb_dict = {} -_keydb_dict['default'] = {} - - -def create_keydb_from_root_metadata(root_metadata, repository_name='default'): - """ - - Populate the key database with the unique keys found in 'root_metadata'. - The database dictionary will conform to - 'tuf.formats.KEYDB_SCHEMA' and have the form: {keyid: key, - ...}. The 'keyid' conforms to 'securesystemslib.formats.KEYID_SCHEMA' and - 'key' to its respective type. In the case of RSA keys, this object would - match 'RSAKEY_SCHEMA'. - - - root_metadata: - A dictionary conformant to 'tuf.formats.ROOT_SCHEMA'. 
The keys found - in the 'keys' field of 'root_metadata' are needed by this function. - - repository_name: - The name of the repository to store the key information. If not supplied, - the key database is populated for the 'default' repository. - - - securesystemslib.exceptions.FormatError, if 'root_metadata' does not have the correct format. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - A function to add the key to the database is called. In the case of RSA - keys, this function is add_key(). - - The old keydb key database is replaced. - - - None. - """ - - # Does 'root_metadata' have the correct format? - # This check will ensure 'root_metadata' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - formats.ROOT_SCHEMA.check_match(root_metadata) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Clear the key database for 'repository_name', or create it if non-existent. - if repository_name in _keydb_dict: - _keydb_dict[repository_name].clear() - - else: - create_keydb(repository_name) - - # Iterate the keys found in 'root_metadata' by converting them to - # 'RSAKEY_SCHEMA' if their type is 'rsa', and then adding them to the - # key database using the provided keyid. - for keyid, key_metadata in root_metadata['keys'].items(): - if key_metadata['keytype'] in _SUPPORTED_KEY_TYPES: - # 'key_metadata' is stored in 'KEY_SCHEMA' format. Call - # create_from_metadata_format() to get the key in 'RSAKEY_SCHEMA' format, - # which is the format expected by 'add_key()'. Note: This call to - # format_metadata_to_key() uses the provided keyid as the default keyid. - # All other keyids returned are ignored. - - key_dict, _ = sslib_keys.format_metadata_to_key(key_metadata, - keyid) - - # Make sure to update key_dict['keyid'] to use one of the other valid - # keyids, otherwise add_key() will have no reference to it. - try: - add_key(key_dict, repository_name=repository_name) - - # Although keyid duplicates should *not* occur (unique dict keys), log a - # warning and continue. However, 'key_dict' may have already been - # adding to the keydb elsewhere. - except exceptions.KeyAlreadyExistsError as e: # pragma: no cover - logger.warning(e) - continue - - else: - logger.warning('Root Metadata file contains a key with an invalid keytype.') - - - - - -def create_keydb(repository_name): - """ - - Create a key database for a non-default repository named 'repository_name'. - - - repository_name: - The name of the repository. An empty key database is created, and keys - may be added to via add_key(keyid, repository_name). - - - securesystemslib.exceptions.FormatError, if 'repository_name' is improperly formatted. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' already exists. - - - None. - - - None. - """ - - # Is 'repository_name' properly formatted? Raise 'securesystemslib.exceptions.FormatError' if not. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name already exists:' - ' ' + repr(repository_name)) - - _keydb_dict[repository_name] = {} - - - - - -def remove_keydb(repository_name): - """ - - Remove a key database for a non-default repository named 'repository_name'. - The 'default' repository cannot be removed. 
- - - repository_name: - The name of the repository to remove. The 'default' repository should - not be removed, so 'repository_name' cannot be 'default'. - - - securesystemslib.exceptions.FormatError, if 'repository_name' is improperly formatted. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' is 'default'. - - - None. - - - None. - """ - - # Is 'repository_name' properly formatted? Raise 'securesystemslib.exceptions.FormatError' if not. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name not in _keydb_dict: - logger.warning('Repository name does not exist: ' + repr(repository_name)) - return - - if repository_name == 'default': - raise sslib_exceptions.InvalidNameError('Cannot remove the default repository:' - ' ' + repr(repository_name)) - - del _keydb_dict[repository_name] - - - - -def add_key(key_dict, keyid=None, repository_name='default'): - """ - - Add 'rsakey_dict' to the key database while avoiding duplicates. - If keyid is provided, verify it is the correct keyid for 'rsakey_dict' - and raise an exception if it is not. - - - key_dict: - A dictionary conformant to 'securesystemslib.formats.ANYKEY_SCHEMA'. - It has the form: - - {'keytype': 'rsa', - 'keyid': keyid, - 'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...', - 'private': '-----BEGIN RSA PRIVATE KEY----- ...'}} - - keyid: - An object conformant to 'KEYID_SCHEMA'. It is used as an identifier - for RSA keys. - - repository_name: - The name of the repository to add the key. If not supplied, the key is - added to the 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the correct format. - - securesystemslib.exceptions.Error, if 'keyid' does not match the keyid for 'rsakey_dict'. - - tuf.exceptions.KeyAlreadyExistsError, if 'rsakey_dict' is found in the key database. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - The keydb key database is modified. - - - None. - """ - - # Does 'key_dict' have the correct format? - # This check will ensure 'key_dict' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError if the check fails. - sslib_formats.ANYKEY_SCHEMA.check_match(key_dict) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Does 'keyid' have the correct format? - if keyid is not None: - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - sslib_formats.KEYID_SCHEMA.check_match(keyid) - - # Check if each keyid found in 'key_dict' matches 'keyid'. - if keyid != key_dict['keyid']: - raise sslib_exceptions.Error('Incorrect keyid. Got ' + key_dict['keyid'] + ' but expected ' + keyid) - - # Ensure 'repository_name' is actually set in the key database. - if repository_name not in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist:' - ' ' + repr(repository_name)) - - # Check if the keyid belonging to 'key_dict' is not already - # available in the key database before returning. - keyid = key_dict['keyid'] - if keyid in _keydb_dict[repository_name]: - raise exceptions.KeyAlreadyExistsError('Key: ' + keyid) - - _keydb_dict[repository_name][keyid] = copy.deepcopy(key_dict) - - - - - -def get_key(keyid, repository_name='default'): - """ - - Return the key belonging to 'keyid'. 
- - - keyid: - An object conformant to 'securesystemslib.formats.KEYID_SCHEMA'. It is used as an - identifier for keys. - - repository_name: - The name of the repository to get the key. If not supplied, the key is - retrieved from the 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the correct format. - - tuf.exceptions.UnknownKeyError, if 'keyid' is not found in the keydb database. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - None. - - - The key matching 'keyid'. In the case of RSA keys, a dictionary conformant - to 'securesystemslib.formats.RSAKEY_SCHEMA' is returned. - """ - - # Does 'keyid' have the correct format? - # This check will ensure 'keyid' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' is the match fails. - sslib_formats.KEYID_SCHEMA.check_match(keyid) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name not in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist:' - ' ' + repr(repository_name)) - - # Return the key belonging to 'keyid', if found in the key database. - try: - return copy.deepcopy(_keydb_dict[repository_name][keyid]) - - except KeyError as error: - raise exceptions.UnknownKeyError('Key: ' + keyid) from error - - - - - -def remove_key(keyid, repository_name='default'): - """ - - Remove the key belonging to 'keyid'. - - - keyid: - An object conformant to 'securesystemslib.formats.KEYID_SCHEMA'. It is used as an - identifier for keys. - - repository_name: - The name of the repository to remove the key. If not supplied, the key - is removed from the 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the correct format. - - tuf.exceptions.UnknownKeyError, if 'keyid' is not found in key database. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - The key, identified by 'keyid', is deleted from the key database. - - - None. - """ - - # Does 'keyid' have the correct format? - # This check will ensure 'keyid' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' is the match fails. - sslib_formats.KEYID_SCHEMA.check_match(keyid) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if repository_name not in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist:' - ' ' + repr(repository_name)) - - # Remove the key belonging to 'keyid' if found in the key database. - if keyid in _keydb_dict[repository_name]: - del _keydb_dict[repository_name][keyid] - - else: - raise exceptions.UnknownKeyError('Key: ' + keyid) - - - - - -def clear_keydb(repository_name='default', clear_all=False): - - """ - - Clear the keydb key database. - - - repository_name: - The name of the repository to clear the key database. If not supplied, - the key database is cleared for the 'default' repository. - - clear_all: - Boolean indicating whether to clear the entire keydb. - - - securesystemslib.exceptions.FormatError, if 'repository_name' is improperly formatted. 
- - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not exist in the key - database. - - - The keydb key database is reset. - - - None. - """ - - # Do the arguments have the correct format? Raise 'securesystemslib.exceptions.FormatError' if - # 'repository_name' is improperly formatted. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - sslib_formats.BOOLEAN_SCHEMA.check_match(clear_all) - - if clear_all: - _keydb_dict.clear() - _keydb_dict['default'] = {} - - if repository_name not in _keydb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist:' - ' ' + repr(repository_name)) - - _keydb_dict[repository_name] = {} diff --git a/tuf/log.py b/tuf/log.py deleted file mode 100755 index f9ae6c7721..0000000000 --- a/tuf/log.py +++ /dev/null @@ -1,448 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - log.py - - - Vladimir Diaz - - - April 4, 2012. Based on a previous version of this module by Geremy Condra. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - A central location for all logging-related configuration. This module should - be imported once by the main program. If other modules wish to incorporate - 'tuf' logging, they should do the following: - - import logging - logger = logging.getLogger('tuf') - - 'logging' refers to the module name. logging.getLogger() is a function of - the module 'logging'. logging.getLogger(name) returns a Logger instance - associated with 'name'. Calling getLogger(name) will always return the same - instance. In this 'log.py' module, we perform the initial setup for the name - 'tuf'. The 'log.py' module should only be imported once by the main program. - When any other module does a logging.getLogger('tuf'), it is referring to the - same 'tuf' instance, and its associated settings, set here in 'log.py'. - See http://docs.python.org/library/logging.html#logger-objects for more - information. - - We use multiple handlers to process log messages in various ways and to - configure each one independently. Instead of using one single manner of - processing log messages, we can use two built-in handlers that have already - been configured for us. For example, the built-in FileHandler will catch - log messages and dump them to a file. If we wanted, we could set this file - handler to only catch CRITICAL (and greater) messages and save them to a - file. Other handlers (e.g., StreamHandler) could handle INFO-level - (and greater) messages. - - Logging Levels: - - --Level-- --Value-- - logging.CRITICAL 50 - logging.ERROR 40 - logging.WARNING 30 - logging.INFO 20 - logging.DEBUG 10 - logging.NOTSET 0 - - The logging module is thread-safe. Logging to a single file from - multiple threads in a single process is also thread-safe. The logging - module is NOT thread-safe when logging to a single file across multiple - processes: - http://docs.python.org/library/logging.html#thread-safety - http://docs.python.org/howto/logging-cookbook.html -""" - -import logging -import time - -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats - -from tuf import exceptions -from tuf import settings - - -# Setting a handler's log level filters only logging messages of that level -# (and above). 
For example, setting the built-in StreamHandler's log level to
-# 'logging.WARNING' will cause the stream handler to only process messages
-# of levels: WARNING, ERROR, and CRITICAL.
-_DEFAULT_LOG_LEVEL = logging.DEBUG
-_DEFAULT_CONSOLE_LOG_LEVEL = logging.INFO
-_DEFAULT_FILE_LOG_LEVEL = logging.DEBUG
-
-# Set the format for logging messages.
-# Example format for '_FORMAT_STRING':
-# [2013-08-13 15:21:18,068 localtime] [tuf]
-# [INFO][_update_metadata:851@updater.py]
-_FORMAT_STRING = '[%(asctime)s UTC] [%(name)s] [%(levelname)s] '+\
-    '[%(funcName)s:%(lineno)s@%(filename)s]\n%(message)s\n'
-
-# Ask all Formatter instances to talk GMT.  Set the 'converter' attribute of
-# 'logging.Formatter' so that all formatters use Greenwich Mean Time.
-# http://docs.python.org/library/logging.html#logging.Formatter.formatTime
-# The 2nd paragraph in the link above contains the relevant information.
-# GMT = UTC (Coordinated Universal Time). TUF metadata stores timestamps in UTC.
-# We previously displayed the local time, but this led to confusion when
-# visually comparing logger events and metadata information. Unix timestamps
-# are fine, but they may be less human-readable than UTC.
-logging.Formatter.converter = time.gmtime
-formatter = logging.Formatter(_FORMAT_STRING)
-
-# Set the handlers for the logger. The console handler is unset by default. A
-# module importing 'log.py' should explicitly set the console handler if
-# outputting log messages to the screen is needed. Adding a console handler can
-# be done with tuf.log.add_console_handler(). Logging messages to a file is not
-# enabled by default.
-console_handler = None
-file_handler = None
-
-# Set the logger and its settings.
-# Note: we're configuring the top-level hierarchy for the tuf package,
-# therefore we explicitly request the 'tuf' logger, rather than following
-# the standard pattern of logging.getLogger(__name__)
-logger = logging.getLogger('tuf')
-logger.setLevel(_DEFAULT_LOG_LEVEL)
-logger.addHandler(logging.NullHandler())
-
-# Set the built-in file handler.  Messages will be logged to
-# 'settings.LOG_FILENAME', and only those messages with a log level of
-# '_DEFAULT_FILE_LOG_LEVEL' (and above).  The log level of messages handled by
-# 'file_handler' may be modified with 'set_filehandler_log_level()'.
-# 'settings.LOG_FILENAME' will be opened in append mode.
-if settings.ENABLE_FILE_LOGGING:
-  file_handler = logging.FileHandler(settings.LOG_FILENAME)
-  file_handler.setLevel(_DEFAULT_FILE_LOG_LEVEL)
-  file_handler.setFormatter(formatter)
-  logger.addHandler(file_handler)
-
-# Silently ignore logger exceptions.
-logging.raiseExceptions = False
-
-
-
-
-
-class ConsoleFilter(logging.Filter):
-  def filter(self, record):
-    """
-
-      Use Vinay Sajip's recommendation from Python issue #6435 to modify a
-      LogRecord object.  This is meant to be used with our console handler.
-
-      http://stackoverflow.com/q/6177520
-      http://stackoverflow.com/q/5875225
-      http://bugs.python.org/issue6435
-      http://docs.python.org/howto/logging-cookbook.html#filters-contextual
-      http://docs.python.org/library/logging.html#logrecord-attributes
-
-
-      record:
-        A logging.LogRecord object.
-
-
-      None.
-
-
-      Replaces the LogRecord exception text attribute.
-
-
-      True.
-    """
-
-    # If this LogRecord object has an exception, then we will replace its text.
-    if record.exc_info:
-      # We replace the record's cached exception text (which usually contains
-      # the exception traceback) with much simpler exception information. 
This is - # most useful for the console handler, which we do not wish to deluge - # with too much data. Assuming that this filter is not applied to the - # file logging handler, the user may always consult the file log for the - # original exception traceback. The exc_info is explained here: - # http://docs.python.org/library/sys.html#sys.exc_info - exc_type, _, _ = record.exc_info - - # Simply set the class name as the exception text. - record.exc_text = exc_type.__name__ - - # Always return True to signal that any given record must be formatted. - return True - - - - - -def set_log_level(log_level: int=_DEFAULT_LOG_LEVEL): - """ - - Allow the default log level to be overridden. If 'log_level' is not - provided, log level defaults to 'logging.DEBUG'. - - - log_level: - The log level to set for the 'log.py' file handler. - 'log_level' examples: logging.INFO; logging.CRITICAL. - - - None. - - - Overrides the logging level for the 'log.py' file handler. - - - None. - """ - - # Does 'log_level' have the correct format? - # Raise 'securesystems.exceptions.FormatError' if there is a mismatch. - sslib_formats.LOGLEVEL_SCHEMA.check_match(log_level) - - logger.setLevel(log_level) - - - - - -def set_filehandler_log_level(log_level=_DEFAULT_FILE_LOG_LEVEL): - """ - - Allow the default file handler log level to be overridden. If 'log_level' - is not provided, log level defaults to 'logging.DEBUG'. - - - log_level: - The log level to set for the 'log.py' file handler. - 'log_level' examples: logging.INFO; logging.CRITICAL. - - - None. - - - Overrides the logging level for the 'log.py' file handler. - - - None. - """ - - # Does 'log_level' have the correct format? - # Raise 'securesystems.exceptions.FormatError' if there is a mismatch. - sslib_formats.LOGLEVEL_SCHEMA.check_match(log_level) - - if file_handler: - file_handler.setLevel(log_level) - - else: - raise exceptions.Error( - 'File handler has not been set. Enable file logging' - ' before attempting to set its log level') - - - - - -def set_console_log_level(log_level=_DEFAULT_CONSOLE_LOG_LEVEL): - """ - - Allow the default log level for console messages to be overridden. If - 'log_level' is not provided, log level defaults to 'logging.INFO'. - - - log_level: - The log level to set for the console handler. - 'log_level' examples: logging.INFO; logging.CRITICAL. - - - securesystemslib.exceptions.Error, if the 'log.py' console handler has not - been set yet with add_console_handler(). - - - Overrides the logging level for the console handler. - - - None. - """ - - # Does 'log_level' have the correct format? - # Raise 'securesystems.exceptions.FormatError' if there is a mismatch. - sslib_formats.LOGLEVEL_SCHEMA.check_match(log_level) - - if console_handler is not None: - console_handler.setLevel(log_level) - - else: - message = 'The console handler has not been set with add_console_handler().' - raise sslib_exceptions.Error(message) - - - - - -def add_console_handler(log_level=_DEFAULT_CONSOLE_LOG_LEVEL): - """ - - Add a console handler and set its log level to 'log_level'. - - - log_level: - The log level to set for the console handler. - 'log_level' examples: logging.INFO; logging.CRITICAL. - - - None. - - - Adds a console handler to the 'log.py' logger and sets its logging level to - 'log_level'. - - - None. - """ - - # Does 'log_level' have the correct format? - # Raise 'securesystems.exceptions.FormatError' if there is a mismatch. - sslib_formats.LOGLEVEL_SCHEMA.check_match(log_level) - - # Assign to the global console_handler object. 
- global console_handler - - if not console_handler: - # Set the console handler for the logger. The built-in console handler will - # log messages to 'sys.stderr' and capture 'log_level' messages. - console_handler = logging.StreamHandler() - - # Get our filter for the console handler. - console_filter = ConsoleFilter() - console_format_string = '%(message)s' - console_formatter = logging.Formatter(console_format_string) - - console_handler.setLevel(log_level) - console_handler.setFormatter(console_formatter) - console_handler.addFilter(console_filter) - logger.addHandler(console_handler) - logger.debug('Added a console handler.') - - else: - logger.warning('We already have a console handler.') - - - - - -def remove_console_handler(): - """ - - Remove the console handler from the logger in 'log.py', if previously added. - - - None. - - - None. - - - A handler belonging to the console is removed from the 'log.py' logger - and the console handler is marked as unset. - - - - None. - """ - - # Assign to the global 'console_handler' object. - global console_handler - - if console_handler: - logger.removeHandler(console_handler) - console_handler = None - logger.debug('Removed a console handler.') - - else: - logger.warning('We do not have a console handler.') - - - -def enable_file_logging(log_filename=settings.LOG_FILENAME): - """ - - Log messages to a file (i.e., 'log_filename'). The log level for the file - handler can be set with set_filehandler_log_level(). - - - log_filename: - Logging messages are saved to this file. If not provided, the log - filename specified in tuf.settings.LOG_FILENAME is used. - - - securesystemslib.exceptions.FormatError, if any of the arguments are - not the expected format. - - tuf.exceptions.Error, if the file handler has already been set. - - - The global file handler is set. - - - None. - """ - - # Are the arguments properly formatted? - sslib_formats.PATH_SCHEMA.check_match(log_filename) - - global file_handler - - # Add a file handler to the logger if not already set. - if not file_handler: - file_handler = logging.FileHandler(log_filename) - file_handler.setLevel(_DEFAULT_FILE_LOG_LEVEL) - file_handler.setFormatter(formatter) - logger.addHandler(file_handler) - - else: - raise exceptions.Error( - 'The file handler has already been been set. A new file handler' - ' can be set by first calling disable_file_logging()') - - - -def disable_file_logging(): - """ - - Disable file logging by removing any previously set file handler. - A warning is logged if the file handler cannot be removed. - - The file that was written to will not be deleted. - - - None. - - - None. - - - The global file handler is unset. - - - None. - """ - - # Assign to the global 'file_handler' object. - global file_handler - - if file_handler: - logger.removeHandler(file_handler) - file_handler.close() - file_handler = None - logger.debug('Removed the file handler.') - - else: - logger.warning('A file handler has not been set.') diff --git a/tuf/mirrors.py b/tuf/mirrors.py deleted file mode 100755 index c7662d3eec..0000000000 --- a/tuf/mirrors.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - mirrors.py - - - Konstantin Andrianov. - Derived from original mirrors.py written by Geremy Condra. - - - March 12, 2012. - - - See LICENSE-MIT OR LICENSE for licensing information. 
-
-
-  Extract a list of mirror urls corresponding to the file type and the location
-  of the file with respect to the base url.
-"""
-
-import os
-from urllib import parse
-
-import securesystemslib # pylint: disable=unused-import
-from securesystemslib import exceptions as sslib_exceptions
-from securesystemslib import formats as sslib_formats
-from securesystemslib.util import file_in_confined_directories
-
-from tuf import formats
-
-
-# The type of file to be downloaded from a repository.  The
-# 'get_list_of_mirrors' function supports these file types.
-_SUPPORTED_FILE_TYPES = ['meta', 'target']
-
-
-def get_list_of_mirrors(file_type, file_path, mirrors_dict):
-  """
-
-    Get a list of mirror urls from a mirrors dictionary, provided the type
-    and the path of the file with respect to the base url.
-
-
-    file_type:
-      Type of data needed for download; must correspond to one of the strings
-      in the list ['meta', 'target']: 'meta' for the metadata file type, or
-      'target' for the target file type.  It should correspond to
-      NAME_SCHEMA format.
-
-    file_path:
-      A relative path to the file that corresponds to RELPATH_SCHEMA format
-      (i.e., the 'file_path' part of 'http://url_prefix/targets_path/file_path').
-
-    mirrors_dict:
-      A mirrors_dict object that corresponds to MIRRORDICT_SCHEMA, where
-      keys are strings and values are MIRROR_SCHEMA. An example format
-      of MIRROR_SCHEMA:
-
-      {'url_prefix': 'http://localhost:8001',
-       'metadata_path': 'metadata/',
-       'targets_path': 'targets/',
-       'confined_target_dirs': ['targets/snapshot1/', ...],
-       'custom': {...}}
-
-      The 'custom' field is optional.
-
-
-    securesystemslib.exceptions.Error, on unsupported 'file_type'.
-
-    securesystemslib.exceptions.FormatError, on bad argument.
-
-
-    List of mirror urls corresponding to the file_type and file_path.  If no
-    match is found, an empty list is returned.
-  """
-
-  # Check that all the arguments have the appropriate format.
-  formats.RELPATH_SCHEMA.check_match(file_path)
-  formats.MIRRORDICT_SCHEMA.check_match(mirrors_dict)
-  sslib_formats.NAME_SCHEMA.check_match(file_type)
-
-  # Verify 'file_type' is supported.
-  if file_type not in _SUPPORTED_FILE_TYPES:
-    raise sslib_exceptions.Error('Invalid file_type argument.'
-        ' Supported file types: ' + repr(_SUPPORTED_FILE_TYPES))
-  path_key = 'metadata_path' if file_type == 'meta' else 'targets_path'
-
-  list_of_mirrors = []
-  for junk, mirror_info in mirrors_dict.items():
-    # Does this mirror serve this file type at all?
-    path = mirror_info.get(path_key)
-    if path is None:
-      continue
-
-    # For targets, ensure directory confinement.
-    if path_key == 'targets_path':
-      full_filepath = os.path.join(path, file_path)
-      confined_target_dirs = mirror_info.get('confined_target_dirs')
-      # confined_target_dirs is optional and can be used to confine the client
-      # to certain paths on a repository mirror when fetching target files.
-      if confined_target_dirs and not file_in_confined_directories(full_filepath,
-          confined_target_dirs):
-        continue
-
-    # parse.quote(string) replaces special characters in string using the %xx
-    # escape.  This is done to avoid parsing issues of the URL on the server
-    # side.  Do *NOT* pass URLs with Unicode characters without first encoding
-    # the URL as UTF-8; a long-term solution is tracked in #61.
-    # http://bugs.python.org/issue1712522
-    file_path = parse.quote(file_path)
-    url = os.path.join(mirror_info['url_prefix'], path, file_path)
-
-    # The above os.path.join() result, as well as the input file_path, may be
-    # invalid on Windows (it might contain both separator types); see #1077.
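-    # For illustration only: on Windows, os.path.join('http://localhost:8001',
-    # 'targets', 'foo.txt') can yield 'http://localhost:8001\targets\foo.txt',
-    # which is why the replace() below normalizes every separator to '/'.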
- # Make sure the URL doesn't contain backward slashes on Windows. - list_of_mirrors.append(url.replace('\\', '/')) - - return list_of_mirrors diff --git a/tuf/repository_lib.py b/tuf/repository_lib.py deleted file mode 100644 index 642447d8b3..0000000000 --- a/tuf/repository_lib.py +++ /dev/null @@ -1,2306 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - repository_lib.py - - - Vladimir Diaz - - - June 1, 2014. - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Provide a library for the repository tool that can create a TUF repository. - The repository tool can be used with the Python interpreter in interactive - mode, or imported directly into a Python module. See 'tuf/README' for the - complete guide to using 'tuf.repository_tool.py'. -""" - -import os -import errno -import time -import logging -import shutil -import json -import tempfile - -import securesystemslib # pylint: disable=unused-import -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import hash as sslib_hash -from securesystemslib import interface as sslib_interface -from securesystemslib import keys as sslib_keys -from securesystemslib import util as sslib_util -from securesystemslib import storage as sslib_storage - -from tuf import exceptions -from tuf import formats -from tuf import keydb -from tuf import log -from tuf import roledb -from tuf import settings -from tuf import sig - - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -# The extension of TUF metadata. -METADATA_EXTENSION = '.json' - -# The targets and metadata directory names. Metadata files are written -# to the staged metadata directory instead of the "live" one. -METADATA_STAGED_DIRECTORY_NAME = 'metadata.staged' -METADATA_DIRECTORY_NAME = 'metadata' -TARGETS_DIRECTORY_NAME = 'targets' - -# The metadata filenames of the top-level roles. -ROOT_FILENAME = 'root' + METADATA_EXTENSION -TARGETS_FILENAME = 'targets' + METADATA_EXTENSION -SNAPSHOT_FILENAME = 'snapshot' + METADATA_EXTENSION -TIMESTAMP_FILENAME = 'timestamp' + METADATA_EXTENSION - -# Log warning when metadata expires in n days, or less. -# root = 1 month, snapshot = 1 day, targets = 10 days, timestamp = 1 day. -ROOT_EXPIRES_WARN_SECONDS = 2630000 -SNAPSHOT_EXPIRES_WARN_SECONDS = 86400 -TARGETS_EXPIRES_WARN_SECONDS = 864000 -TIMESTAMP_EXPIRES_WARN_SECONDS = 86400 - -# Supported key types. -SUPPORTED_KEY_TYPES = ['rsa', 'ed25519', 'ecdsa', 'ecdsa-sha2-nistp256'] - -# The algorithm used by the repository to generate the path hash prefixes -# of hashed bin delegations. Please see delegate_hashed_bins() -HASH_FUNCTION = settings.DEFAULT_HASH_ALGORITHM - - - - -def _generate_and_write_metadata(rolename, metadata_filename, - targets_directory, metadata_directory, storage_backend, - consistent_snapshot=False, filenames=None, allow_partially_signed=False, - increment_version_number=True, repository_name='default', - use_existing_fileinfo=False, use_timestamp_length=True, - use_timestamp_hashes=True, use_snapshot_length=False, - use_snapshot_hashes=False): - """ - Non-public function that can generate and write the metadata for the - specified 'rolename'. It also increments the version number of 'rolename' if - the 'increment_version_number' argument is True. 
- """ - - metadata = None - - # Retrieve the roleinfo of 'rolename' to extract the needed metadata - # attributes, such as version number, expiration, etc. - roleinfo = roledb.get_roleinfo(rolename, repository_name) - previous_keyids = roleinfo.get('previous_keyids', []) - previous_threshold = roleinfo.get('previous_threshold', 1) - signing_keyids = sorted(set(roleinfo['signing_keyids'])) - - # Generate the appropriate role metadata for 'rolename'. - if rolename == 'root': - metadata = generate_root_metadata(roleinfo['version'], roleinfo['expires'], - consistent_snapshot, repository_name) - - _log_warning_if_expires_soon(ROOT_FILENAME, roleinfo['expires'], - ROOT_EXPIRES_WARN_SECONDS) - - - - elif rolename == 'snapshot': - metadata = generate_snapshot_metadata(metadata_directory, - roleinfo['version'], roleinfo['expires'], - storage_backend, consistent_snapshot, repository_name, - use_length=use_snapshot_length, use_hashes=use_snapshot_hashes) - - - _log_warning_if_expires_soon(SNAPSHOT_FILENAME, roleinfo['expires'], - SNAPSHOT_EXPIRES_WARN_SECONDS) - - elif rolename == 'timestamp': - # If filenames don't have "snapshot_filename" key, defaults to "snapshot.json" - snapshot_file_path = (filenames and filenames['snapshot']) \ - or SNAPSHOT_FILENAME - - metadata = generate_timestamp_metadata(snapshot_file_path, roleinfo['version'], - roleinfo['expires'], storage_backend, repository_name, - use_length=use_timestamp_length, use_hashes=use_timestamp_hashes) - - _log_warning_if_expires_soon(TIMESTAMP_FILENAME, roleinfo['expires'], - TIMESTAMP_EXPIRES_WARN_SECONDS) - - # All other roles are either the top-level 'targets' role, or - # a delegated role. - else: - # Only print a warning if the top-level 'targets' role expires soon. - if rolename == 'targets': - _log_warning_if_expires_soon(TARGETS_FILENAME, roleinfo['expires'], - TARGETS_EXPIRES_WARN_SECONDS) - - # Don't hash-prefix consistent target files if they are handled out of band - consistent_targets = consistent_snapshot and not use_existing_fileinfo - - metadata = generate_targets_metadata(targets_directory, - roleinfo['paths'], roleinfo['version'], roleinfo['expires'], - roleinfo['delegations'], consistent_targets, use_existing_fileinfo, - storage_backend, repository_name) - - # Update roledb with the latest delegations info collected during - # generate_targets_metadata() - roledb.update_roleinfo(rolename, roleinfo, - repository_name=repository_name) - - - # Before writing 'rolename' to disk, automatically increment its version - # number (if 'increment_version_number' is True) so that the caller does not - # have to manually perform this action. The version number should be - # incremented in both the metadata file and roledb (required so that Snapshot - # references the latest version). - - # Store the 'current_version' in case the version number must be restored - # (e.g., if 'rolename' cannot be written to disk because its metadata is not - # properly signed). - current_version = metadata['version'] - if increment_version_number: - roleinfo = roledb.get_roleinfo(rolename, repository_name) - metadata['version'] = metadata['version'] + 1 - roleinfo['version'] = roleinfo['version'] + 1 - roledb.update_roleinfo(rolename, roleinfo, - repository_name=repository_name) - - else: - logger.debug('Not incrementing ' + repr(rolename) + '\'s version number.') - - if rolename in roledb.TOP_LEVEL_ROLES and not allow_partially_signed: - # Verify that the top-level 'rolename' is fully signed. 
Only a delegated
-    # role should not be written to disk without full verification of its
-    # signature(s), since it can only be considered fully signed depending on
-    # the delegating role.
-    signable = sign_metadata(metadata, signing_keyids, metadata_filename,
-        repository_name)
-
-
-    def should_write():
-      # Root must be signed by its previous keys and threshold.
-      if rolename == 'root' and len(previous_keyids) > 0:
-        if not sig.verify(signable, rolename, repository_name,
-            previous_threshold, previous_keyids):
-          return False
-
-        else:
-          logger.debug('Root is signed by a threshold of its previous keyids.')
-
-      # In the normal case, we should write metadata if the threshold is met.
-      return sig.verify(signable, rolename, repository_name,
-          roleinfo['threshold'], roleinfo['signing_keyids'])
-
-
-    if should_write():
-      _remove_invalid_and_duplicate_signatures(signable, repository_name)
-
-      # Root should always be written as if consistent_snapshot is True (i.e.,
-      # write both <version>.root.json and root.json to disk).
-      if rolename == 'root':
-        consistent_snapshot = True
-      filename = write_metadata_file(signable, metadata_filename,
-          metadata['version'], consistent_snapshot, storage_backend)
-
-    # 'signable' contains an invalid threshold of signatures.
-    else:
-      # Since new metadata cannot be successfully written, restore the current
-      # version number.
-      roleinfo = roledb.get_roleinfo(rolename, repository_name)
-      roleinfo['version'] = current_version
-      roledb.update_roleinfo(rolename, roleinfo,
-          repository_name=repository_name)
-
-      # Note that 'signable' is an argument to exceptions.UnsignedMetadataError().
-      raise exceptions.UnsignedMetadataError('Not enough'
-          ' signatures for ' + repr(metadata_filename), signable)
-
-  # 'rolename' is a delegated role or a top-level role that is partially
-  # signed, and thus its signatures should not be verified.
-  else:
-    signable = sign_metadata(metadata, signing_keyids, metadata_filename,
-        repository_name)
-    _remove_invalid_and_duplicate_signatures(signable, repository_name)
-
-    # Root should always be written as if consistent_snapshot is True (i.e.,
-    # both <version>.root.json and root.json).
-    if rolename == 'root':
-      filename = write_metadata_file(signable, metadata_filename,
-          metadata['version'], consistent_snapshot=True,
-          storage_backend=storage_backend)
-
-    else:
-      filename = write_metadata_file(signable, metadata_filename,
-          metadata['version'], consistent_snapshot, storage_backend)
-
-  return signable, filename
-
-
-
-
-
-def _metadata_is_partially_loaded(rolename, signable, repository_name):
-  """
-  Non-public function that determines whether 'rolename' is loaded with fewer
-  good signatures than its threshold (which means 'rolename' was written to
-  disk with repository.write_partial()).  A repository maintainer may write
-  partial metadata without including a valid signature.  However, the final
-  repository.write() must include a threshold number of signatures.
-
-  If 'rolename' is found to be partially loaded, mark it as partially loaded
-  in its 'roledb' roleinfo.  This function exists to assist in deciding
-  whether a role's version number should be incremented when write() or
-  write_partial() is called.  Return True if 'rolename' was partially loaded,
-  False otherwise.
-  """
-
-  # The signature status lists the number of good signatures, including
-  # bad, untrusted, unknown, etc.
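-  # For illustration only, the status dict returned below looks roughly like
-  # (keyids shortened; the exact fields are defined by
-  # sig.get_signature_status()):
-  #   {'threshold': 2, 'good_sigs': ['1a2b...'], 'bad_sigs': [],
-  #    'untrusted_sigs': [], 'unknown_sigs': []}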
- status = sig.get_signature_status(signable, rolename, repository_name) - - if len(status['good_sigs']) < status['threshold'] and \ - len(status['good_sigs']) >= 0: - return True - - else: - return False - - - - - -def _check_role_keys(rolename, repository_name): - """ - Non-public function that verifies the public and signing keys of 'rolename'. - If either contain an invalid threshold of keys, raise an exception. - """ - - # Extract the total number of public and private keys of 'rolename' from its - # roleinfo in 'roledb'. - roleinfo = roledb.get_roleinfo(rolename, repository_name) - total_keyids = len(roleinfo['keyids']) - threshold = roleinfo['threshold'] - total_signatures = len(roleinfo['signatures']) - total_signing_keys = len(roleinfo['signing_keyids']) - - # Raise an exception for an invalid threshold of public keys. - if total_keyids < threshold: - raise exceptions.InsufficientKeysError(repr(rolename) + ' role contains' - ' ' + repr(total_keyids) + ' / ' + repr(threshold) + ' public keys.') - - # Raise an exception for an invalid threshold of signing keys. - if total_signatures == 0 and total_signing_keys < threshold: - raise exceptions.InsufficientKeysError(repr(rolename) + ' role contains' - ' ' + repr(total_signing_keys) + ' / ' + repr(threshold) + ' signing keys.') - - - - - -def _remove_invalid_and_duplicate_signatures(signable, repository_name): - """ - Non-public function that removes invalid or duplicate signatures from - 'signable'. 'signable' may contain signatures (invalid) from previous - versions of the metadata that were loaded with load_repository(). Invalid, - or duplicate signatures, are removed from 'signable'. - """ - - # Store the keyids of valid signatures. 'signature_keyids' is checked for - # duplicates rather than comparing signature objects because PSS may generate - # duplicate valid signatures for the same data, yet contain different - # signatures. - signature_keyids = [] - - for signature in signable['signatures']: - signed = sslib_formats.encode_canonical(signable['signed']).encode('utf-8') - keyid = signature['keyid'] - key = None - - # Remove 'signature' from 'signable' if the listed keyid does not exist - # in 'keydb'. - try: - key = keydb.get_key(keyid, repository_name=repository_name) - - except exceptions.UnknownKeyError: - signable['signatures'].remove(signature) - continue - - # Remove 'signature' from 'signable' if it is an invalid signature. - if not sslib_keys.verify_signature(key, signature, signed): - logger.debug('Removing invalid signature for ' + repr(keyid)) - signable['signatures'].remove(signature) - - # Although valid, it may still need removal if it is a duplicate. Check - # the keyid, rather than the signature, to remove duplicate PSS signatures. - # PSS may generate multiple different signatures for the same keyid. - else: - if keyid in signature_keyids: - signable['signatures'].remove(signature) - - # 'keyid' is valid and not a duplicate, so add it to 'signature_keyids'. - else: - signature_keyids.append(keyid) - - - - - -def _delete_obsolete_metadata(metadata_directory, snapshot_metadata, - consistent_snapshot, repository_name, storage_backend): - """ - Non-public function that deletes metadata files marked as removed by - 'repository_tool.py'. Revoked metadata files are not actually deleted until - this function is called. Obsolete metadata should *not* be retained in - "metadata.staged", otherwise they may be re-loaded by 'load_repository()'. 
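-
-  For example, a delegated role revoked through 'repository_tool.py' may
-  still exist on disk as 'django.json' (or '<version>.django.json' under a
-  consistent snapshot) until this function removes it from the metadata
-  directory.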
-def _delete_obsolete_metadata(metadata_directory, snapshot_metadata,
-    consistent_snapshot, repository_name, storage_backend):
-  """
-  Non-public function that deletes metadata files marked as removed by
-  'repository_tool.py'.  Revoked metadata files are not actually deleted
-  until this function is called.  Obsolete metadata should *not* be retained
-  in "metadata.staged", otherwise it may be re-loaded by 'load_repository()'.
-
-  Note: Obsolete metadata may not always be easily detected (by inspecting
-  top-level metadata during loading) due to partial metadata and top-level
-  metadata that have not been written yet.
-  """
-
-  # Walk the repository's metadata sub-directory, which is where all
-  # metadata is stored (including delegated roles).  The 'django.json' role
-  # (e.g., delegated by Targets) would be located in the
-  # '{repository_directory}/metadata/' directory.
-  metadata_files = sorted(storage_backend.list_folder(metadata_directory))
-  for metadata_role in metadata_files:
-    if metadata_role.endswith('root.json'):
-      continue
-
-    metadata_path = os.path.join(metadata_directory, metadata_role)
-
-    # Strip the version number if 'consistent_snapshot' is True.  Example:
-    # '10.django.json' --> 'django.json'.  Consistent and non-consistent
-    # metadata might co-exist if write() and
-    # write(consistent_snapshot=True) are mixed, so ensure only
-    # '<version_number>.filename' metadata is stripped.
-
-    # Should we check if 'consistent_snapshot' is True?  It might have been
-    # set previously, but 'consistent_snapshot' can potentially be False
-    # now.  We'll proceed with the understanding that 'metadata_name' can
-    # have a prepended version number even though the repository is now
-    # a non-consistent one.
-    if metadata_role not in snapshot_metadata['meta']:
-      metadata_role, junk = _strip_version_number(metadata_role,
-          consistent_snapshot)
-
-    else:
-      logger.debug(repr(metadata_role) + ' found in the snapshot role.')
-
-    # Strip the metadata extension from the filename.  The role database
-    # does not include the metadata extension.
-    if metadata_role.endswith(METADATA_EXTENSION):
-      metadata_role = metadata_role[:-len(METADATA_EXTENSION)]
-    else:
-      logger.debug(repr(metadata_role) + ' does not match'
-          ' supported extension ' + repr(METADATA_EXTENSION))
-
-    # Skip top-level roles; they should never be deleted.  Note: 'continue'
-    # (rather than 'return') is used so that any obsolete delegated roles
-    # that sort after a top-level role are still inspected.
-    if metadata_role in roledb.TOP_LEVEL_ROLES:
-      logger.debug('Not removing top-level metadata ' + repr(metadata_role))
-      continue
-
-    # Delete the metadata file if it does not exist in 'roledb'.
-    # 'repository_tool.py' might have removed 'metadata_name', but its
-    # metadata file has not been deleted yet.  Do it now.
-    if not roledb.role_exists(metadata_role, repository_name):
-      logger.info('Removing outdated metadata: ' + repr(metadata_path))
-      storage_backend.remove(metadata_path)
-
-    else:
-      logger.debug('Not removing metadata: ' + repr(metadata_path))
-
-  # TODO: Should we delete outdated consistent snapshots, or does it make
-  # more sense for integrators to remove outdated consistent snapshots?
-
-
-
-
-def _get_written_metadata(metadata_signable):
-  """
-  Non-public function that returns the actual content of written metadata.
-  """
-
-  # Explicitly specify the JSON separators for Python 2 + 3 consistency.
-  written_metadata_content = json.dumps(metadata_signable, indent=1,
-      separators=(',', ': '), sort_keys=True).encode('utf-8')
-
-  return written_metadata_content
-
-
-
-
-
-def _strip_version_number(metadata_filename, consistent_snapshot):
-  """
-  Strip from 'metadata_filename' any version number (in the expected
-  '{dirname}/<version_number>.rolename.<extension>' format) that it may
-  contain, and return the stripped filename and the version number as a
-  tuple.  'consistent_snapshot' is a boolean indicating if a version number
-  is prepended to 'metadata_filename'.
-  """
-
-  # Strip the version number if 'consistent_snapshot' is True.
- # Example: '10.django.json' --> 'django.json' - if consistent_snapshot: - dirname, basename = os.path.split(metadata_filename) - version_number, basename = basename.split('.', 1) - stripped_metadata_filename = os.path.join(dirname, basename) - - if not version_number.isdigit(): - return metadata_filename, '' - - else: - return stripped_metadata_filename, version_number - - else: - return metadata_filename, '' - - - - -def _load_top_level_metadata(repository, top_level_filenames, repository_name): - """ - Load the metadata of the Root, Timestamp, Targets, and Snapshot roles. At a - minimum, the Root role must exist and load successfully. - """ - - root_filename = top_level_filenames[ROOT_FILENAME] - targets_filename = top_level_filenames[TARGETS_FILENAME] - snapshot_filename = top_level_filenames[SNAPSHOT_FILENAME] - timestamp_filename = top_level_filenames[TIMESTAMP_FILENAME] - - root_metadata = None - targets_metadata = None - snapshot_metadata = None - timestamp_metadata = None - - # Load 'root.json'. A Root role file without a version number is always - # written. - try: - # Initialize the key and role metadata of the top-level roles. - signable = sslib_util.load_json_file(root_filename) - try: - formats.check_signable_object_format(signable) - except exceptions.UnsignedMetadataError: - # Downgrade the error to a warning because a use case exists where - # metadata may be generated unsigned on one machine and signed on another. - logger.warning('Unsigned metadata object: ' + repr(signable)) - - root_metadata = signable['signed'] - keydb.create_keydb_from_root_metadata(root_metadata, repository_name) - roledb.create_roledb_from_root_metadata(root_metadata, repository_name) - - # Load Root's roleinfo and update 'roledb'. - roleinfo = roledb.get_roleinfo('root', repository_name) - roleinfo['consistent_snapshot'] = root_metadata['consistent_snapshot'] - roleinfo['signatures'] = [] - for signature in signable['signatures']: - if signature not in roleinfo['signatures']: - roleinfo['signatures'].append(signature) - - else: - logger.debug('Found a Root signature that is already loaded:' - ' ' + repr(signature)) - - # By default, roleinfo['partial_loaded'] of top-level roles should be set - # to False in 'create_roledb_from_root_metadata()'. Update this field, if - # necessary, now that we have its signable object. - if _metadata_is_partially_loaded('root', signable, repository_name): - roleinfo['partial_loaded'] = True - - else: - logger.debug('Root was not partially loaded.') - - _log_warning_if_expires_soon(ROOT_FILENAME, roleinfo['expires'], - ROOT_EXPIRES_WARN_SECONDS) - - roledb.update_roleinfo('root', roleinfo, mark_role_as_dirty=False, - repository_name=repository_name) - - # Ensure the 'consistent_snapshot' field is extracted. - consistent_snapshot = root_metadata['consistent_snapshot'] - - except sslib_exceptions.StorageError as error: - raise exceptions.RepositoryError('Cannot load the required' - ' root file: ' + repr(root_filename)) from error - - # Load 'timestamp.json'. A Timestamp role file without a version number is - # always written. - try: - signable = sslib_util.load_json_file(timestamp_filename) - timestamp_metadata = signable['signed'] - for signature in signable['signatures']: - repository.timestamp.add_signature(signature, mark_role_as_dirty=False) - - # Load Timestamp's roleinfo and update 'roledb'. 
-    roleinfo = roledb.get_roleinfo('timestamp', repository_name)
-    roleinfo['expires'] = timestamp_metadata['expires']
-    roleinfo['version'] = timestamp_metadata['version']
-
-    if _metadata_is_partially_loaded('timestamp', signable, repository_name):
-      roleinfo['partial_loaded'] = True
-
-    else:
-      logger.debug('The Timestamp role was not partially loaded.')
-
-    _log_warning_if_expires_soon(TIMESTAMP_FILENAME, roleinfo['expires'],
-        TIMESTAMP_EXPIRES_WARN_SECONDS)
-
-    roledb.update_roleinfo('timestamp', roleinfo, mark_role_as_dirty=False,
-        repository_name=repository_name)
-
-  except sslib_exceptions.StorageError as error:
-    raise exceptions.RepositoryError('Cannot load the Timestamp '
-        'file: ' + repr(timestamp_filename)) from error
-
-  # Load 'snapshot.json'.  A consistent snapshot.json filename must be
-  # calculated if 'consistent_snapshot' is True.  Like Root, the Snapshot
-  # role is accessed by a version-prefixed filename in that case.
-  if consistent_snapshot:
-    snapshot_version = timestamp_metadata['meta'][SNAPSHOT_FILENAME]['version']
-
-    dirname, basename = os.path.split(snapshot_filename)
-    basename = basename.split(METADATA_EXTENSION, 1)[0]
-    snapshot_filename = os.path.join(dirname,
-        str(snapshot_version) + '.' + basename + METADATA_EXTENSION)
-
-  try:
-    signable = sslib_util.load_json_file(snapshot_filename)
-    try:
-      formats.check_signable_object_format(signable)
-    except exceptions.UnsignedMetadataError:
-      # Downgrade the error to a warning because a use case exists where
-      # metadata may be generated unsigned on one machine and signed on
-      # another.
-      logger.warning('Unsigned metadata object: ' + repr(signable))
-
-    snapshot_metadata = signable['signed']
-
-    for signature in signable['signatures']:
-      repository.snapshot.add_signature(signature, mark_role_as_dirty=False)
-
-    # Load Snapshot's roleinfo and update 'roledb'.
-    roleinfo = roledb.get_roleinfo('snapshot', repository_name)
-    roleinfo['expires'] = snapshot_metadata['expires']
-    roleinfo['version'] = snapshot_metadata['version']
-
-    if _metadata_is_partially_loaded('snapshot', signable, repository_name):
-      roleinfo['partial_loaded'] = True
-
-    else:
-      logger.debug('Snapshot was not partially loaded.')
-
-    _log_warning_if_expires_soon(SNAPSHOT_FILENAME, roleinfo['expires'],
-        SNAPSHOT_EXPIRES_WARN_SECONDS)
-
-    roledb.update_roleinfo('snapshot', roleinfo, mark_role_as_dirty=False,
-        repository_name=repository_name)
-
-  except sslib_exceptions.StorageError as error:
-    raise exceptions.RepositoryError('The Snapshot file '
-        'cannot be loaded: ' + repr(snapshot_filename)) from error
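The consistent-snapshot filename computed above follows a simple '<version>.<name>.json' convention. A stdlib-only sketch of the same computation (the paths, version number, and '.json' extension are illustrative):

import os

# Toy excerpt of timestamp metadata: it pins the snapshot version.
timestamp_meta = {'meta': {'snapshot.json': {'version': 42}}}

snapshot_filename = os.path.join('metadata.staged', 'snapshot.json')

snapshot_version = timestamp_meta['meta']['snapshot.json']['version']
dirname, basename = os.path.split(snapshot_filename)
basename = basename.split('.json', 1)[0]
consistent = os.path.join(dirname,
    str(snapshot_version) + '.' + basename + '.json')

print(consistent)  # metadata.staged/42.snapshot.json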
-  # Load 'targets.json'.  A consistent snapshot of the Targets role must be
-  # calculated if 'consistent_snapshot' is True.
-  if consistent_snapshot:
-    targets_version = snapshot_metadata['meta'][TARGETS_FILENAME]['version']
-    dirname, basename = os.path.split(targets_filename)
-    targets_filename = os.path.join(dirname, str(targets_version) + '.' + basename)
-
-  try:
-    signable = sslib_util.load_json_file(targets_filename)
-    try:
-      formats.check_signable_object_format(signable)
-    except exceptions.UnsignedMetadataError:
-      # Downgrade the error to a warning because a use case exists where
-      # metadata may be generated unsigned on one machine and signed on
-      # another.
-      logger.warning('Unsigned metadata object: ' + repr(signable))
-
-    targets_metadata = signable['signed']
-
-    for signature in signable['signatures']:
-      repository.targets.add_signature(signature, mark_role_as_dirty=False)
-
-    # Update 'targets.json' in 'roledb'.
-    roleinfo = roledb.get_roleinfo('targets', repository_name)
-    roleinfo['paths'] = targets_metadata['targets']
-    roleinfo['version'] = targets_metadata['version']
-    roleinfo['expires'] = targets_metadata['expires']
-    roleinfo['delegations'] = targets_metadata['delegations']
-
-    if _metadata_is_partially_loaded('targets', signable, repository_name):
-      roleinfo['partial_loaded'] = True
-
-    else:
-      logger.debug('Targets file was not partially loaded.')
-
-    _log_warning_if_expires_soon(TARGETS_FILENAME, roleinfo['expires'],
-        TARGETS_EXPIRES_WARN_SECONDS)
-
-    roledb.update_roleinfo('targets', roleinfo, mark_role_as_dirty=False,
-        repository_name=repository_name)
-
-    # Add the keys specified in the delegations field of the Targets role.
-    for keyid, key_metadata in targets_metadata['delegations']['keys'].items():
-
-      # Use the keyid found in the delegation.
-      key_object, _ = sslib_keys.format_metadata_to_key(key_metadata,
-          keyid)
-
-      # Add 'key_object' to the list of recognized keys.  Keys may be
-      # shared, so do not raise an exception if 'key_object' has already
-      # been loaded.  In contrast to the methods that may add duplicate
-      # keys, do not log a warning as there may be many such duplicate key
-      # warnings.  The repository maintainer should have also been made
-      # aware of the duplicate key when it was added.
-      try:
-        keydb.add_key(key_object, keyid=None, repository_name=repository_name)
-
-      except exceptions.KeyAlreadyExistsError:
-        pass
-
-  except sslib_exceptions.StorageError as error:
-    raise exceptions.RepositoryError('The Targets file '
-        'cannot be loaded: ' + repr(targets_filename)) from error
-
-  return repository, consistent_snapshot
-
-
-
-
-def _log_warning_if_expires_soon(rolename, expires_iso8601_timestamp,
-    seconds_remaining_to_warn):
-  """
-  Non-public function that logs a warning if 'rolename' expires in
-  'seconds_remaining_to_warn' seconds, or less.
-  """
-
-  # Metadata stores expiration datetimes in ISO8601 format.  Convert it to a
-  # unix timestamp, subtract the current time.time() (also in POSIX time)
-  # from it, and compare the difference against
-  # 'seconds_remaining_to_warn'.  Log a warning message to the console if
-  # 'rolename' expires soon.
-  datetime_object = formats.expiry_string_to_datetime(
-      expires_iso8601_timestamp)
-  expires_unix_timestamp = \
-    formats.datetime_to_unix_timestamp(datetime_object)
-  seconds_until_expires = expires_unix_timestamp - int(time.time())
-
-  if seconds_until_expires <= seconds_remaining_to_warn:
-    if seconds_until_expires <= 0:
-      logger.warning(
-          repr(rolename) + ' expired ' + datetime_object.ctime() + ' (UTC).')
-
-    else:
-      days_until_expires = seconds_until_expires / 86400
-      logger.warning(repr(rolename) + ' expires ' + datetime_object.ctime() +
-          ' (UTC).  ' + repr(days_until_expires) + ' day(s) until it expires.')
-
-
-
-
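The expiry check above converts an ISO8601 timestamp to a Unix timestamp and compares the difference with the warning window. A stdlib-only equivalent (the timestamp value is illustrative; the format mirrors TUF's 'YYYY-MM-DDTHH:MM:SSZ' convention):

import datetime
import time

expires = '2030-01-01T00:00:00Z'  # illustrative ISO8601 expiry

datetime_object = datetime.datetime.strptime(expires, '%Y-%m-%dT%H:%M:%SZ')
expires_unix = int(
    datetime_object.replace(tzinfo=datetime.timezone.utc).timestamp())

seconds_until_expires = expires_unix - int(time.time())
if seconds_until_expires <= 0:
    print('expired', datetime_object.ctime(), '(UTC).')
else:
    print('%.1f day(s) until it expires.' % (seconds_until_expires / 86400))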
-def import_rsa_privatekey_from_file(filepath, password=None):
-  """
-  <Purpose>
-    Import the encrypted PEM file in 'filepath', decrypt it, and return the
-    key object in 'securesystemslib.RSAKEY_SCHEMA' format.
-
-  <Arguments>
-    filepath:
-      <filepath> file, an RSA encrypted PEM file.  Unlike the public RSA PEM
-      key file, 'filepath' does not have an extension.
-
-    password:
-      The passphrase to decrypt 'filepath'.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if the arguments are improperly
-    formatted.
-
-    securesystemslib.exceptions.CryptoError, if 'filepath' is not a valid
-    encrypted key file.
-
-  <Side Effects>
-    The contents of 'filepath' are read, decrypted, and the key stored.
-
-  <Returns>
-    An RSA key object, conformant to 'securesystemslib.RSAKEY_SCHEMA'.
-  """
-
-  # Note: securesystemslib.interface.import_rsa_privatekey_from_file() does
-  # not allow both 'password' and 'prompt' to be True, nor does it
-  # automatically prompt for a password if the key file is encrypted and a
-  # password isn't given.
-  try:
-    private_key = sslib_interface.import_rsa_privatekey_from_file(
-        filepath, password)
-
-  # The user might not have given a password for an encrypted private key.
-  # Prompt for a password for convenience.
-  except sslib_exceptions.CryptoError:
-    if password is None:
-      private_key = sslib_interface.import_rsa_privatekey_from_file(
-          filepath, password, prompt=True)
-
-    else:
-      raise
-
-  return private_key
-
-
-
-
-
-
-def import_ed25519_privatekey_from_file(filepath, password=None):
-  """
-  <Purpose>
-    Import the encrypted ed25519 TUF key file in 'filepath', decrypt it, and
-    return the key object in 'securesystemslib.ED25519KEY_SCHEMA' format.
-
-    The TUF private key (which may also contain the public part) is
-    encrypted with AES-256 in CTR mode of operation.  The password is
-    strengthened with PBKDF2-HMAC-SHA256.
-
-  <Arguments>
-    filepath:
-      <filepath> file, an encrypted ed25519 TUF key file.
-
-    password:
-      The password, or passphrase, to import the private key (i.e., the
-      encrypted key file 'filepath' must be decrypted before the ed25519 key
-      object can be returned).
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if the arguments are improperly
-    formatted or the imported key object contains an invalid key type (i.e.,
-    not 'ed25519').
-
-    securesystemslib.exceptions.CryptoError, if 'filepath' cannot be
-    decrypted.
-
-    securesystemslib.exceptions.UnsupportedLibraryError, if 'filepath'
-    cannot be decrypted due to an invalid configuration setting (i.e., an
-    invalid 'tuf.settings' setting).
-
-  <Side Effects>
-    'password' is used to decrypt the 'filepath' key file.
-
-  <Returns>
-    An ed25519 key object of the form: 'securesystemslib.ED25519KEY_SCHEMA'.
-  """
-
-  # Note: securesystemslib.interface.import_ed25519_privatekey_from_file()
-  # does not allow both 'password' and 'prompt' to be True, nor does it
-  # automatically prompt for a password if the key file is encrypted and a
-  # password isn't given.
-  try:
-    private_key = sslib_interface.import_ed25519_privatekey_from_file(
-        filepath, password)
-
-  # The user might not have given a password for an encrypted private key.
-  # Prompt for a password for convenience.
-  except sslib_exceptions.CryptoError:
-    if password is None:
-      private_key = sslib_interface.import_ed25519_privatekey_from_file(
-          filepath, password, prompt=True)
-
-    else:
-      raise
-
-  return private_key
-
-
-
-def get_delegated_roles_metadata_filenames(metadata_directory,
-    consistent_snapshot, storage_backend=None):
-  """
-  Return a dictionary containing all filenames in 'metadata_directory'
-  except the top-level roles.  If multiple versions of a file exist because
-  of a consistent snapshot, only the file with the largest version prefix is
-  included.
-  """
-
-  filenames = {}
-
-  # Default to accessing files on local storage, consistent with the other
-  # functions in this module that accept an optional 'storage_backend'.
-  if storage_backend is None:
-    storage_backend = sslib_storage.FilesystemBackend()
-
-  metadata_files = sorted(storage_backend.list_folder(metadata_directory),
-      reverse=True)
-
-  # Iterate over the role metadata files, sorted by their version-number
-  # prefix with more recent versions first, and only add the most recent
-  # version of any (non top-level) metadata to the list of returned
-  # filenames.  Note that there should be only one version of each file if
-  # consistent_snapshot is False.
-  for metadata_role in metadata_files:
-    metadata_path = os.path.join(metadata_directory, metadata_role)
-
-    # Strip the version number if 'consistent_snapshot' is True, or if
-    # 'metadata_role' is Root.
-    # Example: '10.django.json' --> 'django.json'
-    consistent = \
-      metadata_role.endswith('root.json') or consistent_snapshot
-    metadata_name, junk = _strip_version_number(metadata_role,
-        consistent)
-
-    if metadata_name.endswith(METADATA_EXTENSION):
-      extension_length = len(METADATA_EXTENSION)
-      metadata_name = metadata_name[:-extension_length]
-
-    else:
-      logger.debug('Skipping file with unsupported metadata'
-          ' extension: ' + repr(metadata_path))
-      continue
-
-    # Skip top-level roles; we are only interested in delegated roles.
-    if metadata_name in roledb.TOP_LEVEL_ROLES:
-      continue
-
-    # Prevent reloading duplicate versions if consistent_snapshot is True.
-    if metadata_name not in filenames:
-      filenames[metadata_name] = metadata_path
-
-  return filenames
-
-
-
-def get_top_level_metadata_filenames(metadata_directory):
-  """
-  <Purpose>
-    Return a dictionary containing the filenames of the top-level roles.
-    If 'metadata_directory' is set to 'metadata', the dictionary
-    returned would contain:
-
-    filenames = {'root.json': 'metadata/root.json',
-                 'targets.json': 'metadata/targets.json',
-                 'snapshot.json': 'metadata/snapshot.json',
-                 'timestamp.json': 'metadata/timestamp.json'}
-
-    If 'metadata_directory' is not set by the caller, the current directory
-    is used.
-
-  <Arguments>
-    metadata_directory:
-      The directory containing the metadata files.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if 'metadata_directory' is
-    improperly formatted.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    A dictionary containing the expected filenames of the top-level
-    metadata files, such as 'root.json' and 'snapshot.json'.
-  """
-
-  # Does 'metadata_directory' have the correct format?
-  # Ensure the arguments have the appropriate number of objects and object
-  # types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.PATH_SCHEMA.check_match(metadata_directory)
-
-  # Store the filepaths of the top-level roles, including the
-  # 'metadata_directory' for each one.
-  filenames = {}
-
-  filenames[ROOT_FILENAME] = \
-    os.path.join(metadata_directory, ROOT_FILENAME)
-
-  filenames[TARGETS_FILENAME] = \
-    os.path.join(metadata_directory, TARGETS_FILENAME)
-
-  filenames[SNAPSHOT_FILENAME] = \
-    os.path.join(metadata_directory, SNAPSHOT_FILENAME)
-
-  filenames[TIMESTAMP_FILENAME] = \
-    os.path.join(metadata_directory, TIMESTAMP_FILENAME)
-
-  return filenames
-
-
-
-
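Because the directory listing above is sorted in reverse and the first hit wins, each delegated role maps to its highest version prefix (the comparison is lexicographic, as in the function itself). A small stdlib-only sketch of that selection, with illustrative filenames:

import os

# Illustrative listing of a metadata directory using consistent snapshots.
metadata_files = sorted(['2.django.json', '3.django.json', 'root.json',
                         '3.root.json', 'targets.json', 'snapshot.json',
                         'timestamp.json'], reverse=True)

TOP_LEVEL = {'root', 'targets', 'snapshot', 'timestamp'}
filenames = {}

for metadata_role in metadata_files:
    name = metadata_role
    # Strip a numeric version prefix, e.g. '3.django.json' -> 'django.json'.
    prefix, _, rest = name.partition('.')
    if prefix.isdigit():
        name = rest
    if not name.endswith('.json'):
        continue
    rolename = name[:-len('.json')]
    if rolename in TOP_LEVEL:
        continue
    # Reverse-sorted input means the first (highest) version wins.
    filenames.setdefault(rolename, metadata_role)

print(filenames)  # {'django': '3.django.json'}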
-def get_targets_metadata_fileinfo(filename, storage_backend, custom=None):
-  """
-  <Purpose>
-    Retrieve the file information of 'filename'.  The object returned
-    conforms to 'tuf.formats.TARGETS_FILEINFO_SCHEMA'.  The information
-    generated for 'filename' is stored in metadata files like
-    'targets.json'.  The fileinfo object returned has the form:
-
-    fileinfo = {'length': 1024,
-                'hashes': {'sha256': 1233dfba312, ...},
-                'custom': {...}}
-
-  <Arguments>
-    filename:
-      The metadata file whose file information is needed.  It must exist.
-
-    storage_backend:
-      An object which implements
-      securesystemslib.storage.StorageBackendInterface.
-
-    custom:
-      An optional object providing additional information about the file.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if 'filename' is improperly
-    formatted.
-
-  <Side Effects>
-    The file is opened and information about the file is generated,
-    such as its size and hash.
-
-  <Returns>
-    A dictionary conformant to 'tuf.formats.TARGETS_FILEINFO_SCHEMA'.  This
-    dictionary contains the length, hashes, and custom data of the
-    'filename' metadata file.  SHA256 hashes are generated by default.
-  """
-
-  # Do 'filename' and 'custom' have the correct format?
-  # Ensure the arguments have the appropriate number of objects and object
-  # types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.PATH_SCHEMA.check_match(filename)
-  if custom is not None:
-    formats.CUSTOM_SCHEMA.check_match(custom)
-
-  # Note: 'filehashes' is a dictionary of the form
-  # {'sha256': 1233dfba312, ...}.  'custom' is an optional
-  # dictionary that a client might define to include additional
-  # file information, such as the file's author, version/revision
-  # numbers, etc.
-  filesize, filehashes = sslib_util.get_file_details(filename,
-      settings.FILE_HASH_ALGORITHMS, storage_backend)
-
-  return formats.make_targets_fileinfo(filesize, filehashes, custom=custom)
-
-
-
-
-
-def get_metadata_versioninfo(rolename, repository_name):
-  """
-  <Purpose>
-    Retrieve the version information of 'rolename'.  The object returned
-    conforms to 'tuf.formats.VERSIONINFO_SCHEMA'.  The information
-    generated for 'rolename' is stored in 'snapshot.json'.
-    The versioninfo object returned has the form:
-
-    versioninfo = {'version': 14}
-
-  <Arguments>
-    rolename:
-      The metadata role whose versioninfo is needed.  It must exist,
-      otherwise a 'tuf.exceptions.UnknownRoleError' exception is raised.
-
-    repository_name:
-      The name of the repository.  If not supplied, the 'default' repository
-      is used.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if 'rolename' is improperly
-    formatted.
-
-    tuf.exceptions.UnknownRoleError, if 'rolename' does not exist.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    A dictionary conformant to 'tuf.formats.VERSIONINFO_SCHEMA'.
-    This dictionary contains the version number of 'rolename'.
-  """
-
-  # Does 'rolename' have the correct format?
-  # Ensure the arguments have the appropriate number of objects and object
-  # types, and that all dict keys are properly named.
-  formats.ROLENAME_SCHEMA.check_match(rolename)
-
-  roleinfo = roledb.get_roleinfo(rolename, repository_name)
-  versioninfo = {'version': roleinfo['version']}
-
-  return versioninfo
-
-
-
-
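get_targets_metadata_fileinfo() above delegates the length and hash computation to securesystemslib. A simplified stdlib-only stand-in showing the shape of the result (sha256 only; the function name, file name, and custom data here are hypothetical):

import hashlib
import os

def toy_fileinfo(filename, custom=None):
    # Length and sha256 digest, roughly what
    # securesystemslib.util.get_file_details() computes.
    length = os.path.getsize(filename)
    with open(filename, 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    fileinfo = {'length': length, 'hashes': {'sha256': digest}}
    if custom is not None:
        fileinfo['custom'] = custom
    return fileinfo

# Example (writes a throwaway target file first):
with open('example-target.txt', 'wb') as f:
    f.write(b'hello world')
print(toy_fileinfo('example-target.txt', custom={'type': 'text'}))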
-def create_bin_name(low, high, prefix_len):
-  """
-  <Purpose>
-    Create a string name for a delegated hash bin, where the name is a range
-    of zero-padded (up to prefix_len) strings, i.e. for low=00, high=07,
-    prefix_len=3 the returned name would be '000-007'.
-
-  <Arguments>
-    low:
-      The low end of the prefix range to be binned.
-
-    high:
-      The high end of the prefix range to be binned.
-
-    prefix_len:
-      The length of the prefix range components.
-
-  <Returns>
-    A string bin name, with each end of the range zero-padded up to
-    prefix_len.
-  """
-  if low == high:
-    return "{low:0{len}x}".format(low=low, len=prefix_len)
-
-  return "{low:0{len}x}-{high:0{len}x}".format(low=low, high=high,
-      len=prefix_len)
-
-
-
-
-
-def get_bin_numbers(number_of_bins):
-  """
-  <Purpose>
-    Given the desired number of bins (number_of_bins), calculate the prefix
-    length (prefix_length), the total number of prefixes (prefix_count) and
-    the number of prefixes to be stored in each bin (bin_size).
-    Example: number_of_bins = 32
-             prefix_length = 2
-             prefix_count = 256
-             bin_size = 8
-    That is, each of the 32 hashed bins is responsible for 8 hash prefixes,
-    i.e. 00-07, 08-0f, ..., f8-ff.
-
-  <Arguments>
-    number_of_bins:
-      The number of hashed bins in use.
-
-  <Returns>
-    A tuple of three values:
-      1. prefix_length: the length of each prefix
-      2. prefix_count: the total number of prefixes in use
-      3. bin_size: the number of hash prefixes to be stored in each bin
-  """
-  # Convert 'number_of_bins' to hexadecimal and determine the number of
-  # hexadecimal digits needed by each hash prefix.
-  prefix_length = len("{:x}".format(number_of_bins - 1))
-  # Calculate the total number of hash prefixes (e.g., 000 - fff total
-  # values).
-  prefix_count = 16 ** prefix_length
-  # Determine how many prefixes to assign to each bin.
-  bin_size = prefix_count // number_of_bins
-
-  # For simplicity, ensure that 'prefix_count' (16 ^ n) can be evenly
-  # distributed over 'number_of_bins' (which must be 2 ^ n).  Each bin will
-  # contain (prefix_count / number_of_bins) hash prefixes.
-  if prefix_count % number_of_bins != 0:
-    # Note: x % y != 0 does not guarantee that y is not a power of 2 for
-    # arbitrary x and y values.  However, due to the relationship between
-    # number_of_bins and prefix_count, it is true for them.
-    raise sslib_exceptions.Error('The "number_of_bins" argument'
-        ' must be a power of 2.')
-
-  return prefix_length, prefix_count, bin_size
-
-
-
-
-
-def find_bin_for_target_hash(target_hash, number_of_bins):
-  """
-  <Purpose>
-    For a given hashed filename, target_hash, calculate the name of the
-    hashed bin into which this file would be delegated, given that
-    number_of_bins bins are in use.
-
-  <Arguments>
-    target_hash:
-      The hash of the target file's path.
-
-    number_of_bins:
-      The number of hashed bins in use.
-
-  <Returns>
-    The name of the hashed bin that target_hash would be binned into.
-  """
-
-  prefix_length, _, bin_size = get_bin_numbers(number_of_bins)
-
-  prefix = int(target_hash[:prefix_length], 16)
-
-  low = prefix - (prefix % bin_size)
-  high = (low + bin_size - 1)
-
-  return create_bin_name(low, high, prefix_length)
-
-
-
-
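To make the bin arithmetic above concrete, here is a stdlib-only rerun of the 32-bin example from get_bin_numbers(), plus the bin chosen for a sample hash (the digest value is illustrative):

number_of_bins = 32  # must be a power of 2

prefix_length = len('{:x}'.format(number_of_bins - 1))   # 2
prefix_count = 16 ** prefix_length                       # 256
bin_size = prefix_count // number_of_bins                # 8

target_hash = 'e3b0c44298fc1c149afbf4c8996fb924'  # illustrative hex digest
prefix = int(target_hash[:prefix_length], 16)            # 0xe3 = 227

low = prefix - (prefix % bin_size)                       # 224 (0xe0)
high = low + bin_size - 1                                # 231 (0xe7)

bin_name = '{low:0{len}x}-{high:0{len}x}'.format(low=low, high=high,
                                                 len=prefix_length)
print(bin_name)  # e0-e7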
-def get_target_hash(target_filepath):
-  """
-  <Purpose>
-    Compute the hash of 'target_filepath'.  This is useful in conjunction
-    with the "path_hash_prefixes" attribute in a delegated targets role,
-    which tells us which paths a role is implicitly responsible for.
-
-    The repository may optionally organize targets into hashed bins to ease
-    target delegations and role metadata management.  The use of consistent
-    hashing allows for a uniform distribution of targets into bins.
-
-  <Arguments>
-    target_filepath:
-      The path to the target file on the repository.  This will be relative
-      to the 'targets' (or equivalent) directory on a given mirror.
-
-  <Exceptions>
-    None.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    The hash of 'target_filepath'.
-  """
-  formats.RELPATH_SCHEMA.check_match(target_filepath)
-
-  digest_object = sslib_hash.digest(algorithm=HASH_FUNCTION)
-  digest_object.update(target_filepath.encode('utf-8'))
-  return digest_object.hexdigest()
-
-
-
-
-def generate_root_metadata(version, expiration_date, consistent_snapshot,
-    repository_name='default'):
-  """
-  <Purpose>
-    Create the root metadata.  'roledb' and 'keydb' are read, and the
-    information returned by these modules is used to generate the root
-    metadata object.
-
-  <Arguments>
-    version:
-      The metadata version number.  Clients use the version number to
-      determine if the downloaded version is newer than the one currently
-      trusted.
-
-    expiration_date:
-      The expiration date of the metadata file.  Conformant to
-      'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'.
-
-    consistent_snapshot:
-      Boolean.  If True, a file digest is expected to be prepended to the
-      filename of any target file located in the targets directory.  Each
-      digest is stripped from the target filename and listed in the
-      snapshot metadata.
-
-    repository_name:
-      The name of the repository.  If not supplied, the 'default' repository
-      is used.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if the generated root metadata
-    object could not be generated with the correct format.
-
-    securesystemslib.exceptions.Error, if an error is encountered while
-    generating the root metadata object (e.g., a required top-level role not
-    found in 'roledb'.)
-
-  <Side Effects>
-    The contents of 'keydb' and 'roledb' are read.
-
-  <Returns>
-    A root metadata object, conformant to 'tuf.formats.ROOT_SCHEMA'.
-  """
-
-  # Do the arguments have the correct format?
-  # Ensure the arguments have the appropriate number of objects and object
-  # types, and that all dict keys are properly named.  Raise
-  # 'securesystemslib.exceptions.FormatError' if any of the arguments are
-  # improperly formatted.
-  formats.METADATAVERSION_SCHEMA.check_match(version)
-  sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # The role and key dictionaries to be saved in the root metadata object.
-  # Conformant to 'ROLEDICT_SCHEMA' and 'KEYDICT_SCHEMA', respectively.
-  roledict = {}
-  keydict = {}
-  keylist = []
-
-  # Extract the role, threshold, and keyid information of the top-level
-  # roles, which Root stores in its metadata.  The necessary role metadata
-  # is generated from this information.
-  for rolename in roledb.TOP_LEVEL_ROLES:
-
-    # If a top-level role is missing from 'roledb', raise an exception.
-    if not roledb.role_exists(rolename, repository_name):
-      raise sslib_exceptions.Error(repr(rolename) + ' not in'
-          ' "roledb".')
-
-    # Collect the keys of all roles in a list.
-    keyids = roledb.get_role_keyids(rolename, repository_name)
-    for keyid in keyids:
-      key = keydb.get_key(keyid, repository_name=repository_name)
-      keylist.append(key)
-
-    # Generate the authentication information Root establishes for each
-    # top-level role.
-    role_threshold = roledb.get_role_threshold(rolename, repository_name)
-    role_metadata = formats.build_dict_conforming_to_schema(
-        formats.ROLE_SCHEMA,
-        keyids=keyids,
-        threshold=role_threshold)
-    roledict[rolename] = role_metadata
-
-  # Create the root metadata 'keys' dictionary.
-  _, keydict = keys_to_keydict(keylist)
-
-  # Use the generalized build_dict_conforming_to_schema func to produce a
-  # dict that contains all the appropriate information for this type of
-  # metadata, checking that the result conforms to the appropriate schema.
-  # TODO: Later, probably after the rewrite for TUF Issue #660, generalize
-  #       further, upward, by replacing generate_targets_metadata,
-  #       generate_root_metadata, etc. with one function that generates
-  #       metadata, possibly rolling that upwards into the calling function.
-  #       There are very few things that really need to be done differently.
-  return formats.build_dict_conforming_to_schema(
-      formats.ROOT_SCHEMA,
-      version=version,
-      expires=expiration_date,
-      keys=keydict,
-      roles=roledict,
-      consistent_snapshot=consistent_snapshot)
-
-
-
-
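The dict returned by generate_root_metadata() conforms to ROOT_SCHEMA; in serialized form it looks roughly like the following (the keyids, key value, and dates are made-up placeholders, and the exact field set is that of this legacy schema as best understood):

root_metadata = {
    '_type': 'root',
    'spec_version': '1.0.0',
    'version': 1,
    'expires': '2030-01-01T00:00:00Z',
    'consistent_snapshot': False,
    'keys': {
        'f7a2b3...': {'keytype': 'ed25519', 'scheme': 'ed25519',
                      'keyval': {'public': '...'}},
    },
    'roles': {
        'root':      {'keyids': ['f7a2b3...'], 'threshold': 1},
        'targets':   {'keyids': ['f7a2b3...'], 'threshold': 1},
        'snapshot':  {'keyids': ['f7a2b3...'], 'threshold': 1},
        'timestamp': {'keyids': ['f7a2b3...'], 'threshold': 1},
    },
}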
-def generate_targets_metadata(targets_directory, target_files, version,
-    expiration_date, delegations=None, write_consistent_targets=False,
-    use_existing_fileinfo=False, storage_backend=None,
-    repository_name='default'):
-  """
-  <Purpose>
-    Generate the targets metadata object.  The targets in 'target_files'
-    must exist at the same paths they will have on the repository.
-    'target_files' is a dictionary of the repository's target files (see
-    <Arguments>).  The 'custom' field of the targets metadata is not
-    currently supported.
-
-  <Arguments>
-    targets_directory:
-      The absolute path to a directory containing the target files and
-      directories of the repository.
-
-    target_files:
-      The target files tracked by 'targets.json'.  'target_files' is a
-      dictionary mapping target paths (relative to the targets directory)
-      to a dict matching tuf.formats.LOOSE_FILEINFO_SCHEMA.
-      LOOSE_FILEINFO_SCHEMA can support multiple different value patterns:
-        1) an empty dictionary - for when fileinfo should be generated
-        2) a dictionary matching tuf.formats.CUSTOM_SCHEMA - for when
-           fileinfo should be generated, with the supplied custom metadata
-           attached
-        3) a dictionary matching tuf.formats.FILEINFO_SCHEMA - for when
-           full fileinfo is provided in conjunction with
-           use_existing_fileinfo
-
-    version:
-      The metadata version number.  Clients use the version number to
-      determine if the downloaded version is newer than the one currently
-      trusted.
-
-    expiration_date:
-      The expiration date of the metadata file.  Conformant to
-      'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'.
-
-    delegations:
-      The delegations made by the targets role to be generated.
-      'delegations' must match 'tuf.formats.DELEGATIONS_SCHEMA'.
-
-    write_consistent_targets:
-      Boolean that indicates whether file digests should be prepended to
-      the target files.
-      NOTE: it is an error for write_consistent_targets to be True when
-      use_existing_fileinfo is also True.  We cannot create consistent
-      targets for a target file whose fileinfo isn't generated by tuf.
-
-    use_existing_fileinfo:
-      Boolean that indicates whether to use the complete fileinfo,
-      including hashes, as it already exists in the roledb (True), or
-      whether to generate hashes (False).
-
-    storage_backend:
-      An object which implements
-      securesystemslib.storage.StorageBackendInterface.
-
-    repository_name:
-      The name of the repository.  If not supplied, the 'default'
-      repository is used.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if an error occurred trying to
-    generate the targets metadata object.
-
-    securesystemslib.exceptions.Error, if use_existing_fileinfo is False
-    and any of the target files cannot be read.
-
-    securesystemslib.exceptions.Error, if use_existing_fileinfo is True and
-    some of the target files do not have corresponding hashes in the
-    roledb.
-
-    securesystemslib.exceptions.Error, if both use_existing_fileinfo and
-    write_consistent_targets are True.
-
-  <Side Effects>
-    If use_existing_fileinfo is False, the target files are read from
-    storage and file information about them is generated.
-    If 'write_consistent_targets' is True, each target in 'target_files'
-    will be copied to a file with a digest prepended to its filename.  For
-    example, if 'some_file.txt' is one of the targets of 'target_files',
-    consistent targets <digest>.some_file.txt (one per hash algorithm) are
-    created, and the content of 'some_file.txt' will be copied into them.
-
-  <Returns>
-    A targets metadata object, conformant to
-    'tuf.formats.TARGETS_SCHEMA'.
-  """
-
-  # Do the arguments have the correct format?
-  # Ensure the arguments have the appropriate number of objects and object
-  # types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-  sslib_formats.PATH_SCHEMA.check_match(targets_directory)
-  formats.PATH_FILEINFO_SCHEMA.check_match(target_files)
-  formats.METADATAVERSION_SCHEMA.check_match(version)
-  sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(write_consistent_targets)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(use_existing_fileinfo)
-
-  if write_consistent_targets and use_existing_fileinfo:
-    raise sslib_exceptions.Error('Cannot support writing consistent'
-        ' targets and using existing fileinfo.')
-
-  if delegations is not None:
-    formats.DELEGATIONS_SCHEMA.check_match(delegations)
-    # If the targets role has delegations, collect the up-to-date 'keyids'
-    # and 'threshold' for each role.  Update the delegations keys
-    # dictionary.
-    delegations_keys = []
-    # Update 'keyids' and 'threshold' for each delegated role.
-    for role in delegations['roles']:
-      role['keyids'] = roledb.get_role_keyids(role['name'],
-          repository_name)
-      role['threshold'] = roledb.get_role_threshold(role['name'],
-          repository_name)
-
-      # Collect all delegations keys for generating the delegations
-      # keydict.
-      for keyid in role['keyids']:
-        key = keydb.get_key(keyid, repository_name=repository_name)
-        delegations_keys.append(key)
-
-    _, delegations['keys'] = keys_to_keydict(delegations_keys)
-
-
-  # Store the file attributes of targets in 'target_files'.  'filedict',
-  # conformant to 'tuf.formats.FILEDICT_SCHEMA', is added to the
-  # targets metadata object returned.
-  filedict = {}
-
-  if use_existing_fileinfo:
-    # Use the provided fileinfo dicts, conforming to FILEINFO_SCHEMA,
-    # rather than generating fileinfo.
-    for target, fileinfo in target_files.items():
-
-      # Ensure all fileinfo entries in target_files have a non-empty hashes
-      # dict.
-      if not fileinfo.get('hashes', None):
-        raise sslib_exceptions.Error('use_existing_fileinfo option'
-            ' set but no hashes exist in fileinfo for ' + repr(target))
-
-      # ... and a non-empty length.
-      if fileinfo.get('length', -1) < 0:
-        raise sslib_exceptions.Error('use_existing_fileinfo option'
-            ' set but no length exists in fileinfo for ' + repr(target))
-
-      filedict[target] = fileinfo
-
-  else:
-    # Generate the fileinfo dicts by accessing the target files on storage.
-    # Default to accessing files on local storage.
-    if storage_backend is None:
-      storage_backend = sslib_storage.FilesystemBackend()
-
-    filedict = _generate_targets_fileinfo(target_files, targets_directory,
-        write_consistent_targets, storage_backend)
-
-  # Generate the targets metadata object.
-  # Use the generalized build_dict_conforming_to_schema func to produce a
-  # dict that contains all the appropriate information for targets
-  # metadata, checking that the result conforms to the appropriate schema.
-  # TODO: Later, probably after the rewrite for TUF Issue #660, generalize
-  #       further, upward, by replacing generate_targets_metadata,
-  #       generate_root_metadata, etc. with one function that generates
-  #       metadata, possibly rolling that upwards into the calling function.
-  #       There are very few things that really need to be done differently.
-  if delegations is not None:
-    return formats.build_dict_conforming_to_schema(
-        formats.TARGETS_SCHEMA,
-        version=version,
-        expires=expiration_date,
-        targets=filedict,
-        delegations=delegations)
-  else:
-    return formats.build_dict_conforming_to_schema(
-        formats.TARGETS_SCHEMA,
-        version=version,
-        expires=expiration_date,
-        targets=filedict)
-  # TODO: As an alternative to the odd if/else above, where we decide
-  #       whether or not to include the delegations argument based on
-  #       whether or not it is None, consider instead adding a check in
-  #       build_dict_conforming_to_schema that skips a keyword if that
-  #       keyword is optional in the schema and the value passed in is set
-  #       to None....
-
-
-
-
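The three LOOSE_FILEINFO_SCHEMA patterns accepted for 'target_files' (described in the docstring above) look like this in practice, as _generate_targets_fileinfo() reads them; the paths, hashes, and custom data are illustrative:

target_files = {
    # 1) Empty dict: fileinfo (length and hashes) is generated from disk.
    'file1.txt': {},

    # 2) Custom data only: fileinfo is generated, custom data is attached.
    'file2.txt': {'custom': {'type': 'powerpc', 'version': '0.3'}},

    # 3) Full fileinfo: used as-is when use_existing_fileinfo=True.
    'file3.txt': {'length': 1024,
                  'hashes': {'sha256': 'aaf9...'}},
}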
-def _generate_targets_fileinfo(target_files, targets_directory,
-    write_consistent_targets, storage_backend):
-  """
-  Iterate over target_files and:
-    * ensure they exist in the targets_directory
-    * generate a fileinfo dict for the target file, including hashes
-    * copy 'target_path' to 'digest_target' if write_consistent_targets
-  Add all generated fileinfo dicts to a dictionary mapping
-  targetpath: fileinfo and return the dict.
-  """
-
-  filedict = {}
-
-  # Generate the fileinfo of all the target files listed in 'target_files'.
-  for target, fileinfo in target_files.items():
-
-    # The root-most folder of the targets directory should not be included
-    # in target paths listed in targets metadata.
-    # (e.g., 'targets/more_targets/somefile.txt' -> 'more_targets/somefile.txt')
-    relative_targetpath = target
-
-    # Note: join() discards 'targets_directory' if 'target' contains a
-    # leading path separator (i.e., is treated as an absolute path), so
-    # strip any leading separators first.
-    target_path = os.path.join(targets_directory, target.lstrip(os.sep))
-
-    # Add 'custom' if it has been provided.  Custom data about the target
-    # is optional and will only be included in metadata (i.e., a 'custom'
-    # field in the target's fileinfo dictionary) if specified here.
-    custom_data = fileinfo.get('custom', None)
-
-    filedict[relative_targetpath] = \
-      get_targets_metadata_fileinfo(target_path, storage_backend,
-          custom_data)
-
-    # Copy 'target_path' to 'digest_target' if consistent hashing is
-    # enabled.
-    if write_consistent_targets:
-      for target_digest in filedict[relative_targetpath]['hashes'].values():
-        dirname, basename = os.path.split(target_path)
-        digest_filename = target_digest + '.' + basename
-        digest_target = os.path.join(dirname, digest_filename)
-        shutil.copyfile(target_path, digest_target)
-
-  return filedict
-
-
-
-def _get_hashes_and_length_if_needed(use_length, use_hashes, full_file_path,
-    storage_backend):
-  """
-  Calculate the length and hashes only if they are required; otherwise,
-  computing them for every metadata file imposes unnecessary overhead on
-  adopters of tuf with lots of delegations.
-  """
-
-  length = None
-  hashes = None
-  if use_length:
-    length = sslib_util.get_file_length(full_file_path,
-        storage_backend)
-
-  if use_hashes:
-    hashes = sslib_util.get_file_hashes(full_file_path,
-        settings.FILE_HASH_ALGORITHMS, storage_backend)
-
-  return length, hashes
-
-
-
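A stdlib-only stand-in for _get_hashes_and_length_if_needed(), showing how None is returned when the optional attributes are disabled (the function name is hypothetical, and the hash algorithm is fixed to sha256 for the sketch):

import hashlib
import os

def toy_hashes_and_length(use_length, use_hashes, path):
    length = None
    hashes = None
    if use_length:
        length = os.path.getsize(path)
    if use_hashes:
        with open(path, 'rb') as f:
            hashes = {'sha256': hashlib.sha256(f.read()).hexdigest()}
    return length, hashes

with open('example-snapshot.json', 'wb') as f:
    f.write(b'{}')
print(toy_hashes_and_length(False, False, 'example-snapshot.json'))  # (None, None)
print(toy_hashes_and_length(True, True, 'example-snapshot.json'))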
-def generate_snapshot_metadata(metadata_directory, version, expiration_date,
-    storage_backend, consistent_snapshot=False,
-    repository_name='default', use_length=False, use_hashes=False):
-  """
-  <Purpose>
-    Create the snapshot metadata.  The minimum metadata must exist (i.e.,
-    'root.json' and 'targets.json').  This function searches
-    'metadata_directory' and the resulting snapshot file will list all the
-    delegated roles found there.
-
-  <Arguments>
-    metadata_directory:
-      The directory containing the 'root.json' and 'targets.json' metadata
-      files.
-
-    version:
-      The metadata version number.  Clients use the version number to
-      determine if the downloaded version is newer than the one currently
-      trusted.
-
-    expiration_date:
-      The expiration date of the metadata file.
-      Conformant to 'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'.
-
-    storage_backend:
-      An object which implements
-      securesystemslib.storage.StorageBackendInterface.
-
-    consistent_snapshot:
-      Boolean.  If True, a file digest is expected to be prepended to the
-      filename of any target file located in the targets directory.  Each
-      digest is stripped from the target filename and listed in the
-      snapshot metadata.
-
-    repository_name:
-      The name of the repository.  If not supplied, the 'default'
-      repository is used.
-
-    use_length:
-      Whether to include the optional length attribute for targets
-      metadata files in the snapshot metadata.
-      Default is False, to save bandwidth without losing security
-      against rollback attacks.
-      Read more in section 5.6 of the Mercury paper:
-      https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy
-
-    use_hashes:
-      Whether to include the optional hashes attribute for targets
-      metadata files in the snapshot metadata.
-      Default is False, to save bandwidth without losing security
-      against rollback attacks.
-      Read more in section 5.6 of the Mercury paper:
-      https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if the arguments are
-    improperly formatted.
-
-    securesystemslib.exceptions.Error, if an error occurred trying to
-    generate the snapshot metadata object.
-
-  <Side Effects>
-    The 'root.json' and 'targets.json' files are read.
-
-  <Returns>
-    The snapshot metadata object, conformant to
-    'tuf.formats.SNAPSHOT_SCHEMA'.
-  """
-
-  # Do the arguments have the correct format?
-  # This check ensures arguments have the appropriate number of objects and
-  # object types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
-  sslib_formats.PATH_SCHEMA.check_match(metadata_directory)
-  formats.METADATAVERSION_SCHEMA.check_match(version)
-  sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(use_length)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(use_hashes)
-
-  # Snapshot's 'fileinfodict' shall contain the version number of the
-  # top-level targets role and of every delegated role in the repository.
-  fileinfodict = {}
-
-  length, hashes = _get_hashes_and_length_if_needed(use_length, use_hashes,
-      os.path.join(metadata_directory, TARGETS_FILENAME), storage_backend)
-
-  targets_role = TARGETS_FILENAME[:-len(METADATA_EXTENSION)]
-
-  targets_file_version = get_metadata_versioninfo(targets_role,
-      repository_name)
-
-  # Make the file info dictionary with make_metadata_fileinfo because, in
-  # the tuf spec, length and hashes are optional for all METAFILES in
-  # snapshot.json, including the top-level targets file.
-  fileinfodict[TARGETS_FILENAME] = formats.make_metadata_fileinfo(
-      targets_file_version['version'], length, hashes)
-
-  # Search the metadata directory and generate the versioninfo of all the
-  # role files found there.  This information is stored in the 'meta' field
-  # of 'snapshot.json'.
-
-  metadata_files = sorted(storage_backend.list_folder(metadata_directory),
-      reverse=True)
-  for metadata_filename in metadata_files:
-    # Strip the version number if 'consistent_snapshot' is True.
-    # Example: '10.django.json' --> 'django.json'
-    metadata_name, junk = _strip_version_number(metadata_filename,
-        consistent_snapshot)
-
-    # All delegated roles are added to the snapshot file.
-    if metadata_filename.endswith(METADATA_EXTENSION):
-      rolename = metadata_filename[:-len(METADATA_EXTENSION)]
-
-      # Obsolete role files may still be found.  Ensure only roles loaded
-      # in the roledb are included in the Snapshot metadata.  Since the
-      # snapshot and timestamp roles are not listed in snapshot.json, do
-      # not list these roles found in the metadata directory.
-      if roledb.role_exists(rolename, repository_name) and \
-          rolename not in roledb.TOP_LEVEL_ROLES:
-
-        length, hashes = _get_hashes_and_length_if_needed(use_length,
-            use_hashes, os.path.join(metadata_directory, metadata_filename),
-            storage_backend)
-
-        file_version = get_metadata_versioninfo(rolename,
-            repository_name)
-
-        fileinfodict[metadata_name] = formats.make_metadata_fileinfo(
-            file_version['version'], length, hashes)
-
-    else:
-      logger.debug('Metadata file has an unsupported file'
-          ' extension: ' + metadata_filename)
-
-  # Generate the Snapshot metadata object.
-  # Use the generalized build_dict_conforming_to_schema func to produce a
-  # dict that contains all the appropriate information for snapshot
-  # metadata, checking that the result conforms to the appropriate schema.
-  # TODO: Later, probably after the rewrite for TUF Issue #660, generalize
-  #       further, upward, by replacing generate_targets_metadata,
-  #       generate_root_metadata, etc. with one function that generates
-  #       metadata, possibly rolling that upwards into the calling function.
-  #       There are very few things that really need to be done differently.
-  return formats.build_dict_conforming_to_schema(
-      formats.SNAPSHOT_SCHEMA,
-      version=version,
-      expires=expiration_date,
-      meta=fileinfodict)
-
-
-
-
-
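With the defaults (use_length=False, use_hashes=False), the snapshot 'meta' field produced above carries only version numbers; enabling the flags adds the optional attributes. Illustrative shapes (role names, versions, and digests are made up):

# Default: version numbers only (Mercury-style, saves bandwidth).
meta_minimal = {
    'targets.json': {'version': 14},
    'django.json': {'version': 3},
}

# With use_length=True and use_hashes=True.
meta_full = {
    'targets.json': {'version': 14,
                     'length': 1024,
                     'hashes': {'sha256': '4a5b...'}},
    'django.json': {'version': 3,
                    'length': 512,
                    'hashes': {'sha256': '9c1d...'}},
}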
-def generate_timestamp_metadata(snapshot_file_path, version, expiration_date,
-    storage_backend, repository_name, use_length=True, use_hashes=True):
-  """
-  <Purpose>
-    Generate the timestamp metadata object.  The 'snapshot.json' file must
-    exist.
-
-  <Arguments>
-    snapshot_file_path:
-      Path to the required snapshot metadata file.  The timestamp role
-      needs to calculate the file size and hash of this file.
-
-    version:
-      The timestamp's version number.  Clients use the version number to
-      determine if the downloaded version is newer than the one currently
-      trusted.
-
-    expiration_date:
-      The expiration date of the metadata file, conformant to
-      'securesystemslib.formats.ISO8601_DATETIME_SCHEMA'.
-
-    storage_backend:
-      An object which implements
-      securesystemslib.storage.StorageBackendInterface.
-
-    repository_name:
-      The name of the repository.  If not supplied, the 'default'
-      repository is used.
-
-    use_length:
-      Whether to include the optional length attribute of the snapshot
-      metadata file in the timestamp metadata.
-      Default is True.
-
-    use_hashes:
-      Whether to include the optional hashes attribute of the snapshot
-      metadata file in the timestamp metadata.
-      Default is True.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if the generated timestamp
-    metadata object cannot be formatted correctly, or one of the arguments
-    is improperly formatted.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    A timestamp metadata object, conformant to
-    'tuf.formats.TIMESTAMP_SCHEMA'.
-  """
-
-  # Do the arguments have the correct format?
-  # This check ensures arguments have the appropriate number of objects and
-  # object types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
-  sslib_formats.PATH_SCHEMA.check_match(snapshot_file_path)
-  formats.METADATAVERSION_SCHEMA.check_match(version)
-  sslib_formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(use_length)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(use_hashes)
-
-  snapshot_fileinfo = {}
-
-  length, hashes = _get_hashes_and_length_if_needed(use_length, use_hashes,
-      snapshot_file_path, storage_backend)
-
-  snapshot_filename = os.path.basename(snapshot_file_path)
-  # Retrieve the versioninfo of the Snapshot metadata file.
-  snapshot_version = get_metadata_versioninfo('snapshot', repository_name)
-  snapshot_fileinfo[snapshot_filename] = \
-    formats.make_metadata_fileinfo(snapshot_version['version'],
-        length, hashes)
-
-  # Generate the timestamp metadata object.
-  # Use the generalized build_dict_conforming_to_schema func to produce a
-  # dict that contains all the appropriate information for timestamp
-  # metadata, checking that the result conforms to the appropriate schema.
-  # TODO: Later, probably after the rewrite for TUF Issue #660, generalize
-  #       further, upward, by replacing generate_targets_metadata,
-  #       generate_root_metadata, etc. with one function that generates
-  #       metadata, possibly rolling that upwards into the calling function.
-  #       There are very few things that really need to be done differently.
-  return formats.build_dict_conforming_to_schema(
-      formats.TIMESTAMP_SCHEMA,
-      version=version,
-      expires=expiration_date,
-      meta=snapshot_fileinfo)
-
-
-
-
-def sign_metadata(metadata_object, keyids, filename, repository_name):
-  """
-  <Purpose>
-    Sign a metadata object.  If any of the keyids have already signed the
-    file, the old signature is replaced.  The keys in 'keyids' must already
-    be loaded in 'keydb'.
-
-  <Arguments>
-    metadata_object:
-      The metadata object to sign.  For example, 'metadata' might
-      correspond to 'tuf.formats.ROOT_SCHEMA' or
-      'tuf.formats.TARGETS_SCHEMA'.
-
-    keyids:
-      The list of keyids of the signing keys.
-
-    filename:
-      The intended filename of the signed metadata object.
-      For example, 'root.json' or 'targets.json'.  This function
-      does NOT save the signed metadata to this filename.
-
-    repository_name:
-      The name of the repository.  If not supplied, the 'default'
-      repository is used.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if a valid 'signable' object
-    could not be generated or the arguments are improperly formatted.
-
-    securesystemslib.exceptions.Error, if an invalid keytype was found in
-    the keystore.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    A signable object conformant to 'tuf.formats.SIGNABLE_SCHEMA'.
-  """
-
-  # Do the arguments have the correct format?
-  # This check ensures arguments have the appropriate number of objects and
-  # object types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
-  formats.ANYROLE_SCHEMA.check_match(metadata_object)
-  sslib_formats.KEYIDS_SCHEMA.check_match(keyids)
-  sslib_formats.PATH_SCHEMA.check_match(filename)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # Make sure the metadata is in 'signable' format.  That is,
-  # it contains a 'signatures' field containing the result
-  # of signing the 'signed' field of 'metadata' with each
-  # keyid of 'keyids'.
-  signable = formats.make_signable(metadata_object)
-
-  # Sign the metadata with each keyid in 'keyids'.  'signable' should have
-  # zero signatures (metadata_object contained none).
-  for keyid in keyids:
-
-    # Load the signing key.
-    key = keydb.get_key(keyid, repository_name=repository_name)
-
-    # Generate the signature using the appropriate signing method.
-    if key['keytype'] in SUPPORTED_KEY_TYPES:
-      if 'private' in key['keyval']:
-        signed = sslib_formats.encode_canonical(signable['signed']).encode('utf-8')
-        try:
-          signature = sslib_keys.create_signature(key, signed)
-          signable['signatures'].append(signature)
-
-        except Exception:
-          logger.warning('Unable to create signature for keyid: ' + repr(keyid))
-
-      else:
-        logger.debug('Private key unset.  Skipping: ' + repr(keyid))
-
-    else:
-      raise sslib_exceptions.Error('The keydb contains a key with'
-          ' an invalid key type: ' + repr(key['keytype']))
-
-  # Raise 'securesystemslib.exceptions.FormatError' if the resulting
-  # 'signable' is not formatted correctly.
-  try:
-    formats.check_signable_object_format(signable)
-  except exceptions.UnsignedMetadataError:
-    # Downgrade the error to a warning because a use case exists where
-    # metadata may be generated unsigned on one machine and signed on
-    # another.
-    logger.warning('Unsigned metadata object: ' + repr(signable))
-
-  return signable
-
-
-
-
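The core of sign_metadata() is canonicalize-then-sign. A compact sketch with securesystemslib (the key is generated on the fly here; in this module it would come from 'keydb', and the toy metadata dict is illustrative):

from securesystemslib import formats as sslib_formats
from securesystemslib import keys as sslib_keys

metadata_object = {'_type': 'snapshot', 'version': 7}  # toy 'signed' content
signable = {'signed': metadata_object, 'signatures': []}

key = sslib_keys.generate_ed25519_key()  # stands in for a keydb lookup

# Canonical JSON ensures signer and verifier hash identical bytes.
signed = sslib_formats.encode_canonical(signable['signed']).encode('utf-8')
signature = sslib_keys.create_signature(key, signed)
signable['signatures'].append(signature)

print(sslib_keys.verify_signature(key, signature, signed))  # True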
-def write_metadata_file(metadata, filename, version_number,
-    consistent_snapshot, storage_backend):
-  """
-  <Purpose>
-    If necessary, write the 'metadata' signable object to 'filename'.
-
-  <Arguments>
-    metadata:
-      The object that will be saved to 'filename', conformant to
-      'tuf.formats.SIGNABLE_SCHEMA'.
-
-    filename:
-      The filename of the metadata to be written (e.g., 'root.json').
-
-    version_number:
-      The version number of the metadata file to be written.  The version
-      number is needed for consistent snapshots, which prepend the version
-      number to 'filename'.
-
-    consistent_snapshot:
-      Boolean that determines whether the metadata file's digest should be
-      prepended to the filename.
-
-    storage_backend:
-      An object which implements
-      securesystemslib.storage.StorageBackendInterface.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if the arguments are
-    improperly formatted.
-
-    securesystemslib.exceptions.Error, if the directory of 'filename' does
-    not exist.
-
-    Any other runtime (e.g., IO) exception.
-
-  <Side Effects>
-    The 'filename' file is created, or overwritten if it exists.
-
-  <Returns>
-    The filename of the written file.
-  """
-
-  # Do the arguments have the correct format?
-  # This check ensures arguments have the appropriate number of objects and
-  # object types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
-  formats.SIGNABLE_SCHEMA.check_match(metadata)
-  sslib_formats.PATH_SCHEMA.check_match(filename)
-  formats.METADATAVERSION_SCHEMA.check_match(version_number)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot)
-
-  if storage_backend is None:
-    storage_backend = sslib_storage.FilesystemBackend()
-
-  # Generate the actual metadata file content of 'metadata'.  Metadata is
-  # saved as JSON and includes formatting, such as indentation and sorted
-  # objects.  The new digest of 'metadata' is also calculated to help
-  # determine if re-saving is required.
-  file_content = _get_written_metadata(metadata)
-
-  # We previously verified whether new metadata needed to be written (i.e.,
-  # has not been previously written or has changed).  It is now assumed
-  # that the caller intends to write changes that have been marked as
-  # dirty.
-
-  # The 'metadata' object is written to 'file_object'.  To avoid partial
-  # metadata from being written, 'metadata' is first written to a temporary
-  # location (i.e., 'file_object') and then moved to 'filename'.
-  file_object = tempfile.TemporaryFile()
-
-  # Serialize 'metadata' to the file-like object and then write
-  # 'file_object' to disk.  The dictionary keys of 'metadata' are sorted
-  # and indentation is used.
-  file_object.write(file_content)
-
-  if consistent_snapshot:
-    dirname, basename = os.path.split(filename)
-    basename = basename.split(METADATA_EXTENSION, 1)[0]
-    version_and_filename = str(version_number) + '.' + basename + METADATA_EXTENSION
-    written_consistent_filename = os.path.join(dirname, version_and_filename)
-
-    # If consistent snapshots pointed at 'filename', they would always
-    # point to the current version.  Example: 1.root.json and 2.root.json
-    # -> root.json.  So, when consistent_snapshot is True, save the
-    # version-prefixed consistent snapshot file in addition to 'filename'.
-    logger.debug('Creating a consistent file for ' + repr(filename))
-    logger.debug('Saving ' + repr(written_consistent_filename))
-    sslib_util.persist_temp_file(file_object,
-        written_consistent_filename, should_close=False)
-
-  else:
-    logger.debug('Not creating a consistent snapshot for ' + repr(filename))
-
-  logger.debug('Saving ' + repr(filename))
-  storage_backend.put(file_object, filename)
-
-  file_object.close()
-
-  return filename
-
-
-
-
-
-def _log_status_of_top_level_roles(targets_directory, metadata_directory,
-    repository_name, storage_backend):
-  """
-  Non-public function that logs whether any of the top-level roles contain
-  an invalid number of public and private keys, or an insufficient
-  threshold of signatures.  Considering that the top-level metadata have to
-  be verified in the expected root -> targets -> snapshot -> timestamp
-  order, this function logs the error message and returns as soon as a
-  required metadata file is found to be invalid.  It is assumed here that
-  the delegated roles have been written and verified.  Example output:
-
-  'root' role contains 1 / 1 signatures.
-  'targets' role contains 1 / 1 signatures.
-  'snapshot' role contains 1 / 1 signatures.
-  'timestamp' role contains 1 / 1 signatures.
-
-  Note: Temporary metadata is generated so that file hashes & sizes may be
-  computed and verified against the attached signatures.
-  'metadata_directory' should be a directory in a temporary repository
-  directory.
-  """
-
-  # The expected full filenames of the top-level roles needed to write them
-  # to disk.
-  filenames = get_top_level_metadata_filenames(metadata_directory)
-  root_filename = filenames[ROOT_FILENAME]
-  targets_filename = filenames[TARGETS_FILENAME]
-  snapshot_filename = filenames[SNAPSHOT_FILENAME]
-  timestamp_filename = filenames[TIMESTAMP_FILENAME]
-
-  # Verify that the top-level roles contain a valid number of public keys
-  # and that their corresponding private keys have been loaded.
-  for rolename in ['root', 'targets', 'snapshot', 'timestamp']:
-    try:
-      _check_role_keys(rolename, repository_name)
-
-    except exceptions.InsufficientKeysError as e:
-      logger.info(str(e))
-
-  # Do the top-level roles contain a valid threshold of signatures?
-  # Top-level metadata is verified in Root -> Targets -> Snapshot ->
-  # Timestamp order.  Verify the metadata of the Root role.
-  dirty_rolenames = roledb.get_dirty_roles(repository_name)
-
-  root_roleinfo = roledb.get_roleinfo('root', repository_name)
-  root_is_dirty = None
-  if 'root' in dirty_rolenames:
-    root_is_dirty = True
-
-  else:
-    root_is_dirty = False
-
-  try:
-    signable, root_filename = \
-      _generate_and_write_metadata('root', root_filename, targets_directory,
-          metadata_directory, storage_backend, repository_name=repository_name)
-    _log_status('root', signable, repository_name)
-
-  # 'tuf.exceptions.UnsignedMetadataError' is raised if the metadata
-  # contains an invalid threshold of signatures.  Log the valid/threshold
-  # message, where valid < threshold.
-  except exceptions.UnsignedMetadataError as e:
-    _log_status('root', e.signable, repository_name)
-    return
-
-  finally:
-    roledb.unmark_dirty(['root'], repository_name)
-    roledb.update_roleinfo('root', root_roleinfo,
-        mark_role_as_dirty=root_is_dirty, repository_name=repository_name)
-
-  # Verify the metadata of the Targets role.
- targets_roleinfo = roledb.get_roleinfo('targets', repository_name) - targets_is_dirty = None - if 'targets' in dirty_rolenames: - targets_is_dirty = True - - else: - targets_is_dirty = False - - try: - signable, targets_filename = \ - _generate_and_write_metadata('targets', targets_filename, - targets_directory, metadata_directory, storage_backend, - repository_name=repository_name) - _log_status('targets', signable, repository_name) - - except exceptions.UnsignedMetadataError as e: - _log_status('targets', e.signable, repository_name) - return - - finally: - roledb.unmark_dirty(['targets'], repository_name) - roledb.update_roleinfo('targets', targets_roleinfo, - mark_role_as_dirty=targets_is_dirty, repository_name=repository_name) - - # Verify the metadata of the snapshot role. - snapshot_roleinfo = roledb.get_roleinfo('snapshot', repository_name) - snapshot_is_dirty = None - if 'snapshot' in dirty_rolenames: - snapshot_is_dirty = True - - else: - snapshot_is_dirty = False - - filenames = {'root': root_filename, 'targets': targets_filename} - try: - signable, snapshot_filename = \ - _generate_and_write_metadata('snapshot', snapshot_filename, - targets_directory, metadata_directory, storage_backend, False, - filenames, repository_name=repository_name) - _log_status('snapshot', signable, repository_name) - - except exceptions.UnsignedMetadataError as e: - _log_status('snapshot', e.signable, repository_name) - return - - finally: - roledb.unmark_dirty(['snapshot'], repository_name) - roledb.update_roleinfo('snapshot', snapshot_roleinfo, - mark_role_as_dirty=snapshot_is_dirty, repository_name=repository_name) - - # Verify the metadata of the Timestamp role. - timestamp_roleinfo = roledb.get_roleinfo('timestamp', repository_name) - timestamp_is_dirty = None - if 'timestamp' in dirty_rolenames: - timestamp_is_dirty = True - - else: - timestamp_is_dirty = False - - filenames = {'snapshot': snapshot_filename} - try: - signable, timestamp_filename = \ - _generate_and_write_metadata('timestamp', timestamp_filename, - targets_directory, metadata_directory, storage_backend, - False, filenames, repository_name=repository_name) - _log_status('timestamp', signable, repository_name) - - except exceptions.UnsignedMetadataError as e: - _log_status('timestamp', e.signable, repository_name) - return - - finally: - roledb.unmark_dirty(['timestamp'], repository_name) - roledb.update_roleinfo('timestamp', timestamp_roleinfo, - mark_role_as_dirty=timestamp_is_dirty, repository_name=repository_name) - - - -def _log_status(rolename, signable, repository_name): - """ - Non-public function logs the number of (good/threshold) signatures of - 'rolename'. - """ - - status = sig.get_signature_status(signable, rolename, repository_name) - - logger.info(repr(rolename) + ' role contains ' + \ - repr(len(status['good_sigs'])) + ' / ' + repr(status['threshold']) + \ - ' signatures.') - - - - - -def create_tuf_client_directory(repository_directory, client_directory): - """ - - Create client directory structure as 'tuf.client.updater' expects it. - Metadata files downloaded from a remote TUF repository are saved to - 'client_directory'. - The Root file must initially exist before an update request can be - satisfied. create_tuf_client_directory() ensures the minimum metadata - is copied and that required directories ('previous' and 'current') are - created in 'client_directory'. Software updaters integrating TUF may - use the client directory created as an initial copy of the repository's - metadata. 
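For example, a software updater integrating TUF might bootstrap its client as follows; both directory names are hypothetical:

  from tuf.repository_tool import create_tuf_client_directory

  # Copies the repository's metadata into the client's
  # 'metadata/current' and 'metadata/previous' directories.
  create_tuf_client_directory('path/to/repository', 'path/to/client')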
- - - repository_directory: - The path of the root repository directory. The 'metadata' and 'targets' - sub-directories should be available in 'repository_directory'. The - metadata files of 'repository_directory' are copied to 'client_directory'. - - client_directory: - The path of the root client directory. The 'current' and 'previous' - sub-directories are created and will store the metadata files copied - from 'repository_directory'. 'client_directory' will store metadata - and target files downloaded from a TUF repository. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - tuf.exceptions.RepositoryError, if the metadata directory in - 'client_directory' already exists. - - - Copies metadata files and directories from 'repository_directory' to - 'client_directory'. Parent directories are created if they do not exist. - - - None. - """ - - # Do the arguments have the correct format? - # This check ensures arguments have the appropriate number of objects and - # object types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if the check fails. - sslib_formats.PATH_SCHEMA.check_match(repository_directory) - sslib_formats.PATH_SCHEMA.check_match(client_directory) - - # Set the absolute path of the Repository's metadata directory. The metadata - # directory should be the one served by the Live repository. At a minimum, - # the repository's root file must be copied. - repository_directory = os.path.abspath(repository_directory) - metadata_directory = os.path.join(repository_directory, - METADATA_DIRECTORY_NAME) - - # Set the client's metadata directory, which will store the metadata copied - # from the repository directory set above. - client_directory = os.path.abspath(client_directory) - client_metadata_directory = os.path.join(client_directory, - METADATA_DIRECTORY_NAME) - - # If the client's metadata directory does not already exist, create it and - # any of its parent directories, otherwise raise an exception. An exception - # is raised to avoid accidentally overwriting previous metadata. - try: - os.makedirs(client_metadata_directory) - - except OSError as e: - if e.errno == errno.EEXIST: - message = 'Cannot create a fresh client metadata directory: ' +\ - repr(client_metadata_directory) + '. Already exists.' - raise exceptions.RepositoryError(message) - - # Testing of non-errno.EEXIST exceptions have been verified on all - # supported OSs. An unexpected exception (the '/' directory exists, rather - # than disallowed path) is possible on Travis, so the '#pragma: no branch' - # below is included to prevent coverage failure. - else: #pragma: no branch - raise - - # Move all metadata to the client's 'current' and 'previous' directories. - # The root metadata file MUST exist in '{client_metadata_directory}/current'. - # 'tuf.client.updater' expects the 'current' and 'previous' directories to - # exist under 'metadata'. - client_current = os.path.join(client_metadata_directory, 'current') - client_previous = os.path.join(client_metadata_directory, 'previous') - shutil.copytree(metadata_directory, client_current) - shutil.copytree(metadata_directory, client_previous) - - - -def disable_console_log_messages(): - """ - - Disable logger messages printed to the console. For example, repository - maintainers may want to call this function if many roles will be sharing - keys, otherwise detected duplicate keys will continually log a warning - message. - - - None. - - - None. 
- - - Removes the 'tuf.log' console handler, added by default when - 'tuf.repository_tool.py' is imported. - - - None. - """ - - log.remove_console_handler() - - - -def keys_to_keydict(keys): - """ - - Iterate over a list of keys and return a list of keyids and a dict mapping - keyid to key metadata - - - keys: - A list of key objects conforming to - securesystemslib.formats.ANYKEYLIST_SCHEMA. - - - keyids: - A list of keyids conforming to securesystemslib.formats.KEYID_SCHEMA - keydict: - A dictionary conforming to securesystemslib.formats.KEYDICT_SCHEMA - """ - keyids = [] - keydict = {} - - for key in keys: - keyid = key['keyid'] - key_metadata_format = sslib_keys.format_keyval_to_metadata( - key['keytype'], key['scheme'], key['keyval']) - - new_keydict = {keyid: key_metadata_format} - keydict.update(new_keydict) - keyids.append(keyid) - return keyids, keydict - - - - -if __name__ == '__main__': - # The interactive sessions of the documentation strings can - # be tested by running repository_lib.py as a standalone module: - # $ python repository_lib.py. - import doctest - doctest.testmod() diff --git a/tuf/repository_tool.py b/tuf/repository_tool.py deleted file mode 100755 index af78b2ba32..0000000000 --- a/tuf/repository_tool.py +++ /dev/null @@ -1,3291 +0,0 @@ - -#!/usr/bin/env python - -# Copyright 2013 - 2017, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -""" - - repository_tool.py - - - Vladimir Diaz - - - October 19, 2013 - - - See LICENSE-MIT OR LICENSE for licensing information. - - - Provide a tool that can create a TUF repository. It can be used with the - Python interpreter in interactive mode, or imported directly into a Python - module. See 'tuf/README' for the complete guide to using - 'tuf.repository_tool.py'. 
-""" - -import os -import time -import datetime -import logging -import tempfile -import shutil -import json - -from collections import deque - -from securesystemslib import exceptions as sslib_exceptions -from securesystemslib import formats as sslib_formats -from securesystemslib import util as sslib_util -from securesystemslib import storage as sslib_storage - -from tuf import exceptions -from tuf import formats -from tuf import keydb -from tuf import log -from tuf import repository_lib as repo_lib -from tuf import roledb - - -# Copy API -# pylint: disable=unused-import - -# Copy generic repository API functions to be used via `repository_tool` -from tuf.repository_lib import ( - create_tuf_client_directory, - disable_console_log_messages) - - -# Copy key-related API functions to be used via `repository_tool` -from tuf.repository_lib import ( - import_rsa_privatekey_from_file, - import_ed25519_privatekey_from_file) - -from securesystemslib.interface import ( - generate_and_write_rsa_keypair, - generate_and_write_rsa_keypair_with_prompt, - generate_and_write_unencrypted_rsa_keypair, - generate_and_write_ecdsa_keypair, - generate_and_write_ecdsa_keypair_with_prompt, - generate_and_write_unencrypted_ecdsa_keypair, - generate_and_write_ed25519_keypair, - generate_and_write_ed25519_keypair_with_prompt, - generate_and_write_unencrypted_ed25519_keypair, - import_rsa_publickey_from_file, - import_ecdsa_publickey_from_file, - import_ed25519_publickey_from_file, - import_ecdsa_privatekey_from_file) - -from securesystemslib.keys import ( - format_metadata_to_key, - generate_rsa_key, - generate_ecdsa_key, - generate_ed25519_key, - import_rsakey_from_pem, - import_ecdsakey_from_pem) - - -# See 'log.py' to learn how logging is handled in TUF. -logger = logging.getLogger(__name__) - -# Add a console handler so that users are aware of potentially unintended -# states, such as multiple roles that share keys. -log.add_console_handler() -log.set_console_log_level(logging.INFO) - -# Recommended RSA key sizes: -# https://en.wikipedia.org/wiki/Key_size#Asymmetric_algorithm_key_lengths -# Based on the above, RSA keys of size 3072 are expected to provide security -# through 2031 and beyond. -DEFAULT_RSA_KEY_BITS=3072 - -# The default number of hashed bin delegations -DEFAULT_NUM_BINS=1024 - -# The targets and metadata directory names. Metadata files are written -# to the staged metadata directory instead of the "live" one. -METADATA_STAGED_DIRECTORY_NAME = 'metadata.staged' -METADATA_DIRECTORY_NAME = 'metadata' -TARGETS_DIRECTORY_NAME = 'targets' - -# The extension of TUF metadata. -METADATA_EXTENSION = '.json' - -# Expiration date delta, in seconds, of the top-level roles. A metadata -# expiration date is set by taking the current time and adding the expiration -# seconds listed below. - -# Initial 'root.json' expiration time of 1 year. -ROOT_EXPIRATION = 31556900 - -# Initial 'targets.json' expiration time of 3 months. -TARGETS_EXPIRATION = 7889230 - -# Initial 'snapshot.json' expiration time of 1 week. -SNAPSHOT_EXPIRATION = 604800 - -# Initial 'timestamp.json' expiration time of 1 day. -TIMESTAMP_EXPIRATION = 86400 - - -class Repository(object): - """ - - Represent a TUF repository that contains the metadata of the top-level - roles, including all those delegated from the 'targets.json' role. The - repository object returned provides access to the top-level roles, and any - delegated targets that are added as the repository is modified. 
For - example, a Repository object named 'repository' provides the following - access by default: - - repository.root.version = 2 - repository.timestamp.expiration = datetime.datetime(2015, 8, 8, 12, 0) - repository.snapshot.add_verification_key(...) - repository.targets.delegate('unclaimed', ...) - - Delegating a role from 'targets' updates the attributes of the parent - delegation, which then provides: - - repository.targets('unclaimed').add_verification_key(...) - - - - repository_directory: - The root folder of the repository that contains the metadata and targets - sub-directories. - - metadata_directory: - The metadata sub-directory contains the files of the top-level - roles, including all roles delegated from 'targets.json'. - - targets_directory: - The targets sub-directory contains all the target files that are - downloaded by clients and are referenced in TUF Metadata. The hashes and - file lengths are listed in Metadata files so that they are securely - downloaded. Metadata files are similarly referenced in the top-level - metadata. - - storage_backend: - An object which implements - securesystemslib.storage.StorageBackendInterface. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - use_timestamp_length: - Whether to include the optional length attribute of the snapshot - metadata file in the timestamp metadata. - Default is True. - - use_timestamp_hashes: - Whether to include the optional hashes attribute of the snapshot - metadata file in the timestamp metadata. - Default is True. - - use_snapshot_length: - Whether to include the optional length attribute for targets - metadata files in the snapshot metadata. - Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - use_snapshot_hashes: - Whether to include the optional hashes attribute for targets - metadata files in the snapshot metadata. - Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - - Creates top-level role objects and assigns them as attributes. - - - A Repository object that contains default Metadata objects for the top-level - roles. - """ - - def __init__(self, repository_directory, metadata_directory, - targets_directory, storage_backend, repository_name='default', - use_timestamp_length=True, use_timestamp_hashes=True, - use_snapshot_length=False, use_snapshot_hashes=False): - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. 
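Before the schema checks that follow, a usage illustration of the constructor documented above; this is a minimal sketch with a hypothetical directory layout (repositories are more commonly obtained through the module's repository creation and loading helpers):

  from securesystemslib.storage import FilesystemBackend
  from tuf.repository_tool import Repository

  repository = Repository(
      'my-repo',                     # repository root
      'my-repo/metadata.staged',     # staged metadata directory
      'my-repo/targets',             # targets directory
      FilesystemBackend())

  # The top-level roles are now available as attributes.
  repository.root.threshold = 1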
- sslib_formats.PATH_SCHEMA.check_match(repository_directory)
- sslib_formats.PATH_SCHEMA.check_match(metadata_directory)
- sslib_formats.PATH_SCHEMA.check_match(targets_directory)
- sslib_formats.NAME_SCHEMA.check_match(repository_name)
- sslib_formats.BOOLEAN_SCHEMA.check_match(use_timestamp_length)
- sslib_formats.BOOLEAN_SCHEMA.check_match(use_timestamp_hashes)
- sslib_formats.BOOLEAN_SCHEMA.check_match(use_snapshot_length)
- sslib_formats.BOOLEAN_SCHEMA.check_match(use_snapshot_hashes)
-
- self._repository_directory = repository_directory
- self._metadata_directory = metadata_directory
- self._targets_directory = targets_directory
- self._repository_name = repository_name
- self._storage_backend = storage_backend
- self._use_timestamp_length = use_timestamp_length
- self._use_timestamp_hashes = use_timestamp_hashes
- self._use_snapshot_length = use_snapshot_length
- self._use_snapshot_hashes = use_snapshot_hashes
-
- try:
- roledb.create_roledb(repository_name)
- keydb.create_keydb(repository_name)
-
- except sslib_exceptions.InvalidNameError:
- logger.debug(repr(repository_name) + ' already exists. Overwriting'
- ' its contents.')
-
- # Set the top-level role objects.
- self.root = Root(self._repository_name)
- self.snapshot = Snapshot(self._repository_name)
- self.timestamp = Timestamp(self._repository_name)
- self.targets = Targets(self._targets_directory, 'targets',
- repository_name=self._repository_name)
-
-
-
- def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False):
- """
-
- Write all the JSON Metadata objects to their corresponding files for
- roles which have changed.
- writeall() raises an exception if any of the role metadata to be written
- to disk is invalid, such as an insufficient threshold of signatures,
- missing private keys, etc.
-
-
- consistent_snapshot:
- A boolean indicating whether role metadata files should have their
- version numbers as filename prefix when written to disk, i.e.
- 'VERSION.ROLENAME.json', and target files should be copied to a
- filename that has their hex digest as filename prefix, i.e.
- 'HASH.FILENAME'. Note that:
- - root metadata is always written with a version prefix, independently
- of 'consistent_snapshot'
- - the latest version of each metadata file is always also written
- without version prefix
- - target files are only copied to a hash-prefixed filename if
- 'consistent_snapshot' is True and 'use_existing_fileinfo' is False.
- If both are True, hash-prefixed target file copies must be created
- out-of-band.
-
- use_existing_fileinfo:
- Boolean indicating whether the fileinfo dicts in the roledb should be
- written as-is (True) or whether hashes should be generated (False,
- requires access to the targets files on-disk).
-
-
- tuf.exceptions.UnsignedMetadataError, if any of the top-level
- and delegated roles do not have the minimum threshold of signatures.
-
-
- Creates metadata files in the repository's metadata directory.
-
-
- None.
- """
-
- # Do the arguments have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named. Raise
- # 'securesystemslib.exceptions.FormatError' if any are improperly
- # formatted.
- sslib_formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot)
-
- # At this point, keydb and roledb must be fully populated, otherwise
- # writeall() raises a 'tuf.exceptions.UnsignedMetadataError' if any of the
- # top-level roles are missing signatures, keys, etc.
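Putting writeall() in context, a typical (abbreviated) release flow signs the dirty roles and then writes them; the key path and password below are hypothetical:

  from tuf.repository_tool import import_ed25519_privatekey_from_file

  private_root_key = import_ed25519_privatekey_from_file(
      'keys/root_key', password='password')
  repository.root.load_signing_key(private_root_key)

  # Write every dirty role, using version-prefixed (consistent
  # snapshot) metadata filenames.
  repository.writeall(consistent_snapshot=True)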
- - # Write the metadata files of all the Targets roles that are dirty (i.e., - # have been modified via roledb.update_roleinfo()). - filenames = {'root': os.path.join(self._metadata_directory, - repo_lib.ROOT_FILENAME), 'targets': os.path.join(self._metadata_directory, - repo_lib.TARGETS_FILENAME), 'snapshot': os.path.join(self._metadata_directory, - repo_lib.SNAPSHOT_FILENAME), 'timestamp': os.path.join(self._metadata_directory, - repo_lib.TIMESTAMP_FILENAME)} - - snapshot_signable = None - dirty_rolenames = roledb.get_dirty_roles(self._repository_name) - - for dirty_rolename in dirty_rolenames: - - # Ignore top-level roles, they will be generated later in this method. - if dirty_rolename in roledb.TOP_LEVEL_ROLES: - continue - - dirty_filename = os.path.join(self._metadata_directory, - dirty_rolename + METADATA_EXTENSION) - repo_lib._generate_and_write_metadata(dirty_rolename, dirty_filename, - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, filenames, - repository_name=self._repository_name, - use_existing_fileinfo=use_existing_fileinfo) - - # Metadata should be written in (delegated targets -> root -> targets -> - # snapshot -> timestamp) order. Begin by generating the 'root.json' - # metadata file. _generate_and_write_metadata() raises a - # 'securesystemslib.exceptions.Error' exception if the metadata cannot be - # written. - root_roleinfo = roledb.get_roleinfo('root', self._repository_name) - old_consistent_snapshot = root_roleinfo['consistent_snapshot'] - if 'root' in dirty_rolenames or consistent_snapshot != old_consistent_snapshot: - repo_lib._generate_and_write_metadata('root', filenames['root'], - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, filenames, - repository_name=self._repository_name) - - # Generate the 'targets.json' metadata file. - if 'targets' in dirty_rolenames: - repo_lib._generate_and_write_metadata('targets', filenames['targets'], - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, - repository_name=self._repository_name, - use_existing_fileinfo=use_existing_fileinfo) - - # Generate the 'snapshot.json' metadata file. - if 'snapshot' in dirty_rolenames: - snapshot_signable, junk = repo_lib._generate_and_write_metadata('snapshot', - filenames['snapshot'], self._targets_directory, - self._metadata_directory, self._storage_backend, - consistent_snapshot, filenames, - repository_name=self._repository_name, - use_snapshot_length=self._use_snapshot_length, - use_snapshot_hashes=self._use_snapshot_hashes) - - # Generate the 'timestamp.json' metadata file. - if 'timestamp' in dirty_rolenames: - repo_lib._generate_and_write_metadata('timestamp', filenames['timestamp'], - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, - filenames, repository_name=self._repository_name, - use_timestamp_length=self._use_timestamp_length, - use_timestamp_hashes=self._use_timestamp_hashes) - - roledb.unmark_dirty(dirty_rolenames, self._repository_name) - - # Delete the metadata of roles no longer in 'roledb'. Obsolete roles - # may have been revoked and should no longer have their metadata files - # available on disk, otherwise loading a repository may unintentionally - # load them. 
- if snapshot_signable is not None: - repo_lib._delete_obsolete_metadata(self._metadata_directory, - snapshot_signable['signed'], consistent_snapshot, self._repository_name, - self._storage_backend) - - - - def write(self, rolename, consistent_snapshot=False, increment_version_number=True, - use_existing_fileinfo=False): - """ - - Write the JSON metadata for 'rolename' to its corresponding file on disk. - Unlike writeall(), write() allows the metadata file to contain an invalid - threshold of signatures. - - - rolename: - The name of the role to be written to disk. - - consistent_snapshot: - A boolean indicating whether the role metadata file should have its - version number as filename prefix when written to disk, i.e - 'VERSION.ROLENAME.json'. Note that: - - root metadata is always written with a version prefix, independently - of 'consistent_snapshot' - - the latest version of the metadata file is always also written - without version prefix - - if the metadata is targets metadata and 'consistent_snapshot' is - True, the corresponding target files are copied to a filename with - their hex digest as filename prefix, i.e 'HASH.FILENAME', unless - 'use_existing_fileinfo' is also True. - If 'consistent_snapshot' and 'use_existing_fileinfo' both are True, - hash-prefixed target file copies must be created out-of-band. - - increment_version_number: - Boolean indicating whether the version number of 'rolename' should be - automatically incremented. - - use_existing_fileinfo: - Boolean indicating whether the fileinfo dicts in the roledb should be - written as-is (True) or whether hashes should be generated (False, - requires access to the targets files on-disk). - - - None. - - - Creates metadata files in the repository's metadata directory. - - - None. - """ - - rolename_filename = os.path.join(self._metadata_directory, - rolename + METADATA_EXTENSION) - - filenames = {'root': os.path.join(self._metadata_directory, repo_lib.ROOT_FILENAME), - 'targets': os.path.join(self._metadata_directory, repo_lib.TARGETS_FILENAME), - 'snapshot': os.path.join(self._metadata_directory, repo_lib.SNAPSHOT_FILENAME), - 'timestamp': os.path.join(self._metadata_directory, repo_lib.TIMESTAMP_FILENAME)} - - repo_lib._generate_and_write_metadata(rolename, rolename_filename, - self._targets_directory, self._metadata_directory, - self._storage_backend, consistent_snapshot, - filenames=filenames, allow_partially_signed=True, - increment_version_number=increment_version_number, - repository_name=self._repository_name, - use_existing_fileinfo=use_existing_fileinfo) - - # Ensure 'rolename' is no longer marked as dirty after the successful write(). - roledb.unmark_dirty([rolename], self._repository_name) - - - - - - def status(self): - """ - - Determine the status of the top-level roles. status() checks if each - role provides sufficient public and private keys, signatures, and that a - valid metadata file is generated if writeall() or write() were to be - called. Metadata files are temporarily written so that file hashes and - lengths may be verified, determine if delegated role trust is fully - obeyed, and target paths valid according to parent roles. status() does - not do a simple check for number of threshold keys and signatures. - - - None. - - - None. - - - Generates and writes temporary metadata files. - - - None. - """ - - temp_repository_directory = None - - # Generate and write temporary metadata so that full verification of - # metadata is possible, such as verifying signatures, digests, and file - # content. 
Ensure temporary files are removed after verification results
- # are completed.
- try:
- temp_repository_directory = tempfile.mkdtemp()
- targets_directory = self._targets_directory
- metadata_directory = os.path.join(temp_repository_directory,
- METADATA_STAGED_DIRECTORY_NAME)
- os.mkdir(metadata_directory)
-
- # Verify the top-level roles and log the results.
- repo_lib._log_status_of_top_level_roles(targets_directory,
- metadata_directory, self._repository_name, self._storage_backend)
-
- finally:
- shutil.rmtree(temp_repository_directory, ignore_errors=True)
-
-
-
- def dirty_roles(self):
- """
-
- Print/log the roles that have been modified. For example, if some role's
- version number is changed (repository.timestamp.version = 2), it is
- considered dirty and will be included in the list of dirty roles
- printed/logged here. Unlike status(), signatures, public keys, targets,
- etc. are not verified. status() should be called instead if the caller
- would like to verify whether a valid role file would be generated if
- writeall() were called.
-
-
- None.
-
-
- None.
-
-
- None.
-
-
- None.
- """
-
- logger.info('Dirty roles: ' + str(roledb.get_dirty_roles(self._repository_name)))
-
-
-
- def mark_dirty(self, roles):
- """
-
- Mark the list of 'roles' as dirty.
-
-
- roles:
- A list of roles to mark as dirty. On the next write, these roles
- will be written to disk.
-
-
- None.
-
-
- None.
-
-
- None.
- """
-
- roledb.mark_dirty(roles, self._repository_name)
-
-
-
- def unmark_dirty(self, roles):
- """
-
- No longer mark the list of 'roles' as dirty.
-
-
- roles:
- A list of roles to no longer mark as dirty. On the next write, these
- roles will not be written to disk unless they are modified and marked
- as dirty again.
-
-
- None.
-
-
- None.
-
-
- None.
- """
-
- roledb.unmark_dirty(roles, self._repository_name)
-
-
-
- @staticmethod
- def get_filepaths_in_directory(files_directory, recursive_walk=False,
- followlinks=True):
- """
-
- Walk the given 'files_directory' and build a list of target files found.
-
-
- files_directory:
- The path to a directory of target files.
-
- recursive_walk:
- To recursively walk the directory, set recursive_walk=True.
-
- followlinks:
- To follow symbolic links, set followlinks=True.
-
-
- securesystemslib.exceptions.FormatError, if the arguments are improperly
- formatted.
-
- securesystemslib.exceptions.Error, if 'files_directory' is not a valid
- directory.
-
- Python IO exceptions.
-
-
- None.
-
-
- A list of absolute paths to target files in the given 'files_directory'.
- """
-
- # Do the arguments have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named. Raise
- # 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
- sslib_formats.PATH_SCHEMA.check_match(files_directory)
- sslib_formats.BOOLEAN_SCHEMA.check_match(recursive_walk)
- sslib_formats.BOOLEAN_SCHEMA.check_match(followlinks)
-
- # Ensure a valid directory is given.
- if not os.path.isdir(files_directory):
- raise sslib_exceptions.Error(repr(files_directory) + ' is not'
- ' a directory.')
-
- # A list of the target filepaths found in 'files_directory'.
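Before the directory walk below, a brief usage sketch: because this is a staticmethod, it may be called on the class itself (the directory is hypothetical):

  from tuf.repository_tool import Repository

  target_paths = Repository.get_filepaths_in_directory(
      'my-repo/targets', recursive_walk=True, followlinks=True)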
- targets = [] - - # FIXME: We need a way to tell Python 2, but not Python 3, to return - # filenames in Unicode; see #61 and: - # http://docs.python.org/howto/unicode.html#unicode-filenames - for dirpath, dirnames, filenames in os.walk(files_directory, - followlinks=followlinks): - for filename in filenames: - full_target_path = os.path.join(os.path.abspath(dirpath), filename) - targets.append(full_target_path) - - # Prune the subdirectories to walk right now if we do not wish to - # recursively walk 'files_directory'. - if recursive_walk is False: - del dirnames[:] - - else: - logger.debug('Not pruning subdirectories ' + repr(dirnames)) - - return targets - - - - - -class Metadata(object): - """ - - Provide a base class to represent a TUF Metadata role. There are four - top-level roles: Root, Targets, Snapshot, and Timestamp. The Metadata - class provides methods that are needed by all top-level roles, such as - adding and removing public keys, private keys, and signatures. Metadata - attributes, such as rolename, version, threshold, expiration, and key list - are also provided by the Metadata base class. - - - None. - - - None. - - - None. - - - None. - """ - - def __init__(self): - self._rolename = None - self._repository_name = None - - - def add_verification_key(self, key, expires=None): - """ - - Add 'key' to the role. Adding a key, which should contain only the - public portion, signifies the corresponding private key and signatures - the role is expected to provide. A threshold of signatures is required - for a role to be considered properly signed. If a metadata file contains - an insufficient threshold of signatures, it must not be accepted. - - >>> - >>> - >>> - - - key: - The role key to be added, conformant to - 'securesystemslib.formats.ANYKEY_SCHEMA'. Adding a public key to a role - means that its corresponding private key must generate and add its - signature to the role. A threshold number of signatures is required - for a role to be fully signed. - - expires: - The date in which 'key' expires. 'expires' is a datetime.datetime() - object. - - - securesystemslib.exceptions.FormatError, if any of the arguments are - improperly formatted. - - securesystemslib.exceptions.Error, if the 'expires' datetime has already - expired. - - - The role's entries in 'keydb' and 'roledb' are updated. - - - None. - """ - - # Does 'key' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.ANYKEY_SCHEMA.check_match(key) - - # If 'expires' is unset, choose a default expiration for 'key'. By - # default, Root, Targets, Snapshot, and Timestamp keys are set to expire - # 1 year, 3 months, 1 week, and 1 day from the current time, respectively. 
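For instance, trusting a new root verification key looks like the following (the key file is hypothetical); omitting 'expires' relies on the role-specific defaults chosen below:

  from tuf.repository_tool import import_ed25519_publickey_from_file

  public_root_key = import_ed25519_publickey_from_file('keys/root_key.pub')
  repository.root.add_verification_key(public_root_key)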
- if expires is None:
- # Role names are stored in lowercase, so compare against lowercase names.
- if self.rolename == 'root':
- expires = \
- formats.unix_timestamp_to_datetime(int(time.time() + ROOT_EXPIRATION))
-
- elif self.rolename == 'targets':
- expires = \
- formats.unix_timestamp_to_datetime(int(time.time() + TARGETS_EXPIRATION))
-
- elif self.rolename == 'snapshot':
- expires = \
- formats.unix_timestamp_to_datetime(int(time.time() + SNAPSHOT_EXPIRATION))
-
- elif self.rolename == 'timestamp':
- expires = \
- formats.unix_timestamp_to_datetime(int(time.time() + TIMESTAMP_EXPIRATION))
-
- # Delegated targets roles also default to the timestamp expiration.
- else:
- expires = \
- formats.unix_timestamp_to_datetime(int(time.time() + TIMESTAMP_EXPIRATION))
-
- # Is 'expires' a datetime.datetime() object?
- # Raise 'securesystemslib.exceptions.FormatError' if not.
- if not isinstance(expires, datetime.datetime):
- raise sslib_exceptions.FormatError(repr(expires) + ' is not a'
- ' datetime.datetime() object.')
-
- # Truncate the microseconds value to produce a correct schema string
- # of the form 'yyyy-mm-ddThh:mm:ssZ'.
- expires = expires.replace(microsecond = 0)
-
- # Ensure the expiration has not already passed.
- current_datetime = \
- formats.unix_timestamp_to_datetime(int(time.time()))
-
- if expires < current_datetime:
- raise sslib_exceptions.Error(repr(key) + ' has already'
- ' expired.')
-
- # Update the key's 'expires' entry.
- expires = expires.isoformat() + 'Z'
- key['expires'] = expires
-
- # Ensure 'key', which should contain the public portion, is added to
- # 'keydb'. Add 'key' to the list of recognized keys.
- # Keys may be shared, so do not raise an exception if 'key' has already
- # been loaded.
- try:
- keydb.add_key(key, repository_name=self._repository_name)
-
- except exceptions.KeyAlreadyExistsError:
- logger.warning('Adding a verification key that has already been used.')
-
- keyid = key['keyid']
- roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-
- # Save the keyids that are being replaced since certain roles will need to
- # re-sign metadata with these keys (e.g., root). Use list() to make a copy
- # of roleinfo['keyids'] to ensure we're modifying distinct lists.
- previous_keyids = list(roleinfo['keyids'])
-
- # Add 'key' to the role's entry in 'roledb', and avoid duplicates.
- if keyid not in roleinfo['keyids']:
- roleinfo['keyids'].append(keyid)
- roleinfo['previous_keyids'] = previous_keyids
-
- roledb.update_roleinfo(self._rolename, roleinfo,
- repository_name=self._repository_name)
-
-
-
- def remove_verification_key(self, key):
- """
-
- Remove 'key' from the role's currently recognized list of role keys.
- The role expects a threshold number of signatures.
-
- >>>
- >>>
- >>>
-
-
- key:
- The role's key, conformant to 'securesystemslib.formats.ANYKEY_SCHEMA'.
- 'key' should contain only the public portion, as only the public key is
- needed. The 'add_verification_key()' method should have previously
- added 'key'.
-
-
- securesystemslib.exceptions.FormatError, if the 'key' argument is
- improperly formatted.
-
- securesystemslib.exceptions.Error, if the 'key' argument has not been
- previously added.
-
-
- Updates the role's 'roledb' entry.
-
-
- None.
- """
-
- # Does 'key' have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named. Raise
- # 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
- sslib_formats.ANYKEY_SCHEMA.check_match(key) - - keyid = key['keyid'] - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - - if keyid in roleinfo['keyids']: - roleinfo['keyids'].remove(keyid) - - roledb.update_roleinfo(self._rolename, roleinfo, - repository_name=self._repository_name) - - else: - raise sslib_exceptions.Error('Verification key not found.') - - - - def load_signing_key(self, key): - """ - - Load the role key, which must contain the private portion, so that role - signatures may be generated when the role's metadata file is eventually - written to disk. - - >>> - >>> - >>> - - - key: - The role's key, conformant to 'securesystemslib.formats.ANYKEY_SCHEMA'. - It must contain the private key, so that role signatures may be - generated when writeall() or write() is eventually called to generate - valid metadata files. - - - securesystemslib.exceptions.FormatError, if 'key' is improperly formatted. - - securesystemslib.exceptions.Error, if the private key is not found in 'key'. - - - Updates the role's 'keydb' and 'roledb' entries. - - - None. - """ - - # Does 'key' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.ANYKEY_SCHEMA.check_match(key) - - # Ensure the private portion of the key is available, otherwise signatures - # cannot be generated when the metadata file is written to disk. - if 'private' not in key['keyval'] or not len(key['keyval']['private']): - raise sslib_exceptions.Error('This is not a private key.') - - # Has the key, with the private portion included, been added to the keydb? - # The public version of the key may have been previously added. - try: - keydb.add_key(key, repository_name=self._repository_name) - - except exceptions.KeyAlreadyExistsError: - keydb.remove_key(key['keyid'], self._repository_name) - keydb.add_key(key, repository_name=self._repository_name) - - # Update the role's 'signing_keys' field in 'roledb'. - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - if key['keyid'] not in roleinfo['signing_keyids']: - roleinfo['signing_keyids'].append(key['keyid']) - - roledb.update_roleinfo(self.rolename, roleinfo, - repository_name=self._repository_name) - - - - def unload_signing_key(self, key): - """ - - Remove a previously loaded role private key (i.e., load_signing_key()). - The keyid of the 'key' is removed from the list of recognized signing - keys. - - >>> - >>> - >>> - - - key: - The role key to be unloaded, conformant to - 'securesystemslib.formats.ANYKEY_SCHEMA'. - - - securesystemslib.exceptions.FormatError, if the 'key' argument is - improperly formatted. - - securesystemslib.exceptions.Error, if the 'key' argument has not been - previously loaded. - - - Updates the signing keys of the role in 'roledb'. - - - None. - """ - - # Does 'key' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.ANYKEY_SCHEMA.check_match(key) - - # Update the role's 'signing_keys' field in 'roledb'. - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - - # TODO: Should we consider removing keys from keydb that are no longer - # associated with any roles? 
There could be many no-longer-used keys - # stored in the keydb if not. For now, just unload the key. - if key['keyid'] in roleinfo['signing_keyids']: - roleinfo['signing_keyids'].remove(key['keyid']) - - roledb.update_roleinfo(self.rolename, roleinfo, - repository_name=self._repository_name) - - else: - raise sslib_exceptions.Error('Signing key not found.') - - - - def add_signature(self, signature, mark_role_as_dirty=True): - """ - - Add a signature to the role. A role is considered fully signed if it - contains a threshold of signatures. The 'signature' should have been - generated by the private key corresponding to one of the role's expected - keys. - - >>> - >>> - >>> - - - signature: - The signature to be added to the role, conformant to - 'securesystemslib.formats.SIGNATURE_SCHEMA'. - - mark_role_as_dirty: - A boolean indicating whether the updated 'roleinfo' for 'rolename' - should be marked as dirty. The caller might not want to mark - 'rolename' as dirty if it is loading metadata from disk and only wants - to populate roledb.py. Likewise, add_role() would support a similar - boolean to allow the repository tools to successfully load roles via - load_repository() without needing to mark these roles as dirty (default - behavior). - - - securesystemslib.exceptions.FormatError, if the 'signature' argument is - improperly formatted. - - - Adds 'signature', if not already added, to the role's 'signatures' field - in 'roledb'. - - - None. - """ - - # Does 'signature' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.SIGNATURE_SCHEMA.check_match(signature) - sslib_formats.BOOLEAN_SCHEMA.check_match(mark_role_as_dirty) - - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - - # Ensure the roleinfo contains a 'signatures' field. - if 'signatures' not in roleinfo: - roleinfo['signatures'] = [] - - # Update the role's roleinfo by adding 'signature', if it has not been - # added. - if signature not in roleinfo['signatures']: - roleinfo['signatures'].append(signature) - roledb.update_roleinfo(self.rolename, roleinfo, mark_role_as_dirty, - repository_name=self._repository_name) - - else: - logger.debug('Signature already exists for role: ' + repr(self.rolename)) - - - - def remove_signature(self, signature): - """ - - Remove a previously loaded, or added, role 'signature'. A role must - contain a threshold number of signatures to be considered fully signed. - - >>> - >>> - >>> - - - signature: - The role signature to remove, conformant to - 'securesystemslib.formats.SIGNATURE_SCHEMA'. - - - securesystemslib.exceptions.FormatError, if the 'signature' argument is - improperly formatted. - - securesystemslib.exceptions.Error, if 'signature' has not been previously - added to this role. - - - Updates the 'signatures' field of the role in 'roledb'. - - - None. - """ - - # Does 'signature' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. 
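To illustrate attaching an externally produced signature, a hedged sketch: 'signable_content' stands in for the canonical bytes of the role's 'signed' object, which real callers must produce themselves, and 'private_root_key' is the key loaded in the earlier sketch:

  from securesystemslib.keys import create_signature

  signable_content = b'...'   # hypothetical canonical metadata bytes
  signature = create_signature(private_root_key, signable_content)
  repository.root.add_signature(signature)

  # A superseded signature can later be dropped again.
  repository.root.remove_signature(signature)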
- sslib_formats.SIGNATURE_SCHEMA.check_match(signature)
-
- roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-
- if signature in roleinfo['signatures']:
- roleinfo['signatures'].remove(signature)
-
- roledb.update_roleinfo(self.rolename, roleinfo,
- repository_name=self._repository_name)
-
- else:
- raise sslib_exceptions.Error('Signature not found.')
-
-
-
- @property
- def signatures(self):
- """
-
- A getter method that returns the role's signatures. A role is considered
- fully signed if it contains a threshold number of signatures, where each
- signature must be generated by one of the role's private keys. Keys
- are added to a role with the add_verification_key() method.
-
-
- None.
-
-
- None.
-
-
- None.
-
-
- A list of signatures, conformant to
- 'securesystemslib.formats.SIGNATURES_SCHEMA'.
- """
-
- roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
- signatures = roleinfo['signatures']
-
- return signatures
-
-
-
- @property
- def keys(self):
- """
-
- A getter method that returns the keyids of the role's keys. The role
- is expected to eventually contain a threshold of signatures generated
- by the private keys of each of the role's keys (returned here as keyids).
-
-
- None.
-
-
- None.
-
-
- None.
-
-
- A list of the role's keyids (i.e., keyids of the keys).
- """
-
- roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
- keyids = roleinfo['keyids']
-
- return keyids
-
-
-
- @property
- def rolename(self):
- """
-
- Return the role's name.
- Examples: 'root', 'timestamp', 'targets/unclaimed/django'.
-
-
- None.
-
-
- None.
-
-
- None.
-
-
- The role's name, conformant to 'tuf.formats.ROLENAME_SCHEMA'.
- Examples: 'root', 'timestamp', 'targets/unclaimed/django'.
- """
-
- return self._rolename
-
-
-
- @property
- def version(self):
- """
-
- A getter method that returns the role's version number, conformant to
- 'tuf.formats.VERSION_SCHEMA'.
-
-
- None.
-
-
- None.
-
-
- None.
-
-
- The role's version number, conformant to
- 'tuf.formats.VERSION_SCHEMA'.
- """
-
- roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
- version = roleinfo['version']
-
- return version
-
-
-
- @version.setter
- def version(self, version):
- """
-
- A setter method that updates the role's version number. TUF clients
- download new metadata with a version number greater than the version
- currently trusted. New metadata starts at version 1 when either write()
- or write_partial() is called. Version numbers are automatically
- incremented, when the write methods are called, as follows:
-
- 1. write_partial=True and the metadata is the first to be written.
-
- 2. write_partial=False (i.e., write()), the metadata was not loaded as
- partially written, and a write_partial is not needed.
-
- >>>
- >>>
- >>>
-
-
- version:
- The role's version number, conformant to
- 'tuf.formats.VERSION_SCHEMA'.
-
-
- securesystemslib.exceptions.FormatError, if the 'version' argument is
- improperly formatted.
-
-
- Modifies the 'version' attribute of the Repository object and updates the
- role's version in 'roledb'.
-
-
- None.
- """
-
- # Does 'version' have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named. Raise
- # 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
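Continuing the property-based interface, bumping a version is a plain attribute assignment, as in the Repository docstring earlier:

  # Clients only accept new metadata whose version number is greater
  # than the version they currently trust.
  repository.root.version = 2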
- formats.METADATAVERSION_SCHEMA.check_match(version)
-
- roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
- roleinfo['version'] = version
-
- roledb.update_roleinfo(self._rolename, roleinfo,
- repository_name=self._repository_name)
-
-
-
- @property
- def threshold(self):
- """
-
- Return the role's threshold value. A role is considered fully signed if
- a threshold number of signatures is available.
-
-
- None.
-
-
- None.
-
-
- None.
-
-
- The role's threshold value, conformant to
- 'tuf.formats.THRESHOLD_SCHEMA'.
- """
-
- roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name)
- threshold = roleinfo['threshold']
-
- return threshold
-
-
-
- @threshold.setter
- def threshold(self, threshold):
- """
-
- A setter method that modifies the threshold value of the role. Metadata
- is considered fully signed if a 'threshold' number of signatures is
- available.
-
- >>>
- >>>
- >>>
-
-
- threshold:
- An integer value that sets the role's threshold value, or the minimum
- number of signatures needed for metadata to be considered fully
- signed. Conformant to 'tuf.formats.THRESHOLD_SCHEMA'.
-
-
- securesystemslib.exceptions.FormatError, if the 'threshold' argument is
- improperly formatted.
-
-
- Modifies the threshold attribute of the Repository object and updates
- the role's threshold in 'roledb'.
-
-
- None.
- """
-
- # Does 'threshold' have the correct format?
- # Ensure the arguments have the appropriate number of objects and object
- # types, and that all dict keys are properly named. Raise
- # 'securesystemslib.exceptions.FormatError' if any are improperly formatted.
- formats.THRESHOLD_SCHEMA.check_match(threshold)
-
- roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name)
- roleinfo['previous_threshold'] = roleinfo['threshold']
- roleinfo['threshold'] = threshold
-
- roledb.update_roleinfo(self._rolename, roleinfo,
- repository_name=self._repository_name)
-
-
- @property
- def expiration(self):
- """
-
- A getter method that returns the role's expiration datetime.
-
-
- None.
-
-
- securesystemslib.exceptions.FormatError, if the expiration cannot be
- parsed correctly.
-
-
- None.
-
-
- The role's expiration datetime, a datetime.datetime() object.
- """
-
- roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
- expires = roleinfo['expires']
-
- return formats.expiry_string_to_datetime(expires)
-
-
-
- @expiration.setter
- def expiration(self, datetime_object):
- """
-
- A setter method for the role's expiration datetime. The top-level
- roles have a default expiration (e.g., ROOT_EXPIRATION), but may later
- be modified by this setter method.
-
- >>>
- >>>
- >>>
-
-
- datetime_object:
- The datetime expiration of the role, a datetime.datetime() object.
-
-
- securesystemslib.exceptions.FormatError, if 'datetime_object' is not a
- datetime.datetime() object.
-
- securesystemslib.exceptions.Error, if 'datetime_object' has already
- expired.
-
-
- Modifies the expiration attribute of the Repository object.
- The given datetime is truncated to whole seconds (microsecond set to 0).
-
-
- None.
- """
-
- # Is 'datetime_object' a datetime.datetime() object?
- # Raise 'securesystemslib.exceptions.FormatError' if not.
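Both setters are likewise plain attribute assignments; note that the expiration setter below rejects datetimes that have already passed, so the date here must lie in the future:

  import datetime

  repository.root.threshold = 2
  repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 0, 0)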
- if not isinstance(datetime_object, datetime.datetime):
- raise sslib_exceptions.FormatError(
- repr(datetime_object) + ' is not a datetime.datetime() object.')
-
- # Truncate the microseconds value to produce a correct schema string
- # of the form yyyy-mm-ddThh:mm:ssZ.
- datetime_object = datetime_object.replace(microsecond = 0)
-
- # Ensure the expiration has not already passed.
- current_datetime_object = \
- formats.unix_timestamp_to_datetime(int(time.time()))
-
- if datetime_object < current_datetime_object:
- raise sslib_exceptions.Error(repr(self.rolename) + ' has'
- ' already expired.')
-
- # Update the role's 'expires' entry in 'roledb'.
- roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
- expires = datetime_object.isoformat() + 'Z'
- roleinfo['expires'] = expires
-
- roledb.update_roleinfo(self.rolename, roleinfo,
- repository_name=self._repository_name)
-
-
-
- @property
- def signing_keys(self):
- """
-
- A getter method that returns a list of the role's signing keys.
-
- >>>
- >>>
- >>>
-
-
- None.
-
-
- None.
-
-
- None.
-
-
- A list of keyids of the role's signing keys, conformant to
- 'securesystemslib.formats.KEYIDS_SCHEMA'.
- """
-
- roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
- signing_keyids = roleinfo['signing_keyids']
-
- return signing_keyids
-
-
-
-
-
-class Root(Metadata):
- """
-
- Represent a Root role object. The root role is responsible for
- listing the public keys and threshold of all the top-level roles, including
- itself. Top-level metadata is rejected if it does not comply with what is
- specified by the Root role.
-
- This Root object sub-classes Metadata, so the expected Metadata
- operations like adding/removing public keys, signatures, private keys, and
- updating metadata attributes (e.g., version and expiration) is supported.
- Since Root is a top-level role and must exist, a default Root object
- is instantiated when a new Repository object is created.
-
- >>>
- >>>
- >>>
-
-
- repository_name:
- The name of the repository. If not supplied, 'rolename' is added to the
- 'default' repository.
-
-
- tuf.exceptions.FormatError, if the argument is improperly formatted.
-
-
- A 'root' role is added to 'roledb'.
-
-
- None.
- """
-
- def __init__(self, repository_name):
-
- super(Root, self).__init__()
-
- self._rolename = 'root'
- self._repository_name = repository_name
-
- # Is 'repository_name' properly formatted? Otherwise, raise a
- # tuf.exceptions.FormatError exception.
- formats.ROLENAME_SCHEMA.check_match(repository_name)
-
- # By default, 'root' metadata is set to expire 1 year from the current
- # time. The expiration may be modified.
- expiration = formats.unix_timestamp_to_datetime(
- int(time.time() + ROOT_EXPIRATION))
- expiration = expiration.isoformat() + 'Z'
-
- roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
- 'signatures': [], 'version': 0, 'consistent_snapshot': False,
- 'expires': expiration, 'partial_loaded': False}
- try:
- roledb.add_role(self._rolename, roleinfo, self._repository_name)
-
- except exceptions.RoleAlreadyExistsError:
- pass
-
-
-
-
-
-class Timestamp(Metadata):
- """
-
- Represent a Timestamp role object. The timestamp role is responsible for
- referencing the latest version of the Snapshot role. Under normal
- conditions, it is the only role to be downloaded from a remote repository
- without a known file length and hash. An upper length limit is set, though.
- Its signatures are also verified to be valid according to the Root
- role.
If the client is only able to download invalid metadata, Root
- is the only other role that is downloaded without a known length and hash.
- This case may occur if a role's signing keys have been revoked and a newer
- Root file is needed to list the updated keys.
-
- This Timestamp object sub-classes Metadata, so the expected Metadata
- operations like adding/removing public keys, signatures, private keys, and
- updating metadata attributes (e.g., version and expiration) is supported.
- Since Timestamp is a top-level role and must exist, a default Timestamp
- object is instantiated when a new Repository object is created.
-
- >>>
- >>>
- >>>
-
-
- repository_name:
- The name of the repository. If not supplied, 'rolename' is added to the
- 'default' repository.
-
-
- tuf.exceptions.FormatError, if the argument is improperly formatted.
-
-
- A 'timestamp' role is added to 'roledb'.
-
-
- None.
- """
-
- def __init__(self, repository_name):
-
- super(Timestamp, self).__init__()
-
- self._rolename = 'timestamp'
- self._repository_name = repository_name
-
- # Is 'repository_name' properly formatted? Otherwise, raise a
- # tuf.exceptions.FormatError exception.
- sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
- # By default, 'timestamp' metadata is set to expire 1 day from the current
- # time. The expiration may be modified.
- expiration = formats.unix_timestamp_to_datetime(
- int(time.time() + TIMESTAMP_EXPIRATION))
- expiration = expiration.isoformat() + 'Z'
-
- roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
- 'signatures': [], 'version': 0, 'expires': expiration,
- 'partial_loaded': False}
-
- try:
- roledb.add_role(self.rolename, roleinfo, self._repository_name)
-
- except exceptions.RoleAlreadyExistsError:
- pass
-
-
-
-
-
-class Snapshot(Metadata):
- """
-
- Represent a Snapshot role object. The snapshot role is responsible for
- referencing the other top-level roles (excluding Timestamp) and all
- delegated roles.
-
- This Snapshot object sub-classes Metadata, so the expected
- Metadata operations like adding/removing public keys, signatures, private
- keys, and updating metadata attributes (e.g., version and expiration) is
- supported. Since Snapshot is a top-level role and must exist, a default
- Snapshot object is instantiated when a new Repository object is created.
-
- >>>
- >>>
- >>>
-
-
- repository_name:
- The name of the repository. If not supplied, 'rolename' is added to the
- 'default' repository.
-
-
- tuf.exceptions.FormatError, if the argument is improperly formatted.
-
-
- A 'snapshot' role is added to 'roledb'.
-
-
- None.
- """
-
- def __init__(self, repository_name):
-
- super(Snapshot, self).__init__()
-
- self._rolename = 'snapshot'
- self._repository_name = repository_name
-
- # Is 'repository_name' properly formatted? Otherwise, raise a
- # tuf.exceptions.FormatError exception.
- sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
- # By default, 'snapshot' metadata is set to expire 1 week from the current
- # time. The expiration may be modified.
- expiration = formats.unix_timestamp_to_datetime(
- int(time.time() + SNAPSHOT_EXPIRATION))
- expiration = expiration.isoformat() + 'Z'
-
- roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1,
- 'signatures': [], 'version': 0, 'expires': expiration,
- 'partial_loaded': False}
-
- try:
- roledb.add_role(self._rolename, roleinfo, self._repository_name)
-
- except exceptions.RoleAlreadyExistsError:
- pass
-
-
-
-
-
-class Targets(Metadata):
- """
-
- Represent a Targets role object.
Targets roles include the top-level role - 'targets.json' and all delegated roles (e.g., 'targets/unclaimed/django'). - The expected operations of Targets metadata is included, such as adding - and removing repository target files, making and revoking delegations, and - listing the target files provided by it. - - Adding or removing a delegation causes the attributes of the Targets object - to be updated. That is, if the 'django' Targets object is delegated by - 'targets/unclaimed', a new attribute is added so that the following - code statement is supported: - repository.targets('unclaimed')('django').version = 2 - - Likewise, revoking a delegation causes removal of the delegation attribute. - - This Targets object sub-classes Metadata, so the expected Metadata - operations like adding/removing public keys, signatures, private keys, and - updating metadata attributes (e.g., version and expiration) is supported. - Since Targets is a top-level role and must exist, a default Targets object - (for 'targets.json', not delegated roles) is instantiated when a new - Repository object is created. - - >>> - >>> - >>> - - - targets_directory: - The targets directory of the Repository object. - - rolename: - The rolename of this Targets object. - - roleinfo: - An already populated roleinfo object of 'rolename'. Conformant to - 'tuf.formats.ROLEDB_SCHEMA'. - - repository_name: - The name of the repository. If not supplied, 'rolename' is added to the - 'default' repository. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - - Modifies the roleinfo of the targets role in 'roledb', or creates - a default one named 'targets'. - - - None. - """ - - def __init__(self, targets_directory, rolename='targets', roleinfo=None, - parent_targets_object=None, repository_name='default'): - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - sslib_formats.PATH_SCHEMA.check_match(targets_directory) - formats.ROLENAME_SCHEMA.check_match(rolename) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if roleinfo is not None: - formats.ROLEDB_SCHEMA.check_match(roleinfo) - - super(Targets, self).__init__() - self._targets_directory = targets_directory - self._rolename = rolename - self._target_files = [] - self._delegated_roles = {} - self._parent_targets_object = self - self._repository_name = repository_name - - # Keep a reference to the top-level 'targets' object. Any delegated roles - # that may be created, can be added to and accessed via the top-level - # 'targets' object. - if parent_targets_object is not None: - self._parent_targets_object = parent_targets_object - - # By default, Targets objects are set to expire 3 months from the current - # time. May be later modified. - expiration = formats.unix_timestamp_to_datetime( - int(time.time() + TARGETS_EXPIRATION)) - expiration = expiration.isoformat() + 'Z' - - # If 'roleinfo' is not provided, set an initial default. - if roleinfo is None: - roleinfo = {'keyids': [], 'signing_keyids': [], 'threshold': 1, - 'version': 0, 'expires': expiration, - 'signatures': [], 'paths': {}, 'path_hash_prefixes': [], - 'partial_loaded': False, 'delegations': {'keys': {}, - 'roles': []}} - - # Add the new role to the 'roledb'. 
- try: - roledb.add_role(self.rolename, roleinfo, self._repository_name) - - except exceptions.RoleAlreadyExistsError: - pass - - - - def __call__(self, rolename): - """ - - Allow callable Targets object so that delegated roles may be referenced - by their string rolenames. Rolenames may include characters like '-' and - are not restricted to Python identifiers. - - - rolename: - The rolename of the delegated role. 'rolename' must be a role - previously delegated by this Targets role. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - tuf.exceptions.UnknownRoleError, if 'rolename' has not been - delegated by this Targets object. - - - Modifies the roleinfo of the targets role in 'roledb'. - - - The Targets object of 'rolename'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - formats.ROLENAME_SCHEMA.check_match(rolename) - - if rolename in self._delegated_roles: - return self._delegated_roles[rolename] - - else: - raise exceptions.UnknownRoleError(repr(rolename) + ' has' - ' not been delegated by ' + repr(self.rolename)) - - - - def add_delegated_role(self, rolename, targets_object): - """ - - Add 'targets_object' to this Targets object's list of known delegated - roles. Specifically, delegated Targets roles should call 'super(Targets, - self).add_delegated_role(...)' so that the top-level 'targets' role - contains a dictionary of all the available roles on the repository. - - - rolename: - The rolename of the delegated role. 'rolename' must be a role - previously delegated by this Targets role. - - targets_object: - A Targets() object. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - - Updates the Target object's dictionary of delegated targets. - - - The Targets object of 'rolename'. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. - formats.ROLENAME_SCHEMA.check_match(rolename) - - if not isinstance(targets_object, Targets): - raise sslib_exceptions.FormatError(repr(targets_object) + ' is' - ' not a Targets object.') - - - if rolename in self._delegated_roles: - logger.debug(repr(rolename) + ' already exists.') - - else: - self._delegated_roles[rolename] = targets_object - - - - def remove_delegated_role(self, rolename): - """ - Remove 'rolename' from this Targets object's list of delegated roles. - This method does not update roledb and others. - - - rolename: - The rolename of the delegated role to remove. 'rolename' should be a - role previously delegated by this Targets role. - - - securesystemslib.exceptions.FormatError, if the argument is improperly - formatted. - - - Updates the Target object's dictionary of delegated targets. - - - None. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if any are improperly formatted. 
-    formats.ROLENAME_SCHEMA.check_match(rolename)
-
-    if rolename not in self._delegated_roles:
-      logger.debug(repr(rolename) + ' has not been delegated.')
-      return
-
-    else:
-      del self._delegated_roles[rolename]
-
-
-
-  @property
-  def target_files(self):
-    """
-
-      A getter method that returns the target files added thus far to this
-      Targets object.
-
-      >>>
-      >>>
-      >>>
-
-
-      None.
-
-
-      None.
-
-
-      None.
-
-
-      None.
-    """
-
-    target_files = roledb.get_roleinfo(self._rolename,
-        self._repository_name)['paths']
-    return target_files
-
-
-
-  def add_paths(self, paths, child_rolename):
-    """
-
-      Add 'paths' to the delegated paths of 'child_rolename'.  'paths' can be a
-      list of either file paths or glob patterns.  The updater client verifies
-      the target paths specified by child roles, and searches for targets by
-      visiting these delegated paths.  A child role may only provide targets
-      specifically listed in the delegations field of the delegating role, or a
-      target that matches a delegated path.
-
-      >>>
-      >>>
-      >>>
-
-
-      paths:
-        A list of glob patterns, or file paths, that 'child_rolename' is
-        trusted to provide.
-
-      child_rolename:
-        The child delegation that requires an update to its delegated or
-        trusted paths, as listed in the parent role's delegations (e.g.,
-        'Django' in 'unclaimed').
-
-
-      securesystemslib.exceptions.FormatError, if a path or glob pattern in
-      'paths' is not a string, or if 'child_rolename' is not a formatted
-      rolename.
-
-      securesystemslib.exceptions.Error, if 'child_rolename' has not been
-      delegated yet.
-
-      tuf.exceptions.InvalidNameError, if any path in 'paths' does not match
-      pattern.
-
-
-      Modifies this Targets' delegations field.
-
-
-      None.
-    """
-
-    # Do the arguments have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    sslib_formats.PATHS_SCHEMA.check_match(paths)
-    formats.ROLENAME_SCHEMA.check_match(child_rolename)
-
-    # Ensure that 'child_rolename' exists, otherwise it will not have an entry
-    # in the parent role's delegations field.
-    if not roledb.role_exists(child_rolename, self._repository_name):
-      raise sslib_exceptions.Error(repr(child_rolename) + ' does'
-          ' not exist.')
-
-    for path in paths:
-      # Check if the delegated paths or glob patterns are relative and use
-      # forward slash as a separator or raise an exception.  Paths' existence
-      # on the file system is not verified.  If the path is incorrect,
-      # the targetfile won't be matched successfully during a client update.
-      self._check_path(path)
-
-    # Get the current role's roleinfo, so that its delegations field can be
-    # updated.
-    roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name)
-
-    # Update the delegated paths of 'child_rolename' to add relative paths.
-    for role in roleinfo['delegations']['roles']:
-      if role['name'] == child_rolename:
-        for relative_path in paths:
-          if relative_path not in role['paths']:
-            role['paths'].append(relative_path)
-
-          else:
-            logger.debug(repr(relative_path) + ' is already a delegated path.')
-      else:
-        logger.debug(repr(role['name']) + ' does not match child rolename.')
-
-    roledb.update_roleinfo(self._rolename, roleinfo,
-        repository_name=self._repository_name)
-
-
-
-  def add_target(self, filepath, custom=None, fileinfo=None):
-    """
-
-      Add a filepath (must be relative to the repository's targets directory)
-      to the Targets object.
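-
-      For example, a caller might do roughly the following (a hedged sketch;
-      the 'repository' object and the target file under its targets directory
-      are assumed to exist):
-
-        repository.targets.add_target('foo/bar.tar.gz',
-            custom={'type': 'package'})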
- - If 'filepath' has already been added, it will be replaced with any new - file or 'custom' information. - - >>> - >>> - >>> - - - filepath: - The path of the target file. It must be relative to the repository's - targets directory. - - custom: - An optional dictionary providing additional information about the file. - NOTE: if a custom value is passed, the fileinfo parameter must be None. - This parameter will be deprecated in a future release of tuf, use of - the fileinfo parameter is preferred. - - fileinfo: - An optional fileinfo dictionary, conforming to - tuf.formats.TARGETS_FILEINFO_SCHEMA, providing full information about the - file, i.e: - { 'length': 101, - 'hashes': { 'sha256': '123EDF...' }, - 'custom': { 'permissions': '600'} # optional - } - NOTE: if a custom value is passed, the fileinfo parameter must be None. - - - securesystemslib.exceptions.FormatError, if 'filepath' is improperly - formatted. - - tuf.exceptions.InvalidNameError, if 'filepath' does not match pattern. - - - Adds 'filepath' to this role's list of targets. This role's - 'roledb' entry is also updated. - - - None. - """ - - # Does 'filepath' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.RELPATH_SCHEMA.check_match(filepath) - - if fileinfo and custom: - raise sslib_exceptions.Error("Can only take one of" - " custom or fileinfo, not both.") - - if fileinfo: - formats.TARGETS_FILEINFO_SCHEMA.check_match(fileinfo) - - if custom is None: - custom = {} - else: - formats.CUSTOM_SCHEMA.check_match(custom) - - # Add 'filepath' (i.e., relative to the targets directory) to the role's - # list of targets. 'filepath' will not be verified as an allowed path - # according to some delegating role. Not verifying 'filepath' here allows - # freedom to add targets and parent restrictions in any order, minimize - # the number of times these checks are performed, and allow any role to - # delegate trust of packages to this Targets role. - - # Check if the target is relative and uses forward slash as a separator - # or raise an exception. File's existence on the file system is not - # verified. If the file does not exist relative to the targets directory, - # later calls to write() will fail. - self._check_path(filepath) - - # Update the role's 'roledb' entry and avoid duplicates. - roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name) - - if filepath not in roleinfo['paths']: - logger.debug('Adding new target: ' + repr(filepath)) - - else: - logger.debug('Replacing target: ' + repr(filepath)) - - if fileinfo: - roleinfo['paths'].update({filepath: fileinfo}) - else: - roleinfo['paths'].update({filepath: {'custom': custom}}) - - roledb.update_roleinfo(self._rolename, roleinfo, - repository_name=self._repository_name) - - - - def add_targets(self, list_of_targets): - """ - - Add a list of target filepaths (all relative to 'self.targets_directory'). - This method does not actually create files on the file system. The - list of targets must already exist on disk. - - >>> - >>> - >>> - - - list_of_targets: - A list of target filepaths that are added to the paths of this Targets - object. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - tuf.exceptions.InvalidNameError, if any target in 'list_of_targets' - does not match pattern. 
- - - This Targets' roleinfo is updated with the paths in 'list_of_targets'. - - - None. - """ - - # Does 'list_of_targets' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.RELPATHS_SCHEMA.check_match(list_of_targets) - - # Ensure the paths in 'list_of_targets' are relative and use forward slash - # as a separator or raise an exception. The paths of 'list_of_targets' - # will be verified as existing and allowed paths according to this Targets - # parent role when write() or writeall() is called. Not verifying - # filepaths here allows the freedom to add targets and parent restrictions - # in any order and minimize the number of times these checks are performed. - for target in list_of_targets: - self._check_path(target) - - # Update this Targets 'roledb' entry. - roleinfo = roledb.get_roleinfo(self._rolename, self._repository_name) - for relative_target in list_of_targets: - if relative_target not in roleinfo['paths']: - logger.debug('Adding new target: ' + repr(relative_target)) - else: - logger.debug('Replacing target: ' + repr(relative_target)) - roleinfo['paths'].update({relative_target: {}}) - - roledb.update_roleinfo(self.rolename, roleinfo, - repository_name=self._repository_name) - - - - def remove_target(self, filepath): - """ - - Remove the target 'filepath' from this Targets' 'paths' field. 'filepath' - is relative to the targets directory. - - >>> - >>> - >>> - - - filepath: - The target to remove from this Targets object, relative to the - repository's targets directory. - - - securesystemslib.exceptions.FormatError, if 'filepath' is improperly - formatted. - - securesystemslib.exceptions.Error, if 'filepath' is not located in the - repository's targets directory, or not found. - - - Modifies this Targets 'roledb' entry. - - - None. - """ - - # Does 'filepath' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. Raise - # 'securesystemslib.exceptions.FormatError' if there is a mismatch. - formats.RELPATH_SCHEMA.check_match(filepath) - - # Remove 'relative_filepath', if found, and update this Targets roleinfo. - fileinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - if filepath in fileinfo['paths']: - del fileinfo['paths'][filepath] - roledb.update_roleinfo(self.rolename, fileinfo, - repository_name=self._repository_name) - - else: - raise sslib_exceptions.Error('Target file path not found.') - - - - def clear_targets(self): - """ - - Remove all the target filepaths in the "paths" field of this Targets. - - >>> - >>> - >>> - - - None - - - None. - - - Modifies this Targets' 'roledb' entry. - - - None. - """ - - roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - roleinfo['paths'] = {} - - roledb.update_roleinfo(self.rolename, roleinfo, - repository_name=self._repository_name) - - - - - - def get_delegated_rolenames(self): - """ - - Return all delegations of a role. If ['a/b/', 'a/b/c/', 'a/b/c/d'] have - been delegated by the delegated role 'django', - repository.targets('django').get_delegated_rolenames() returns: ['a/b', - 'a/b/c', 'a/b/c/d']. - - - None. - - - None. - - - None. - - - A list of rolenames. 
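-
-      For example, roughly (a hedged sketch; the 'repository' object and a
-      'django' delegation are assumed to exist):
-
-        for delegated_name in repository.targets('django').get_delegated_rolenames():
-          print(delegated_name)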
- """ - - return roledb.get_delegated_rolenames(self.rolename, self._repository_name) - - - - - - def _create_delegated_target(self, rolename, keyids, threshold, paths): - """ - Create a new Targets object for the 'rolename' delegation. An initial - expiration is set (3 months from the current time). - """ - - expiration = formats.unix_timestamp_to_datetime( - int(time.time() + TARGETS_EXPIRATION)) - expiration = expiration.isoformat() + 'Z' - - roleinfo = {'name': rolename, 'keyids': keyids, 'signing_keyids': [], - 'threshold': threshold, 'version': 0, - 'expires': expiration, 'signatures': [], 'partial_loaded': False, - 'paths': paths, 'delegations': {'keys': {}, 'roles': []}} - - # The new targets object is added as an attribute to this Targets object. - new_targets_object = Targets(self._targets_directory, rolename, roleinfo, - parent_targets_object=self._parent_targets_object, - repository_name=self._repository_name) - - return new_targets_object - - - - - - def _update_roledb_delegations(self, keydict, delegations_roleinfo): - """ - Update the roledb to include delegations of the keys in keydict and the - roles in delegations_roleinfo - """ - - current_roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name) - current_roleinfo['delegations']['keys'].update(keydict) - - for roleinfo in delegations_roleinfo: - current_roleinfo['delegations']['roles'].append(roleinfo) - - roledb.update_roleinfo(self.rolename, current_roleinfo, - repository_name=self._repository_name) - - - - - - def delegate(self, rolename, public_keys, paths, threshold=1, - terminating=False, list_of_targets=None, path_hash_prefixes=None): - """ - - Create a new delegation, where 'rolename' is a child delegation of this - Targets object. The keys and roles database is updated, including the - delegations field of this Targets. The delegation of 'rolename' is added - and accessible (i.e., repository.targets(rolename)). - - Actual metadata files are not created, only when repository.writeall() or - repository.write() is called. - - >>> - >>> - >>> - - - rolename: - The name of the delegated role, as in 'django' or 'unclaimed'. - - public_keys: - A list of TUF key objects in 'ANYKEYLIST_SCHEMA' format. The list - may contain any of the supported key types: RSAKEY_SCHEMA, - ED25519KEY_SCHEMA, etc. - - paths: - The paths, or glob patterns, delegated to 'rolename'. Any targets - added to 'rolename', via add_targets() or 'list_of_targets', must - match one of the paths or glob patterns in 'paths'. Apart from the - public keys of 'rolename', the delegated 'paths' is often known and - specified when a delegation is first performed. If the delegator - is unsure of which 'paths' to delegate, 'paths' can be set to ['']. - - threshold: - The threshold number of keys of 'rolename'. - - terminating: - Boolean that indicates whether this role allows the updater client to - continue searching for targets (target files it is trusted to list but - has not yet specified) in other delegations. If 'terminating' is True - and 'updater.target()' does not find 'example_target.tar.gz' in this - role, a 'tuf.exceptions.UnknownTargetError' exception should be raised. - If 'terminating' is False (default), and 'target/other_role' is also - trusted with 'example_target.tar.gz' and has listed it, - updater.target() should backtrack and return the target file specified - by 'target/other_role'. - - list_of_targets: - A list of target filepaths that are added to 'rolename'. 
'list_of_targets' is a list of target filepaths, can be empty, and each
-        filepath must be located in the repository's targets directory.  The
-        list of targets should also exist at the specified paths, otherwise
-        non-existent target paths might not be added when the targets file is
-        written to disk with writeall() or write().
-
-      path_hash_prefixes:
-        A list of hash prefixes in
-        'tuf.formats.PATH_HASH_PREFIXES_SCHEMA' format, used in
-        hashed bin delegations.  Targets may be located and stored in hashed
-        bins by calculating the target path's hash prefix.
-
-
-      securesystemslib.exceptions.FormatError, if any of the arguments are
-      improperly formatted.
-
-      securesystemslib.exceptions.Error, if the delegated role already exists.
-
-      tuf.exceptions.InvalidNameError, if any path in 'paths' or target in
-      'list_of_targets' does not match pattern.
-
-
-      A new Target object is created for 'rolename' that is accessible to the
-      caller (i.e., targets.<rolename>).  The 'keydb' and
-      'roledb' stores are updated with 'public_keys'.
-
-
-      None.
-    """
-
-    # Do the arguments have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    formats.ROLENAME_SCHEMA.check_match(rolename)
-    sslib_formats.ANYKEYLIST_SCHEMA.check_match(public_keys)
-    formats.RELPATHS_SCHEMA.check_match(paths)
-    formats.THRESHOLD_SCHEMA.check_match(threshold)
-    sslib_formats.BOOLEAN_SCHEMA.check_match(terminating)
-
-    if list_of_targets is not None:
-      formats.RELPATHS_SCHEMA.check_match(list_of_targets)
-
-    if path_hash_prefixes is not None:
-      formats.PATH_HASH_PREFIXES_SCHEMA.check_match(path_hash_prefixes)
-
-    # Keep track of the valid keyids (added to the new Targets object) and
-    # their keydicts (added to this Targets delegations).
-    keyids, keydict = repo_lib.keys_to_keydict(public_keys)
-
-    # Ensure the paths of 'list_of_targets' are located in the repository's
-    # targets directory.
-    relative_targetpaths = {}
-
-    if list_of_targets:
-      for target in list_of_targets:
-        # Check if the target path is relative or raise an exception.  File's
-        # existence on the file system is not verified.  If the file does not
-        # exist relative to the targets directory, later calls to write()
-        # will fail.
-        self._check_path(target)
-        relative_targetpaths.update({target: {}})
-
-    for path in paths:
-      # Check if the delegated paths or glob patterns are relative or
-      # raise an exception.  Paths' existence on the file system is not
-      # verified.  If the path is incorrect, the targetfile won't be matched
-      # successfully during a client update.
-      self._check_path(path)
-
-    # The new targets object is added as an attribute to this Targets object.
-    new_targets_object = self._create_delegated_target(rolename, keyids,
-        threshold, relative_targetpaths)
-
-    # Update the roleinfo of this role.  A ROLE_SCHEMA object requires only
-    # 'keyids', 'threshold', and 'paths'.
-    roleinfo = {'name': rolename,
-                'keyids': keyids,
-                'threshold': threshold,
-                'terminating': terminating,
-                'paths': list(relative_targetpaths.keys())}
-
-    if paths:
-      roleinfo['paths'] = paths
-
-    if path_hash_prefixes:
-      roleinfo['path_hash_prefixes'] = path_hash_prefixes
-      # A role in a delegations field must list either 'path_hash_prefixes'
-      # or 'paths'.
-      del roleinfo['paths']
-
-    # Update the public keys of 'new_targets_object'.
-    for key in public_keys:
-      new_targets_object.add_verification_key(key)
-
-    # Add the new delegation to the top-level 'targets' role object (i.e.,
-    # 'repository.targets()').  For example, 'django', which was delegated by
-    # repository.target('claimed'), is added to 'repository.targets('django')).
-    if self.rolename != 'targets':
-      self._parent_targets_object.add_delegated_role(rolename,
-          new_targets_object)
-
-    # Add 'new_targets_object' to the delegating role object (this object).
-    self.add_delegated_role(rolename, new_targets_object)
-
-    # Update the 'delegations' field of the current role.
-    self._update_roledb_delegations(keydict, [roleinfo])
-
-
-
-
-
-  def revoke(self, rolename):
-    """
-
-      Revoke this Targets' 'rolename' delegation.  Its 'rolename' attribute is
-      deleted, including the entries in its 'delegations' field and in
-      'roledb'.
-
-      Actual metadata files are not updated until repository.writeall() or
-      repository.write() is called.
-
-      >>>
-      >>>
-      >>>
-
-
-      rolename:
-        The rolename (e.g., 'Django' in 'django') of the child delegation the
-        parent role (this role) wants to revoke.
-
-
-      securesystemslib.exceptions.FormatError, if 'rolename' is improperly
-      formatted.
-
-
-      The delegations dictionary of 'rolename' is modified, and its 'roledb'
-      entry is updated.  This Targets' 'rolename' delegation attribute is also
-      deleted.
-
-
-      None.
-    """
-
-    # Does 'rolename' have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    formats.ROLENAME_SCHEMA.check_match(rolename)
-
-    # Remove 'rolename' from this Target's delegations dict.
-    roleinfo = roledb.get_roleinfo(self.rolename, self._repository_name)
-
-    for role in roleinfo['delegations']['roles']:
-      if role['name'] == rolename:
-        roleinfo['delegations']['roles'].remove(role)
-
-    roledb.update_roleinfo(self.rolename, roleinfo,
-        repository_name=self._repository_name)
-
-    # Remove 'rolename' from 'roledb'.
-    try:
-      roledb.remove_role(rolename, self._repository_name)
-      # Remove the rolename delegation from the current role.  For example, the
-      # 'django' role is removed from repository.targets('django').
-      del self._delegated_roles[rolename]
-      self._parent_targets_object.remove_delegated_role(rolename)
-
-    except (exceptions.UnknownRoleError, KeyError):
-      pass
-
-
-
-  def delegate_hashed_bins(self, list_of_targets, keys_of_hashed_bins,
-      number_of_bins=DEFAULT_NUM_BINS):
-    """
-
-      Distribute a large number of target files over multiple delegated roles
-      (hashed bins).  The metadata files of delegated roles will be nearly
-      equal in size (i.e., 'list_of_targets' is uniformly distributed by
-      calculating the target filepath's hash and determining which bin it
-      should reside in).  The updater client will use "lazy bin walk" to find
-      a target file's hashed bin destination.  The parent role lists a range
-      of path hash prefixes each hashed bin contains.  This method is intended
-      for repositories with a large number of target files, a way of easily
-      distributing and managing the metadata that lists the targets, and
-      minimizing the number of metadata files (and their size) downloaded by
-      the client.
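-
-      For example, roughly (a hedged sketch; 'repository', the public key,
-      and 'list_of_targets' are assumed to have been created earlier):
-
-        targets = repository.targets('unclaimed')
-        targets.delegate_hashed_bins(list_of_targets,
-            [public_unclaimed_key], number_of_bins=32)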
See tuf-spec.txt and the following link for more - information: - http://www.python.org/dev/peps/pep-0458/#metadata-scalability - - >>> - >>> - >>> - - - list_of_targets: - The target filepaths of the targets that should be stored in hashed - bins created (i.e., delegated roles). A repository object's - get_filepaths_in_directory() can generate a list of valid target - paths. - - keys_of_hashed_bins: - The initial public keys of the delegated roles. Public keys may be - later added or removed by calling the usual methods of the delegated - Targets object. For example: - repository.targets('000-003').add_verification_key() - - number_of_bins: - The number of delegated roles, or hashed bins, that should be generated - and contain the target file attributes listed in 'list_of_targets'. - 'number_of_bins' must be a power of 2. Each bin may contain a - range of path hash prefixes (e.g., target filepath digests that range - from [000]... - [003]..., where the series of digits in brackets is - considered the hash prefix). - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - securesystemslib.exceptions.Error, if 'number_of_bins' is not a power of - 2, or one of the targets in 'list_of_targets' is not relative to the - repository's targets directory. - - tuf.exceptions.InvalidNameError, if any target in 'list_of_targets' - does not match pattern. - - - Delegates multiple target roles from the current parent role. - - - None. - """ - - # Do the arguments have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATHS_SCHEMA.check_match(list_of_targets) - sslib_formats.ANYKEYLIST_SCHEMA.check_match(keys_of_hashed_bins) - formats.NUMBINS_SCHEMA.check_match(number_of_bins) - - prefix_length, prefix_count, bin_size = repo_lib.get_bin_numbers(number_of_bins) - - logger.info('Creating hashed bin delegations.\n' + - repr(len(list_of_targets)) + ' total targets.\n' + - repr(number_of_bins) + ' hashed bins.\n' + - repr(prefix_count) + ' total hash prefixes.\n' + - 'Each bin ranges over ' + repr(bin_size) + ' hash prefixes.') - - # Generate a list of bin names, the range of prefixes to be delegated to - # that bin, along with the corresponding full list of target prefixes - # to be delegated to that bin - ordered_roles = [] - for idx in range(0, prefix_count, bin_size): - high = idx + bin_size - 1 - name = repo_lib.create_bin_name(idx, high, prefix_length) - if bin_size == 1: - target_hash_prefixes = [name] - else: - target_hash_prefixes = [] - for idy in range(idx, idx+bin_size): - target_hash_prefixes.append("{prefix:0{len}x}".format(prefix=idy, - len=prefix_length)) - - role = {"name": name, - "target_paths": [], - "target_hash_prefixes": target_hash_prefixes} - ordered_roles.append(role) - - for target_path in list_of_targets: - # Check if the target path is relative or raise an exception. File's - # existence on the file system is not verified. If the file does not - # exist relative to the targets directory, later calls to write() and - # writeall() will fail. - self._check_path(target_path) - - # Determine the hash prefix of 'target_path' by computing the digest of - # its path relative to the targets directory. 
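-      # For example, with a prefix length of 2, a target whose path hash
-      # starts with '5a' lands in the bin covering that prefix (the bin
-      # named '58-5f' when 'bin_size' is 8).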
- # We must hash a target path as it appears in the metadata - hash_prefix = repo_lib.get_target_hash(target_path)[:prefix_length] - ordered_roles[int(hash_prefix, 16) // bin_size]["target_paths"].append(target_path) - - keyids, keydict = repo_lib.keys_to_keydict(keys_of_hashed_bins) - - # A queue of roleinfo's that need to be updated in the roledb - delegated_roleinfos = [] - - for bin_role in ordered_roles: - # TODO: originally we just called self.delegate() for each item in this - # iteration. However, this is *extremely* slow when creating a large - # number of hashed bins, i.e. 16k as is recommended for PyPI usage in - # PEP 458: https://www.python.org/dev/peps/pep-0458/ - # The source of the slowness is the interactions with the roledb, which - # causes several deep copies of roleinfo dictionaries: - # https://github.com/theupdateframework/python-tuf/issues/1005 - # Once the underlying issues in #1005 are resolved, i.e. some combination - # of the intermediate and long-term fixes, we may simplify here by - # switching back to just calling self.delegate(), but until that time we - # queue roledb interactions and perform all updates to the roledb in one - # operation at the end of the iteration. - - relative_paths = {} - for path in bin_role['target_paths']: - relative_paths.update({path: {}}) - - # Delegate from the "unclaimed" targets role to each 'bin_role' - target = self._create_delegated_target(bin_role['name'], keyids, 1, - relative_paths) - - roleinfo = {'name': bin_role['name'], - 'keyids': keyids, - 'threshold': 1, - 'terminating': False, - 'path_hash_prefixes': bin_role['target_hash_prefixes']} - delegated_roleinfos.append(roleinfo) - - for key in keys_of_hashed_bins: - target.add_verification_key(key) - - # Add the new delegation to the top-level 'targets' role object (i.e., - # 'repository.targets()'). - if self.rolename != 'targets': - self._parent_targets_object.add_delegated_role(bin_role['name'], - target) - - # Add 'new_targets_object' to the 'targets' role object (this object). - self.add_delegated_role(bin_role['name'], target) - logger.debug('Delegated from ' + repr(self.rolename) + ' to ' + repr(bin_role)) - - - self._update_roledb_delegations(keydict, delegated_roleinfos) - - - - - def add_target_to_bin(self, target_filepath, number_of_bins=DEFAULT_NUM_BINS, - fileinfo=None): - """ - - Add the fileinfo of 'target_filepath' to the expected hashed bin, if the - bin is available. The hashed bin should have been created by - {targets_role}.delegate_hashed_bins(). Assuming the target filepath is - located in the repository's targets directory, determine the filepath's - hash prefix, locate the expected bin (if any), and then add the fileinfo - to the expected bin. Example: 'targets/foo.tar.gz' may be added to the - 'targets/unclaimed/58-5f.json' role's list of targets by calling this - method. - - - target_filepath: - The filepath of the target to be added to a hashed bin. The filepath - must be located in the repository's targets directory. - - number_of_bins: - The number of delegated roles, or hashed bins, in use by the repository. - Note: 'number_of_bins' must be a power of 2. - - fileinfo: - An optional fileinfo object, conforming to tuf.formats.TARGETS_FILEINFO_SCHEMA, - providing full information about the file. - - - securesystemslib.exceptions.FormatError, if 'target_filepath' is - improperly formatted. 
-
-      securesystemslib.exceptions.Error, if 'target_filepath' cannot be added to
-      a hashed bin (e.g., an invalid target filepath, or the expected hashed
-      bin does not exist.)
-
-
-      The fileinfo of 'target_filepath' is added to a hashed bin of this Targets
-      object.
-
-
-      The name of the hashed bin that the target was added to.
-    """
-
-    # Do the arguments have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    sslib_formats.PATH_SCHEMA.check_match(target_filepath)
-    formats.NUMBINS_SCHEMA.check_match(number_of_bins)
-
-    # TODO: check target_filepath is sane
-
-    path_hash = repo_lib.get_target_hash(target_filepath)
-    bin_name = repo_lib.find_bin_for_target_hash(path_hash, number_of_bins)
-
-    # Ensure the Targets object has delegated to hashed bins
-    if not self._delegated_roles.get(bin_name, None):
-      raise sslib_exceptions.Error(self.rolename + ' does not have'
-          ' a delegated role ' + bin_name)
-
-    self._delegated_roles[bin_name].add_target(target_filepath,
-        fileinfo=fileinfo)
-
-    return bin_name
-
-
-
-  def remove_target_from_bin(self, target_filepath,
-      number_of_bins=DEFAULT_NUM_BINS):
-    """
-
-      Remove the fileinfo of 'target_filepath' from the expected hashed bin, if
-      the bin is available.  The hashed bin should have been created by
-      {targets_role}.delegate_hashed_bins().  Assuming the target filepath is
-      located in the repository's targets directory, determine the filepath's
-      hash prefix, locate the expected bin (if any), and then remove the
-      fileinfo from the expected bin.  Example: 'targets/foo.tar.gz' may be
-      removed from the '58-5f.json' role's list of targets by calling this
-      method.
-
-
-      target_filepath:
-        The filepath of the target to be removed from a hashed bin.  The
-        filepath must be located in the repository's targets directory.
-
-      number_of_bins:
-        The number of delegated roles, or hashed bins, in use by the repository.
-        Note: 'number_of_bins' must be a power of 2.
-
-
-      securesystemslib.exceptions.FormatError, if 'target_filepath' is
-      improperly formatted.
-
-      securesystemslib.exceptions.Error, if 'target_filepath' cannot be removed
-      from a hashed bin (e.g., an invalid target filepath, or the expected
-      hashed bin does not exist.)
-
-
-      The fileinfo of 'target_filepath' is removed from a hashed bin of this
-      Targets object.
-
-
-      The name of the hashed bin that the target was removed from.
-    """
-
-    # Do the arguments have the correct format?
-    # Ensure the arguments have the appropriate number of objects and object
-    # types, and that all dict keys are properly named.
-    # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
-    sslib_formats.PATH_SCHEMA.check_match(target_filepath)
-    formats.NUMBINS_SCHEMA.check_match(number_of_bins)
-
-    # TODO: check target_filepath is sane?
-
-    path_hash = repo_lib.get_target_hash(target_filepath)
-    bin_name = repo_lib.find_bin_for_target_hash(path_hash, number_of_bins)
-
-    # Ensure the Targets object has delegated to hashed bins
-    if not self._delegated_roles.get(bin_name, None):
-      raise sslib_exceptions.Error(self.rolename + ' does not have'
-          ' a delegated role ' + bin_name)
-
-    self._delegated_roles[bin_name].remove_target(target_filepath)
-
-    return bin_name
-
-
-  @property
-  def delegations(self):
-    """
-
-      A getter method that returns the delegations made by this Targets role.
-
-      >>>
-      >>>
-      >>>
-
-
-      None.
-
-
-      tuf.exceptions.UnknownRoleError, if this Targets' rolename
-      does not exist in 'roledb'.
-
-
-      None.
-
-
-      A list containing the Targets objects of this Targets' delegations.
-    """
-
-    return list(self._delegated_roles.values())
-
-
-
-
-
-  def _check_path(self, pathname):
-    """
-
-      Check if a path matches the definition of a PATHPATTERN or a
-      TARGETPATH (uses the forward slash (/) as directory separator and
-      does not start with a directory separator).  Checks are performed only
-      on the path string, without accessing the file system.
-
-
-      pathname:
-        A file path or a glob pattern.
-
-
-      securesystemslib.exceptions.FormatError, if 'pathname' is improperly
-      formatted.
-
-      tuf.exceptions.InvalidNameError, if 'pathname' does not match pattern.
-
-
-      None.
-    """
-
-    formats.RELPATH_SCHEMA.check_match(pathname)
-
-    if '\\' in pathname:
-      raise exceptions.InvalidNameError('Path ' + repr(pathname)
-          + ' does not use the forward slash (/) as directory separator.')
-
-    if pathname.startswith('/'):
-      raise exceptions.InvalidNameError('Path ' + repr(pathname)
-          + ' starts with a directory separator. All paths should be relative'
-          '  to targets directory.')
-
-
-
-
-def create_new_repository(repository_directory, repository_name='default',
-    storage_backend=None, use_timestamp_length=True, use_timestamp_hashes=True,
-    use_snapshot_length=False, use_snapshot_hashes=False):
-  """
-
-    Create a new repository, instantiate barebones metadata for the top-level
-    roles, and return a Repository object.  On disk, create_new_repository()
-    only creates the directories needed to hold the metadata and targets files.
-    The repository object returned may be modified to update the newly created
-    repository.  The methods of the returned object may be called to create
-    actual repository files (e.g., repository.write()).
-
-
-    repository_directory:
-      The directory that will eventually hold the metadata and target files of
-      the TUF repository.
-
-    repository_name:
-      The name of the repository.  If not supplied, 'default' is used as the
-      repository name.
-
-    storage_backend:
-      An object which implements
-      securesystemslib.storage.StorageBackendInterface.  When no object is
-      passed a FilesystemBackend will be instantiated and used.
-
-    use_timestamp_length:
-      Whether to include the optional length attribute of the snapshot
-      metadata file in the timestamp metadata.
-      Default is True.
-
-    use_timestamp_hashes:
-      Whether to include the optional hashes attribute of the snapshot
-      metadata file in the timestamp metadata.
-      Default is True.
-
-    use_snapshot_length:
-      Whether to include the optional length attribute for targets
-      metadata files in the snapshot metadata.
-      Default is False to save bandwidth but without losing security
-      from rollback attacks.
-      Read more at section 5.6 from the Mercury paper:
-      https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy
-
-    use_snapshot_hashes:
-      Whether to include the optional hashes attribute for targets
-      metadata files in the snapshot metadata.
-      Default is False to save bandwidth but without losing security
-      from rollback attacks.
-      Read more at section 5.6 from the Mercury paper:
-      https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy
-
-
-    securesystemslib.exceptions.FormatError, if the arguments are improperly
-    formatted.
-
-
-    The 'repository_directory' is created if it does not exist, including its
-    metadata and targets sub-directories.
-
-
-    A 'tuf.repository_tool.Repository' object.
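-
-    For example, roughly (a hedged sketch; the directory path is
-    hypothetical):
-
-      repository = create_new_repository('path/to/repository')
-      # Keys and targets may then be added before calling
-      # repository.writeall() to generate the actual metadata files.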
- """ - - # Does 'repository_directory' have the correct format? - # Ensure the arguments have the appropriate number of objects and object - # types, and that all dict keys are properly named. - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(repository_directory) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if storage_backend is None: - storage_backend = sslib_storage.FilesystemBackend() - - # Set the repository, metadata, and targets directories. These directories - # are created if they do not exist. - repository_directory = os.path.abspath(repository_directory) - metadata_directory = None - targets_directory = None - - # Ensure the 'repository_directory' exists - logger.info('Creating ' + repr(repository_directory)) - storage_backend.create_folder(repository_directory) - - # Set the metadata and targets directories. The metadata directory is a - # staged one so that the "live" repository is not affected. The - # staged metadata changes may be moved over to "live" after all updated - # have been completed. - metadata_directory = os.path.join(repository_directory, - METADATA_STAGED_DIRECTORY_NAME) - targets_directory = os.path.join(repository_directory, TARGETS_DIRECTORY_NAME) - - # Ensure the metadata directory exists - logger.info('Creating ' + repr(metadata_directory)) - storage_backend.create_folder(metadata_directory) - - # Ensure the targets directory exists - logger.info('Creating ' + repr(targets_directory)) - storage_backend.create_folder(targets_directory) - - # Create the bare bones repository object, where only the top-level roles - # have been set and contain default values (e.g., Root roles has a threshold - # of 1, expires 1 year into the future, etc.) - repository = Repository(repository_directory, metadata_directory, - targets_directory, storage_backend, repository_name, use_timestamp_length, - use_timestamp_hashes, use_snapshot_length, use_snapshot_hashes) - - return repository - - - - - -def load_repository(repository_directory, repository_name='default', - storage_backend=None, use_timestamp_length=True, use_timestamp_hashes=True, - use_snapshot_length=False, use_snapshot_hashes=False): - """ - - Return a repository object containing the contents of metadata files loaded - from the repository. - - - repository_directory: - The root folder of the repository that contains the metadata and targets - sub-directories. - - repository_name: - The name of the repository. If not supplied, 'default' is used as the - repository name. - - storage_backend: - An object which implements - securesystemslib.storage.StorageBackendInterface. When no object is - passed a FilesystemBackend will be instantiated and used. - - use_timestamp_length: - Whether to include the optional length attribute of the snapshot - metadata file in the timestamp metadata. - Default is True. - - use_timestamp_hashes: - Whether to include the optional hashes attribute of the snapshot - metadata file in the timestamp metadata. - Default is True. - - use_snapshot_length: - Whether to include the optional length attribute for targets - metadata files in the snapshot metadata. - Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - use_snapshot_hashes: - Whether to include the optional hashes attribute for targets - metadata files in the snapshot metadata. 
- Default is False to save bandwidth but without losing security - from rollback attacks. - Read more at section 5.6 from the Mercury paper: - https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy - - - securesystemslib.exceptions.FormatError, if 'repository_directory' or any of - the metadata files are improperly formatted. - - tuf.exceptions.RepositoryError, if the Root role cannot be - found. At a minimum, a repository must contain 'root.json' - - - All the metadata files found in the repository are loaded and their contents - stored in a repository_tool.Repository object. - - - repository_tool.Repository object. - """ - - # Does 'repository_directory' have the correct format? - # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. - sslib_formats.PATH_SCHEMA.check_match(repository_directory) - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - if storage_backend is None: - storage_backend = sslib_storage.FilesystemBackend() - - repository_directory = os.path.abspath(repository_directory) - metadata_directory = os.path.join(repository_directory, - METADATA_STAGED_DIRECTORY_NAME) - targets_directory = os.path.join(repository_directory, TARGETS_DIRECTORY_NAME) - - # The Repository() object loaded (i.e., containing all the metadata roles - # found) and returned. - repository = Repository(repository_directory, metadata_directory, - targets_directory, storage_backend, repository_name, use_timestamp_length, - use_timestamp_hashes, use_snapshot_length, use_snapshot_hashes) - - filenames = repo_lib.get_top_level_metadata_filenames(metadata_directory) - - # The Root file is always available without a version number (a consistent - # snapshot) attached to the filename. Store the 'consistent_snapshot' value - # and read the loaded Root file so that other metadata files may be located. - consistent_snapshot = False - - # Load the metadata of the top-level roles (i.e., Root, Timestamp, Targets, - # and Snapshot). - repository, consistent_snapshot = repo_lib._load_top_level_metadata(repository, - filenames, repository_name) - - delegated_roles_filenames = repo_lib.get_delegated_roles_metadata_filenames( - metadata_directory, consistent_snapshot, storage_backend) - - # Load the delegated targets metadata and their fileinfo. - # The delegated targets roles form a tree/graph which is traversed in a - # breadth-first-search manner starting from 'targets' in order to correctly - # load the delegations hierarchy. - parent_targets_object = repository.targets - - # Keep the next delegations to be loaded in a deque structure which - # has the properties of a list but is designed to have fast appends - # and pops from both ends - delegations = deque() - # A set used to keep the already loaded delegations and avoid an infinite - # loop in case of cycles in the delegations graph - loaded_delegations = set() - - # Top-level roles are already loaded, fetch targets and get its delegations. - # Store the delegations in the form of delegated-delegating role tuples, - # starting from the top-level targets: - # [('role1', 'targets'), ('role2', 'targets'), ... ] - roleinfo = roledb.get_roleinfo('targets', repository_name) - for role in roleinfo['delegations']['roles']: - delegations.append((role, 'targets')) - - # Traverse the graph by appending the next delegation to the deque and - # 'pop'-ing and loading the left-most element. 
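-  # For example, if 'targets' delegates to 'a' and 'b', and 'a' delegates
-  # to 'c', the tuples are processed in the order ('a', 'targets'),
-  # ('b', 'targets'), ('c', 'a'), i.e. breadth-first.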
-  while delegations:
-    delegation_info, delegating_role = delegations.popleft()
-
-    rolename = delegation_info['name']
-    if (rolename, delegating_role) in loaded_delegations:
-      logger.warning('Detected cycle in the delegation graph: ' +
-          repr(delegating_role) + ' -> ' +
-          repr(rolename) +
-          ' is reached more than once.')
-      continue
-
-    # Instead of adding only rolename to the set, store the already loaded
-    # delegated-delegating role tuples.  This way a delegated role is added
-    # to each of its delegating roles but when the role is reached twice
-    # from the same delegating role an infinite loop is avoided.
-    loaded_delegations.add((rolename, delegating_role))
-
-    metadata_path = delegated_roles_filenames[rolename]
-    signable = None
-
-    try:
-      signable = sslib_util.load_json_file(metadata_path)
-
-    except (sslib_exceptions.Error, ValueError, IOError):
-      logger.debug('Tried to load metadata with invalid JSON'
-          ' content: ' + repr(metadata_path))
-      continue
-
-    metadata_object = signable['signed']
-
-    # Extract the metadata attributes of 'metadata_object' and update its
-    # corresponding roleinfo.
-    roleinfo = {'name': rolename,
-                'signing_keyids': [],
-                'signatures': [],
-                'partial_loaded': False
-               }
-
-    roleinfo['signatures'].extend(signable['signatures'])
-    roleinfo['version'] = metadata_object['version']
-    roleinfo['expires'] = metadata_object['expires']
-    roleinfo['paths'] = metadata_object['targets']
-    roleinfo['delegations'] = metadata_object['delegations']
-    roleinfo['threshold'] = delegation_info['threshold']
-    roleinfo['keyids'] = delegation_info['keyids']
-
-    # Generate the Targets object of the delegated role,
-    # add it to the top-level 'targets' object and to its
-    # direct delegating role object.
-    new_targets_object = Targets(targets_directory, rolename,
-        roleinfo, parent_targets_object=parent_targets_object,
-        repository_name=repository_name)
-
-    parent_targets_object.add_delegated_role(rolename,
-        new_targets_object)
-    if delegating_role != 'targets':
-      parent_targets_object(delegating_role).add_delegated_role(rolename,
-          new_targets_object)
-
-    # Append the next level delegations to the deque:
-    # the 'delegated' role becomes the 'delegating'
-    for delegation in metadata_object['delegations']['roles']:
-      delegations.append((delegation, rolename))
-
-    # Extract the keys specified in the delegations field of the Targets
-    # role.  Add 'key_object' to the list of recognized keys.  Keys may be
-    # shared, so do not raise an exception if 'key_object' has already been
-    # added.  In contrast to the methods that may add duplicate keys, do not
-    # log a warning here as there may be many such duplicate key warnings.
-    # The repository maintainer should have also been made aware of the
-    # duplicate key when it was added.
-    for key_metadata in metadata_object['delegations']['keys'].values():
-
-      # The repo may have used hashing algorithms for the generated keyids
-      # that don't match the client's set of hash algorithms.  Make sure
-      # to only use the repo's selected hashing algorithms.
-      key_object, keyids = format_metadata_to_key(key_metadata,
-          keyid_hash_algorithms=key_metadata['keyid_hash_algorithms'])
-      try:
-        for keyid in keyids: # pragma: no branch
-          key_object['keyid'] = keyid
-          keydb.add_key(key_object, keyid=None,
-              repository_name=repository_name)
-
-      except exceptions.KeyAlreadyExistsError:
-        pass
-
-  return repository
-
-
-
-
-
-def dump_signable_metadata(metadata_filepath):
-  """
-
-    Dump the "signed" portion of metadata.
It is the portion that is normally - signed by the repository tool, which is in canonicalized JSON form. - This function is intended for external tools that wish to independently - sign metadata. - - The normal workflow for this use case is to: - (1) call dump_signable_metadata(metadata_filepath) - (2) sign the output with an external tool - (3) call append_signature(signature, metadata_filepath) - - - metadata_filepath: - The path to the metadata file. For example, - repository/metadata/root.json. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - IOError, if 'metadata_filepath' cannot be opened. - - - None. - - - Metadata content that is normally signed by the repository tool (i.e., the - "signed" portion of a metadata file). - """ - - # Are the argument properly formatted? - sslib_formats.PATH_SCHEMA.check_match(metadata_filepath) - - signable = sslib_util.load_json_file(metadata_filepath) - - # Is 'signable' a valid metadata file? - formats.SIGNABLE_SCHEMA.check_match(signable) - - return sslib_formats.encode_canonical(signable['signed']) - - - - - -def append_signature(signature, metadata_filepath): - """ - - Append 'signature' to the metadata at 'metadata_filepath'. The signature - is assumed to be valid, and externally generated by signing the output of - dump_signable_metadata(metadata_filepath). This function is intended for - external tools that wish to independently sign metadata. - - The normal workflow for this use case is to: - (1) call dump_signable_metadata(metadata_filepath) - (2) sign the output with an external tool - (3) call append_signature(signature, metadata_filepath) - - - signature: - A TUF signature structure that contains the KEYID, signing method, and - the signature. It conforms to securesystemslib.formats.SIGNATURE_SCHEMA. - - For example: - - { - "keyid": "a0a0f0cf08...", - "method": "ed25519", - "sig": "14f6e6566ec13..." - } - - metadata_filepath: - The path to the metadata file. For example, - repository/metadata/root.json. - - - securesystemslib.exceptions.FormatError, if the arguments are improperly - formatted. - - - 'metadata_filepath' is overwritten. - - - None. - """ - - # Are the arguments properly formatted? - sslib_formats.SIGNATURE_SCHEMA.check_match(signature) - sslib_formats.PATH_SCHEMA.check_match(metadata_filepath) - - signable = sslib_util.load_json_file(metadata_filepath) - - # Is 'signable' a valid metadata file? - formats.SIGNABLE_SCHEMA.check_match(signable) - - signable['signatures'].append(signature) - - file_object = tempfile.TemporaryFile() - - written_metadata_content = json.dumps(signable, indent=1, - separators=(',', ': '), sort_keys=True).encode('utf-8') - - file_object.write(written_metadata_content) - sslib_util.persist_temp_file(file_object, metadata_filepath) - - - - - -if __name__ == '__main__': - # The interactive sessions of the documentation strings can - # be tested by running repository_tool.py as a standalone module: - # $ python3 repository_tool.py. - import doctest - doctest.testmod() diff --git a/tuf/requests_fetcher.py b/tuf/requests_fetcher.py deleted file mode 100644 index 1692ebee7c..0000000000 --- a/tuf/requests_fetcher.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2021, New York University and the TUF contributors -# SPDX-License-Identifier: MIT OR Apache-2.0 - -"""Provides an implementation of FetcherInterface using the Requests HTTP - library. 
-
-"""
-
-# Imports
-import requests
-import logging
-import time
-from urllib import parse
-from urllib3.exceptions import ReadTimeoutError
-
-import tuf
-from tuf import exceptions
-from tuf import settings
-
-from tuf.client.fetcher import FetcherInterface
-
-# Globals
-logger = logging.getLogger(__name__)
-
-# Classes
-class RequestsFetcher(FetcherInterface):
-  """A concrete implementation of FetcherInterface based on the Requests
-  library.
-
-  Attributes:
-    _sessions: A dictionary of Requests.Session objects storing a separate
-    session per scheme+hostname combination.
-  """
-
-  def __init__(self):
-    # From http://docs.python-requests.org/en/master/user/advanced/#session-objects:
-    #
-    # "The Session object allows you to persist certain parameters across
-    # requests. It also persists cookies across all requests made from the
-    # Session instance, and will use urllib3's connection pooling. So if you're
-    # making several requests to the same host, the underlying TCP connection
-    # will be reused, which can result in a significant performance increase
-    # (see HTTP persistent connection)."
-    #
-    # NOTE: We use a separate requests.Session per scheme+hostname combination,
-    # in order to reuse connections to the same hostname to improve efficiency,
-    # but avoid sharing state between different scheme+hostname combinations to
-    # minimize subtle security issues. Some cookies may not be HTTP-safe.
-    self._sessions = {}
-
-
-  def fetch(self, url, required_length):
-    """Fetches the contents of HTTP/HTTPS url from a remote server.
-
-    Ensures the length of the downloaded data is up to 'required_length'.
-
-    Arguments:
-      url: A URL string that represents a file location.
-      required_length: An integer value representing the file length in bytes.
-
-    Raises:
-      tuf.exceptions.SlowRetrievalError: A timeout occurs while receiving data.
-      tuf.exceptions.FetcherHTTPError: An HTTP error code is received.
-
-    Returns:
-      A bytes iterator
-    """
-    # Get a customized session for each new scheme+hostname combination.
-    session = self._get_session(url)
-
-    # Get the requests.Response object for this URL.
-    #
-    # Defer downloading the response body with stream=True.
-    # Always set the timeout. This timeout value is interpreted by requests as:
-    #  - connect timeout (max delay before first byte is received)
-    #  - read (gap) timeout (max delay between bytes received)
-    response = session.get(url, stream=True,
-        timeout=settings.SOCKET_TIMEOUT)
-    # Check response status.
-    try:
-      response.raise_for_status()
-    except requests.HTTPError as e:
-      response.close()
-      status = e.response.status_code
-      raise exceptions.FetcherHTTPError(str(e), status)
-
-
-    # Define a generator function to be returned by fetch. This way the caller
-    # of fetch can differentiate between connection and actual data download
-    # and measure download times accordingly.
-    def chunks():
-      try:
-        bytes_received = 0
-        while True:
-          # We download a fixed chunk of data in every round. This is so that we
-          # can defend against slow retrieval attacks. Furthermore, we do not
-          # wish to download an extremely large file in one shot.
-          # Before beginning the round, sleep (if set) for a short amount of
-          # time so that the CPU is not hogged in the while loop.
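-          # (A falsy settings.SLEEP_BEFORE_ROUND, e.g. None or 0, disables
-          # this sleep.)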
-          if settings.SLEEP_BEFORE_ROUND:
-            time.sleep(settings.SLEEP_BEFORE_ROUND)
-
-          read_amount = min(
-              settings.CHUNK_SIZE, required_length - bytes_received)
-
-          # NOTE: This may not handle some servers adding a Content-Encoding
-          # header, which may cause urllib3 to misbehave:
-          # https://github.com/pypa/pip/blob/404838abcca467648180b358598c597b74d568c9/src/pip/_internal/download.py#L547-L582
-          data = response.raw.read(read_amount)
-          bytes_received += len(data)
-
-          # We might have no more data to read. Check number of bytes downloaded.
-          if not data:
-            logger.debug('Downloaded ' + repr(bytes_received) + '/' +
-                repr(required_length) + ' bytes.')
-
-            # Finally, we signal that the download is complete.
-            break
-
-          yield data
-
-          if bytes_received >= required_length:
-            break
-
-      except ReadTimeoutError as e:
-        raise exceptions.SlowRetrievalError(str(e))
-
-      finally:
-        response.close()
-
-    return chunks()
-
-
-
-  def _get_session(self, url):
-    """Returns a different customized requests.Session per scheme+hostname
-    combination.
-    """
-    # Use a different requests.Session per scheme+hostname combination, to
-    # reuse connections while minimizing subtle security issues.
-    parsed_url = parse.urlparse(url)
-
-    if not parsed_url.scheme or not parsed_url.hostname:
-      raise exceptions.URLParsingError(
-          'Could not get scheme and hostname from URL: ' + url)
-
-    session_index = parsed_url.scheme + '+' + parsed_url.hostname
-
-    logger.debug('url: ' + url)
-    logger.debug('session index: ' + session_index)
-
-    session = self._sessions.get(session_index)
-
-    if not session:
-      session = requests.Session()
-      self._sessions[session_index] = session
-
-      # Attach some default headers to every Session.
-      requests_user_agent = session.headers['User-Agent']
-      # Follows the RFC: https://tools.ietf.org/html/rfc7231#section-5.5.3
-      tuf_user_agent = 'tuf/' + tuf.__version__ + ' ' + requests_user_agent
-      session.headers.update({
-          # Tell the server not to compress or modify anything.
-          # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding#Directives
-          'Accept-Encoding': 'identity',
-          # The TUF user agent.
-          'User-Agent': tuf_user_agent})
-
-      logger.debug('Made new session for ' + session_index)
-
-    else:
-      logger.debug('Reusing session for ' + session_index)
-
-    return session
diff --git a/tuf/roledb.py b/tuf/roledb.py
deleted file mode 100755
index 02c7b801eb..0000000000
--- a/tuf/roledb.py
+++ /dev/null
@@ -1,1013 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-
-  roledb.py
-
-
-  Vladimir Diaz
-
-
-  March 21, 2012.  Based on a previous version of this module by Geremy Condra.
-
-
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-
-  Represent a collection of roles and their organization.  The caller may
-  create a collection of roles from those found in the 'root.json' metadata
-  file by calling 'create_roledb_from_root_metadata()', or individually by
-  adding roles with 'add_role()'.  There are many supplemental functions
-  included here that yield useful information about the roles contained in the
-  database, such as extracting all the parent rolenames for a specified
-  rolename, deleting all the delegated roles, retrieving role paths, etc.  The
-  Update Framework process maintains a role database for each repository.
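-
-  A typical flow, sketched below (a hedged example; 'root_metadata' is
-  assumed to be a dict conformant to 'tuf.formats.ROOT_SCHEMA' loaded
-  elsewhere):
-
-    create_roledb_from_root_metadata(root_metadata, 'my_repo')
-    roleinfo = get_roleinfo('targets', 'my_repo')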
-
-  The role database is a dictionary conformant to
-  'tuf.formats.ROLEDICT_SCHEMA' and has the form:
-
-  {'repository_name': {
-      'rolename': {'keyids': ['34345df32093bd12...'],
-                   'threshold': 1,
-                   'signatures': ['abcd3452...'],
-                   'paths': ['role.json'],
-                   'path_hash_prefixes': ['ab34df13'],
-                   'delegations': {'keys': {}, 'roles': {}}}}}
-
-  The 'name', 'paths', 'path_hash_prefixes', and 'delegations' dict keys are
-  optional.
-"""
-
-import logging
-import copy
-
-import securesystemslib # pylint: disable=unused-import
-from securesystemslib import exceptions as sslib_exceptions
-from securesystemslib import formats as sslib_formats
-
-from tuf import exceptions
-from tuf import formats
-
-# See 'tuf.log' to learn how logging is handled in TUF.
-logger = logging.getLogger(__name__)
-
-# The role database.
-_roledb_dict = {}
-_roledb_dict['default'] = {}
-
-# A dictionary (where the keys are repository names) containing a set of roles
-# that have been modified (e.g., via update_roleinfo()) and should be written
-# to disk.
-_dirty_roles = {}
-_dirty_roles['default'] = set()
-
-
-TOP_LEVEL_ROLES = ['root', 'targets', 'snapshot', 'timestamp']
-
-
-def create_roledb_from_root_metadata(root_metadata, repository_name='default'):
-  """
-
-  Create a role database containing all of the unique roles found in
-  'root_metadata'.
-
-
-  root_metadata:
-    A dictionary conformant to 'tuf.formats.ROOT_SCHEMA'. The roles found in
-    the 'roles' field of 'root_metadata' are needed by this function.
-
-  repository_name:
-    The name of the repository to store 'root_metadata'. If not supplied,
-    'rolename' is added to the 'default' repository.
-
-
-  securesystemslib.exceptions.FormatError, if 'root_metadata' does not have
-  the correct object format.
-
-  securesystemslib.exceptions.Error, if one of the roles found in
-  'root_metadata' contains an invalid delegation (i.e., a nonexistent parent
-  role).
-
-
-  Calls add_role(). The old role database for 'repository_name' is replaced.
-
-
-  None.
-  """
-
-  # Does 'root_metadata' have the correct object format?
-  # This check will ensure 'root_metadata' has the appropriate number of
-  # objects and object types, and that all dict keys are properly named.
-  # Raises securesystemslib.exceptions.FormatError.
-  formats.ROOT_SCHEMA.check_match(root_metadata)
-
-  # Is 'repository_name' formatted correctly?
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # Clear the role database.
-  if repository_name in _roledb_dict:
-    _roledb_dict[repository_name].clear()
-
-  # Ensure _roledb_dict and _dirty_roles contain an entry for
-  # 'repository_name' so that adding the newly created roleinfo succeeds.
-  _roledb_dict[repository_name] = {}
-  _dirty_roles[repository_name] = set()
-
-  # Do not modify the contents of the 'root_metadata' argument.
-  root_metadata = copy.deepcopy(root_metadata)
-
-  # Iterate the roles found in 'root_metadata' and add them to '_roledb_dict'.
-  # Duplicates are avoided.
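-  # (Illustrative, not in the original: the mapping iterated below typically
-  # has the form
-  #   {'root':      {'keyids': ['34345df3...'], 'threshold': 1},
-  #    'targets':   {'keyids': ['ab23dfc3...'], 'threshold': 1},
-  #    'snapshot':  {'keyids': ['a324fcd9...'], 'threshold': 1},
-  #    'timestamp': {'keyids': ['ef0123ab...'], 'threshold': 1}},
-  # so the loop sees one (rolename, roleinfo) pair per top-level role.)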
-  for rolename, roleinfo in root_metadata['roles'].items():
-    if rolename == 'root':
-      roleinfo['version'] = root_metadata['version']
-      roleinfo['expires'] = root_metadata['expires']
-      roleinfo['previous_keyids'] = roleinfo['keyids']
-      roleinfo['previous_threshold'] = roleinfo['threshold']
-
-    roleinfo['signatures'] = []
-    roleinfo['signing_keyids'] = []
-    roleinfo['partial_loaded'] = False
-
-    if rolename.startswith('targets'):
-      roleinfo['paths'] = {}
-      roleinfo['delegations'] = {'keys': {}, 'roles': []}
-
-    add_role(rolename, roleinfo, repository_name)
-
-
-
-
-
-def create_roledb(repository_name):
-  """
-
-  Create a roledb for the repository named 'repository_name'. This function
-  is intended for creation of a non-default roledb.
-
-
-  repository_name:
-    The name of the repository to create. An empty roledb is created, and
-    roles may be added via add_role(rolename, roleinfo, repository_name) or
-    create_roledb_from_root_metadata(root_metadata, repository_name).
-
-
-  securesystemslib.exceptions.FormatError, if 'repository_name' is improperly
-  formatted.
-
-  securesystemslib.exceptions.InvalidNameError, if 'repository_name' already
-  exists in the roledb.
-
-
-  None.
-
-
-  None.
-  """
-
-  # Is 'repository_name' properly formatted? If not, raise
-  # 'securesystemslib.exceptions.FormatError'.
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  if repository_name in _roledb_dict or repository_name in _dirty_roles:
-    raise sslib_exceptions.InvalidNameError('Repository name'
-        ' already exists: ' + repr(repository_name))
-
-  _roledb_dict[repository_name] = {}
-  _dirty_roles[repository_name] = set()
-
-
-
-
-
-def remove_roledb(repository_name):
-  """
-
-  Remove the roledb belonging to 'repository_name'.
-
-
-  repository_name:
-    The name of the repository to remove. 'repository_name' cannot be
-    'default' because the default repository is expected to always exist.
-
-
-  securesystemslib.exceptions.FormatError, if 'repository_name' is improperly
-  formatted.
-
-  securesystemslib.exceptions.InvalidNameError, if 'repository_name' is the
-  'default' repository name. The 'default' repository name should always
-  exist.
-
-
-  None.
-
-
-  None.
-  """
-
-  # Is 'repository_name' properly formatted? If not, raise
-  # 'securesystemslib.exceptions.FormatError'.
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
-    logger.warning('Repository name does not exist:'
-        ' ' + repr(repository_name))
-    return
-
-  if repository_name == 'default':
-    raise sslib_exceptions.InvalidNameError('Cannot remove the'
-        ' default repository: ' + repr(repository_name))
-
-  del _roledb_dict[repository_name]
-  del _dirty_roles[repository_name]
-
-
-
-def add_role(rolename, roleinfo, repository_name='default'):
-  """
-
-  Add to the role database the 'roleinfo' associated with 'rolename'.
-
-
-  rolename:
-    An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
-    (e.g., 'root', 'snapshot', 'timestamp').
-
-  roleinfo:
-    An object representing the role associated with 'rolename', conformant to
-    ROLEDB_SCHEMA. 'roleinfo' has the form:
-    {'keyids': ['34345df32093bd12...'],
-     'threshold': 1,
-     'signatures': ['ab23dfc32'],
-     'paths': ['path/to/target1', 'path/to/target2', ...],
-     'path_hash_prefixes': ['a324fcd...', ...],
-     'delegations': {'keys': {}, 'roles': []}}
-
-    The 'paths', 'path_hash_prefixes', and 'delegations' dict keys are
-    optional.
-
-    The 'targets' role has an additional 'paths' key.
Its value is a list of - strings representing the path of the target file(s). - - repository_name: - The name of the repository to store 'rolename'. If not supplied, - 'rolename' is added to the 'default' repository. - - - securesystemslib.exceptions.FormatError, if 'rolename' or 'roleinfo' does - not have the correct object format. - - securesystemslib.exceptions.RoleAlreadyExistsError, if 'rolename' has - already been added. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is improperly - formatted, or 'repository_name' does not exist. - - - The role database is modified. - - - None. - """ - - # Does 'rolename' have the correct object format? - # This check will ensure 'rolename' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - formats.ROLENAME_SCHEMA.check_match(rolename) - - # Does 'roleinfo' have the correct object format? - formats.ROLEDB_SCHEMA.check_match(roleinfo) - - # Is 'repository_name' correctly formatted? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.InvalidNameError. - _validate_rolename(rolename) - - if repository_name not in _roledb_dict: - raise sslib_exceptions.InvalidNameError('Repository name does not exist: ' + repository_name) - - if rolename in _roledb_dict[repository_name]: - raise exceptions.RoleAlreadyExistsError('Role already exists: ' + rolename) - - _roledb_dict[repository_name][rolename] = copy.deepcopy(roleinfo) - - - - - -def update_roleinfo(rolename, roleinfo, mark_role_as_dirty=True, repository_name='default'): - """ - - Modify 'rolename's _roledb_dict entry to include the new 'roleinfo'. - 'rolename' is also added to the _dirty_roles set. Roles added to - '_dirty_roles' are marked as modified and can be used by the repository - tools to determine which roles need to be written to disk. - - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - roleinfo: - An object representing the role associated with 'rolename', conformant to - ROLEDB_SCHEMA. 'roleinfo' has the form: - {'name': 'role_name', - 'keyids': ['34345df32093bd12...'], - 'threshold': 1, - 'paths': ['path/to/target1', 'path/to/target2', ...], - 'path_hash_prefixes': ['a324fcd...', ...]} - - The 'name', 'paths', and 'path_hash_prefixes' dict keys are optional. - - The 'target' role has an additional 'paths' key. Its value is a list of - strings representing the path of the target file(s). - - mark_role_as_dirty: - A boolean indicating whether the updated 'roleinfo' for 'rolename' should - be marked as dirty. The caller might not want to mark 'rolename' as - dirty if it is loading metadata from disk and only wants to populate - roledb.py. Likewise, add_role() would support a similar boolean to allow - the repository tools to successfully load roles via load_repository() - without needing to mark these roles as dirty (default behavior). - - repository_name: - The name of the repository to update the roleinfo of 'rolename'. If not - supplied, the 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if 'rolename' or 'roleinfo' does - not have the correct object format. - - tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found - in the role database. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is improperly - formatted, or 'repository_name' does not exist in the role database. - - - The role database is modified. - - - None. 
-  """
-
-  # Do the arguments have the correct object format?
-  # This check will ensure arguments have the appropriate number of objects
-  # and object types, and that all dict keys are properly named.
-  formats.ROLENAME_SCHEMA.check_match(rolename)
-  sslib_formats.BOOLEAN_SCHEMA.check_match(mark_role_as_dirty)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # Does 'roleinfo' have the correct object format?
-  formats.ROLEDB_SCHEMA.check_match(roleinfo)
-
-  # Raises securesystemslib.exceptions.InvalidNameError.
-  _validate_rolename(rolename)
-
-  if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
-    raise sslib_exceptions.InvalidNameError('Repository name does not'
-        ' exist: ' + repository_name)
-
-  if rolename not in _roledb_dict[repository_name]:
-    raise exceptions.UnknownRoleError('Role does not exist: ' + rolename)
-
-  # Update the global _roledb_dict and _dirty_roles structures so that
-  # the latest 'roleinfo' is available to other modules, and the repository
-  # tools know which roles should be saved to disk.
-  _roledb_dict[repository_name][rolename] = copy.deepcopy(roleinfo)
-
-  if mark_role_as_dirty:
-    _dirty_roles[repository_name].add(rolename)
-
-
-
-
-
-def get_dirty_roles(repository_name='default'):
-  """
-
-  A function that returns a list of the roles that have been modified. Tools
-  that write metadata to disk can use the list returned to determine which
-  roles should be written.
-
-
-  repository_name:
-    The name of the repository to get the dirty roles. If not supplied, the
-    'default' repository is searched.
-
-
-  securesystemslib.exceptions.FormatError, if 'repository_name' is improperly
-  formatted.
-
-  securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not
-  exist in the role database.
-
-
-  None.
-
-
-  A sorted list of the roles that have been modified.
-  """
-
-  # Does 'repository_name' have the correct format? Raise
-  # 'securesystemslib.exceptions.FormatError' if not.
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
-    raise sslib_exceptions.InvalidNameError('Repository name does not'
-        ' exist: ' + repository_name)
-
-  return sorted(list(_dirty_roles[repository_name]))
-
-
-
-def mark_dirty(roles, repository_name='default'):
-  """
-
-  Mark the list of 'roles' as dirty.
-
-
-  repository_name:
-    The name of the repository to get the dirty roles. If not supplied, the
-    'default' repository is searched.
-
-  roles:
-    A list of roles that should be marked as dirty.
-
-
-  securesystemslib.exceptions.FormatError, if the arguments are improperly
-  formatted.
-
-  securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not
-  exist in the role database.
-
-
-  None.
-
-
-  None.
-  """
-
-  # Are the arguments properly formatted? If not, raise
-  # securesystemslib.exceptions.FormatError.
-  sslib_formats.NAMES_SCHEMA.check_match(roles)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
-    raise sslib_exceptions.InvalidNameError('Repository name does not'
-        ' exist: ' + repository_name)
-
-  _dirty_roles[repository_name].update(roles)
-
-
-
-def unmark_dirty(roles, repository_name='default'):
-  """
-
-  No longer mark the roles in 'roles' as dirty.
-
-
-  repository_name:
-    The name of the repository to get the dirty roles. If not supplied, the
-    'default' repository is searched.
-
-  roles:
-    A list of roles that should no longer be marked as dirty.
-
-
-  securesystemslib.exceptions.FormatError, if the arguments are improperly
-  formatted.
-
-  securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not
-  exist in the role database.
-
-
-  None.
-
-
-  None.
-  """
-
-  # Are the arguments properly formatted? If not, raise
-  # securesystemslib.exceptions.FormatError.
-  sslib_formats.NAMES_SCHEMA.check_match(roles)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
-    raise sslib_exceptions.InvalidNameError('Repository name does not'
-        ' exist: ' + repository_name)
-
-  for role in roles:
-    try:
-      _dirty_roles[repository_name].remove(role)
-
-    except (KeyError, ValueError):
-      logger.debug(repr(role) + ' is not dirty.')
-
-
-
-def role_exists(rolename, repository_name='default'):
-  """
-
-  Verify whether 'rolename' is stored in the role database.
-
-
-  rolename:
-    An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
-    (e.g., 'root', 'snapshot', 'timestamp').
-
-  repository_name:
-    The name of the repository to check whether 'rolename' exists. If not
-    supplied, the 'default' repository is searched.
-
-
-  securesystemslib.exceptions.FormatError, if 'rolename' does not have the
-  correct object format.
-
-  securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly
-  formatted, or 'repository_name' does not exist in the role database.
-
-
-  None.
-
-
-  Boolean. True if 'rolename' is found in the role database, False otherwise.
-  """
-
-  # Raise securesystemslib.exceptions.FormatError,
-  # securesystemslib.exceptions.InvalidNameError if the arguments are invalid.
-  # We do not intercept securesystemslib.exceptions.FormatError
-  # or securesystemslib.exceptions.InvalidNameError exceptions.
-  try:
-    _check_rolename(rolename, repository_name)
-
-  except exceptions.UnknownRoleError:
-    return False
-
-  return True
-
-
-
-
-
-def remove_role(rolename, repository_name='default'):
-  """
-
-  Remove 'rolename'. Delegated roles were previously removed as well, but
-  this step is no longer supported since the repository can resemble a graph
-  of delegations. That is, we shouldn't delete rolename's delegations because
-  another role may have a valid delegation to it, whereas before the only
-  valid delegation to it had to be from 'rolename' (repository resembles a
-  tree of delegations).
-
-
-  rolename:
-    An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
-    (e.g., 'root', 'snapshot', 'timestamp').
-
-  repository_name:
-    The name of the repository to remove the role. If not supplied, the
-    'default' repository is searched.
-
-
-  securesystemslib.exceptions.FormatError, if 'rolename' does not have the
-  correct object format.
-
-  tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found in the role
-  database.
-
-  securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly
-  formatted, or 'repository_name' does not exist in the role database.
-
-
-  A role may be removed from the role database.
-
-
-  None.
-  """
-
-  # Does 'repository_name' have the correct format? Raise
-  # 'securesystemslib.exceptions.FormatError' if it is improperly formatted.
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # Raises securesystemslib.exceptions.FormatError,
-  # tuf.exceptions.UnknownRoleError, or
-  # securesystemslib.exceptions.InvalidNameError.
-  _check_rolename(rolename, repository_name)
-
-  # 'rolename' was verified to exist in _check_rolename().
-  # Remove 'rolename' now.
-  del _roledb_dict[repository_name][rolename]
-
-
-
-
-
-def get_rolenames(repository_name='default'):
-  """
-
-  Return a list of the rolenames found in the role database.
-
-
-  repository_name:
-    The name of the repository to get the rolenames. If not supplied, the
-    'default' repository is searched.
-
-
-  securesystemslib.exceptions.FormatError, if 'repository_name' is improperly
-  formatted.
-
-  securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not
-  exist in the role database.
-
-
-  None.
-
-
-  A list of rolenames.
-  """
-
-  # Does 'repository_name' have the correct format? Raise
-  # 'securesystemslib.exceptions.FormatError' if it is improperly formatted.
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
-    raise sslib_exceptions.InvalidNameError('Repository name does not'
-        ' exist: ' + repository_name)
-
-  return list(_roledb_dict[repository_name].keys())
-
-
-
-
-
-def get_roleinfo(rolename, repository_name='default'):
-  """
-
-  Return the roleinfo of 'rolename'.
-
-  {'keyids': ['34345df32093bd12...'],
-   'threshold': 1,
-   'signatures': ['ab453bdf...', ...],
-   'paths': ['path/to/target1', 'path/to/target2', ...],
-   'path_hash_prefixes': ['a324fcd...', ...],
-   'delegations': {'keys': {}, 'roles': []}}
-
-  The 'signatures', 'paths', 'path_hash_prefixes', and 'delegations' dict keys
-  are optional.
-
-
-  rolename:
-    An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
-    (e.g., 'root', 'snapshot', 'timestamp').
-
-  repository_name:
-    The name of the repository to get the role info. If not supplied, the
-    'default' repository is searched.
-
-
-  securesystemslib.exceptions.FormatError, if the arguments are improperly
-  formatted.
-
-  tuf.exceptions.UnknownRoleError, if 'rolename' does not exist.
-
-  securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly
-  formatted, or 'repository_name' does not exist in the role database.
-
-
-  None.
-
-
-  The roleinfo of 'rolename'.
-  """
-
-  # Is 'repository_name' properly formatted? If not, raise
-  # 'securesystemslib.exceptions.FormatError'.
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # Raises securesystemslib.exceptions.FormatError,
-  # tuf.exceptions.UnknownRoleError, or
-  # securesystemslib.exceptions.InvalidNameError.
-  _check_rolename(rolename, repository_name)
-
-  return copy.deepcopy(_roledb_dict[repository_name][rolename])
-
-
-
-
-
-def get_role_keyids(rolename, repository_name='default'):
-  """
-
-  Return a list of the keyids associated with 'rolename'. Keyids are used as
-  identifiers for keys (e.g., rsa key). A list of keyids is associated with
-  each rolename. Signing a metadata file, such as 'root.json' (Root role),
-  involves signing or verifying the file with a list of keys identified by
-  keyid.
-
-
-  rolename:
-    An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
-    (e.g., 'root', 'snapshot', 'timestamp').
-
-  repository_name:
-    The name of the repository to get the role keyids. If not supplied, the
-    'default' repository is searched.
-
-
-  securesystemslib.exceptions.FormatError, if the arguments do not have the
-  correct object format.
-
-  tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found in the role
-  database.
- - securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly - formatted, or 'repository_name' does not exist in the role database. - - - None. - - - A list of keyids. - """ - - # Raise 'securesystemslib.exceptions.FormatError' if 'repository_name' is - # improperly formatted. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.FormatError, - # tuf.exceptions.UnknownRoleError, or - # securesystemslib.exceptions.InvalidNameError. - _check_rolename(rolename, repository_name) - - roleinfo = _roledb_dict[repository_name][rolename] - - return roleinfo['keyids'] - - - - - -def get_role_threshold(rolename, repository_name='default'): - """ - - Return the threshold value of the role associated with 'rolename'. - - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - repository_name: - The name of the repository to get the role threshold. If not supplied, - the 'default' repository is searched. - - - - securesystemslib.exceptions.FormatError, if the arguments do not have the - correct object format. - - tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found - in the role database. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly - formatted, or 'repository_name' does not exist in the role database. - - - None. - - - A threshold integer value. - """ - - # Raise 'securesystemslib.exceptions.FormatError' if 'repository_name' is - # improperly formatted. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.FormatError, - # tuf.exceptions.UnknownRoleError, or - # securesystemslib.exceptions.InvalidNameError. - _check_rolename(rolename, repository_name) - - roleinfo = _roledb_dict[repository_name][rolename] - - return roleinfo['threshold'] - - - - - -def get_role_paths(rolename, repository_name='default'): - """ - - Return the paths of the role associated with 'rolename'. - - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - repository_name: - The name of the repository to get the role paths. If not supplied, the - 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the - correct object format. - - tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found - in the role database. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly - formatted, or 'repository_name' does not exist in the role database. - - - None. - - - A list of paths. - """ - - # Raise 'securesystemslib.exceptions.FormatError' if 'repository_name' is - # improperly formatted. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.FormatError, - # tuf.exceptions.UnknownRoleError, or - # securesystemslib.exceptions.InvalidNameError. - _check_rolename(rolename, repository_name) - - roleinfo = _roledb_dict[repository_name][rolename] - - # Paths won't exist for non-target roles. - try: - return roleinfo['paths'] - - except KeyError: - return dict() - - - - - -def get_delegated_rolenames(rolename, repository_name='default'): - """ - - Return the delegations of a role. If 'rolename' is 'tuf' and the role - database contains ['django', 'requests', 'cryptography'], in 'tuf's - delegations field, return ['django', 'requests', 'cryptography']. 
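-
-  (Illustrative, not part of the original docstring: each entry of
-  roleinfo['delegations']['roles'] is a dict such as
-  {'name': 'django', 'keyids': ['...'], 'threshold': 1}, and this function
-  simply collects the 'name' value of every entry.)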
- - - rolename: - An object representing the role's name, conformant to 'ROLENAME_SCHEMA' - (e.g., 'root', 'snapshot', 'timestamp'). - - repository_name: - The name of the repository to get the delegated rolenames. If not - supplied, the 'default' repository is searched. - - - securesystemslib.exceptions.FormatError, if the arguments do not have the - correct object format. - - tuf.exceptions.UnknownRoleError, if 'rolename' cannot be found - in the role database. - - securesystemslib.exceptions.InvalidNameError, if 'rolename' is incorrectly - formatted, or 'repository_name' does not exist in the role database. - - - None. - - - A list of rolenames. Note that the rolenames are *NOT* sorted by order of - delegation. - """ - - - # Does 'repository_name' have the correct format? Raise - # 'securesystemslib.exceptions.FormatError' if it does not. - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.FormatError, - # tuf.exceptions.UnknownRoleError, or - # securesystemslib.exceptions.InvalidNameError. - _check_rolename(rolename, repository_name) - - # get_roleinfo() raises a 'securesystemslib.exceptions.InvalidNameError' if - # 'repository_name' does not exist in the role database. - roleinfo = get_roleinfo(rolename, repository_name) - delegated_roles = [] - - for delegated_role in roleinfo['delegations']['roles']: - delegated_roles.append(delegated_role['name']) - - return delegated_roles - - - - - -def clear_roledb(repository_name='default', clear_all=False): - """ - - Reset the roledb database. - - - repository_name: - The name of the repository to clear. If not supplied, the 'default' - repository is cleared. - - clear_all: - Boolean indicating whether to clear the entire roledb. - - - securesystemslib.exceptions.FormatError, if 'repository_name' does not have - the correct format. - - securesystemslib.exceptions.InvalidNameError, if 'repository_name' does not - exist in the role database. - - - None. - - - None. - """ - - # Do the arguments have the correct format? If not, raise - # 'securesystemslib.exceptions.FormatError' - sslib_formats.NAME_SCHEMA.check_match(repository_name) - sslib_formats.BOOLEAN_SCHEMA.check_match(clear_all) - - if repository_name not in _roledb_dict or repository_name not in _dirty_roles: - raise sslib_exceptions.InvalidNameError('Repository name does not' - ' exist: ' + repository_name) - - if clear_all: - _roledb_dict.clear() - _roledb_dict['default'] = {} - _dirty_roles.clear() - _dirty_roles['default'] = set() - return - - _roledb_dict[repository_name] = {} - _dirty_roles[repository_name] = set() - - - - - -def _check_rolename(rolename, repository_name='default'): - """ Raise securesystemslib.exceptions.FormatError if 'rolename' does not match - 'tuf.formats.ROLENAME_SCHEMA', - tuf.exceptions.UnknownRoleError if 'rolename' is not found in the - role database, or securesystemslib.exceptions.InvalidNameError if - 'repository_name' does not exist in the role database. - """ - - # Does 'rolename' have the correct object format? - # This check will ensure 'rolename' has the appropriate number of objects - # and object types, and that all dict keys are properly named. - formats.ROLENAME_SCHEMA.check_match(rolename) - - # Does 'repository_name' have the correct format? - sslib_formats.NAME_SCHEMA.check_match(repository_name) - - # Raises securesystemslib.exceptions.InvalidNameError. 
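-  # (Illustrative, not in the original: _validate_rolename() below rejects
-  # the empty string, names with leading or trailing whitespace, and names
-  # that start or end with '/', e.g. '/role' or 'role/'.)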
-  _validate_rolename(rolename)
-
-  if repository_name not in _roledb_dict or repository_name not in _dirty_roles:
-    raise sslib_exceptions.InvalidNameError('Repository name does not'
-        ' exist: ' + repository_name)
-
-  if rolename not in _roledb_dict[repository_name]:
-    raise exceptions.UnknownRoleError('Role name does not exist: ' + rolename)
-
-
-
-
-
-def _validate_rolename(rolename):
-  """
-  Raise securesystemslib.exceptions.InvalidNameError if 'rolename' is not
-  formatted correctly. It is assumed 'rolename' has been checked against
-  'ROLENAME_SCHEMA' prior to calling this function.
-  """
-
-  if rolename == '':
-    raise sslib_exceptions.InvalidNameError('Rolename must *not* be'
-        ' an empty string.')
-
-  if rolename != rolename.strip():
-    raise sslib_exceptions.InvalidNameError('Invalid rolename.'
-        ' Cannot start or end with whitespace: ' + rolename)
-
-  if rolename.startswith('/') or rolename.endswith('/'):
-    raise sslib_exceptions.InvalidNameError('Invalid rolename.'
-        ' Cannot start or end with a "/": ' + rolename)
diff --git a/tuf/scripts/__init__.py b/tuf/scripts/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/tuf/scripts/client.py b/tuf/scripts/client.py
deleted file mode 100755
index 8f30c53648..0000000000
--- a/tuf/scripts/client.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2018, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-
-  client.py
-
-
-  Vladimir Diaz
-
-
-  September 2012.
-
-
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-
-  Provide a basic TUF client that can update all of the metadata and target
-  files provided by the user-specified repository mirror. Updated files are
-  saved to the 'targets' directory in the current working directory. The
-  repository mirror is specified by the user through the '--repo' command-
-  line option.
-
-  Normally, a software updater integrating TUF will develop their own custom
-  client module by importing 'tuf.client.updater', instantiating the required
-  object, and calling the desired methods to perform an update. This basic
-  client is provided to users who wish to give TUF a quick test run without
-  the hassle of writing client code. This module can also be used by updaters
-  that do not need the customization and only require their clients to
-  perform an update of all the files provided by their repository mirror(s).
-
-  For software updaters that DO require customization, see the
-  'example_client.py' script. The 'example_client.py' script provides an
-  outline of the client code that software updaters may develop and then
-  tailor to their specific software updater or package manager.
-
-  Additional tools for clients running legacy applications will also be made
-  available. These tools will allow secure software updates using The Update
-  Framework without the need to modify the original application.
-
-
-  $ client.py --repo http://localhost:8001
-  $ client.py --repo http://localhost:8001 --verbose 3
-
-
-  --verbose:
-    Set the verbosity level of logging messages. Accepts values 0-5.
-
-    Example:
-      $ client.py --repo http://localhost:8001 --verbose 3 README.txt
-
-  --repo:
-    Set the repository mirror that will be responding to client requests.
-    E.g., 'http://localhost:8001'.
-
-    Example:
-      $ client.py --repo http://localhost:8001 README.txt
-"""
-
-import sys
-import argparse
-import logging
-
-from tuf import exceptions
-from tuf import log
-from tuf import settings
-from tuf.client.updater import Updater
-
-# See 'log.py' to learn how logging is handled in TUF.
-logger = logging.getLogger(__name__)
-
-
-def update_client(parsed_arguments):
-  """
-
-  Perform an update of the metadata and target files located at
-  'repository_mirror'. Target files are saved to the 'targets' directory
-  in the current working directory. The current directory must already
-  include a 'metadata' directory, which in turn must contain the 'current'
-  and 'previous' directories. At a minimum, these two directories require
-  the 'root.json' metadata file.
-
-
-  parsed_arguments:
-    An argparse Namespace object, containing the parsed arguments.
-
-
-  tuf.exceptions.Error, if 'parsed_arguments' is not a Namespace object.
-
-
-  Connects to a repository mirror and updates the local metadata files and
-  any target files. Obsolete, local targets are also removed.
-
-
-  None.
-  """
-
-  if not isinstance(parsed_arguments, argparse.Namespace):
-    raise exceptions.Error('Invalid namespace object.')
-
-  else:
-    logger.debug('We have a valid argparse Namespace object.')
-
-  # Set the local repositories directory containing all of the metadata files.
-  settings.repositories_directory = '.'
-
-  # Set the repository mirrors. This dictionary is needed by the Updater
-  # class of updater.py.
-  repository_mirrors = {'mirror': {'url_prefix': parsed_arguments.repo,
-      'metadata_path': 'metadata', 'targets_path': 'targets'}}
-
-  # Create the repository object using the repository name 'tufrepo'
-  # and the repository mirrors defined above.
-  updater = Updater('tufrepo', repository_mirrors)
-
-  # The local destination directory to save the target files.
-  destination_directory = './tuftargets'
-
-  # Refresh the repository's top-level roles...
-  updater.refresh(unsafely_update_root_if_necessary=False)
-
-  # ... and store the target information for the target file specified on the
-  # command line, and determine which of these targets have been updated.
-  target_fileinfo = []
-  for target in parsed_arguments.targets:
-    target_fileinfo.append(updater.get_one_valid_targetinfo(target))
-
-  updated_targets = updater.updated_targets(target_fileinfo, destination_directory)
-
-  # Retrieve each of these updated targets and save them to the destination
-  # directory.
-  for target in updated_targets:
-    try:
-      updater.download_target(target, destination_directory)
-
-    except exceptions.DownloadError:
-      pass
-
-  # Remove any files from the destination directory that are no longer being
-  # tracked.
-  updater.remove_obsolete_targets(destination_directory)
-
-
-
-
-
-def parse_arguments():
-  """
-
-  Parse the command-line options and set the logging level as specified by
-  the user through the --verbose option. 'client' expects the '--repo' option
-  to be set by the user.
-
-  Example:
-    $ client.py --repo http://localhost:8001 LICENSE
-
-  If the required option is unset, a parser error is printed and the script
-  exits.
-
-
-  None.
-
-
-  None.
-
-
-  Sets the logging level for TUF logging.
-
-
-  The parsed_arguments (i.e., an argparse Namespace object).
-  """
-
-  parser = argparse.ArgumentParser(
-      description='Retrieve file from TUF repository.')
-
-  # Add the options supported by 'basic_client' to the option parser.
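-  # (Illustrative, not in the original: an invocation such as
-  #   $ client.py --repo http://localhost:8001 --verbose 3 README.txt
-  # produces a Namespace with repo='http://localhost:8001', verbose=3 and
-  # targets=['README.txt'], which update_client() above consumes.)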
-  parser.add_argument('-v', '--verbose', type=int, default=2,
-      choices=range(0, 6), help='Set the verbosity level of logging messages.'
-      ' The lower the setting, the greater the verbosity. Supported logging'
-      ' levels: 0=UNSET, 1=DEBUG, 2=INFO, 3=WARNING, 4=ERROR,'
-      ' 5=CRITICAL')
-
-  parser.add_argument('-r', '--repo', type=str, required=True, metavar='',
-      help='Specify the remote repository\'s URI'
-      ' (e.g., http://www.example.com:8001/tuf/). The client retrieves'
-      ' updates from the remote repository.')
-
-  parser.add_argument('targets', nargs='+', metavar='', help='Specify'
-      ' the target files to retrieve from the specified TUF repository.')
-
-  parsed_arguments = parser.parse_args()
-
-
-  # Set the logging level.
-  if parsed_arguments.verbose == 5:
-    log.set_log_level(logging.CRITICAL)
-
-  elif parsed_arguments.verbose == 4:
-    log.set_log_level(logging.ERROR)
-
-  elif parsed_arguments.verbose == 3:
-    log.set_log_level(logging.WARNING)
-
-  elif parsed_arguments.verbose == 2:
-    log.set_log_level(logging.INFO)
-
-  elif parsed_arguments.verbose == 1:
-    log.set_log_level(logging.DEBUG)
-
-  else:
-    log.set_log_level(logging.NOTSET)
-
-  # Return the repository mirror containing the metadata and target files.
-  return parsed_arguments
-
-
-
-if __name__ == '__main__':
-
-  # Parse the command-line arguments and set the logging level.
-  arguments = parse_arguments()
-
-  # Perform an update of all the files in the 'targets' directory located in
-  # the current directory.
-  try:
-    update_client(arguments)
-
-  except (exceptions.NoWorkingMirrorError, exceptions.RepositoryError,
-      exceptions.FormatError, exceptions.Error) as e:
-    sys.stderr.write('Error: ' + str(e) + '\n')
-    sys.exit(1)
-
-  # Successfully updated the client's target files.
-  sys.exit(0)
diff --git a/tuf/scripts/repo.py b/tuf/scripts/repo.py
deleted file mode 100755
index 0b61b2bc59..0000000000
--- a/tuf/scripts/repo.py
+++ /dev/null
@@ -1,1149 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2018, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-
-  repo.py
-
-
-  Vladimir Diaz
-
-
-  January 2018.
-
-
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-
-  Provide a command-line interface to create and modify TUF repositories. The
-  CLI removes the need to write Python code when creating or modifying
-  repositories, which is the case with repository_tool.py and
-  developer_tool.py.
-
-  Note:
-    'python3 -m pip install securesystemslib[crypto,pynacl]' is required by
-    the CLI, which installs the 3rd-party dependencies: cryptography and
-    pynacl.
-
-
-  Note: arguments within brackets are optional.
-
-  $ repo.py --init
-      [--consistent, --bare, --path, --root_pw, --targets_pw,
-      --snapshot_pw, --timestamp_pw]
-  $ repo.py --add <target> ... [--path, --recursive]
-  $ repo.py --remove <glob pattern>
-  $ repo.py --distrust --pubkeys </path/to/pubkey> [--role]
-  $ repo.py --trust --pubkeys </path/to/pubkey> [--role]
-  $ repo.py --sign </path/to/key> [--role <rolename>]
-  $ repo.py --key <keytype>
-      [--filename <filename>
-      --path </path/to/repo>, --pw [my_password]]
-  $ repo.py --delegate <glob pattern> --delegatee <rolename>
-      --pubkeys </path/to/pubkey>
-      [--role <rolename> --terminating --threshold <X>
-      --sign </path/to/role_privkey>]
-  $ repo.py --revoke --delegatee <rolename>
-      [--role <rolename> --sign </path/to/role_privkey>]
-  $ repo.py --verbose <0-5>
-  $ repo.py --clean [--path]
-
-
-  --init:
-    Create new TUF repository in current working or specified directory.
-
-  --consistent:
-    Enable consistent snapshots for newly created TUF repository.
-
-  --bare:
-    Specify creation of bare TUF repository with no key created or set.
-
-  --path:
-    Choose specified path location of a TUF repository or key(s).
-
-  --role:
-    Specify top-level role(s) affected by the main command-line option.
-
-  --pubkeys:
-    Indicate location of key(s) affected by the main command-line option.
-
-  --root_pw:
-    Set password for encrypting top-level key file of root role.
-
-  --targets_pw:
-    Set password for encrypting top-level key file of targets role.
-
-  --snapshot_pw:
-    Set password for encrypting top-level key file of snapshot role.
-
-  --timestamp_pw:
-    Set password for encrypting top-level key file of timestamp role.
-
-  --add:
-    Add file specified by <target> to the Targets metadata.
-
-  --recursive:
-    Include files in subdirectories of specified directory <target>.
-
-  --remove:
-    Remove target files from Targets metadata matching <glob pattern>.
-
-  --distrust:
-    Discontinue trust of keys located in directory </path/to/pubkey> of a role.
-
-  --trust:
-    Indicate trusted keys located in directory </path/to/pubkey> of a role.
-
-  --sign:
-    Sign metadata of target role(s) with keys in specified directory.
-
-  --key:
-    Generate cryptographic key of specified type <keytype> (default: Ed25519).
-
-  --filename:
-    Specify filename associated with generated top-level key.
-
-  --pw:
-    Set password for the generated key of specified type <keytype>.
-
-  --delegate:
-    Delegate trust of target files from Targets role (or <rolename> specified
-    in --role) to --delegatee role with specified <glob pattern>.
-
-  --delegatee:
-    Specify role that is targeted by delegator in --role to sign for target
-    files matching delegated <glob pattern> or in revocation of trust.
-
-  --terminating:
-    Mark delegation to --delegatee role from delegator as a terminating one.
-
-  --threshold:
-    Specify signature threshold of --delegatee role as the value <X>.
-
-  --revoke:
-    Revoke trust of target files from delegated role (--delegatee).
-
-  --verbose:
-    Set the verbosity level of logging messages. Accepts values 0-5.
-
-  --clean:
-    Delete repo in current working or specified directory.
-"""
-
-import os
-import sys
-import logging
-import argparse
-import shutil
-import time
-import fnmatch
-
-import securesystemslib # pylint: disable=unused-import
-from securesystemslib import exceptions as sslib_exceptions
-from securesystemslib import formats as sslib_formats
-from securesystemslib import interface as sslib_interface
-from securesystemslib import keys as sslib_keys
-from securesystemslib import settings as sslib_settings
-from securesystemslib import util as sslib_util
-
-from tuf import exceptions
-from tuf import formats
-from tuf import keydb
-from tuf import log
-from tuf import repository_tool as repo_tool
-from tuf import roledb
-
-
-# See 'log.py' to learn how logging is handled in TUF.
-logger = logging.getLogger(__name__)
-
-repo_tool.disable_console_log_messages()
-
-PROG_NAME = 'repo.py'
-
-REPO_DIR = 'tufrepo'
-CLIENT_DIR = 'tufclient'
-KEYSTORE_DIR = 'tufkeystore'
-
-ROOT_KEY_NAME = 'root_key'
-TARGETS_KEY_NAME = 'targets_key'
-SNAPSHOT_KEY_NAME = 'snapshot_key'
-TIMESTAMP_KEY_NAME = 'timestamp_key'
-
-STAGED_METADATA_DIR = 'metadata.staged'
-METADATA_DIR = 'metadata'
-
-# The keytype strings, as expected on the command line.
-ED25519_KEYTYPE = 'ed25519'
-ECDSA_KEYTYPE = 'ecdsa'
-RSA_KEYTYPE = 'rsa'
-SUPPORTED_CLI_KEYTYPES = (ECDSA_KEYTYPE, ED25519_KEYTYPE, RSA_KEYTYPE)
-
-# The supported keytype strings (as they appear in metadata) are listed here
-# because they won't necessarily match the key types supported by
-# securesystemslib.
-SUPPORTED_KEY_TYPES = ('rsa', 'ed25519', 'ecdsa', 'ecdsa-sha2-nistp256')
-
-# pylint: disable=protected-access
-# ... to allow use of sslib _generate_and_write_*_keypair convenience methods
-
-def process_command_line_arguments(parsed_arguments):
-  """
-
-  Perform the relevant operations on the repo according to the chosen
-  command-line options. Which functions are executed depends on
-  'parsed_arguments'. For instance, the --init and --clean options will cause
-  the init_repo() and clean_repo() functions to be called. Multiple operations
-  can be executed in one invocation of the CLI.
-
-
-  parsed_arguments:
-    The parsed arguments returned by argparse.parse_args().
-
-
-  securesystemslib.exceptions.Error, if any of the arguments are improperly
-  formatted or if any of the arguments could not be processed.
-
-
-  None.
-
-
-  None.
-  """
-
-  # Do we have a valid argparse Namespace?
-  if not isinstance(parsed_arguments, argparse.Namespace):
-    raise exceptions.Error('Invalid namespace: ' + repr(parsed_arguments))
-
-  else:
-    logger.debug('We have a valid argparse Namespace.')
-
-  # TODO: Make sure the order that the arguments are processed allows for the
-  # most convenient use of multiple options in one invocation of the CLI. For
-  # instance, it might be best for --clean to be processed first before --init
-  # so that a user can do the following: repo.py --clean --init (that is,
-  # first clear the repo in the current working directory, and then initialize
-  # a new one).
-  if parsed_arguments.clean:
-    clean_repo(parsed_arguments)
-
-  if parsed_arguments.init:
-    init_repo(parsed_arguments)
-
-  if parsed_arguments.remove:
-    remove_targets(parsed_arguments)
-
-  if parsed_arguments.add:
-    add_targets(parsed_arguments)
-
-  if parsed_arguments.distrust:
-    remove_verification_key(parsed_arguments)
-
-  if parsed_arguments.trust:
-    add_verification_key(parsed_arguments)
-
-  if parsed_arguments.key:
-    gen_key(parsed_arguments)
-
-  if parsed_arguments.revoke:
-    revoke(parsed_arguments)
-
-  if parsed_arguments.delegate:
-    delegate(parsed_arguments)
-
-  # --sign should be processed last, after the other options, so that metadata
-  # is signed last after potentially being modified by the other options.
-  if parsed_arguments.sign:
-    sign_role(parsed_arguments)
-
-
-
-def delegate(parsed_arguments):
-
-  if not parsed_arguments.delegatee:
-    raise exceptions.Error(
-        '--delegatee must be set to perform the delegation.')
-
-  if parsed_arguments.delegatee in ('root', 'snapshot', 'timestamp', 'targets'):
-    raise exceptions.Error(
-        'Cannot delegate to the top-level role: ' + repr(parsed_arguments.delegatee))
-
-  if not parsed_arguments.pubkeys:
-    raise exceptions.Error(
-        '--pubkeys must be set to perform the delegation.')
-
-  public_keys = []
-  for public_key in parsed_arguments.pubkeys:
-    imported_pubkey = import_publickey_from_file(public_key)
-    public_keys.append(imported_pubkey)
-
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-
-  if parsed_arguments.role == 'targets':
-    repository.targets.delegate(parsed_arguments.delegatee, public_keys,
-        parsed_arguments.delegate, parsed_arguments.threshold,
-        parsed_arguments.terminating, list_of_targets=None,
-        path_hash_prefixes=None)
-
-    targets_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME),
-        parsed_arguments.targets_pw)
-
-    repository.targets.load_signing_key(targets_private)
-
-  # A delegated (non-top-level-Targets) role.
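-  # (Illustrative, not in the original: this branch handles delegations from
-  # an existing delegated role, e.g.
-  #   $ repo.py --delegate 'project/*.txt' --delegatee role2 --role role1
-  #       --pubkeys tufkeystore/role2_key.pub
-  # where 'role1' is itself a delegated role.)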
-  else:
-    repository.targets(parsed_arguments.role).delegate(
-        parsed_arguments.delegatee, public_keys,
-        parsed_arguments.delegate, parsed_arguments.threshold,
-        parsed_arguments.terminating, list_of_targets=None,
-        path_hash_prefixes=None)
-
-  # Update the required top-level roles, Snapshot and Timestamp, to make a new
-  # release. Automatically making a new release can be disabled via
-  # --no_release.
-  if not parsed_arguments.no_release:
-    snapshot_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
-        parsed_arguments.snapshot_pw)
-    timestamp_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-        TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
-
-    repository.snapshot.load_signing_key(snapshot_private)
-    repository.timestamp.load_signing_key(timestamp_private)
-
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-  repository.writeall(consistent_snapshot=consistent_snapshot)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
-
-
-
-def revoke(parsed_arguments):
-
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-
-  if parsed_arguments.role == 'targets':
-    repository.targets.revoke(parsed_arguments.delegatee)
-
-    targets_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME),
-        parsed_arguments.targets_pw)
-
-    repository.targets.load_signing_key(targets_private)
-
-  # A non-top-level role.
-  else:
-    repository.targets(parsed_arguments.role).revoke(parsed_arguments.delegatee)
-
-    role_privatekey = import_privatekey_from_file(parsed_arguments.sign)
-
-    repository.targets(parsed_arguments.role).load_signing_key(role_privatekey)
-
-  # Update the required top-level roles, Snapshot and Timestamp, to make a new
-  # release. Automatically making a new release can be disabled via
-  # --no_release.
-  if not parsed_arguments.no_release:
-    snapshot_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
-        parsed_arguments.snapshot_pw)
-    timestamp_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-        TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
-
-    repository.snapshot.load_signing_key(snapshot_private)
-    repository.timestamp.load_signing_key(timestamp_private)
-
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-  repository.writeall(consistent_snapshot=consistent_snapshot)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
-
-
-
-def gen_key(parsed_arguments):
-
-  if parsed_arguments.filename:
-    parsed_arguments.filename = os.path.join(parsed_arguments.path,
-        KEYSTORE_DIR, parsed_arguments.filename)
-
-  keypath = None
-
-  keygen_kwargs = {
-      "password": parsed_arguments.pw,
-      "filepath": parsed_arguments.filename,
-      "prompt": (not parsed_arguments.pw) # prompt if no default or passed pw
-  }
-
-  if parsed_arguments.key not in SUPPORTED_CLI_KEYTYPES:
-    raise exceptions.Error('Invalid key type: ' + repr(parsed_arguments.key) +
-        '. Supported key types: ' + repr(SUPPORTED_CLI_KEYTYPES))
-
-  elif parsed_arguments.key == ECDSA_KEYTYPE:
-    keypath = sslib_interface._generate_and_write_ecdsa_keypair(
-        **keygen_kwargs)
-
-  elif parsed_arguments.key == ED25519_KEYTYPE:
-    keypath = sslib_interface._generate_and_write_ed25519_keypair(
-        **keygen_kwargs)
-
-  # RSA key..
-  else:
-    keypath = sslib_interface._generate_and_write_rsa_keypair(
-        **keygen_kwargs)
-
-
-  # If a filename is not given, the generated keypair is saved to the current
-  # working directory. By default, the keypair is written to <KEYID>.pub
-  # and <KEYID> (private key).
-  if not parsed_arguments.filename:
-    privkey_repo_path = os.path.join(parsed_arguments.path,
-        KEYSTORE_DIR, os.path.basename(keypath))
-    pubkey_repo_path = os.path.join(parsed_arguments.path,
-        KEYSTORE_DIR, os.path.basename(keypath + '.pub'))
-
-    sslib_util.ensure_parent_dir(privkey_repo_path)
-    sslib_util.ensure_parent_dir(pubkey_repo_path)
-
-    # Move them from the CWD to the repo's keystore.
-    shutil.move(keypath, privkey_repo_path)
-    shutil.move(keypath + '.pub', pubkey_repo_path)
-
-
-
-def import_privatekey_from_file(keypath, password=None):
-  # Note: should securesystemslib support this functionality (import any
-  # privatekey type)?
-  # If the caller does not provide a password argument, prompt for one.
-  # Password confirmation is disabled here, which should ideally happen only
-  # when creating encrypted key files.
-  if password is None: # pragma: no cover
-
-    # It is safe to specify the full path of 'filepath' in the prompt and not
-    # worry about leaking sensitive information about the key's location.
-    # However, care should be taken when including the full path in exceptions
-    # and log files.
-    password = sslib_interface.get_password('Enter a password for'
-        ' the encrypted key (' + sslib_interface.TERM_RED + repr(keypath) +
-        sslib_interface.TERM_RED + '): ', confirm=False)
-
-  # Does 'password' have the correct format?
-  sslib_formats.PASSWORD_SCHEMA.check_match(password)
-
-  # Store the encrypted contents of 'filepath' prior to calling the decryption
-  # routine.
-  encrypted_key = None
-
-  with open(keypath, 'rb') as file_object:
-    encrypted_key = file_object.read().decode('utf-8')
-
-  # Decrypt the loaded key file, calling the 'cryptography' library to
-  # generate the derived encryption key from 'password'. Raise
-  # 'securesystemslib.exceptions.CryptoError' if the decryption fails.
-  try:
-    key_object = sslib_keys.decrypt_key(encrypted_key, password)
-
-  except sslib_exceptions.CryptoError:
-    try:
-      logger.debug(
-          'Decryption failed. Attempting to import a private PEM instead.')
-      key_object = sslib_keys.import_rsakey_from_private_pem(
-          encrypted_key, 'rsassa-pss-sha256', password)
-
-    except sslib_exceptions.CryptoError as error:
-      raise exceptions.Error(repr(keypath) + ' cannot be imported, possibly'
-          ' because an invalid key file is given or the decryption password'
-          ' is incorrect.') from error
-
-  if key_object['keytype'] not in SUPPORTED_KEY_TYPES:
-    raise exceptions.Error('Trying to import an unsupported key'
-        ' type: ' + repr(key_object['keytype']) + '.'
-        ' Supported key types: ' + repr(SUPPORTED_KEY_TYPES))
-
-  else:
-    # Add "keyid_hash_algorithms" so that equal keys with different keyids can
-    # be associated using supported keyid_hash_algorithms.
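-    # (Illustrative, not in the original: securesystemslib's HASH_ALGORITHMS
-    # setting is typically ['sha256', 'sha512'], so two otherwise-equal keys
-    # whose keyids were computed with different hash algorithms can still be
-    # matched.)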
-    key_object['keyid_hash_algorithms'] = sslib_settings.HASH_ALGORITHMS
-
-    return key_object
-
-
-
-def import_publickey_from_file(keypath):
-
-  try:
-    key_metadata = sslib_util.load_json_file(keypath)
-
-  # An RSA public key is saved to disk in PEM format (not JSON), so the
-  # load_json_file() call above can fail for this reason. Try to potentially
-  # load the PEM string in keypath if an exception is raised.
-  except sslib_exceptions.Error:
-    key_metadata = sslib_interface.import_rsa_publickey_from_file(
-        keypath)
-
-  key_object, junk = sslib_keys.format_metadata_to_key(key_metadata)
-
-  if key_object['keytype'] not in SUPPORTED_KEY_TYPES:
-    raise exceptions.Error('Trying to import an unsupported key'
-        ' type: ' + repr(key_object['keytype']) + '.'
-        ' Supported key types: ' + repr(SUPPORTED_KEY_TYPES))
-
-  else:
-    return key_object
-
-
-
-def add_verification_key(parsed_arguments):
-  if not parsed_arguments.pubkeys:
-    raise exceptions.Error('--pubkeys must be given with --trust.')
-
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-
-  for keypath in parsed_arguments.pubkeys:
-    imported_pubkey = import_publickey_from_file(keypath)
-
-    if parsed_arguments.role not in ('root', 'targets', 'snapshot', 'timestamp'):
-      raise exceptions.Error('The given --role is not a top-level role.')
-
-    elif parsed_arguments.role == 'root':
-      repository.root.add_verification_key(imported_pubkey)
-
-    elif parsed_arguments.role == 'targets':
-      repository.targets.add_verification_key(imported_pubkey)
-
-    elif parsed_arguments.role == 'snapshot':
-      repository.snapshot.add_verification_key(imported_pubkey)
-
-    # The timestamp role..
-    else:
-      repository.timestamp.add_verification_key(imported_pubkey)
-
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-  repository.write('root', consistent_snapshot=consistent_snapshot,
-      increment_version_number=False)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
-
-
-
-def remove_verification_key(parsed_arguments):
-  if not parsed_arguments.pubkeys:
-    raise exceptions.Error('--pubkeys must be given with --distrust.')
-
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-
-  for keypath in parsed_arguments.pubkeys:
-    imported_pubkey = import_publickey_from_file(keypath)
-
-    try:
-      if parsed_arguments.role not in ('root', 'targets', 'snapshot', 'timestamp'):
-        raise exceptions.Error('The given --role is not a top-level role.')
-
-      elif parsed_arguments.role == 'root':
-        repository.root.remove_verification_key(imported_pubkey)
-
-      elif parsed_arguments.role == 'targets':
-        repository.targets.remove_verification_key(imported_pubkey)
-
-      elif parsed_arguments.role == 'snapshot':
-        repository.snapshot.remove_verification_key(imported_pubkey)
-
-      # The Timestamp key..
-      else:
-        repository.timestamp.remove_verification_key(imported_pubkey)
-
-    # It is assumed remove_verification_key() only raises
-    # securesystemslib.exceptions.Error and
-    # securesystemslib.exceptions.FormatError, and the latter is not raised
-    # because a valid key should have been returned by
-    # import_publickey_from_file().
-    except sslib_exceptions.Error:
-      print(repr(keypath) + ' is not a trusted key. Skipping.')
-
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-  repository.write('root', consistent_snapshot=consistent_snapshot,
-      increment_version_number=False)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
-
-
-
-def sign_role(parsed_arguments):
-
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-
-  for keypath in parsed_arguments.sign:
-
-    role_privatekey = import_privatekey_from_file(keypath)
-
-    if parsed_arguments.role == 'targets':
-      repository.targets.load_signing_key(role_privatekey)
-
-    elif parsed_arguments.role == 'root':
-      repository.root.load_signing_key(role_privatekey)
-
-    elif parsed_arguments.role == 'snapshot':
-      repository.snapshot.load_signing_key(role_privatekey)
-
-    elif parsed_arguments.role == 'timestamp':
-      repository.timestamp.load_signing_key(role_privatekey)
-
-    else:
-      # TODO: repository_tool.py will be refactored to clean up the following
-      # code, which adds and signs for a non-existent role.
-      if not roledb.role_exists(parsed_arguments.role):
-
-        # Load the private key into keydb and set the roleinfo in roledb so
-        # that metadata can be written with repository.write().
-        keydb.remove_key(role_privatekey['keyid'],
-            repository_name = repository._repository_name)
-        keydb.add_key(
-            role_privatekey, repository_name = repository._repository_name)
-
-        # Set the delegated metadata file to expire in 3 months.
-        expiration = formats.unix_timestamp_to_datetime(
-            int(time.time() + 7889230))
-        expiration = expiration.isoformat() + 'Z'
-
-        roleinfo = {'name': parsed_arguments.role,
-            'keyids': [role_privatekey['keyid']],
-            'signing_keyids': [role_privatekey['keyid']],
-            'partial_loaded': False, 'paths': {},
-            'signatures': [], 'version': 1, 'expires': expiration,
-            'delegations': {'keys': {}, 'roles': []}}
-
-        roledb.add_role(parsed_arguments.role, roleinfo,
-            repository_name=repository._repository_name)
-
-        # Generate the Targets object of --role, and add it to the top-level
-        # 'targets' object.
-        new_targets_object = repo_tool.Targets(repository._targets_directory,
-            parsed_arguments.role, roleinfo,
-            repository_name=repository._repository_name)
-        repository.targets._delegated_roles[parsed_arguments.role] = new_targets_object
-
-      else:
-        repository.targets(parsed_arguments.role).load_signing_key(role_privatekey)
-
-  # Write the Targets metadata now that it's been modified. Once write() is
-  # called on a role, it is no longer considered "dirty" and the role will not
-  # be written again if another write() or writeall() call is subsequently
-  # made.
-  repository.write(parsed_arguments.role,
-      consistent_snapshot=consistent_snapshot, increment_version_number=False)
-
-  # Write the updated top-level roles, if any. Also write Snapshot and
-  # Timestamp to make a new release. Automatically making a new release can be
-  # disabled via --no_release.
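-  # (Illustrative, not in the original: passing --no_release, e.g.
-  #   $ repo.py --sign tufkeystore/role1_key --role role1 --no_release
-  # skips the Snapshot/Timestamp update below, which can be useful when
-  # batching several operations before a single release.)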
-
-
-
-def sign_role(parsed_arguments):
-
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-
-  for keypath in parsed_arguments.sign:
-
-    role_privatekey = import_privatekey_from_file(keypath)
-
-    if parsed_arguments.role == 'targets':
-      repository.targets.load_signing_key(role_privatekey)
-
-    elif parsed_arguments.role == 'root':
-      repository.root.load_signing_key(role_privatekey)
-
-    elif parsed_arguments.role == 'snapshot':
-      repository.snapshot.load_signing_key(role_privatekey)
-
-    elif parsed_arguments.role == 'timestamp':
-      repository.timestamp.load_signing_key(role_privatekey)
-
-    else:
-      # TODO: repository_tool.py will be refactored to clean up the following
-      # code, which adds and signs for a non-existent role.
-      if not roledb.role_exists(parsed_arguments.role):
-
-        # Load the private key in keydb and set the roleinfo in roledb so that
-        # metadata can be written with repository.write().
-        keydb.remove_key(role_privatekey['keyid'],
-            repository_name=repository._repository_name)
-        keydb.add_key(
-            role_privatekey, repository_name=repository._repository_name)
-
-        # Set the delegated metadata file to expire in 3 months.
-        expiration = formats.unix_timestamp_to_datetime(
-            int(time.time() + 7889230))
-        expiration = expiration.isoformat() + 'Z'
-
-        roleinfo = {'name': parsed_arguments.role,
-            'keyids': [role_privatekey['keyid']],
-            'signing_keyids': [role_privatekey['keyid']],
-            'partial_loaded': False, 'paths': {},
-            'signatures': [], 'version': 1, 'expires': expiration,
-            'delegations': {'keys': {}, 'roles': []}}
-
-        roledb.add_role(parsed_arguments.role, roleinfo,
-            repository_name=repository._repository_name)
-
-        # Generate the Targets object of --role, and add it to the top-level
-        # 'targets' object.
-        new_targets_object = repo_tool.Targets(repository._targets_directory,
-            parsed_arguments.role, roleinfo,
-            repository_name=repository._repository_name)
-        repository.targets._delegated_roles[parsed_arguments.role] = new_targets_object
-
-      else:
-        repository.targets(parsed_arguments.role).load_signing_key(role_privatekey)
-
-  # Write the Targets metadata now that it's been modified.  Once write() is
-  # called on a role, it is no longer considered "dirty" and the role will not
-  # be written again if another write() or writeall() were subsequently made.
-  repository.write(parsed_arguments.role,
-      consistent_snapshot=consistent_snapshot, increment_version_number=False)
-
-  # Write the updated top-level roles, if any.  Also write Snapshot and
-  # Timestamp to make a new release.  Automatically making a new release can
-  # be disabled via --no_release.
-  if not parsed_arguments.no_release:
-    snapshot_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
-        parsed_arguments.snapshot_pw)
-    timestamp_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-        TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
-
-    repository.snapshot.load_signing_key(snapshot_private)
-    repository.timestamp.load_signing_key(timestamp_private)
-
-    repository.writeall(consistent_snapshot=consistent_snapshot)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
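sign_role() backs the --sign option.  A hypothetical invocation, assuming the keystore layout produced by --init (paths illustrative):

    $ repo.py --sign ./tufkeystore/targets_key --role targets

Note that if --role names a rolename unknown to roledb, the function registers it and signs for it anyway, per the TODO above, rather than rejecting the request.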
-
-
-
-def clean_repo(parsed_arguments):
-  repo_dir = os.path.join(parsed_arguments.path, REPO_DIR)
-  client_dir = os.path.join(parsed_arguments.path, CLIENT_DIR)
-  keystore_dir = os.path.join(parsed_arguments.path, KEYSTORE_DIR)
-
-  shutil.rmtree(repo_dir, ignore_errors=True)
-  shutil.rmtree(client_dir, ignore_errors=True)
-  shutil.rmtree(keystore_dir, ignore_errors=True)
-
-
-
-def write_to_live_repo(parsed_arguments):
-  staged_meta_directory = os.path.join(
-      parsed_arguments.path, REPO_DIR, STAGED_METADATA_DIR)
-  live_meta_directory = os.path.join(
-      parsed_arguments.path, REPO_DIR, METADATA_DIR)
-
-  shutil.rmtree(live_meta_directory, ignore_errors=True)
-  shutil.copytree(staged_meta_directory, live_meta_directory)
-
-
-
-def add_target_to_repo(parsed_arguments, target_path, repo_targets_path,
-    repository, custom=None):
-  """
-  (1) Copy 'target_path' to 'repo_targets_path'.
-  (2) Add 'target_path' to Targets metadata of 'repository'.
-  """
-
-  if custom is None:
-    custom = {}
-
-  if not os.path.exists(target_path):
-    logger.debug(repr(target_path) + ' does not exist.  Skipping.')
-
-  else:
-    sslib_util.ensure_parent_dir(os.path.join(repo_targets_path, target_path))
-    shutil.copy(target_path, os.path.join(repo_targets_path, target_path))
-
-    roleinfo = roledb.get_roleinfo(
-        parsed_arguments.role, repository_name=repository._repository_name)
-
-    # It is assumed we have a delegated role, and that the caller has made
-    # sure to reject top-level roles specified with --role.
-    if target_path not in roleinfo['paths']:
-      logger.debug('Adding new target: ' + repr(target_path))
-
-    else:
-      logger.debug('Replacing target: ' + repr(target_path))
-
-    roleinfo['paths'].update({target_path: custom})
-
-    roledb.update_roleinfo(parsed_arguments.role, roleinfo,
-        mark_role_as_dirty=True, repository_name=repository._repository_name)
-
-
-
-def remove_target_files_from_metadata(parsed_arguments, repository):
-
-  if parsed_arguments.role in ('root', 'snapshot', 'timestamp'):
-    raise exceptions.Error(
-        'Invalid rolename specified: ' + repr(parsed_arguments.role) + '.'
-        ' It must be "targets" or a delegated rolename.')
-
-  else:
-    # NOTE: The following approach of using roledb to update the target
-    # files will be modified in the future when the repository tool's API is
-    # refactored.
-    roleinfo = roledb.get_roleinfo(
-        parsed_arguments.role, repository._repository_name)
-
-    for glob_pattern in parsed_arguments.remove:
-      for path in list(roleinfo['paths'].keys()):
-        if fnmatch.fnmatch(path, glob_pattern):
-          del roleinfo['paths'][path]
-
-        else:
-          logger.debug('Delegated path ' + repr(path) + ' does not match'
-              ' given path/glob pattern ' + repr(glob_pattern))
-
-    roledb.update_roleinfo(
-        parsed_arguments.role, roleinfo, mark_role_as_dirty=True,
-        repository_name=repository._repository_name)
-
-
-
-def add_targets(parsed_arguments):
-  repo_targets_path = os.path.join(parsed_arguments.path, REPO_DIR, 'targets')
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-
-  # Copy the target files in --path to the repo directory, and
-  # add them to Targets metadata.  Make sure to also copy & add files
-  # in directories (and subdirectories, if --recursive is True).
-  for target_path in parsed_arguments.add:
-    if os.path.isdir(target_path):
-      for sub_target_path in repository.get_filepaths_in_directory(
-          target_path, parsed_arguments.recursive):
-        add_target_to_repo(parsed_arguments, sub_target_path,
-            repo_targets_path, repository)
-
-    else:
-      add_target_to_repo(parsed_arguments, target_path,
-          repo_targets_path, repository)
-
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-
-  if parsed_arguments.role == 'targets':
-    # Load the top-level, non-root, keys to make a new release.
-    targets_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME),
-        parsed_arguments.targets_pw)
-    repository.targets.load_signing_key(targets_private)
-
-  elif parsed_arguments.role not in ('root', 'snapshot', 'timestamp'):
-    repository.write(parsed_arguments.role,
-        consistent_snapshot=consistent_snapshot, increment_version_number=True)
-    return
-
-  # Update the required top-level roles, Snapshot and Timestamp, to make a new
-  # release.  Automatically making a new release can be disabled via
-  # --no_release.
-  if not parsed_arguments.no_release:
-    snapshot_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
-        parsed_arguments.snapshot_pw)
-    timestamp_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-        TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
-
-    repository.snapshot.load_signing_key(snapshot_private)
-    repository.timestamp.load_signing_key(timestamp_private)
-
-    repository.writeall(consistent_snapshot=consistent_snapshot)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
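add_targets() backs the --add option.  Hypothetical invocations (filenames illustrative):

    $ repo.py --add foo.tar.gz
    $ repo.py --add mytargets/ --recursive --role unclaimed

Note the early return above: when --add is used with a delegated --role, only that role's metadata is written, and the staged metadata is not copied to the live directory.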
-
-
-
-def remove_targets(parsed_arguments):
-  repository = repo_tool.load_repository(
-      os.path.join(parsed_arguments.path, REPO_DIR))
-
-  # Remove target files from the Targets metadata (or the role specified in
-  # --role) that match the glob patterns specified in --remove.
-  remove_target_files_from_metadata(parsed_arguments, repository)
-
-  # Examples of how the --pw command-line option is interpreted:
-  # repo.py --init: parsed_arguments.pw = 'pw'
-  # repo.py --init --pw my_password: parsed_arguments.pw = 'my_password'
-  # repo.py --init --pw: The user is prompted for a password, as follows:
-  if not parsed_arguments.pw:
-    parsed_arguments.pw = sslib_interface.get_password(
-        prompt='Enter a password for the top-level role keys: ', confirm=True)
-
-  targets_private = import_privatekey_from_file(
-      os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME),
-      parsed_arguments.targets_pw)
-  repository.targets.load_signing_key(targets_private)
-
-  # Load the top-level keys for Snapshot and Timestamp to make a new release.
-  # Automatically making a new release can be disabled via --no_release.
-  if not parsed_arguments.no_release:
-    snapshot_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
-        parsed_arguments.snapshot_pw)
-    timestamp_private = import_privatekey_from_file(
-        os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-        TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
-
-    repository.snapshot.load_signing_key(snapshot_private)
-    repository.timestamp.load_signing_key(timestamp_private)
-
-  consistent_snapshot = roledb.get_roleinfo('root',
-      repository._repository_name)['consistent_snapshot']
-  repository.writeall(consistent_snapshot=consistent_snapshot)
-
-  # Move staged metadata directory to "live" metadata directory.
-  write_to_live_repo(parsed_arguments)
-
-
-
-def init_repo(parsed_arguments):
-  """
-  Create a repo at the specified location in --path (the current working
-  directory, by default).  Each top-level role has one key, if '--bare' is
-  False (the default).
-  """
-
-  repo_path = os.path.join(parsed_arguments.path, REPO_DIR)
-  repository = repo_tool.create_new_repository(repo_path)
-
-  if not parsed_arguments.bare:
-    set_top_level_keys(repository, parsed_arguments)
-    repository.writeall(consistent_snapshot=parsed_arguments.consistent)
-
-  else:
-    repository.write(
-        'root', consistent_snapshot=parsed_arguments.consistent)
-    repository.write('targets', consistent_snapshot=parsed_arguments.consistent)
-    repository.write('snapshot', consistent_snapshot=parsed_arguments.consistent)
-    repository.write('timestamp', consistent_snapshot=parsed_arguments.consistent)
-
-  write_to_live_repo(parsed_arguments)
-
-  # Create the client files.  The client directory contains the required
-  # directory structure and metadata files for clients to successfully perform
-  # an update.
-  repo_tool.create_tuf_client_directory(
-      os.path.join(parsed_arguments.path, REPO_DIR),
-      os.path.join(parsed_arguments.path, CLIENT_DIR, REPO_DIR))
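init_repo() backs --init.  A hypothetical session and the layout it produces (the directory names come from the --init help text below):

    $ repo.py --init --consistent

    tufrepo/      # staged and live metadata, plus the targets directory
    tufkeystore/  # the encrypted top-level keys
    tufclient/    # a ready-made client metadata directory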
-
-
-
-def set_top_level_keys(repository, parsed_arguments):
-  """
-  Generate, write, and set the top-level keys.  'repository' is modified.
-  """
-
-  # Examples of how the --*_pw command-line options are interpreted:
-  # repo.py --init: parsed_arguments.*_pw = 'pw'
-  # repo.py --init --*_pw my_pw: parsed_arguments.*_pw = 'my_pw'
-  # repo.py --init --*_pw: The user is prompted for a password.
-
-  sslib_interface._generate_and_write_ed25519_keypair(
-      password=parsed_arguments.root_pw,
-      filepath=os.path.join(parsed_arguments.path, KEYSTORE_DIR, ROOT_KEY_NAME),
-      prompt=(not parsed_arguments.root_pw))
-  sslib_interface._generate_and_write_ed25519_keypair(
-      password=parsed_arguments.targets_pw,
-      filepath=os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME),
-      prompt=(not parsed_arguments.targets_pw))
-  sslib_interface._generate_and_write_ed25519_keypair(
-      password=parsed_arguments.snapshot_pw,
-      filepath=os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
-      prompt=(not parsed_arguments.snapshot_pw))
-  sslib_interface._generate_and_write_ed25519_keypair(
-      password=parsed_arguments.timestamp_pw,
-      filepath=os.path.join(parsed_arguments.path, KEYSTORE_DIR, TIMESTAMP_KEY_NAME),
-      prompt=(not parsed_arguments.timestamp_pw))
-
-  # Import the private keys.  They are needed to generate the signatures
-  # included in metadata.
-  root_private = import_privatekey_from_file(
-      os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-      ROOT_KEY_NAME), parsed_arguments.root_pw)
-  targets_private = import_privatekey_from_file(
-      os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-      TARGETS_KEY_NAME), parsed_arguments.targets_pw)
-  snapshot_private = import_privatekey_from_file(
-      os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-      SNAPSHOT_KEY_NAME), parsed_arguments.snapshot_pw)
-  timestamp_private = import_privatekey_from_file(
-      os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-      TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
-
-  # Import the public keys.  They are needed so that metadata roles are
-  # assigned verification keys, which clients need in order to verify the
-  # signatures created by the corresponding private keys.
-  root_public = import_publickey_from_file(
-      os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-      ROOT_KEY_NAME) + '.pub')
-  targets_public = import_publickey_from_file(
-      os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-      TARGETS_KEY_NAME) + '.pub')
-  snapshot_public = import_publickey_from_file(
-      os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-      SNAPSHOT_KEY_NAME) + '.pub')
-  timestamp_public = import_publickey_from_file(
-      os.path.join(parsed_arguments.path, KEYSTORE_DIR,
-      TIMESTAMP_KEY_NAME) + '.pub')
-
-  # Add the verification keys to the top-level roles.
-  repository.root.add_verification_key(root_public)
-  repository.targets.add_verification_key(targets_public)
-  repository.snapshot.add_verification_key(snapshot_public)
-  repository.timestamp.add_verification_key(timestamp_public)
-
-  # Load the previously imported signing keys for the top-level roles so that
-  # valid metadata can be written.
-  repository.root.load_signing_key(root_private)
-  repository.targets.load_signing_key(targets_private)
-  repository.snapshot.load_signing_key(snapshot_private)
-  repository.timestamp.load_signing_key(timestamp_private)
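set_top_level_keys() calls a private securesystemslib helper; a minimal sketch of the same generate/import round trip using the public interface of that module, with an illustrative filepath and password:

    from securesystemslib import interface as sslib_interface

    # Generate an encrypted Ed25519 keypair on disk ('example_key' and
    # 'example_key.pub').
    sslib_interface.generate_and_write_ed25519_keypair(
        password='pw', filepath='tufkeystore/example_key')

    # Import both halves; only the private key requires the password.
    private = sslib_interface.import_ed25519_privatekey_from_file(
        'tufkeystore/example_key', password='pw')
    public = sslib_interface.import_ed25519_publickey_from_file(
        'tufkeystore/example_key.pub')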
-
-
-
-def parse_arguments():
-  """
-  <Purpose>
-    Parse the command-line arguments.  Also set the logging level, as specified
-    via the --verbose argument (2, by default).
-
-    Example:
-      # Create a TUF repository in the current working directory.  The
-      # top-level roles are created, each containing one key.
-      $ repo.py --init
-
-      $ repo.py --init --bare --consistent --verbose 3
-
-    If a required argument is unset, a parser error is printed and the script
-    exits.
-
-  <Arguments>
-    None.
-
-  <Exceptions>
-    None.
-
-  <Side Effects>
-    Sets the logging level for TUF logging.
-
-  <Returns>
-    A tuple ('options.REPOSITORY_PATH', command, command_arguments), where
-    'command' and 'command_arguments' correspond to a repository tool function.
-  """
-
-  parser = argparse.ArgumentParser(
-      description='Create or modify a TUF repository.')
-
-  parser.add_argument('-i', '--init', action='store_true',
-      help='Create a repository.  The "tufrepo", "tufkeystore", and'
-      ' "tufclient" directories are created in the current working'
-      ' directory, unless --path is specified.')
-
-  parser.add_argument('-p', '--path', nargs='?', default='.',
-      metavar='', help='Specify a repository path.  If used'
-      ' with --init, the initialized repository is saved to the given'
-      ' path.')
-
-  parser.add_argument('-b', '--bare', action='store_true',
-      help='If initializing a repository, neither create nor set keys'
-      ' for any of the top-level roles.  False, by default.')
-
-  parser.add_argument('--no_release', action='store_true',
-      help='Do not automatically sign Snapshot and Timestamp metadata.'
-      ' False, by default.')
-
-  parser.add_argument('--consistent', action='store_true',
-      help='Set consistent snapshots for an initialized repository.'
-      ' Consistent snapshot is False by default.')
-
-  parser.add_argument('-c', '--clean', type=str, nargs='?', const='.',
-      metavar='', help='Delete the repo files from the'
-      ' specified directory.  If a directory is not specified, the current'
-      ' working directory is cleaned.')
-
-  parser.add_argument('-a', '--add', type=str, nargs='+',
-      metavar='', help='Add one or more target files to the'
-      ' "targets" role (or the role specified in --role).  If a directory'
-      ' is given, all files in the directory are added.')
-
-  parser.add_argument('--remove', type=str, nargs='+',
-      metavar='', help='Remove one or more target files from the'
-      ' "targets" role (or the role specified in --role).')
-
-  parser.add_argument('--role', nargs='?', type=str, const='targets',
-      default='targets', metavar='', help='Specify a rolename.'
-      ' The rolename "targets" is used by default.')
-
-  parser.add_argument('-r', '--recursive', action='store_true',
-      help='By setting -r, any directory specified with --add is processed'
-      ' recursively.  If unset, the default behavior is to not add target'
-      ' files in subdirectories.')
-
-  parser.add_argument('-k', '--key', type=str, nargs='?', const=ED25519_KEYTYPE,
-      default=None, choices=[ECDSA_KEYTYPE, ED25519_KEYTYPE, RSA_KEYTYPE],
-      help='Generate an ECDSA, Ed25519, or RSA key.  An Ed25519 key is'
-      ' created if the key type is unspecified.')
-
-  parser.add_argument('--filename', nargs='?', default=None, const=None,
-      metavar='', help='Specify a filename.  This option can'
-      ' be used to name a generated key file.  The top-level keys should'
-      ' be named "root_key", "targets_key", "snapshot_key", "timestamp_key".')
-
-  parser.add_argument('--trust', action='store_true',
-      help='Indicate the trusted key(s) (via --pubkeys) for the role in --role.'
-      ' This action modifies Root metadata with the trusted key(s).')
-
-  parser.add_argument('--distrust', action='store_true',
-      help='Discontinue trust of key(s) (via --pubkeys) for the role in --role.'
-      ' This action modifies Root metadata by removing trusted key(s).')
-
-  parser.add_argument('--sign', nargs='+', type=str,
-      metavar='', help='Sign the "targets"'
-      ' metadata (or the one for --role) with the specified key(s).')
-
-  parser.add_argument('--pw', nargs='?', default='pw', metavar='',
-      help='Specify a password.  "pw" is used if --pw is unset, or a'
-      ' password can be entered via a prompt by specifying --pw by itself.'
-      ' This option can be used with --sign and --key.')
-
-  parser.add_argument('--root_pw', nargs='?', default='pw', metavar='',
-      help='Specify a Root password.  "pw" is used if --root_pw is unset, or a'
-      ' password can be entered via a prompt by specifying --root_pw by'
-      ' itself.')
-
-  parser.add_argument('--targets_pw', nargs='?', default='pw', metavar='',
-      help='Specify a Targets password.  "pw" is used if --targets_pw is'
-      ' unset, or a password can be entered via a prompt by specifying'
-      ' --targets_pw by itself.')
-
-  parser.add_argument('--snapshot_pw', nargs='?', default='pw', metavar='',
-      help='Specify a Snapshot password.  "pw" is used if --snapshot_pw is'
-      ' unset, or a password can be entered via a prompt by specifying'
-      ' --snapshot_pw by itself.')
-
-  parser.add_argument('--timestamp_pw', nargs='?', default='pw', metavar='',
-      help='Specify a Timestamp password.  "pw" is used if --timestamp_pw is'
-      ' unset, or a password can be entered via a prompt by specifying'
-      ' --timestamp_pw by itself.')
-
-  parser.add_argument('-d', '--delegate', type=str, nargs='+',
-      metavar='', help='Delegate trust of target files'
-      ' from the "targets" role (or --role) to some other role (--delegatee).'
-      ' The named delegatee is trusted to sign for the target files that'
-      ' match the glob pattern(s).')
-
-  parser.add_argument('--delegatee', nargs='?', type=str, const=None,
-      default=None, metavar='', help='Specify the rolename'
-      ' of the delegated role.  Can be used with --delegate.')
-
-  parser.add_argument('-t', '--terminating', action='store_true',
-      help='Set the terminating flag to True.  Can be used with --delegate.')
-
-  parser.add_argument('--threshold', type=int, default=1, metavar='',
-      help='Set the threshold number of signatures'
-      ' needed to validate a metadata file.  Can be used with --delegate.')
-
-  parser.add_argument('--pubkeys', type=str, nargs='+',
-      metavar='', help='Specify one or more public keys'
-      ' for the delegated role.  Can be used with --delegate.')
-
-  parser.add_argument('--revoke', action='store_true',
-      help='Revoke trust of target files from a delegated role.')
-
-  # Add the parser arguments supported by PROG_NAME.
-  parser.add_argument('-v', '--verbose', type=int, default=2,
-      choices=range(0, 6), help='Set the verbosity level of logging messages.'
-      ' The lower the setting, the greater the verbosity.  Supported logging'
-      ' levels: 0=UNSET, 1=DEBUG, 2=INFO, 3=WARNING, 4=ERROR,'
-      ' 5=CRITICAL')
-
-  # TODO: Should usage examples be included in the help output?
-
-  parsed_args = parser.parse_args()
-
-  # Set the logging level.
-  logging_levels = [logging.NOTSET, logging.DEBUG,
-      logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]
-
-  log.set_log_level(logging_levels[parsed_args.verbose])
-
-  return parsed_args
-
-
-
-if __name__ == '__main__':
-
-  # Parse the arguments and set the logging level.
-  arguments = parse_arguments()
-
-  # Create or modify the repository depending on the option specified on the
-  # command line.  For example, the following adds 'foo.bar.gz' to the
-  # default repository and updates the relevant metadata (i.e., Targets,
-  # Snapshot, and Timestamp metadata are updated):
-  # $ repo.py --add foo.bar.gz
-
-  try:
-    process_command_line_arguments(arguments)
-
-  except exceptions.Error as e:
-    sys.stderr.write('Error: ' + str(e) + '\n')
-    sys.exit(1)
-
-  # Successfully created or updated the TUF repository.
-  sys.exit(0)
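Taken together, a hypothetical end-to-end session with this script; all filenames are illustrative, and the 'ed25519' key-type literal assumes the value of ED25519_KEYTYPE:

    $ repo.py --init --consistent           # create tufrepo/, tufkeystore/, tufclient/
    $ repo.py --add foo.tar.gz              # add a target and make a release
    $ repo.py --key ed25519 --filename mykey
    $ repo.py --trust --role targets --pubkeys ./tufkeystore/mykey.pub
    $ repo.py --sign ./tufkeystore/mykey --role targets
    $ repo.py --clean                       # remove all three directories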
diff --git a/tuf/settings.py b/tuf/settings.py
deleted file mode 100755
index f07c4d961a..0000000000
--- a/tuf/settings.py
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  settings.py
-
-<Author>
-  Vladimir Diaz
-
-<Started>
-  January 11, 2017
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  A central location for TUF configuration settings.  Example options include
-  setting the destination of temporary files and downloaded content, the
-  maximum length of downloaded metadata (unknown file attributes), and
-  download behavior.
-"""
-
-
-# Set a directory that should be used for all temporary files.  If this
-# is None, then the system default will be used.  The system default
-# will also be used if a directory path set here is invalid or
-# unusable.
-temporary_directory = None
-
-# Set a local directory to store metadata that is requested from mirrors.  This
-# directory contains subdirectories for different repositories, where each
-# subdirectory contains a different set of metadata.  For example:
-# tuf.settings.repositories_directory = /tmp/repositories.  The root file for a
-# repository named 'django_repo' can be found at:
-# /tmp/repositories/django_repo/metadata/current/root.METADATA_EXTENSION
-repositories_directory = None
-
-# The 'log.py' module manages TUF's logging system.  Users have the option to
-# enable/disable logging to a file via 'ENABLE_FILE_LOGGING', or
-# tuf.log.enable_file_logging() and tuf.log.disable_file_logging().
-ENABLE_FILE_LOGGING = False
-
-# If file logging is enabled via 'ENABLE_FILE_LOGGING', TUF log messages will
-# be saved to 'LOG_FILENAME'.
-LOG_FILENAME = 'tuf.log'
-
-# Since the timestamp role does not have signed metadata about itself, we set a
-# default but sane upper bound for the number of bytes required to download it.
-DEFAULT_TIMESTAMP_REQUIRED_LENGTH = 16384 # bytes
-
-# The Root role may be updated without knowing its version if top-level
-# metadata cannot be safely downloaded (e.g., keys may have been revoked, thus
-# requiring a new Root file that includes the updated keys).  Set a default
-# upper bound for the maximum total bytes that may be downloaded for Root
-# metadata.
-DEFAULT_ROOT_REQUIRED_LENGTH = 512000 # bytes
-
-# Set a default, but sane, upper bound for the number of bytes required to
-# download Snapshot metadata.
-DEFAULT_SNAPSHOT_REQUIRED_LENGTH = 2000000 # bytes
-
-# Set a default, but sane, upper bound for the number of bytes required to
-# download Targets metadata.
-DEFAULT_TARGETS_REQUIRED_LENGTH = 5000000 # bytes
-
-# Set a timeout value in seconds (float) for non-blocking socket operations.
-SOCKET_TIMEOUT = 4 # seconds
-
-# The maximum chunk of data, in bytes, we would download in every round.
-CHUNK_SIZE = 400000 # bytes
-
-# The minimum average download speed (bytes/second) that must be met to
-# avoid being considered as a slow retrieval attack.
-MIN_AVERAGE_DOWNLOAD_SPEED = 50 # bytes/second
-
-# By default, limit the number of delegatees we visit for any target.
-MAX_NUMBER_OF_DELEGATIONS = 2**5
-
-# A setting for the instances where a default hashing algorithm is needed.
-# This setting is currently used to calculate the path hash prefixes of hashed
-# bin delegations, and digests of targets filepaths.  The other instances
-# (e.g., digests of files) that require a hashing algorithm rely on settings
-# in the securesystemslib external library.
-DEFAULT_HASH_ALGORITHM = 'sha256'
-
-# The hashing algorithms used to compute file hashes.
-FILE_HASH_ALGORITHMS = ['sha256', 'sha512']
-
-# The client's update procedure (contained within a while-loop) can potentially
-# hog the CPU.  The following setting can be used to force the update sequence
-# to suspend execution for a specified amount of time.  See
-# theupdateframework/tuf/issue#338.
-SLEEP_BEFORE_ROUND = None
-
-# Maximum number of root metadata file rotations we should perform in order to
-# prevent a denial-of-service (DoS) attack.
-MAX_NUMBER_ROOT_ROTATIONS = 2**5
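These settings are plain module attributes, so a client overrides them by assignment before starting an update.  A small sketch (the override values are illustrative):

    from tuf import settings

    settings.repositories_directory = '/tmp/repositories'
    settings.SOCKET_TIMEOUT = 8               # seconds
    settings.MAX_NUMBER_OF_DELEGATIONS = 2**4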
diff --git a/tuf/sig.py b/tuf/sig.py
deleted file mode 100755
index 4e1f05fc2a..0000000000
--- a/tuf/sig.py
+++ /dev/null
@@ -1,395 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2012 - 2017, New York University and the TUF contributors
-# SPDX-License-Identifier: MIT OR Apache-2.0
-
-"""
-<Program Name>
-  sig.py
-
-<Author>
-  Vladimir Diaz
-
-<Started>
-  February 28, 2012.  Based on a previous version by Geremy Condra.
-
-<Copyright>
-  See LICENSE-MIT OR LICENSE for licensing information.
-
-<Purpose>
-  Survivable key compromise is one feature of a secure update system
-  incorporated into TUF's design.  Responsibility separation through
-  the use of multiple roles, multi-signature trust, and explicit and
-  implicit key revocation are some of the mechanisms employed towards
-  this goal of survivability.  These mechanisms can all be seen in
-  play by the functions available in this module.
-
-  The signed metadata files utilized by TUF to download target files
-  securely are used and represented here as the 'signable' object.
-  More precisely, the signature structures contained within these metadata
-  files are packaged into 'signable' dictionaries.  This module makes it
-  possible to capture the states of these signatures by organizing the
-  keys into different categories.  As keys are added and removed, the
-  system must securely and efficiently verify the status of these signatures.
-  For instance, suppose a number of keys have recently expired.  How many
-  valid keys are now available to the Snapshot role?  This question can be
-  answered by get_signature_status(), which will return a full 'status
-  report' of these 'signable' dicts.  This module also provides a convenient
-  verify() function that will determine if a role still has a sufficient
-  number of valid keys.  If a caller needs to update the signatures of a
-  'signable' object, there is also a function for that.
-"""
-
-import logging
-
-import securesystemslib # pylint: disable=unused-import
-from securesystemslib import exceptions as sslib_exceptions
-from securesystemslib import formats as sslib_formats
-from securesystemslib import keys as sslib_keys
-
-from tuf import exceptions
-from tuf import formats
-from tuf import keydb
-from tuf import roledb
-
-# See 'log.py' to learn how logging is handled in TUF.
-logger = logging.getLogger(__name__)
-
-def get_signature_status(signable, role=None, repository_name='default',
-    threshold=None, keyids=None):
-  """
-  <Purpose>
-    Return a dictionary representing the status of the signatures listed in
-    'signable'.  Signatures in the returned dictionary are identified by the
-    signature keyid and can have one of the following statuses:
-
-    * bad -- Invalid signature.
-    * good -- Valid signature from a key that is available in 'tuf.keydb', and
-      is authorized for the passed role as per 'roledb' (authorization may be
-      overwritten by passed 'keyids').
-    * unknown -- Signature from a key that is not available in 'tuf.keydb', or
-      if 'role' is None.
-    * unknown signing schemes -- Signature from a key with an unknown signing
-      scheme.
-    * untrusted -- Valid signature from a key that is available in
-      'tuf.keydb', but is not trusted for the passed role as per 'roledb' or
-      the passed 'keyids'.
-
-    NOTE: The result may contain duplicate keyids or keyids that reference the
-    same key, if 'signable' lists multiple signatures from the same key.
-
-  <Arguments>
-    signable:
-      A dictionary containing a list of signatures and a 'signed' identifier.
-      signable = {'signed': 'signer',
-                  'signatures': [{'keyid': keyid,
-                                  'sig': sig}]}
-
-      Conformant to tuf.formats.SIGNABLE_SCHEMA.
-
-    role:
-      TUF role string (e.g., 'root', 'targets', 'snapshot' or 'timestamp').
-
-    threshold:
-      Rather than reference the role's threshold as set in roledb, use
-      the given 'threshold' to calculate the signature status of 'signable'.
-      'threshold' is an integer value that sets the role's threshold value, or
-      the minimum number of signatures needed for metadata to be considered
-      fully signed.
-
-    keyids:
-      Similar to the 'threshold' argument, use the supplied list of 'keyids'
-      to calculate the signature status, instead of referencing the keyids
-      in roledb for 'role'.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if 'signable' does not have the
-    correct format.
-
-    tuf.exceptions.UnknownRoleError, if 'role' is not recognized.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    A dictionary representing the status of the signatures in 'signable'.
-    Conformant to tuf.formats.SIGNATURESTATUS_SCHEMA.
-  """
-
-  # Do the arguments have the correct format?  This check will ensure that
-  # arguments have the appropriate number of objects and object types, and
-  # that all dict keys are properly named.  Raise
-  # 'securesystemslib.exceptions.FormatError' if the check fails.
-  formats.SIGNABLE_SCHEMA.check_match(signable)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  if role is not None:
-    formats.ROLENAME_SCHEMA.check_match(role)
-
-  if threshold is not None:
-    formats.THRESHOLD_SCHEMA.check_match(threshold)
-
-  if keyids is not None:
-    sslib_formats.KEYIDS_SCHEMA.check_match(keyids)
-
-  # The signature status dictionary returned.
-  signature_status = {}
-  good_sigs = []
-  bad_sigs = []
-  unknown_sigs = []
-  untrusted_sigs = []
-  unknown_signing_schemes = []
-
-  # Extract the relevant fields from 'signable' that will allow us to identify
-  # the different classes of keys (i.e., good_sigs, bad_sigs, etc.).
-  signed = sslib_formats.encode_canonical(signable['signed']).encode('utf-8')
-  signatures = signable['signatures']
-
-  # Iterate the signatures and enumerate the signature_status fields
-  # (i.e., good_sigs, bad_sigs, etc.).
-  for signature in signatures:
-    keyid = signature['keyid']
-
-    # Does the signature use an unrecognized key?
-    try:
-      key = keydb.get_key(keyid, repository_name)
-
-    except exceptions.UnknownKeyError:
-      unknown_sigs.append(keyid)
-      continue
-
-    # Does the signature use an unknown/unsupported signing scheme?
-    try:
-      valid_sig = sslib_keys.verify_signature(key, signature, signed)
-
-    except sslib_exceptions.UnsupportedAlgorithmError:
-      unknown_signing_schemes.append(keyid)
-      continue
-
-    # We are now dealing with either a trusted or untrusted key...
-    if valid_sig:
-      if role is not None:
-
-        # Is this an unauthorized key? (a keyid associated with 'role')
-        # Note that if the role is not known, tuf.exceptions.UnknownRoleError
-        # is raised here.
-        if keyids is None:
-          keyids = roledb.get_role_keyids(role, repository_name)
-
-        if keyid not in keyids:
-          untrusted_sigs.append(keyid)
-          continue
-
-      # This is an unset role, thus an unknown signature.
-      else:
-        unknown_sigs.append(keyid)
-        continue
-
-      # Identify good/authorized key.
-      good_sigs.append(keyid)
-
-    else:
-      # This is a bad signature for a trusted key.
-      bad_sigs.append(keyid)
-
-  # Retrieve the threshold value for 'role'.  Raise
-  # tuf.exceptions.UnknownRoleError if we were given an invalid role.
-  if role is not None:
-    if threshold is None:
-      # Note that if the role is not known, tuf.exceptions.UnknownRoleError is
-      # raised here.
-      threshold = roledb.get_role_threshold(
-          role, repository_name=repository_name)
-
-    else:
-      logger.debug('Not using roledb.py\'s threshold for ' + repr(role))
-
-  else:
-    threshold = 0
-
-  # Build the signature_status dict.
-  signature_status['threshold'] = threshold
-  signature_status['good_sigs'] = good_sigs
-  signature_status['bad_sigs'] = bad_sigs
-  signature_status['unknown_sigs'] = unknown_sigs
-  signature_status['untrusted_sigs'] = untrusted_sigs
-  signature_status['unknown_signing_schemes'] = unknown_signing_schemes
-
-  return signature_status
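A sketch of calling get_signature_status() and reading the report; 'signable' is assumed to be parsed metadata conforming to tuf.formats.SIGNABLE_SCHEMA, with the relevant keys already loaded into keydb:

    from tuf import sig

    status = sig.get_signature_status(signable, role='snapshot')

    if len(status['good_sigs']) >= status['threshold']:
        print('snapshot metadata is fully signed')
    else:
        print('bad:', status['bad_sigs'],
              'untrusted:', status['untrusted_sigs'])

Note that verify() below is stricter than this naive count: it deduplicates good signatures by the underlying public key before comparing against the threshold.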
-
-
-
-
-def verify(signable, role, repository_name='default', threshold=None,
-    keyids=None):
-  """
-  <Purpose>
-    Verify that 'signable' has a valid threshold of authorized signatures
-    identified by unique keyids.  The threshold and whether a keyid is
-    authorized is determined by querying the 'threshold' and 'keyids' info for
-    the passed 'role' in 'roledb'.  Both values can be overwritten by
-    passing the 'threshold' or 'keyids' arguments.
-
-    NOTE:
-    - Signatures with identical authorized keyids only count towards the
-      threshold once.
-    - Signatures with the same key only count toward the threshold once.
-
-  <Arguments>
-    signable:
-      A dictionary containing a list of signatures and a 'signed' identifier
-      that conforms to SIGNABLE_SCHEMA, e.g.:
-      signable = {'signed':, 'signatures': [{'keyid':, 'method':, 'sig':}]}
-
-    role:
-      TUF role string (e.g., 'root', 'targets', 'snapshot' or 'timestamp').
-
-    threshold:
-      Rather than reference the role's threshold as set in roledb, use
-      the given 'threshold' to calculate the signature status of 'signable'.
-      'threshold' is an integer value that sets the role's threshold value, or
-      the minimum number of signatures needed for metadata to be considered
-      fully signed.
-
-    keyids:
-      Similar to the 'threshold' argument, use the supplied list of 'keyids'
-      to calculate the signature status, instead of referencing the keyids
-      in roledb for 'role'.
-
-  <Exceptions>
-    tuf.exceptions.UnknownRoleError, if 'role' is not recognized.
-
-    securesystemslib.exceptions.FormatError, if 'signable' is not formatted
-    correctly.
-
-    securesystemslib.exceptions.Error, if an invalid threshold is encountered.
-
-  <Side Effects>
-    tuf.sig.get_signature_status() called.  Any exceptions thrown by
-    get_signature_status() will be caught here and re-raised.
-
-  <Returns>
-    Boolean.  True if the number of good unique (by keyid) signatures >= the
-    role's threshold, False otherwise.
-  """
-
-  formats.SIGNABLE_SCHEMA.check_match(signable)
-  formats.ROLENAME_SCHEMA.check_match(role)
-  sslib_formats.NAME_SCHEMA.check_match(repository_name)
-
-  # Retrieve the signature status.  tuf.sig.get_signature_status() raises:
-  # tuf.exceptions.UnknownRoleError
-  # securesystemslib.exceptions.FormatError.  'threshold' and 'keyids' are
-  # also validated.
-  status = get_signature_status(signable, role, repository_name, threshold,
-      keyids)
-
-  # Retrieve the role's threshold and the authorized keys of 'status'.
-  threshold = status['threshold']
-  good_sigs = status['good_sigs']
-
-  # Does 'status' have the required threshold of signatures?
-  # First check for invalid threshold values before returning result.
-  # Note: get_signature_status() is expected to verify that 'threshold' is
-  # not None or <= 0.
-  if threshold is None or threshold <= 0: #pragma: no cover
-    raise sslib_exceptions.Error("Invalid threshold: " + repr(threshold))
-
-  unique_keys = set()
-  for keyid in good_sigs:
-    key = keydb.get_key(keyid, repository_name)
-    unique_keys.add(key['keyval']['public'])
-
-  return len(unique_keys) >= threshold
-
-
-
-
-
-def may_need_new_keys(signature_status):
-  """
-  <Purpose>
-    Return True iff downloading a new set of keys might tip this
-    signature status over to valid.  This is determined by checking
-    if either the number of unknown or untrusted keys is > 0.
-
-  <Arguments>
-    signature_status:
-      The dictionary returned by tuf.sig.get_signature_status().
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if 'signature_status' does not
-    have the correct format.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    Boolean.
-  """
-
-  # Does 'signature_status' have the correct format?
-  # This check will ensure 'signature_status' has the appropriate number
-  # of objects and object types, and that all dict keys are properly named.
-  # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
-  formats.SIGNATURESTATUS_SCHEMA.check_match(signature_status)
-
-  unknown = signature_status['unknown_sigs']
-  untrusted = signature_status['untrusted_sigs']
-
-  # Return a Boolean, as documented, rather than the raw length.
-  return bool(unknown) or bool(untrusted)
-
-
-
-
-
-def generate_rsa_signature(signed, rsakey_dict):
-  """
-  <Purpose>
-    Generate a new signature dict presumably to be added to the 'signatures'
-    field of 'signable'.  The 'signable' dict is of the form:
-
-    {'signed': 'signer',
-     'signatures': [{'keyid': keyid,
-                     'method': 'evp',
-                     'sig': sig}]}
-
-    The 'signed' argument is needed here for the signing process.
-    The 'rsakey_dict' argument is used to generate 'keyid', 'method', and
-    'sig'.
-
-    The caller should ensure the returned signature is not already in
-    'signable'.
-
-  <Arguments>
-    signed:
-      The data used by 'securesystemslib.keys.create_signature()' to generate
-      signatures.  It is stored in the 'signed' field of 'signable'.
-
-    rsakey_dict:
-      The RSA key, a 'securesystemslib.formats.RSAKEY_SCHEMA' dictionary.
-      Used here to produce 'keyid', 'method', and 'sig'.
-
-  <Exceptions>
-    securesystemslib.exceptions.FormatError, if 'rsakey_dict' does not have
-    the correct format.
-
-    TypeError, if a private key is not defined for 'rsakey_dict'.
-
-  <Side Effects>
-    None.
-
-  <Returns>
-    Signature dictionary conformant to
-    securesystemslib.formats.SIGNATURE_SCHEMA.  Has the form:
-    {'keyid': keyid, 'method': 'evp', 'sig': sig}
-  """
-
-  # We need 'signed' in canonical JSON format to generate
-  # the 'method' and 'sig' fields of the signature.
-  signed = sslib_formats.encode_canonical(signed).encode('utf-8')
-
-  # Generate the RSA signature.
-  # Raises securesystemslib.exceptions.FormatError and TypeError.
-  signature = sslib_keys.create_signature(rsakey_dict, signed)
-
-  return signature
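A round-trip sketch of generate_rsa_signature(): sign a dict and check the signature directly with securesystemslib, bypassing the keydb/roledb registration that verify() requires (the '_type' value is illustrative):

    from securesystemslib import formats as sslib_formats
    from securesystemslib import keys as sslib_keys
    from tuf import sig

    # Generate an in-memory RSA key and some data to sign.
    rsakey = sslib_keys.generate_rsa_key(bits=2048)
    signed = {'_type': 'example'}

    signature = sig.generate_rsa_signature(signed, rsakey)

    # generate_rsa_signature() signed the canonical JSON encoding, so verify
    # against the same bytes.
    canonical = sslib_formats.encode_canonical(signed).encode('utf-8')
    assert sslib_keys.verify_signature(rsakey, signature, canonical)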