diff --git a/docs/apiref/devices.rst b/docs/apiref/devices.rst
index ac228213..fc602fb9 100644
--- a/docs/apiref/devices.rst
+++ b/docs/apiref/devices.rst
@@ -230,10 +230,12 @@ This will schedule a job to send the configuration to the device.
 Initialize check
 ----------------
 
-Before initializing a new CORE or DIST device you can run a pre-check API call.
-This will check that compatible LLDP neighbors are found and that the
-interfaces facing these neighbors are set to the correct ifclass as well as
-some basic device state checks.
+Before initializing a new device you can run a pre-check API call. This will
+perform some basic device state checks and check that compatible LLDP
+neighbors are found. For access devices it will try to find a compatible
+mgmtdomain, and for core/dist devices it will check that the interfaces facing
+neighbors are set to the correct ifclass. It is possible that the init will
+fail even if the initcheck passed.
 
 To test if a device is compatible for DIST ZTP run:
 
@@ -280,7 +282,9 @@ actually not compatible for DIST ZTP at the moment. We did find a compatible
 linknet, but there were not enough neighboring devices of the correct device
 type found. If you want to perform some non-standard configuration like trying
 ZTP with just one neighbor you can manually specify what neighbors you expect
-to see instead.
+to see instead ("neighbors": ["core1"]). Other arguments that can be passed
+to device_init should also be valid here, like "mlag_peer_id" and
+"mlag_peer_new_hostname" for access MLAG pairs.
 
 If the checks can not be performed at all, like when the device is not found
 or an invalid device type is specified the API call will return a 400 or 500
diff --git a/docs/howto/index.rst b/docs/howto/index.rst
index 939456e2..8d571ce7 100644
--- a/docs/howto/index.rst
+++ b/docs/howto/index.rst
@@ -112,9 +112,11 @@ container at the initial steps of the process, and also logs from the API
 container at later stages of the process. If the device gets stuck in the
 DHCP_BOOT process for example, it probably means the API can not log in to the
 device using the credentials and IP address saved in the database. The API
-will retry connecting to the device 5 times with increasing delay between
-each attempt. If you want to trigger more retries at a later point you can manually
-call the discover_device API call and send the MAC and DHCP IP of the device.
+will retry connecting to the device 3 times with increasing delay between
+each attempt. If you want to trigger more retries at a later point you can
+manually call the discover_device API endpoint with the MAC and DHCP IP of the
+device. New attempts to discover the device will also be made when the DHCP
+lease is renewed or reacquired.
Zero-touch provisioning of fabric switch diff --git a/src/cnaas_nms/api/device.py b/src/cnaas_nms/api/device.py index 93c60354..c61f8fee 100644 --- a/src/cnaas_nms/api/device.py +++ b/src/cnaas_nms/api/device.py @@ -11,6 +11,7 @@ import cnaas_nms.confpush.sync_devices import cnaas_nms.confpush.underlay import cnaas_nms.confpush.get +import cnaas_nms.confpush.update from cnaas_nms.confpush.nornir_helper import cnaas_init, inventory_selector from cnaas_nms.api.generic import build_filter, empty_result from cnaas_nms.db.device import Device, DeviceState, DeviceType @@ -380,45 +381,87 @@ def post(self, device_id: int): parsed_args = DeviceInitApi.arg_check(device_id, json_data) target_devtype = DeviceType[parsed_args['device_type']] target_hostname = parsed_args['new_hostname'] + mlag_peer_target_hostname: Optional[str] = None + mlag_peer_id: Optional[int] = None + mlag_peer_dev: Optional[Device] = None + if 'mlag_peer_id' in parsed_args and 'mlag_peer_new_hostname' in parsed_args: + mlag_peer_target_hostname = parsed_args['mlag_peer_new_hostname'] + mlag_peer_id = parsed_args['mlag_peer_id'] except ValueError as e: - return empty_result(status='error', data=str(e)), 400 + return empty_result(status='error', + data="Error parsing arguments: {}".format(e)), 400 with sqla_session() as session: try: dev = cnaas_nms.confpush.init_device.pre_init_checks(session, device_id) except ValueError as e: - return empty_result(status='error', data=str(e)), 400 + return empty_result(status='error', + data="ValueError in pre_init_checks: {}".format(e)), 400 except Exception as e: - return empty_result(status='error', data=str(e)), 500 + return empty_result(status='error', + data="Exception in pre_init_checks: {}".format(e)), 500 + + if mlag_peer_id: + try: + mlag_peer_dev = cnaas_nms.confpush.init_device.pre_init_checks( + session, mlag_peer_id) + except ValueError as e: + return empty_result(status='error', + data="ValueError in pre_init_checks: {}".format(e)), 400 + except Exception as e: + return empty_result(status='error', + data="Exception in pre_init_checks: {}".format(e)), 500 try: - ret['linknets'] = cnaas_nms.confpush.get.update_linknets( + ret['linknets'] = cnaas_nms.confpush.update.update_linknets( session, hostname=dev.hostname, devtype=target_devtype, ztp_hostname=target_hostname, dry_run=True ) + if mlag_peer_dev: + ret['linknets'] += cnaas_nms.confpush.update.update_linknets( + session, + hostname=mlag_peer_dev.hostname, + devtype=target_devtype, + ztp_hostname=mlag_peer_target_hostname, + dry_run=True + ) ret['linknets_compatible'] = True except ValueError as e: ret['linknets_compatible'] = False ret['linknets_error'] = str(e) except Exception as e: - return empty_result(status='error', data=str(e)), 500 + return empty_result(status='error', + data="Exception in update_linknets: {}".format(e)), 500 try: if 'linknets' in ret: ret['neighbors'] = cnaas_nms.confpush.init_device.pre_init_check_neighbors( session, dev, target_devtype, - ret['linknets'], parsed_args['neighbors']) + ret['linknets'], parsed_args['neighbors'], mlag_peer_dev) ret['neighbors_compatible'] = True + else: + ret['neighbors_compatible'] = False + ret['neighbors_error'] = "No linknets found" except (ValueError, cnaas_nms.confpush.init_device.InitVerificationError) as e: ret['neighbors_compatible'] = False ret['neighbors_error'] = str(e) except Exception as e: - return empty_result(status='error', data=str(e)), 500 + return empty_result( + status='error', + data="Exception in pre_init_check_neighbors: {}".format(e)), 500 + + if 
mlag_peer_dev: + try: + ret['mlag_compatible'] = mlag_peer_dev.hostname in ret['neighbors'] + except Exception: + ret['mlag_compatible'] = False ret['parsed_args'] = parsed_args + if mlag_peer_id and not ret['mlag_compatible']: + ret['compatible'] = False if ret['linknets_compatible'] and ret['neighbors_compatible']: ret['compatible'] = True else: diff --git a/src/cnaas_nms/api/linknet.py b/src/cnaas_nms/api/linknet.py index a99de46c..916e746c 100644 --- a/src/cnaas_nms/api/linknet.py +++ b/src/cnaas_nms/api/linknet.py @@ -1,11 +1,12 @@ from flask import request from flask_restx import Resource, Namespace, fields from flask_jwt_extended import jwt_required +from ipaddress import IPv4Network from cnaas_nms.api.generic import empty_result from cnaas_nms.db.session import sqla_session from cnaas_nms.db.linknet import Linknet -from cnaas_nms.db.device import Device +from cnaas_nms.db.device import Device, DeviceType from cnaas_nms.confpush.underlay import find_free_infra_linknet from cnaas_nms.version import __api_version__ @@ -42,11 +43,15 @@ def post(self): if 'device_a' in json_data: if not Device.valid_hostname(json_data['device_a']): errors.append("Invalid hostname specified for device_a") + else: + hostname_a = json_data['device_a'] else: errors.append("Required field hostname_a not found") if 'device_b' in json_data: if not Device.valid_hostname(json_data['device_b']): errors.append("Invalid hostname specified for device_b") + else: + hostname_b = json_data['device_b'] else: errors.append("Required field hostname_b not found") if 'device_a_port' not in json_data: @@ -54,15 +59,50 @@ def post(self): if 'device_b_port' not in json_data: errors.append("Required field device_b_port not found") + new_prefix = None + if 'prefix' in json_data: + if json_data['prefix']: + try: + new_prefix = IPv4Network(json_data['prefix']) + except Exception as e: + errors.append("Invalid prefix: {}".format(e)) + if errors: return empty_result(status='error', data=errors), 400 with sqla_session() as session: + dev_a: Device = session.query(Device).\ + filter(Device.hostname == hostname_a).one_or_none() + if not dev_a: + return empty_result( + status='error', + data=f"Hostname '{hostname_a}' not found or is in invalid state" + ), 400 + + dev_b: Device = session.query(Device). 
\ + filter(Device.hostname == hostname_b).one_or_none() + if not dev_b: + return empty_result( + status='error', + data=f"Hostname '{hostname_b}' not found or is in invalid state" + ), 400 + + # check if we need an ip prefix for the linknet + ip_linknet_devtypes = [DeviceType.CORE, DeviceType.DIST] + if dev_a.device_type in ip_linknet_devtypes and \ + dev_b.device_type in ip_linknet_devtypes: + if not new_prefix: + new_prefix = find_free_infra_linknet(session) + if not new_prefix: + return empty_result( + status='error', + data="Device types requires IP linknets, but no prefix could be found" + ), 400 + try: - new_prefix = find_free_infra_linknet(session) new_linknet = Linknet.create_linknet( - session, json_data['device_a'], json_data['device_a_port'], - json_data['device_b'], json_data['device_b_port'], new_prefix) + session, hostname_a, json_data['device_a_port'], + hostname_b, json_data['device_b_port'], new_prefix) session.add(new_linknet) session.commit() data = new_linknet.as_dict() diff --git a/src/cnaas_nms/confpush/get.py b/src/cnaas_nms/confpush/get.py index ac4cb77a..e9c377f4 100644 --- a/src/cnaas_nms/confpush/get.py +++ b/src/cnaas_nms/confpush/get.py @@ -2,7 +2,7 @@ import re import hashlib -from typing import Optional, Tuple, List, Dict +from typing import Optional, List, Dict from nornir.core.filter import F from nornir.core.task import AggregatedResult @@ -11,12 +11,9 @@ import cnaas_nms.confpush.nornir_helper from cnaas_nms.db.session import sqla_session -from cnaas_nms.db.device import Device, DeviceType, DeviceState -from cnaas_nms.db.linknet import Linknet +from cnaas_nms.db.device import Device, DeviceType from cnaas_nms.tools.log import get_logger from cnaas_nms.db.interface import Interface, InterfaceConfigType -from cnaas_nms.confpush.underlay import find_free_infra_linknet -from cnaas_nms.db.settings import get_settings def get_inventory(): @@ -93,7 +90,9 @@ def get_neighbors(hostname: Optional[str] = None, group: Optional[str] = None)\ return result -def get_uplinks(session, hostname: str, recheck: bool = False) -> Dict[str, str]: +def get_uplinks(session, hostname: str, recheck: bool = False, + neighbors: Optional[List[Device]] = None, + linknets = None) -> Dict[str, str]: """Returns dict with mapping of interface -> neighbor hostname""" logger = get_logger() uplinks = {} @@ -116,7 +115,10 @@ def get_uplinks(session, hostname: str, recheck: bool = False) -> Dict[str, str] return uplinks neighbor_d: Device - for neighbor_d in dev.get_neighbors(session): + if not neighbors: + neighbors = dev.get_neighbors(session) + + for neighbor_d in neighbors: if neighbor_d.device_type == DeviceType.DIST: local_if = dev.get_neighbor_local_ifname(session, neighbor_d) # Neighbor interface ifclass is already verified in @@ -217,48 +219,6 @@ def get_interfacedb_ifs(session, hostname: str) -> List[str]: return ret -def update_inventory(hostname: str, site='default') -> dict: - """Update CMDB inventory with information gathered from device. - - Args: - hostname (str): Hostname of device to update - - Returns: - python dict with any differances of update - - Raises: - napalm.base.exceptions.ConnectionException: Can't connect to specified device - """ - # TODO: Handle napalm.base.exceptions.ConnectionException ? 
- result = get_facts(hostname=hostname)[hostname][0] - if result.failed: - raise Exception - facts = result.result['facts'] - with sqla_session() as session: - d = session.query(Device).\ - filter(Device.hostname == hostname).\ - one() - attr_map = { - # Map NAPALM getfacts name -> device.Device member name - 'vendor': 'vendor', - 'model': 'model', - 'os_version': 'os_version', - 'serial_number': 'serial', - } - diff = {} - # Update any attributes that has changed, save diff - for dict_key, obj_mem in attr_map.items(): - obj_data = d.__getattribute__(obj_mem) - if facts[dict_key] and obj_data != facts[dict_key]: - diff[obj_mem] = {'old': obj_data, - 'new': facts[dict_key] - } - d.__setattr__(obj_mem, facts[dict_key]) - d.last_seen = datetime.datetime.now() - session.commit() - return diff - - def verify_peer_iftype(local_hostname: str, local_devtype: DeviceType, local_device_settings: dict, local_if: str, remote_hostname: str, remote_devtype: DeviceType, @@ -308,131 +268,3 @@ def verify_peer_iftype(local_hostname: str, local_devtype: DeviceType, "{} {} ifclass: {}".format(remote_hostname, intf['name'], intf['ifclass'])) - - -def update_linknets(session, hostname: str, devtype: DeviceType, - ztp_hostname: Optional[str] = None, dry_run: bool = False): - """Update linknet data for specified device using LLDP neighbor data. - """ - logger = get_logger() - result = get_neighbors(hostname=hostname)[hostname][0] - if result.failed: - raise Exception - neighbors = result.result['lldp_neighbors'] - if ztp_hostname: - settings_hostname = ztp_hostname - else: - settings_hostname = hostname - - ret = [] - - local_device_inst: Device = session.query(Device).filter(Device.hostname == hostname).one() - logger.debug("Updating linknets for device {} of type {}...".format( - local_device_inst.id, devtype.name)) - - for local_if, data in neighbors.items(): - logger.debug(f"Local: {local_if}, remote: {data[0]['hostname']} {data[0]['port']}") - remote_device_inst: Device = session.query(Device).\ - filter(Device.hostname == data[0]['hostname']).one_or_none() - if not remote_device_inst: - logger.debug(f"Unknown neighbor device, ignoring: {data[0]['hostname']}") - continue - if remote_device_inst.state in [DeviceState.DISCOVERED, DeviceState.INIT]: - # In case of MLAG init the peer does not have the correct devtype set yet, - # use same devtype as local device instead - remote_devtype = devtype - elif remote_device_inst.state not in [DeviceState.MANAGED, DeviceState.UNMANAGED]: - logger.debug("Neighbor device has invalid state, ignoring: {}".format( - data[0]['hostname'])) - continue - else: - remote_devtype = remote_device_inst.device_type - - logger.debug(f"Remote device found, device id: {remote_device_inst.id}") - - local_device_settings, _ = get_settings(settings_hostname, - devtype, - local_device_inst.model - ) - remote_device_settings, _ = get_settings(remote_device_inst.hostname, - remote_devtype, - remote_device_inst.model - ) - - verify_peer_iftype(hostname, devtype, - local_device_settings, local_if, - remote_device_inst.hostname, remote_device_inst.device_type, - remote_device_settings, data[0]['port']) - - # Check if linknet object already exists in database - local_devid = local_device_inst.id - check_linknet = session.query(Linknet).\ - filter( - ((Linknet.device_a_id == local_devid) & (Linknet.device_a_port == local_if)) - | - ((Linknet.device_b_id == local_devid) & (Linknet.device_b_port == local_if)) - | - ((Linknet.device_a_id == remote_device_inst.id) & - (Linknet.device_a_port == 
data[0]['port'])) - | - ((Linknet.device_b_id == remote_device_inst.id) & - (Linknet.device_b_port == data[0]['port'])) - ).one_or_none() - if check_linknet: - logger.debug(f"Found existing linknet id: {check_linknet.id}") - if ( - ( - check_linknet.device_a_id == local_devid - and check_linknet.device_a_port == local_if - and check_linknet.device_b_id == remote_device_inst.id - and check_linknet.device_b_port == data[0]['port'] - ) - or - ( - check_linknet.device_a_id == local_devid - and check_linknet.device_a_port == local_if - and check_linknet.device_b_id == remote_device_inst.id - and check_linknet.device_b_port == data[0]['port'] - ) - ): - # All info is the same, no update required - continue - else: - # TODO: update instead of delete+new insert? - if not dry_run: - session.delete(check_linknet) - session.commit() - - if devtype in [DeviceType.CORE, DeviceType.DIST] and \ - remote_device_inst.device_type in [DeviceType.CORE, DeviceType.DIST]: - ipv4_network = find_free_infra_linknet(session) - else: - ipv4_network = None - new_link = Linknet.create_linknet( - session, - hostname_a=local_device_inst.hostname, - interface_a=local_if, - hostname_b=remote_device_inst.hostname, - interface_b=data[0]['port'], - ipv4_network=ipv4_network, - strict_check=not dry_run # Don't do strict check if this is a dry_run - ) - if not dry_run: - local_device_inst.synchronized = False - remote_device_inst.synchronized = False - session.add(new_link) - session.commit() - else: - # Make sure linknet object is not added to session because of foreign key load - session.expunge(new_link) - # Make return data pretty - ret_dict = { - 'device_a_hostname': local_device_inst.hostname, - 'device_b_hostname': remote_device_inst.hostname, - **new_link.as_dict() - } - del ret_dict['id'] - del ret_dict['device_a_id'] - del ret_dict['device_b_id'] - ret.append({k: ret_dict[k] for k in sorted(ret_dict)}) - return ret diff --git a/src/cnaas_nms/confpush/init_device.py b/src/cnaas_nms/confpush/init_device.py index 482b38e2..c1b495c1 100644 --- a/src/cnaas_nms/confpush/init_device.py +++ b/src/cnaas_nms/confpush/init_device.py @@ -4,7 +4,6 @@ from nornir_napalm.plugins.tasks import napalm_configure, napalm_get from nornir_jinja2.plugins.tasks import template_file from nornir_utils.plugins.functions import print_result -from napalm.base.exceptions import SessionLockedException from apscheduler.job import Job import yaml import os @@ -19,7 +18,7 @@ from cnaas_nms.scheduler.scheduler import Scheduler from cnaas_nms.scheduler.wrapper import job_wrapper from cnaas_nms.confpush.nornir_helper import NornirJobResult, cnaas_jinja_env -from cnaas_nms.confpush.update import update_interfacedb_worker +from cnaas_nms.confpush.update import update_interfacedb_worker, update_linknets, set_facts from cnaas_nms.confpush.sync_devices import populate_device_vars, confcheck_devices, \ sync_devices from cnaas_nms.db.git import RepoStructureException @@ -140,8 +139,20 @@ def pre_init_checks(session, device_id) -> Device: def pre_init_check_neighbors(session, dev: Device, devtype: DeviceType, linknets: List[dict], - expected_neighbors: Optional[List[str]] = None) -> List[str]: + expected_neighbors: Optional[List[str]] = None, + mlag_peer_dev: Optional[Device] = None) -> List[str]: + """Check for compatible neighbors + Args: + session: SQLAlchemy session + dev: Device object to check + devtype: The target device type (not the same as current during init) + linknets: List of linknets to check for compatible neighbors + expected_neighbors: 
Optional list to manually specify neighbors + Returns: + List of compatible neighbor hostnames + """ logger = get_logger() + verified_neighbors = [] if expected_neighbors is not None and len(expected_neighbors) == 0: logger.debug("expected_neighbors explicitly set to empty list, skipping neighbor checks") return [] @@ -149,9 +160,40 @@ def pre_init_check_neighbors(session, dev: Device, devtype: DeviceType, raise Exception("No linknets were specified to check_neighbors") if devtype == DeviceType.ACCESS: - pass + neighbors = [] + uplinks = [] + for linknet in linknets: + if linknet['device_a_hostname'] == linknet['device_b_hostname']: + continue # don't add loopback cables as neighbors + elif linknet['device_a_hostname'] == dev.hostname: + if mlag_peer_dev and linknet['device_b_hostname'] == mlag_peer_dev.hostname: + continue # only add mlag peer linknet in one direction to avoid duplicate + else: + neighbor = linknet['device_b_hostname'] + elif linknet['device_b_hostname'] == dev.hostname: + neighbor = linknet['device_a_hostname'] + elif mlag_peer_dev: + if linknet['device_a_hostname'] == mlag_peer_dev.hostname: + neighbor = linknet['device_b_hostname'] + elif linknet['device_b_hostname'] == mlag_peer_dev.hostname: + neighbor = linknet['device_a_hostname'] + else: + raise Exception("Own hostname not found in linknet") + neighbor_dev: Device = session.query(Device). \ + filter(Device.hostname == neighbor).one_or_none() + if not neighbor_dev: + raise Exception("Neighbor device {} not found in database".format(neighbor)) + if neighbor_dev.device_type in [DeviceType.ACCESS, DeviceType.DIST]: + uplinks.append(neighbor) + + neighbors.append(neighbor) + try: + cnaas_nms.db.helper.find_mgmtdomain(session, uplinks) + except Exception as e: + raise InitVerificationError(str(e)) + else: + verified_neighbors = neighbors elif devtype in [DeviceType.CORE, DeviceType.DIST]: - verified_neighbors = [] for linknet in linknets: if linknet['device_a_hostname'] == dev.hostname: neighbor = linknet['device_b_hostname'] @@ -278,13 +320,12 @@ def init_access_device_step1(device_id: int, new_hostname: str, dev = pre_init_checks(session, device_id) # update linknets using LLDP data - cnaas_nms.confpush.get.update_linknets(session, dev.hostname, DeviceType.ACCESS) + update_linknets(session, dev.hostname, DeviceType.ACCESS) # If this is the first device in an MLAG pair if mlag_peer_id and mlag_peer_new_hostname: mlag_peer_dev = pre_init_checks(session, mlag_peer_id) - cnaas_nms.confpush.get.update_linknets(session, mlag_peer_dev.hostname, - DeviceType.ACCESS) + update_linknets(session, mlag_peer_dev.hostname, DeviceType.ACCESS) update_interfacedb_worker(session, dev, replace=True, delete_all=False, mlag_peer_hostname=mlag_peer_dev.hostname) update_interfacedb_worker(session, mlag_peer_dev, replace=True, delete_all=False, @@ -451,7 +492,7 @@ def init_fabric_device_step1(device_id: int, new_hostname: str, device_type: str dev = pre_init_checks(session, device_id) # Test update of linknets using LLDP data - linknets = cnaas_nms.confpush.get.update_linknets( + linknets = update_linknets( session, dev.hostname, devtype, ztp_hostname=new_hostname, dry_run=True) try: @@ -469,7 +510,7 @@ def init_fabric_device_step1(device_id: int, new_hostname: str, device_type: str session.commit() # If neighbor check works, commit new linknets # This will also mark neighbors as unsynced - linknets = cnaas_nms.confpush.get.update_linknets( + linknets = update_linknets( session, dev.hostname, devtype, ztp_hostname=new_hostname, dry_run=False) 
logger.debug("New linknets for INIT of {} created: {}".format( new_hostname, linknets @@ -561,7 +602,7 @@ def init_fabric_device_step1(device_id: int, new_hostname: str, device_type: str def schedule_init_device_step2(device_id: int, iteration: int, - scheduled_by: str) -> Optional[Job]: + scheduled_by: str) -> Optional[int]: max_iterations = 2 if iteration > 0 and iteration < max_iterations: scheduler = Scheduler() @@ -616,10 +657,7 @@ def init_device_step2(device_id: int, iteration: int = -1, dev: Device = session.query(Device).filter(Device.id == device_id).one() dev.state = DeviceState.MANAGED dev.synchronized = False - dev.serial = facts['serial_number'][:64] - dev.vendor = facts['vendor'][:64] - dev.model = facts['model'][:64] - dev.os_version = facts['os_version'][:64] + set_facts(dev, facts) management_ip = dev.management_ip dev.dhcp_ip = None diff --git a/src/cnaas_nms/confpush/tests/test_get.py b/src/cnaas_nms/confpush/tests/test_get.py index 33a26624..0d656889 100644 --- a/src/cnaas_nms/confpush/tests/test_get.py +++ b/src/cnaas_nms/confpush/tests/test_get.py @@ -6,6 +6,7 @@ import yaml import os +import cnaas_nms.confpush.update from cnaas_nms.db.session import sqla_session from cnaas_nms.db.device import DeviceType @@ -32,13 +33,9 @@ def test_get_facts(self): result = cnaas_nms.confpush.get.get_facts(group='S_DHCP_BOOT') pprint.pprint(result) - def test_update_inventory(self): - diff = cnaas_nms.confpush.get.update_inventory(self.testdata['update_hostname']) - pprint.pprint(diff) - def test_update_links(self): with sqla_session() as session: - new_links = cnaas_nms.confpush.get.update_linknets( + new_links = cnaas_nms.confpush.update.update_linknets( session, self.testdata['update_hostname'], DeviceType.ACCESS) pprint.pprint(new_links) diff --git a/src/cnaas_nms/confpush/update.py b/src/cnaas_nms/confpush/update.py index d34fffee..57831edb 100644 --- a/src/cnaas_nms/confpush/update.py +++ b/src/cnaas_nms/confpush/update.py @@ -2,11 +2,14 @@ from nornir_napalm.plugins.tasks import napalm_get +from cnaas_nms.confpush.underlay import find_free_infra_linknet +from cnaas_nms.db.linknet import Linknet from cnaas_nms.db.session import sqla_session from cnaas_nms.db.device import Device, DeviceType, DeviceState from cnaas_nms.db.interface import Interface, InterfaceConfigType from cnaas_nms.confpush.get import get_interfaces_names, get_uplinks, \ - filter_interfaces, get_mlag_ifs + filter_interfaces, get_mlag_ifs, get_neighbors, verify_peer_iftype +from cnaas_nms.db.settings import get_settings from cnaas_nms.tools.log import get_logger from cnaas_nms.scheduler.wrapper import job_wrapper from cnaas_nms.confpush.nornir_helper import NornirJobResult @@ -132,6 +135,29 @@ def reset_interfacedb(hostname: str): return ret +def set_facts(dev: Device, facts: dict) -> dict: + attr_map = { + # Map NAPALM getfacts name -> device.Device member name + 'vendor': 'vendor', + 'model': 'model', + 'os_version': 'os_version', + 'serial_number': 'serial', + } + diff = {} + # Update any attributes that has changed + for dict_key, obj_member in attr_map.items(): + obj_data = dev.__getattribute__(obj_member) + maxlen = Device.__dict__[obj_member].property.columns[0].type.length + fact_data = facts[dict_key][:maxlen] + if fact_data and obj_data != fact_data: + diff[obj_member] = { + 'old': obj_data, + 'new': fact_data + } + dev.__setattr__(obj_member, fact_data) + return diff + + @job_wrapper def update_facts(hostname: str, job_id: Optional[str] = None, @@ -159,11 +185,9 @@ def update_facts(hostname: str, 
facts = nrresult[hostname][0].result['facts'] with sqla_session() as session: dev: Device = session.query(Device).filter(Device.hostname == hostname).one() - dev.serial = facts['serial_number'][:64] - dev.vendor = facts['vendor'][:64] - dev.model = facts['model'][:64] - dev.os_version = facts['os_version'][:64] - logger.debug("Updating facts for device {}: {}, {}, {}, {}".format( + diff = set_facts(dev, facts) + + logger.debug("Updating facts for device {}, new values: {}, {}, {}, {}".format( hostname, facts['serial_number'], facts['vendor'], facts['model'], facts['os_version'] )) except Exception as e: @@ -173,4 +197,132 @@ def update_facts(hostname: str, logger.debug("Get facts nrresult for hostname {}: {}".format(hostname, nrresult)) raise e - return NornirJobResult(nrresult=nrresult) + return DictJobResult(result={"diff": diff}) + + +def update_linknets(session, hostname: str, devtype: DeviceType, + ztp_hostname: Optional[str] = None, dry_run: bool = False) -> List[dict]: + """Update linknet data for specified device using LLDP neighbor data. + """ + logger = get_logger() + result = get_neighbors(hostname=hostname)[hostname][0] + if result.failed: + raise Exception("Could not get LLDP neighbors for {}".format(hostname)) + neighbors = result.result['lldp_neighbors'] + if ztp_hostname: + settings_hostname = ztp_hostname + else: + settings_hostname = hostname + + ret = [] + + local_device_inst: Device = session.query(Device).filter(Device.hostname == hostname).one() + logger.debug("Updating linknets for device {} of type {}...".format( + local_device_inst.id, devtype.name)) + + for local_if, data in neighbors.items(): + logger.debug(f"Local: {local_if}, remote: {data[0]['hostname']} {data[0]['port']}") + remote_device_inst: Device = session.query(Device).\ + filter(Device.hostname == data[0]['hostname']).one_or_none() + if not remote_device_inst: + logger.debug(f"Unknown neighbor device, ignoring: {data[0]['hostname']}") + continue + if remote_device_inst.state in [DeviceState.DISCOVERED, DeviceState.INIT]: + # In case of MLAG init the peer does not have the correct devtype set yet, + # use same devtype as local device instead + remote_devtype = devtype + elif remote_device_inst.state not in [DeviceState.MANAGED, DeviceState.UNMANAGED]: + logger.debug("Neighbor device has invalid state, ignoring: {}".format( + data[0]['hostname'])) + continue + else: + remote_devtype = remote_device_inst.device_type + + logger.debug(f"Remote device found, device id: {remote_device_inst.id}") + + local_device_settings, _ = get_settings(settings_hostname, + devtype, + local_device_inst.model + ) + remote_device_settings, _ = get_settings(remote_device_inst.hostname, + remote_devtype, + remote_device_inst.model + ) + + verify_peer_iftype(hostname, devtype, + local_device_settings, local_if, + remote_device_inst.hostname, remote_device_inst.device_type, + remote_device_settings, data[0]['port']) + + # Check if linknet object already exists in database + local_devid = local_device_inst.id + check_linknet = session.query(Linknet).\ + filter( + ((Linknet.device_a_id == local_devid) & (Linknet.device_a_port == local_if)) + | + ((Linknet.device_b_id == local_devid) & (Linknet.device_b_port == local_if)) + | + ((Linknet.device_a_id == remote_device_inst.id) & + (Linknet.device_a_port == data[0]['port'])) + | + ((Linknet.device_b_id == remote_device_inst.id) & + (Linknet.device_b_port == data[0]['port'])) + ).one_or_none() + if check_linknet: + logger.debug(f"Found existing linknet id: {check_linknet.id}") + if ( + 
( + check_linknet.device_a_id == local_devid + and check_linknet.device_a_port == local_if + and check_linknet.device_b_id == remote_device_inst.id + and check_linknet.device_b_port == data[0]['port'] + ) + or + ( + check_linknet.device_a_id == local_devid + and check_linknet.device_a_port == local_if + and check_linknet.device_b_id == remote_device_inst.id + and check_linknet.device_b_port == data[0]['port'] + ) + ): + # All info is the same, no update required + continue + else: + # TODO: update instead of delete+new insert? + if not dry_run: + session.delete(check_linknet) + session.commit() + + if devtype in [DeviceType.CORE, DeviceType.DIST] and \ + remote_device_inst.device_type in [DeviceType.CORE, DeviceType.DIST]: + ipv4_network = find_free_infra_linknet(session) + else: + ipv4_network = None + new_link = Linknet.create_linknet( + session, + hostname_a=local_device_inst.hostname, + interface_a=local_if, + hostname_b=remote_device_inst.hostname, + interface_b=data[0]['port'], + ipv4_network=ipv4_network, + strict_check=not dry_run # Don't do strict check if this is a dry_run + ) + if not dry_run: + local_device_inst.synchronized = False + remote_device_inst.synchronized = False + session.add(new_link) + session.commit() + else: + # Make sure linknet object is not added to session because of foreign key load + session.expunge(new_link) + # Make return data pretty + ret_dict = { + 'device_a_hostname': local_device_inst.hostname, + 'device_b_hostname': remote_device_inst.hostname, + **new_link.as_dict() + } + del ret_dict['id'] + del ret_dict['device_a_id'] + del ret_dict['device_b_id'] + ret.append({k: ret_dict[k] for k in sorted(ret_dict)}) + return ret diff --git a/src/cnaas_nms/db/helper.py b/src/cnaas_nms/db/helper.py index e603832a..fdbd478d 100644 --- a/src/cnaas_nms/db/helper.py +++ b/src/cnaas_nms/db/helper.py @@ -29,7 +29,7 @@ def find_mgmtdomain(session, hostnames: List[str]) -> Optional[Mgmtdomain]: """ if not isinstance(hostnames, list) or not len(hostnames) == 2: raise ValueError( - "hostnames argument must be a list with two device hostnames, got: {}".format( + "Two uplink devices are required to find a compatible mgmtdomain, got: {}".format( hostnames )) for hostname in hostnames: diff --git a/test/integrationtests.py b/test/integrationtests.py index 9acfc02c..7633b950 100644 --- a/test/integrationtests.py +++ b/test/integrationtests.py @@ -132,10 +132,23 @@ def test_02_ztp(self): hostname, device_id = self.wait_for_discovered_device() print("Discovered hostname, id: {}, {}".format(hostname, device_id)) self.assertTrue(hostname, "No device in state discovered found for ZTP") + data = {"hostname": "eosaccess", "device_type": "ACCESS"} + r = requests.post( + f'{URL}/api/v1.0/device_initcheck/{device_id}', + headers=AUTH_HEADER, + json=data, + verify=TLS_VERIFY + ) + self.assertEqual(r.status_code, 200, "Failed device_initcheck, http status") + self.assertEqual(r.json()['status'], 'success', + "Failed device_initcheck, returned unsuccessful") +# this check fails when running integrationtests with one dist because of faked neighbor: +# self.assertTrue(r.json()['data']['compatible'], "initcheck was not compatible") + r = requests.post( f'{URL}/api/v1.0/device_init/{device_id}', headers=AUTH_HEADER, - json={"hostname": "eosaccess", "device_type": "ACCESS"}, + json=data, verify=TLS_VERIFY ) self.assertEqual(r.status_code, 200, "Failed to start device_init") @@ -278,9 +291,9 @@ def test_11_update_facts_dist(self): verify=TLS_VERIFY ) self.assertEqual(r.status_code, 200, 
"Failed to do update facts for dist") - restore_job_id = r.json()['job_id'] - job = self.check_jobid(restore_job_id) - self.assertFalse(job['result']['devices'][hostname]['failed']) + update_facts_job_id = r.json()['job_id'] + job = self.check_jobid(update_facts_job_id) + self.assertIn("diff", job['result']) def test_12_abort_running_job(self): data = {