From bd3c0c6d3ac66cdfcf57cd4b288077322d463bd5 Mon Sep 17 00:00:00 2001 From: Sonic Build Admin Date: Sun, 9 Nov 2025 18:45:12 +0000 Subject: [PATCH] Generic Configuration Updater (GCU) performance enhancements ***Important***: Please review each commit (including commit message) individually. Looking at the patch-set as a whole may cause confusion. #### What I did Generic Configuration Updater is extremely slow; using the Python profiler it was possible to determine the worst offenders where changes could be made without affecting the overall algorithm and HLD design documentation. Brief overview of changes: * Prevent copy.deepcopy() calls where possible * Don't run validation twice back to back * Move configdb path <> xpath conversion logic to sonic-yang-mgmt where it belongs and enhance it to support schema conversion (not just data) and add caching. * Sort table keys by the number of schema backlinks and must statements for the node to try to better guess the right order of the patches to generate rather than doing it in alphabetical order which is likely to cause validation failures. * Add ability to group patches together in some commits where it's known they will not cause issues; these are things like grouping parameter updates under the same key. * When applying changes, do not re-read the configuration from redis twice between each applied patch (this is **extremely** slow, and actually hid a race condition). We are mutating the configuration and a lock is held so we know the expected before and after. There is still a final validation to ensure something didn't go sideways. Dependencies: * sonic-yang-mgmt enhancements: https://github.com/sonic-net/sonic-buildimage/pull/22254 * sonic-yang-mgmt parse uses/grouping: https://github.com/sonic-net/sonic-buildimage/pull/21907 * sonic-utilities rely on sonic-yang-mgmt uses/grouping handling: https://github.com/sonic-net/sonic-utilities/pull/3814 Stats below ... 
(stats need both this and the sonic-utilities PR to be relevant)... **Original Performance:** Dry Run: ``` time sudo config replace -d ./config_db.json ... real 2m51.588s user 2m23.777s sys 0m25.300s ``` Full: ``` time sudo config replace ./config_db.json ... real 14m53.772s user 12m2.376s sys 2m8.908s ``` **With Patch**: Dry Run: ``` time sudo config replace -d ./config_db.json ... real 0m59.602s user 0m56.434s sys 0m2.110s ``` Full: ``` time sudo config replace ./config_db.json ... real 1m54.303s user 0m58.482s sys 0m2.545s ``` So that's roughly 3x improvement for dry-run, and 7.5x improvement for full commit. There is room for improvement on the full commit due to a `sleep(1)` being used between each patch because of a race condition found in the prior code (that was hidden due to a costly sanity check that has been removed). #### How I did it Profiling via cProfiler #### How to verify it Run sonic-utilities test cases, they still pass #### Previous command output (if the output of a command-line utility has changed) N/A #### New command output (if the output of a command-line utility has changed) N/A Fixes https://github.com/sonic-net/sonic-buildimage/issues/22372 --- generic_config_updater/change_applier.py | 51 +- generic_config_updater/generic_updater.py | 3 +- generic_config_updater/gu_common.py | 677 ++++-------------- generic_config_updater/patch_sorter.py | 671 ++++++++++++++--- .../change_applier_test.py | 13 +- .../files/patch_sorter_test_success.json | 540 ++++---------- .../gcu_feature_patch_application_test.py | 6 +- .../generic_updater_test.py | 8 +- .../generic_config_updater/gu_common_test.py | 76 +- .../generic_config_updater/gutest_helpers.py | 45 ++ .../multiasic_change_applier_test.py | 22 +- .../patch_sorter_test.py | 237 +++--- 12 files changed, 1094 insertions(+), 1255 deletions(-) diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py index b5712d024..32593a974 100644 --- 
a/generic_config_updater/change_applier.py +++ b/generic_config_updater/change_applier.py @@ -5,11 +5,12 @@ import importlib import os import tempfile +import time from collections import defaultdict from swsscommon.swsscommon import ConfigDBConnector from sonic_py_common import multi_asic from .gu_common import GenericConfigUpdaterError, genericUpdaterLogging -from .gu_common import get_config_db_as_json +from .gu_common import JsonChange SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) UPDATER_CONF_FILE = f"{SCRIPT_DIR}/gcu_services_validator.conf.json" @@ -64,8 +65,8 @@ class DryRunChangeApplier: def __init__(self, config_wrapper): self.config_wrapper = config_wrapper - def apply(self, change): - self.config_wrapper.apply_change_to_config_db(change) + def apply(self, current_configdb: dict, change: JsonChange) -> dict: + return self.config_wrapper.apply_change_to_config_db(current_configdb, change) def remove_backend_tables_from_config(self, data): return data @@ -137,25 +138,45 @@ def _report_mismatch(self, run_data, upd_data): log_error("run_data vs expected_data: {}".format( str(jsondiff.diff(run_data, upd_data))[0:40])) - def apply(self, change): - run_data = get_config_db_as_json(self.scope) - upd_data = prune_empty_table(change.apply(copy.deepcopy(run_data))) + def apply(self, current_configdb: dict, change: JsonChange) -> dict: + run_data = current_configdb + upd_data = prune_empty_table(change.apply(run_data, in_place=False)) upd_keys = defaultdict(dict) for tbl in sorted(set(run_data.keys()).union(set(upd_data.keys()))): self._upd_data(tbl, run_data.get(tbl, {}), upd_data.get(tbl, {}), upd_keys) ret = self._services_validate(run_data, upd_data, upd_keys) - if not ret: - run_data = get_config_db_as_json(self.scope) - self.remove_backend_tables_from_config(upd_data) - self.remove_backend_tables_from_config(run_data) - if upd_data != run_data: - self._report_mismatch(run_data, upd_data) - ret = -1 - if ret: + # The above function returns 0 on 
success as it uses shell return codes + if ret != 0: log_error("Failed to apply Json change") - return ret + + # There was a sanity check in this position originally that appeared + # to be development-time code to ensure things were operating correctly. + # It would retrieve the configdb from Redis and perform transformation + # and comparison. It's not possible for the configuration to not be what + # we expect since we have a known state we are mutating with a lock. + # That said, we are leaving in the final configuration comparison in + # PatchApplier "just in case". + # + # However, this code did hide a pretty nasty race condition since there + # is no feedback loop for when config_db changes are actually consumed. + # This check would consume high CPU and would take a good amount of + # time (0.5s - 1s). + # + # The below sleep is functionally equivalent in terms of preventing the + # race condition (without the high CPU that might cause other control + # plane issues), but is of course not the proper fix. + # + # An upstream SONiC issue will be opened for the race condition, and + # until resolved leaving this comment in place for future reference. + time.sleep(1) + + # Interestingly this function returns the updated data and doesn't + # propagate an error. Maybe it should? Or are exceptions thrown + # from _upd_data on failure? 
We seem to intentionally only log on + # _services_validate() + return upd_data def remove_backend_tables_from_config(self, data): for key in self.backend_tables: diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index e8bb02180..be2124f7f 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -141,9 +141,10 @@ def apply(self, patch, sort=True): # Apply changes in order self.logger.log_notice(f"{scope}: applying {changes_len} change{'s' if changes_len != 1 else ''} " \ f"in order{':' if changes_len > 0 else '.'}") + current_config = old_config for change in changes: self.logger.log_notice(f" * {change}") - self.changeapplier.apply(change) + current_config = self.changeapplier.apply(current_config, change) # Validate config updated successfully self.logger.log_notice(f"{scope}: verifying patch updates are reflected on ConfigDB.") diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 184566e4f..0c209a563 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -11,6 +11,7 @@ import os from sonic_py_common import logger, multi_asic from enum import Enum +from functools import cmp_to_key YANG_DIR = "/usr/local/yang-models" SYSLOG_IDENTIFIER = "GenericConfigUpdater" @@ -38,8 +39,8 @@ class JsonChange: def __init__(self, patch): self.patch = patch - def apply(self, config): - return self.patch.apply(config) + def apply(self, config, in_place: bool = False): + return self.patch.apply(config, in_place) def __repr__(self): return str(self) @@ -130,9 +131,8 @@ def validate_sonic_yang_config(self, sonic_yang_as_json): sy = self.create_sonic_yang_with_loaded_models() try: + # Loading data automatically does full validation sy.loadData(config_db_as_json) - - sy.validate_data_tree() return True, None except sonic_yang.SonicYangException as ex: return False, ex @@ -145,12 +145,8 @@ def 
validate_config_db_config(self, config_db_as_json): self.validate_lanes] try: - tmp_config_db_as_json = copy.deepcopy(config_db_as_json) - - sy.loadData(tmp_config_db_as_json) - - sy.validate_data_tree() - + # Loading data automatically does full validation + sy.loadData(config_db_as_json) for supplemental_yang_validator in supplemental_yang_validators: success, error = supplemental_yang_validator(config_db_as_json) if not success: @@ -320,10 +316,10 @@ def validate_bgp_peer_group(self, config_db): def crop_tables_without_yang(self, config_db_as_json): sy = self.create_sonic_yang_with_loaded_models() - sy.jIn = copy.deepcopy(config_db_as_json) - + # Current sonic-yang-mgmt guarantees _cropConfigDB() will deep copy if + # it needs to modify. + sy.jIn = config_db_as_json sy.tablesWithOutYang = dict() - sy._cropConfigDB() return sy.jIn @@ -351,7 +347,7 @@ def create_sonic_yang_with_loaded_models(self): loaded_models_sy.loadYangModel() # This call takes a long time (100s of ms) because it reads files from disk self.sonic_yang_with_loaded_models = loaded_models_sy - return copy.copy(self.sonic_yang_with_loaded_models) + return self.sonic_yang_with_loaded_models class DryRunConfigWrapper(ConfigWrapper): # This class will simulate all read/write operations to ConfigDB on a virtual storage unit. 
@@ -360,10 +356,11 @@ def __init__(self, initial_imitated_config_db=None, scope=multi_asic.DEFAULT_NAM self.logger = genericUpdaterLogging.get_logger(title="** DryRun", print_all_to_console=True) self.imitated_config_db = copy.deepcopy(initial_imitated_config_db) - def apply_change_to_config_db(self, change): + def apply_change_to_config_db(self, current_config_db: dict, change): self._init_imitated_config_db_if_none() self.logger.log_notice(f"Would apply {change}") - self.imitated_config_db = change.apply(self.imitated_config_db) + self.imitated_config_db = change.apply(current_config_db, in_place=True) + return self.imitated_config_db def get_config_db_as_json(self): self._init_imitated_config_db_if_none() @@ -463,11 +460,17 @@ class PathAddressing: def __init__(self, config_wrapper=None): self.config_wrapper = config_wrapper - def get_path_tokens(self, path): - return JsonPointer(path).parts + @staticmethod + def get_path_tokens(path): + return sonic_yang.SonicYang.configdb_path_split(path) - def create_path(self, tokens): - return JsonPointer.from_parts(tokens).path + @staticmethod + def create_path(tokens): + return sonic_yang.SonicYang.configdb_path_join(tokens) + + @staticmethod + def get_xpath_tokens(xpath): + return sonic_yang.SonicYang.xpath_split(xpath) def has_path(self, doc, path): return self.get_from_path(doc, path) is not None @@ -478,95 +481,10 @@ def get_from_path(self, doc, path): def is_config_different(self, path, current, target): return self.get_from_path(current, path) != self.get_from_path(target, path) - def get_xpath_tokens(self, xpath): - """ - Splits the given xpath into tokens by '/'. 
- - Example: - xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode - tokens: sonic-vlan:sonic-vlan, VLAN_MEMBER, VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8'], tagging_mode - """ - if xpath == "": - raise ValueError("xpath cannot be empty") - - if xpath == "/": - return [] - - idx = 0 - tokens = [] - while idx < len(xpath): - end = self._get_xpath_token_end(idx+1, xpath) - token = xpath[idx+1:end] - tokens.append(token) - idx = end - - return tokens - - def _get_xpath_token_end(self, start, xpath): - idx = start - while idx < len(xpath): - if xpath[idx] == PathAddressing.XPATH_SEPARATOR: - break - elif xpath[idx] == "[": - idx = self._get_xpath_predicate_end(idx, xpath) - idx = idx+1 - - return idx - - def _get_xpath_predicate_end(self, start, xpath): - idx = start - while idx < len(xpath): - if xpath[idx] == "]": - break - elif xpath[idx] == "'": - idx = self._get_xpath_single_quote_str_end(idx, xpath) - elif xpath[idx] == '"': - idx = self._get_xpath_double_quote_str_end(idx, xpath) - - idx = idx+1 - - return idx - - def _get_xpath_single_quote_str_end(self, start, xpath): - idx = start+1 # skip first single quote - while idx < len(xpath): - if xpath[idx] == "'": - break - # libyang implements XPATH 1.0 which does not escape single quotes - # libyang src: https://netopeer.liberouter.org/doc/libyang/master/html/howtoxpath.html - # XPATH 1.0 src: https://www.w3.org/TR/1999/REC-xpath-19991116/#NT-Literal - idx = idx+1 - - return idx - - def _get_xpath_double_quote_str_end(self, start, xpath): - idx = start+1 # skip first single quote - while idx < len(xpath): - if xpath[idx] == '"': - break - # libyang implements XPATH 1.0 which does not escape double quotes - # libyang src: https://netopeer.liberouter.org/doc/libyang/master/html/howtoxpath.html - # XPATH 1.0 src: https://www.w3.org/TR/1999/REC-xpath-19991116/#NT-Literal - idx = idx+1 - - return idx - - def create_xpath(self, tokens): - """ - Creates an 
xpath by combining the given tokens using '/' - Example: - tokens: module, container, list[key='value'], leaf - xpath: /module/container/list[key='value']/leaf - """ - if len(tokens) == 0: - return "/" - - return f"{PathAddressing.XPATH_SEPARATOR}{PathAddressing.XPATH_SEPARATOR.join(str(t) for t in tokens)}" - def _create_sonic_yang_with_loaded_models(self): return self.config_wrapper.create_sonic_yang_with_loaded_models() - def find_ref_paths(self, path, config): + def find_ref_paths(self, paths, config, reload_config: bool = True): """ Finds the paths referencing any line under the given 'path' within the given 'config'. Example: @@ -604,25 +522,28 @@ def find_ref_paths(self, path, config): /ACL_TABLE/EVERFLOW6/ports/1 """ # TODO: Also fetch references by must statement (check similar statements) - return self._find_leafref_paths(path, config) - - def _find_leafref_paths(self, path, config): sy = self._create_sonic_yang_with_loaded_models() - tmp_config = copy.deepcopy(config) + if reload_config: + sy.loadData(config) - sy.loadData(tmp_config) + # Force to be a list + if not isinstance(paths, list): + paths = [paths] - xpath = self.convert_path_to_xpath(path, config, sy) + ref_paths = [] + ref_paths_set = set() + ref_xpaths = [] - leaf_xpaths = self._get_inner_leaf_xpaths(xpath, sy) + # Iterate across all paths fetching references + for path in paths: + xpath = self.convert_path_to_xpath(path, config, sy) - ref_xpaths = [] - for xpath in leaf_xpaths: - ref_xpaths.extend(sy.find_data_dependencies(xpath)) + leaf_xpaths = self._get_inner_leaf_xpaths(xpath, sy) + for xpath in leaf_xpaths: + ref_xpaths.extend(sy.find_data_dependencies(xpath)) - ref_paths = [] - ref_paths_set = set() + # For each xpath, convert to configdb path for ref_xpath in ref_xpaths: ref_path = self.convert_xpath_to_path(ref_xpath, config, sy) if ref_path not in ref_paths_set: @@ -648,457 +569,107 @@ def _is_leaf_node(self, node): schema = node.schema() return ly.LYS_LEAF == schema.nodetype() - 
def convert_path_to_xpath(self, path, config, sy): + def convert_path_to_xpath(self, path, config=None, sy=None): """ Converts the given JsonPatch path (i.e. JsonPointer) to XPATH. Example: path: /VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode """ - self.convert_xpath_to_path - tokens = self.get_path_tokens(path) - if len(tokens) == 0: - return self.create_xpath(tokens) - - xpath_tokens = [] - table = tokens[0] - - cmap = sy.confDbYangMap[table] - - # getting the top level element : - xpath_tokens.append(cmap['module']+":"+cmap['topLevelContainer']) - - xpath_tokens.extend(self._get_xpath_tokens_from_container(cmap['container'], 0, tokens, config)) - - return self.create_xpath(xpath_tokens) - - def _get_xpath_tokens_from_container(self, model, token_index, path_tokens, config): - token = path_tokens[token_index] - xpath_tokens = [token] - - if len(path_tokens)-1 == token_index: - return xpath_tokens - - # check if the configdb token is referring to a list - list_model = self._get_list_model(model, token_index, path_tokens) - if list_model: - new_xpath_tokens = self._get_xpath_tokens_from_list(list_model, token_index+1, path_tokens, config[path_tokens[token_index]]) - xpath_tokens.extend(new_xpath_tokens) - return xpath_tokens - - # check if it is targetting a child container - child_container_model = self._get_model(model.get('container'), path_tokens[token_index+1]) - if child_container_model: - new_xpath_tokens = self._get_xpath_tokens_from_container(child_container_model, token_index+1, path_tokens, config[path_tokens[token_index]]) - xpath_tokens.extend(new_xpath_tokens) - return xpath_tokens - - new_xpath_tokens = self._get_xpath_tokens_from_leaf(model, token_index+1, path_tokens, config[path_tokens[token_index]]) - xpath_tokens.extend(new_xpath_tokens) - - return xpath_tokens - - def _get_xpath_tokens_from_list(self, model, token_index, path_tokens, 
config): - list_name = model['@name'] - - tableKey = path_tokens[token_index] - listKeys = model['key']['@value'] - keyDict = self._extractKey(tableKey, listKeys) - keyTokens = [f"[{key}='{keyDict[key]}']" for key in keyDict] - item_token = f"{list_name}{''.join(keyTokens)}" - - xpath_tokens = [item_token] - - # if whole list-item is needed i.e. if in the path is not referencing child leaf items - # Example: - # path: /VLAN/Vlan1000 - # xpath: /sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000'] - if len(path_tokens)-1 == token_index: - return xpath_tokens - - type_1_list_model = self._get_type_1_list_model(model) - if type_1_list_model: - new_xpath_tokens = self._get_xpath_tokens_from_type_1_list(type_1_list_model, token_index+1, path_tokens, config[path_tokens[token_index]]) - xpath_tokens.extend(new_xpath_tokens) - return xpath_tokens - - new_xpath_tokens = self._get_xpath_tokens_from_leaf(model, token_index+1, path_tokens,config[path_tokens[token_index]]) - xpath_tokens.extend(new_xpath_tokens) - return xpath_tokens - - def _get_xpath_tokens_from_type_1_list(self, model, token_index, path_tokens, config): - type_1_list_name = model['@name'] - keyName = model['key']['@value'] - value = path_tokens[token_index] - keyToken = f"[{keyName}='{value}']" - itemToken = f"{type_1_list_name}{keyToken}" - - return [itemToken] - - def _get_xpath_tokens_from_leaf(self, model, token_index, path_tokens, config): - token = path_tokens[token_index] - - # checking all leaves - leaf_model = self._get_model(model.get('leaf'), token) - if leaf_model: - return [token] - - # checking choice - choices = model.get('choice') - if choices: - for choice in choices: - cases = choice['case'] - for case in cases: - leaf_model = self._get_model(case.get('leaf'), token) - if leaf_model: - return [token] - - # checking leaf-list (i.e. 
arrays of string, number or bool) - leaf_list_model = self._get_model(model.get('leaf-list'), token) - if leaf_list_model: - # if whole-list is to be returned, just return the token without checking the list items - # Example: - # path: /VLAN/Vlan1000/dhcp_servers - # xpath: /sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers - if len(path_tokens)-1 == token_index: - return [token] - list_config = config[token] - value = list_config[int(path_tokens[token_index+1])] - # To get a leaf-list instance with the value 'val' - # /module-name:container/leaf-list[.='val'] - # Source: Check examples in https://netopeer.liberouter.org/doc/libyang/master/html/howto_x_path.html - return [f"{token}[.='{value}']"] - - # checking 'uses' statement - if not isinstance(config[token], list): # leaf-list under uses is not supported yet in sonic_yang - table = path_tokens[0] - uses_leaf_model = self._get_uses_leaf_model(model, table, token) - if uses_leaf_model: - return [token] - - raise ValueError(f"Path token not found.\n model: {model}\n token_index: {token_index}\n " + \ - f"path_tokens: {path_tokens}\n config: {config}") - - def _extractKey(self, tableKey, keys): - keyList = keys.split() - # get the value groups - value = tableKey.split("|") - # match lens - if len(keyList) != len(value): - raise ValueError("Value not found for {} in {}".format(keys, tableKey)) - # create the keyDict - keyDict = dict() - for i in range(len(keyList)): - keyDict[keyList[i]] = value[i].strip() - - return keyDict - - def _get_list_model(self, model, token_index, path_tokens): - parent_container_name = path_tokens[token_index] - clist = model.get('list') - # Container contains a single list, just return it - # TODO: check if matching also by name is necessary - if isinstance(clist, dict): - return clist - - if isinstance(clist, list): - configdb_values_str = path_tokens[token_index+1] - # Format: "value1|value2|value|..." 
- configdb_values = configdb_values_str.split("|") - for list_model in clist: - yang_keys_str = list_model['key']['@value'] - # Format: "key1 key2 key3 ..." - yang_keys = yang_keys_str.split() - # if same number of values and keys, this is the intended list-model - # TODO: Match also on types and not only the length of the keys/values - if len(yang_keys) == len(configdb_values): - return list_model - raise GenericConfigUpdaterError(f"Container {parent_container_name} has multiple lists, " - f"but none of them match the config_db value {configdb_values_str}") - - return None - - def _get_type_1_list_model(self, model): - list_name = model['@name'] - if list_name not in sonic_yang_ext.Type_1_list_maps_model: - return None - - # Type 1 list is expected to have a single inner list model. - # No need to check if it is a dictionary of list models. - return model.get('list') - - def convert_xpath_to_path(self, xpath, config, sy): + if sy is None: + sy = self._create_sonic_yang_with_loaded_models() + return sy.configdb_path_to_xpath(path, configdb=config) + + def convert_xpath_to_path(self, xpath, config=None, sy=None): """ Converts the given XPATH to JsonPatch path (i.e. JsonPointer). 
Example: xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode path: /VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode """ - tokens = self.get_xpath_tokens(xpath) - if len(tokens) == 0: - return self.create_path([]) - - if len(tokens) == 1: - raise GenericConfigUpdaterError("xpath cannot be just the module-name, there is no mapping to path") - - table = tokens[1] - cmap = sy.confDbYangMap[table] - - path_tokens = self._get_path_tokens_from_container(cmap['container'], 1, tokens, config) - return self.create_path(path_tokens) - - def _get_path_tokens_from_container(self, model, token_index, xpath_tokens, config): - token = xpath_tokens[token_index] - path_tokens = [token] - - if len(xpath_tokens)-1 == token_index: - return path_tokens - - # check child list - list_name = xpath_tokens[token_index+1].split("[")[0] - list_model = self._get_model(model.get('list'), list_name) - if list_model: - new_path_tokens = self._get_path_tokens_from_list(list_model, token_index+1, xpath_tokens, config[token]) - path_tokens.extend(new_path_tokens) - return path_tokens - - container_name = xpath_tokens[token_index+1] - container_model = self._get_model(model.get('container'), container_name) - if container_model: - new_path_tokens = self._get_path_tokens_from_container(container_model, token_index+1, xpath_tokens, config[token]) - path_tokens.extend(new_path_tokens) - return path_tokens - - new_path_tokens = self._get_path_tokens_from_leaf(model, token_index+1, xpath_tokens, config[token]) - path_tokens.extend(new_path_tokens) - - return path_tokens - - def _get_path_tokens_from_list(self, model, token_index, xpath_tokens, config): - token = xpath_tokens[token_index] - key_dict = self._extract_key_dict(token) - - # If no keys specified return empty tokens, as we are already inside the correct table. - # Also note that the list name in SonicYang has no correspondence in ConfigDb and is ignored. 
- # Example where VLAN_MEMBER_LIST has no specific key/value: - # xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST - # path: /VLAN_MEMBER - if not(key_dict): - return [] - - listKeys = model['key']['@value'] - key_list = listKeys.split() - - if len(key_list) != len(key_dict): - raise GenericConfigUpdaterError(f"Keys in configDb not matching keys in SonicYang. ConfigDb keys: {key_dict.keys()}. SonicYang keys: {key_list}") - - values = [key_dict[k] for k in key_list] - path_token = '|'.join(values) - path_tokens = [path_token] - - if len(xpath_tokens)-1 == token_index: - return path_tokens - - next_token = xpath_tokens[token_index+1] - # if the target node is a key, then it does not have a correspondene to path. - # Just return the current 'key1|key2|..' token as it already refers to the keys - # Example where the target node is 'name' which is a key in VLAN_MEMBER_LIST: - # xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/name - # path: /VLAN_MEMBER/Vlan1000|Ethernet8 - if next_token in key_dict: - return path_tokens - - type_1_list_model = self._get_type_1_list_model(model) - if type_1_list_model: - new_path_tokens = self._get_path_tokens_from_type_1_list(type_1_list_model, token_index+1, xpath_tokens, config[path_token]) - path_tokens.extend(new_path_tokens) - return path_tokens - - new_path_tokens = self._get_path_tokens_from_leaf(model, token_index+1, xpath_tokens, config[path_token]) - path_tokens.extend(new_path_tokens) - return path_tokens - - def _get_path_tokens_from_type_1_list(self, model, token_index, xpath_tokens, config): - type_1_inner_list_name = model['@name'] - - token = xpath_tokens[token_index] - list_tokens = token.split("[", 1) # split once on the first '[', first element will be the inner list name - inner_list_name = list_tokens[0] - - if type_1_inner_list_name != inner_list_name: - raise GenericConfigUpdaterError(f"Type 1 inner list name '{type_1_inner_list_name}' does match xpath inner 
list name '{inner_list_name}'.") - - key_dict = self._extract_key_dict(token) - - # If no keys specified return empty tokens, as we are already inside the correct table. - # Also note that the type 1 inner list name in SonicYang has no correspondence in ConfigDb and is ignored. - # Example where VLAN_MEMBER_LIST has no specific key/value: - # xpath: /sonic-dot1p-tc-map:sonic-dot1p-tc-map/DOT1P_TO_TC_MAP/DOT1P_TO_TC_MAP_LIST[name='Dot1p_to_tc_map1']/DOT1P_TO_TC_MAP - # path: /DOT1P_TO_TC_MAP/Dot1p_to_tc_map1 - if not(key_dict): - return [] - - if len(key_dict) > 1: - raise GenericConfigUpdaterError(f"Type 1 inner list should have only 1 key in xpath, {len(key_dict)} specified. Key dictionary: {key_dict}") - - keyName = next(iter(key_dict.keys())) - value = key_dict[keyName] - - path_tokens = [value] - - # If this is the last xpath token, return the path tokens we have built so far, no need for futher checks - # Example: - # xpath: /sonic-dot1p-tc-map:sonic-dot1p-tc-map/DOT1P_TO_TC_MAP/DOT1P_TO_TC_MAP_LIST[name='Dot1p_to_tc_map1']/DOT1P_TO_TC_MAP[dot1p='2'] - # path: /DOT1P_TO_TC_MAP/Dot1p_to_tc_map1/2 - if token_index+1 >= len(xpath_tokens): - return path_tokens - - # Checking if the next_token is actually a child leaf of the inner type 1 list, for which case - # just ignore the token, and return the already created ConfigDb path pointing to the whole object - # Example where the leaf specified is the key: - # xpath: /sonic-dot1p-tc-map:sonic-dot1p-tc-map/DOT1P_TO_TC_MAP/DOT1P_TO_TC_MAP_LIST[name='Dot1p_to_tc_map1']/DOT1P_TO_TC_MAP[dot1p='2']/dot1p - # path: /DOT1P_TO_TC_MAP/Dot1p_to_tc_map1/2 - # Example where the leaf specified is not the key: - # xpath: /sonic-dot1p-tc-map:sonic-dot1p-tc-map/DOT1P_TO_TC_MAP/DOT1P_TO_TC_MAP_LIST[name='Dot1p_to_tc_map1']/DOT1P_TO_TC_MAP[dot1p='2']/tc - # path: /DOT1P_TO_TC_MAP/Dot1p_to_tc_map1/2 - next_token = xpath_tokens[token_index+1] - leaf_model = self._get_model(model.get('leaf'), next_token) - if leaf_model: - return 
path_tokens - - raise GenericConfigUpdaterError(f"Type 1 inner list '{type_1_inner_list_name}' does not have a child leaf named '{next_token}'") - - def _get_path_tokens_from_leaf(self, model, token_index, xpath_tokens, config): - token = xpath_tokens[token_index] - - # checking all leaves - leaf_model = self._get_model(model.get('leaf'), token) - if leaf_model: - return [token] - - # checking choices - choices = model.get('choice') - if choices: - for choice in choices: - cases = choice['case'] - for case in cases: - leaf_model = self._get_model(case.get('leaf'), token) - if leaf_model: - return [token] - - # checking leaf-list - leaf_list_tokens = token.split("[", 1) # split once on the first '[', a regex is used later to fetch keys/values - leaf_list_name = leaf_list_tokens[0] - leaf_list_model = self._get_model(model.get('leaf-list'), leaf_list_name) - if leaf_list_model: - # if whole-list is to be returned, just return the list-name without checking the list items - # Example: - # xpath: /sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers - # path: /VLAN/Vlan1000/dhcp_servers - if len(leaf_list_tokens) == 1: - return [leaf_list_name] - leaf_list_pattern = "^[^\[]+(?:\[\.='([^']*)'\])?$" - leaf_list_regex = re.compile(leaf_list_pattern) - match = leaf_list_regex.match(token) - # leaf_list_name = match.group(1) - leaf_list_value = match.group(1) - list_config = config[leaf_list_name] - # Workaround for those fields who is defined as leaf-list in YANG model but have string value in config DB - # No need to lookup the item index in ConfigDb since the list is represented as a string, return path to string immediately - # Example: - # xpath: /sonic-buffer-port-egress-profile-list:sonic-buffer-port-egress-profile-list/BUFFER_PORT_EGRESS_PROFILE_LIST/BUFFER_PORT_EGRESS_PROFILE_LIST_LIST[port='Ethernet9']/profile_list[.='egress_lossy_profile'] - # path: /BUFFER_PORT_EGRESS_PROFILE_LIST/Ethernet9/profile_list - if isinstance(list_config, str): - return 
[leaf_list_name] - - if not isinstance(list_config, list): - raise ValueError(f"list_config is expected to be of type list or string. Found {type(list_config)}.\n " + \ - f"model: {model}\n token_index: {token_index}\n " + \ - f"xpath_tokens: {xpath_tokens}\n config: {config}") - - list_idx = list_config.index(leaf_list_value) - return [leaf_list_name, list_idx] - - # checking 'uses' statement - if not isinstance(config[leaf_list_name], list): # leaf-list under uses is not supported yet in sonic_yang - table = xpath_tokens[1] - uses_leaf_model = self._get_uses_leaf_model(model, table, token) - if uses_leaf_model: - return [token] - - raise ValueError(f"Xpath token not found.\n model: {model}\n token_index: {token_index}\n " + \ - f"xpath_tokens: {xpath_tokens}\n config: {config}") - - def _extract_key_dict(self, list_token): - # Example: VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8'] - # the groups would be ('VLAN_MEMBER'), ("[name='Vlan1000'][port='Ethernet8']") - table_keys_pattern = "^([^\[]+)(.*)$" - text = list_token - table_keys_regex = re.compile(table_keys_pattern) - match = table_keys_regex.match(text) - # list_name = match.group(1) - all_key_value = match.group(2) - - # Example: [name='Vlan1000'][port='Ethernet8'] - # the findall groups would be ('name', 'Vlan1000'), ('port', 'Ethernet8') - key_value_pattern = "\[([^=]+)='([^']*)'\]" - matches = re.findall(key_value_pattern, all_key_value) - key_dict = {} - for item in matches: - key = item[0] - value = item[1] - key_dict[key] = value - - return key_dict - - def _get_model(self, model, name): - if isinstance(model, dict) and model['@name'] == name: - return model - if isinstance(model, list): - for submodel in model: - if submodel['@name'] == name: - return submodel - - return None - - def _get_uses_leaf_model(self, model, table, token): + if sy is None: + sy = self._create_sonic_yang_with_loaded_models() + return sy.xpath_to_configdb_path(xpath, config) + + def configdb_sort_cmp(self, a, b): + # 
Order first by number of backlinks + cmp = a["backlinks"] - b["backlinks"] + if cmp != 0: + return cmp + + # Then order (in reverse!) by musts + cmp = b["musts"] - a["musts"] + if cmp != 0: + return cmp + + # Finally, if we differ by number of separators, a lot of times the + # one with fewer separators wins. Hopefully the 'musts' will catch + # this anyhow. + cmp = a["nsep"] - b["nsep"] + return cmp + + def configdb_sorted_keys_by_backlinks(self, configdb_path: str, configdb: dict, reverse: bool = False, + configdb_relative: bool = False, sy=None): """ - Getting leaf model in uses model matching the given token. + Given a path and a config, iterates across all keys at the path location + to look up the number of backlinks per key, then returns the keys sorted + by backlinks in ascending order by default (set reverse=True to use descending order) + + The configdb is only used to look up the keys at the given path, it is not + loaded into the context. The sort is not performed by actual references + to the key in data, but rather the "potential" number of references based + on the schema alone. + + If configdb_relative=True then we will use the provided configdb ptr + directly instead of using the configdb_path parameter to find the proper + position.
""" - uses_s = model.get('uses') - if not uses_s: - return None - - # a model can be a single dict or a list of dictionaries, unify to a list of dictionaries - if not isinstance(uses_s, list): - uses_s = [uses_s] - sy = self._create_sonic_yang_with_loaded_models() - # find yang module for current table - table_module = sy.confDbYangMap[table]['yangModule'] - # uses Example: "@name": "bgpcmn:sonic-bgp-cmn" - for uses in uses_s: - if not isinstance(uses, dict): - raise GenericConfigUpdaterError(f"'uses' is expected to be a dictionary found '{type(uses)}'.\n" \ - f" uses: {uses}\n model: {model}\n table: {table}\n token: {token}") - - # Assume ':' means reference to another module - if ':' in uses['@name']: - name_parts = uses['@name'].split(':') - prefix = name_parts[0].strip() - uses_module_name = sy._findYangModuleFromPrefix(prefix, table_module) - grouping = name_parts[-1].strip() + if sy is None and self.config_wrapper is not None: + sy = self._create_sonic_yang_with_loaded_models() + + # Traverse configdb to find the right pointer + ptr = configdb + tokens = self.get_path_tokens(configdb_path) + if not configdb_relative: + for token in tokens: + ptr = ptr[token] + + # Test cases expect non-sorted and config_wrapper isn't set. 
+ if self.config_wrapper is None: + return [key for key in ptr] + + keys = [] + # Enumerate all keys and retrieve backlinks, store in a list of dictionaries for sorting + for key in ptr: + tokens.append(key) + path = self.create_path(tokens) + try: + xpath = sy.configdb_path_to_xpath(path, schema_xpath=True) + except KeyError: + # Test cases use invalid tables, so we have to handle that even + # though it shouldn't be possible in live code as tables without + # yang are trimmed + keys.append({ + "key": key, + "backlinks": 0, + "musts": 0, + "nsep": 0 + }) else: - uses_module_name = table_module['@name'] - grouping = uses['@name'] - - leafs = sy.preProcessedYang['grouping'][uses_module_name][grouping] - - leaf_model = self._get_model(leafs, token) - if leaf_model: - return leaf_model - - return None + keys.append({ + "key": key, + "backlinks": len(sy.find_schema_dependencies(xpath, match_ancestors=True)), + "musts": sy.find_schema_must_count(xpath, match_ancestors=True), + "nsep": str(key).count("|") + }) + tokens.pop() + + # Sort list of keys by count + keys = sorted(keys, key=cmp_to_key(self.configdb_sort_cmp), reverse=reverse) + + # Caller doesn't care about the count, just that the list of keys is ordered + return [d['key'] for d in keys] class TitledLogger(logger.Logger): def __init__(self, syslog_identifier, title, verbose, print_all_to_console): diff --git a/generic_config_updater/patch_sorter.py b/generic_config_updater/patch_sorter.py index 829c3c2b6..2e5266b42 100644 --- a/generic_config_updater/patch_sorter.py +++ b/generic_config_updater/patch_sorter.py @@ -1,6 +1,7 @@ import copy import json import jsonpatch +import sonic_yang from collections import deque, OrderedDict from enum import Enum from .gu_common import OperationWrapper, OperationType, GenericConfigUpdaterError, \ @@ -29,8 +30,12 @@ def __eq__(self, other): # TODO: Can be optimized to apply the move in place. 
JsonPatch supports that using the option 'in_place=True' # Check: https://python-json-patch.readthedocs.io/en/latest/tutorial.html#applying-a-patch # NOTE: in case move is applied in place, we will need to support `undo_move` as well. - def apply_move(self, move): - new_current_config = move.apply(self.current_config) + def apply_move(self, move, in_place: bool = False): + new_current_config = move.apply(self.current_config, in_place) + return Diff(new_current_config, self.target_config) + + def undo_move(self, move, in_place: bool = False): + new_current_config = move.undo(self.current_config, in_place) return Diff(new_current_config, self.target_config) def has_no_diff(self): @@ -43,6 +48,7 @@ def __str__(self): def __repr__(self): return str(self) + class JsonMove: """ A class similar to JsonPatch operation, but it allows the path to refer to non-existing middle elements. @@ -58,7 +64,10 @@ class JsonMove: and current_config_tokens i.e. current_config path where the update needs to happen. 
""" def __init__(self, diff, op_type, current_config_tokens, target_config_tokens=None): - operation = JsonMove._to_jsonpatch_operation(diff, op_type, current_config_tokens, target_config_tokens) + # Support for undo + self.orig_value = None + + operation = self._to_jsonpatch_operation(diff, op_type, current_config_tokens, target_config_tokens) self.patch = jsonpatch.JsonPatch([operation]) self.op_type = operation[OperationWrapper.OP_KEYWORD] self.path = operation[OperationWrapper.PATH_KEYWORD] @@ -68,8 +77,7 @@ def __init__(self, diff, op_type, current_config_tokens, target_config_tokens=No self.current_config_tokens = current_config_tokens self.target_config_tokens = target_config_tokens - @staticmethod - def _to_jsonpatch_operation(diff, op_type, current_config_tokens, target_config_tokens): + def _to_jsonpatch_operation(self, diff, op_type, current_config_tokens, target_config_tokens): operation_wrapper = OperationWrapper() path_addressing = PathAddressing() @@ -90,6 +98,8 @@ def _to_jsonpatch_operation(diff, op_type, current_config_tokens, target_config_ @staticmethod def _get_value(config, tokens): for token in tokens: + if isinstance(token, str) and token.isnumeric(): + token = int(token) config = config[token] return copy.deepcopy(config) @@ -271,8 +281,26 @@ def from_operation(operation): return JsonMove(diff, op_type, current_config_tokens, target_config_tokens) - def apply(self, config): - return self.patch.apply(config) + def apply(self, config, in_place: bool = False): + if self.op_type == OperationType.REMOVE or self.op_type == OperationType.REPLACE: + self.orig_value = JsonMove._get_value(config, sonic_yang.SonicYang.configdb_path_split(self.path)) + + return self.patch.apply(config, in_place=in_place) + + def undo(self, config, in_place: bool = False): + # Create new patch to undo previous application + if self.patch.patch[0]['op'] == 'add': + patch = jsonpatch.JsonPatch([{'op': 'remove', 'path': self.patch.patch[0]['path']}]) + elif 
self.patch.patch[0]['op'] == 'replace': + patch = jsonpatch.JsonPatch([{ + 'op': 'replace', + 'path': self.patch.patch[0]['path'], + 'value': self.orig_value + }]) + elif self.patch.patch[0]['op'] == 'remove': + patch = jsonpatch.JsonPatch([{'op': 'add', 'path': self.patch.patch[0]['path'], 'value': self.orig_value}]) + + return patch.apply(config, in_place=in_place) def __str__(self): return str(self.patch) @@ -289,6 +317,107 @@ def __eq__(self, other): def __hash__(self): return hash((self.op_type, self.path, json.dumps(self.value))) + +class JsonMoveGroup: + """ + Group of JsonMove objects to be applied together + """ + def __init__(self, move: JsonMove = None): + self.patches = [] + if move is not None: + self.append(move) + + def append(self, move: JsonMove): + self.patches.append(move) + + def apply(self, config, in_place: bool = False): + update = config + for i, patch in enumerate(self.patches): + if i != 0: + in_place = True + update = patch.apply(update, in_place=in_place) + if update is None: + return None + return update + + def undo(self, config, in_place: bool = False): + update = config + for i, patch in enumerate(reversed(self.patches)): + if i != 0: + in_place = True + update = patch.undo(update, in_place=in_place) + if update is None: + return None + return update + + def get_jsonpatch(self): + raw_patches = [] + for move in self.patches: + raw_patches.extend(move.patch.patch) + return jsonpatch.JsonPatch(raw_patches) + + def parentTableName(self): + """ + This extracts the parent table name from the first patch associated + with this grouping. This is a bit special-cased for grouping purposes + where it evaluates both '/' and '|' to look for parents to group. 
+ + Examples: + * /PORT/Ethernet0/description -> /PORT + * /PORTCHANNEL_MEMBER/PortChannel0001|Ethernet16 -> /PORTCHANNEL_MEMBER/PortChannel0001 + * /ACL_RULE/V4-ACL-TABLE|Rule_20/DST_IP -> /ACL_RULE/V4-ACL-TABLE + """ + tokens = sonic_yang.SonicYang.configdb_path_split(self.patches[0].path) + + # See if the last token has a |, if so just truncate and return + token = tokens[-1] + if "|" in token: + tokens[-1] = token.rsplit("|", 1)[0] + return sonic_yang.SonicYang.configdb_path_join(tokens) + + # Pop off the last element as we don't need it, its not a key + tokens.pop() + + # See if the last token has a |, if so just truncate and return + token = tokens[-1] + if "|" in token: + tokens[-1] = token.rsplit("|", 1)[0] + return sonic_yang.SonicYang.configdb_path_join(tokens) + + # If we're here it means we're on a key to be removed, pop and return + tokens.pop() + return sonic_yang.SonicYang.configdb_path_join(tokens) + + def merge(self, group): + self.patches.extend(group.patches) + + def __str__(self): + return ",".join([str(patch) for patch in self.patches]) + + def __repr__(self): + return str(self) + + def __eq__(self, other): + if isinstance(other, JsonMoveGroup): + if len(other) != len(self.patches): + return False + for i, patch in enumerate(self.patches): + if patch.patch != other.patches[i].patch: + return False + return True + return False + + def __hash__(self): + return hash(str(self)) + + def __len__(self): + return len(self.patches) + + def __iter__(self): + for patch in self.patches: + yield patch + + class MoveWrapper: def __init__(self, move_generators, move_non_extendable_generators, move_extenders, move_validators): self.move_generators = move_generators @@ -312,7 +441,6 @@ def generate(self, diff): processed_moves = set() extended_moves = set() moves = deque([]) - for move in self._generate_non_extendable_moves(diff): if not(move in processed_moves): processed_moves.add(move) @@ -338,13 +466,19 @@ def generate(self, diff): 
moves.extend(self._extend_moves(move, diff)) def validate(self, move, diff): + # Generate simulated config once, not once per validator as this performs + # a deep copy + simulated_config = move.apply(diff.current_config) for validator in self.move_validators: - if not validator.validate(move, diff): + if not validator.validate(move, diff, simulated_config): return False return True - def simulate(self, move, diff): - return diff.apply_move(move) + def simulate(self, move, diff, in_place: bool = False): + return diff.apply_move(move, in_place) + + def undo_simulate(self, move, diff, in_place: bool = False): + return diff.undo_move(move, in_place) def _generate_moves(self, diff): for generator in self.move_generators: @@ -356,10 +490,14 @@ def _generate_non_extendable_moves(self, diff): for move in generator.generate(diff): yield move - def _extend_moves(self, move, diff): - for extender in self.move_extenders: - for newmove in extender.extend(move, diff): - yield newmove + def _extend_moves(self, moveGroup: JsonMoveGroup, diff) -> JsonMoveGroup: + # The extender only operates on JsonMove, so iterate across the individual + # moves within the group to generate the new moves. In theory there + # should be at most one move per group if we're running extenders. + for move in moveGroup: + for extender in self.move_extenders: + for newmove in extender.extend(move, diff): + yield JsonMoveGroup(newmove) class JsonPointerFilter: """ @@ -493,6 +631,26 @@ def __init__(self, path_addressing): setting["common_key_index"] = index setting["requiring_filter"] = JsonPointerFilter(setting["requiring_patterns"], path_addressing) + """ + Simple function to determine if the path tokens match any of the required patterns. + This is used to avoid grouping such changes in bulk operations.
+ """ + def target_in_required_pattern(self, configdb_path_tokens): + for setting in self.settings: + if len(setting["required_pattern"]) != len(configdb_path_tokens): + continue + + is_match = True + + for idx, token in enumerate(setting["required_pattern"]): + if token not in ["*", "@", configdb_path_tokens[idx]]: + is_match = False + break + + if is_match: + return True + + return False def get_required_value_data(self, configs): data = {} @@ -586,9 +744,11 @@ def __init__(self, path_addressing): self.path_addressing = path_addressing self.create_only_filter = CreateOnlyFilter(path_addressing).get_filter() - def validate(self, move, diff): + def validate(self, group: JsonMoveGroup, diff, simulated_config): + # Note: group is not used by this validator current_config = diff.current_config target_config = diff.target_config # Final config after applying whole patch + reload_config = True processed_tables = set() for path in self.create_only_filter.get_paths(current_config): @@ -614,19 +774,22 @@ def validate(self, move, diff): if not target_members: continue - simulated_config = move.apply(current_config) # Config after applying just this move - for member_name in current_members: if member_name not in target_members: continue if not self._validate_member(tokens, member_name, - current_config, target_config, simulated_config): + current_config, target_config, simulated_config, + reload_config=reload_config): return False + # After first call, no need to reload again + reload_config = False + return True - def _validate_member(self, tokens, member_name, current_config, target_config, simulated_config): + def _validate_member(self, tokens, member_name, current_config, target_config, simulated_config, + reload_config: bool = True): table_to_check, create_only_field = tokens[0], tokens[-1] current_field = self._get_create_only_field( @@ -654,7 +817,7 @@ def _validate_member(self, tokens, member_name, current_config, target_config, s return False member_path = 
f"/{table_to_check}/{member_name}" - for ref_path in self.path_addressing.find_ref_paths(member_path, simulated_config): + for ref_path in self.path_addressing.find_ref_paths(member_path, simulated_config, reload_config=reload_config): if not self.path_addressing.has_path(current_config, ref_path): return False @@ -668,8 +831,8 @@ class DeleteWholeConfigMoveValidator: """ A class to validate not deleting whole config as it is not supported by JsonPatch lib. """ - def validate(self, move, diff): - if move.op_type == OperationType.REMOVE and move.path == "": + def validate(self, group: JsonMoveGroup, diff, simulated_config): + if group.patches[0].op_type == OperationType.REMOVE and group.patches[0].path == "": return False return True @@ -680,8 +843,7 @@ class FullConfigMoveValidator: def __init__(self, config_wrapper): self.config_wrapper = config_wrapper - def validate(self, move, diff): - simulated_config = move.apply(diff.current_config) + def validate(self, move, diff, simulated_config): is_valid, error = self.config_wrapper.validate_config_db_config(simulated_config) return is_valid @@ -698,8 +860,9 @@ def __init__(self, path_addressing): # TODO: create-only fields are hard-coded for now, it should be moved to YANG models self.create_only_filter = CreateOnlyFilter(path_addressing).get_filter() - def validate(self, move, diff): - simulated_config = move.apply(diff.current_config) + def validate(self, group: JsonMoveGroup, diff, simulated_config): + # NOTE: group not used by this validator + # get create-only paths from current config, simulated config and also target config # simulated config is the result of the move # target config is the final config @@ -796,27 +959,35 @@ def __init__(self, path_addressing, config_wrapper): self.path_addressing = path_addressing self.config_wrapper = config_wrapper - def validate(self, move, diff): + def validate(self, group: JsonMoveGroup, diff, simulated_config): + reload_config = True + # Note: all moves in a group are 
guaranteed to be the same operation type + for move in group: + if not self.__validate_move(move, diff, simulated_config, reload_config=reload_config): + return False + reload_config = False + return True + + def __validate_move(self, move, diff, simulated_config, reload_config: bool = True): operation_type = move.op_type path = move.path if operation_type == OperationType.ADD: - simulated_config = move.apply(diff.current_config) # For add operation, we check the simulated config has no dependencies between nodes under the added path - if not self._validate_paths_config([path], simulated_config): + if not self._validate_paths_config([path], simulated_config, reload_config): return False elif operation_type == OperationType.REMOVE: # For remove operation, we check the current config has no dependencies between nodes under the removed path - if not self._validate_paths_config([path], diff.current_config): + if not self._validate_paths_config([path], diff.current_config, reload_config): return False elif operation_type == OperationType.REPLACE: - if not self._validate_replace(move, diff): + if not self._validate_replace(move, diff, simulated_config): return False return True # NOTE: this function can be used for validating JsonChange as well which might have more than one move. - def _validate_replace(self, move, diff): + def _validate_replace(self, move, diff, simulated_config): """ The table below shows how mixed deletion/addition within replace affect this validation. 
@@ -847,15 +1018,17 @@ def _validate_replace(self, move, diff): if A is added and refA is added: return False return True """ - simulated_config = move.apply(diff.current_config) deleted_paths, added_paths = self._get_paths(diff.current_config, simulated_config, []) + # Note: on replace operations we are loading both current and simulated configs so we have to + # load twice :( + # For deleted paths, we check the current config has no dependencies between nodes under the removed path - if not self._validate_paths_config(deleted_paths, diff.current_config): + if not self._validate_paths_config(deleted_paths, diff.current_config, reload_config=True): return False # For added paths, we check the simulated config has no dependencies between nodes under the added path - if not self._validate_paths_config(added_paths, simulated_config): + if not self._validate_paths_config(added_paths, simulated_config, reload_config=True): return False return True @@ -922,11 +1095,11 @@ def _get_list_paths(self, current_list, target_list, tokens): return deleted_paths, added_paths - def _validate_paths_config(self, paths, config): + def _validate_paths_config(self, paths, config, reload_config: bool = True): """ validates all config under paths do not have config and its references """ - refs = self._find_ref_paths(paths, config) + refs = self.path_addressing.find_ref_paths(paths, config, reload_config=reload_config) for ref in refs: for path in paths: if ref.startswith(path): @@ -934,12 +1107,6 @@ def _validate_paths_config(self, paths, config): return True - def _find_ref_paths(self, paths, config): - refs = [] - for path in paths: - refs.extend(self.path_addressing.find_ref_paths(path, config)) - return refs - class NoEmptyTableMoveValidator: """ A class to validate that a move will not result in an empty table, because empty table do not show up in ConfigDB. 
@@ -947,8 +1114,13 @@ class NoEmptyTableMoveValidator: def __init__(self, path_addressing): self.path_addressing = path_addressing - def validate(self, move, diff): - simulated_config = move.apply(diff.current_config) + def validate(self, group, diff, simulated_config): + for move in group: + if not self.__validate_move(move, diff, simulated_config): + return False + return True + + def __validate_move(self, move, diff, simulated_config): op_path = move.path if op_path == "": # If updating whole file @@ -984,13 +1156,12 @@ def __init__(self, path_addressing): self.path_addressing = path_addressing self.identifier = RequiredValueIdentifier(path_addressing) - def validate(self, move, diff): + def validate(self, group: JsonMoveGroup, diff, simulated_config): # ignore full config removal because it is not possible by JsonPatch lib - if move.op_type == OperationType.REMOVE and move.path == "": + if group.patches[0].op_type == OperationType.REMOVE and group.patches[0].path == "": return current_config = diff.current_config - simulated_config = move.apply(current_config) # Config after applying just this move target_config = diff.target_config # Final config after applying whole patch # data dictionary: @@ -1040,18 +1211,20 @@ class TableLevelMoveGenerator: This class will generate moves to remove tables if they are in current, but not target. It also add tables if they are in target but not current configs. 
""" + def __init__(self, path_addressing): + self.path_addressing = path_addressing def generate(self, diff): # Removing tables in current but not target - for tokens in self._get_non_existing_tables_tokens(diff.current_config, diff.target_config): - yield JsonMove(diff, OperationType.REMOVE, tokens) + for tokens in self._get_non_existing_tables_tokens(diff.current_config, diff.target_config, False): + yield JsonMoveGroup(JsonMove(diff, OperationType.REMOVE, tokens)) # Adding tables in target but not current - for tokens in self._get_non_existing_tables_tokens(diff.target_config, diff.current_config): - yield JsonMove(diff, OperationType.ADD, tokens, tokens) + for tokens in self._get_non_existing_tables_tokens(diff.target_config, diff.current_config, True): + yield JsonMoveGroup(JsonMove(diff, OperationType.ADD, tokens, tokens)) - def _get_non_existing_tables_tokens(self, config1, config2): - for table in config1: + def _get_non_existing_tables_tokens(self, config1, config2, reverse): + for table in self.path_addressing.configdb_sorted_keys_by_backlinks("/", config1, reverse=reverse): if not(table in config2): yield [table] @@ -1070,38 +1243,325 @@ class KeyLevelMoveGenerator: This class will generate moves to remove keys if they are in current, but not target. It also add keys if they are in target but not current configs. 
""" + def __init__(self, path_addressing): + self.path_addressing = path_addressing + def generate(self, diff): # Removing keys in current but not target - for tokens in self._get_non_existing_keys_tokens(diff.current_config, diff.target_config): + for tokens in self._get_non_existing_keys_tokens(diff.current_config, diff.target_config, reverse=False): table = tokens[0] # if table has a single key, delete the whole table because empty tables are not allowed in ConfigDB if len(diff.current_config[table]) == 1: - yield JsonMove(diff, OperationType.REMOVE, [table]) + yield JsonMoveGroup(JsonMove(diff, OperationType.REMOVE, [table])) else: - yield JsonMove(diff, OperationType.REMOVE, tokens) + yield JsonMoveGroup(JsonMove(diff, OperationType.REMOVE, tokens)) # Adding keys in target but not current - for tokens in self._get_non_existing_keys_tokens(diff.target_config, diff.current_config): - yield JsonMove(diff, OperationType.ADD, tokens, tokens) + for tokens in self._get_non_existing_keys_tokens(diff.target_config, diff.current_config, reverse=True): + yield JsonMoveGroup(JsonMove(diff, OperationType.ADD, tokens, tokens)) - def _get_non_existing_keys_tokens(self, config1, config2): - for table in config1: - for key in config1[table]: + def _get_non_existing_keys_tokens(self, config1, config2, reverse): + for table in self.path_addressing.configdb_sorted_keys_by_backlinks("/", config1, reverse=reverse): + for key in self.path_addressing.configdb_sorted_keys_by_backlinks("/" + table, config1, reverse=reverse): if not(table in config2) or not (key in config2[table]): yield [table, key] -class LowLevelMoveGenerator: + +class BulkKeyLevelMoveGenerator: """ - A class to generate the low level moves i.e. moves corresponding to differences between current/target config - where the path of the move does not have children. + Same concept as KeyLevelMoveGenerator, but groups additions and removals of sibling keys. 
+ """ + def __init__(self, path_addressing): + self.path_addressing = path_addressing + + def generate(self, diff): + prev_num_separators = -1 + group = None + prev_table = "" + + # Removing keys in current but not target + for tokens in self._get_non_existing_keys_tokens(diff.current_config, diff.target_config, reverse=False): + table = tokens[0] + key = tokens[1] + + # If the number of separators changed, do not group these operations with the previous ones. + num_separators = key.count("|") + if group is not None and (prev_num_separators != num_separators or table != prev_table): + # Special case if we are deleting all the current keys in a table to emit a table delete too + if len(list(group)) == len(diff.current_config[table if prev_table == "" else prev_table]): + group.append(JsonMove(diff, OperationType.REMOVE, [table])) + yield group + group = None + + prev_table = table + prev_num_separators = num_separators + if group is None: + group = JsonMoveGroup() + + group.append(JsonMove(diff, OperationType.REMOVE, tokens)) + + # Pending group, emit + if group is not None: + # Special case if we are deleting all the current keys in a table to emit a table delete too + if len(list(group)) == len(diff.current_config[table]): + group.append(JsonMove(diff, OperationType.REMOVE, [table])) + yield group + + prev_num_separators = -1 + group = None + prev_table = "" + + # Adding keys in target but not current + for tokens in self._get_non_existing_keys_tokens(diff.target_config, diff.current_config, reverse=True): + table = tokens[0] + key = tokens[1] + + # We do not support adding the whole table, only grouping key entry creation. + if table not in diff.current_config: + continue + + # If the number of separators changed, do not group these operations with the previous ones. 
+ num_separators = key.count("|") + if group is not None and (prev_num_separators != num_separators or table != prev_table): + yield group + group = None + + prev_table = table + prev_num_separators = num_separators + if group is None: + group = JsonMoveGroup() + + group.append(JsonMove(diff, OperationType.ADD, tokens, tokens)) + + # Pending group, emit + if group is not None: + yield group + + def _get_non_existing_keys_tokens(self, config1, config2, reverse): + for table in self.path_addressing.configdb_sorted_keys_by_backlinks("/", config1, reverse=reverse): + for key in self.path_addressing.configdb_sorted_keys_by_backlinks("/" + table, config1, reverse=reverse): + if not(table in config2) or not (key in config2[table]): + yield [table, key] + + +class BulkKeyGroupLowLevelMoveGenerator: + """ + This is a wrapper around BulkLowLevelMoveGenerator that groups the leaf + operations together spanning multiple table keys. For example if someone + wants to update PORT/Ethernet0/description and PORT/Ethernet1/description + at the same time, it is safe to do so in the same patch group. This grouping + exists in order to attempt to optimize the fast path when there are a lot + of changes to the same table and there are often very few + cross-dependencies. This will bring down the patch set count considerably + for a lot of change operations. We do still fall back to other more + primitive generators if the validators fail so this is an optimization that + otherwise doesn't have any impact on the overall outcome, only performance. + + As a secondary optimization, restricted keys (primarily PORT/*/admin_status) + operations are grouped together as typically it is ok for all restricted keys + to change at the same time for the same table. If it's not, the validator + will simply fail it and the generator will move on and "try" something else.
+ """ + def __init__(self, path_addressing): + self.generator = BulkLowLevelMoveGenerator(path_addressing) + + def generate(self, diff): + # Handle removals first + for move in self.generate_groups(diff, self.generator.generate_remove): + yield move + + # Handle removals for restricted keys in bulk independently + for move in self.generate_groups(diff, self.generator.generate_remove, restricted_only=True): + yield move + + # Handle replacements next + for move in self.generate_groups(diff, self.generator.generate_replace): + yield move + + # Handle replacements for restricted keys in bulk independently + for move in self.generate_groups(diff, self.generator.generate_replace, restricted_only=True): + yield move + + # Handle additions last + for move in self.generate_groups(diff, self.generator.generate_add): + yield move + + # Handle additions for restricted keys in bulk independently + for move in self.generate_groups(diff, self.generator.generate_add, restricted_only=True): + yield move + + def generate_groups(self, diff, cb, restricted_only: bool = False): + group = None + for move in cb(diff, min_moves=1, restricted_only=restricted_only): + if group is None: + group = move + continue + + if move.parentTableName() != group.parentTableName(): + # Don't yield if one entry, we will let the extendable move generator + # generate it. + if len(group) > 1: + yield group + group = move + continue + + # group matches move, merge them. + group.merge(move) + + # Don't yield if one entry, we will let the extendable move generator + # generate it. + if group is not None and len(group) > 1: + yield group + + +class BulkLowLevelMoveGenerator: + """ + A class that generates low level moves that can be grouped together as a single patch + that operate on at most one key at a time. These are moves where the path of the move + has no children, these are the end leafs. 
We are going to use this as a non-extendable + generator as the normal LowLevelMoveGenerator will generate the extendable moves. """ def __init__(self, path_addressing): + self.diff = None self.path_addressing = path_addressing + self.requiredval = RequiredValueIdentifier(path_addressing) + def generate(self, diff): - single_run_generator = SingleRunLowLevelMoveGenerator(diff, self.path_addressing) - for move in single_run_generator.generate(): + # Handle removals first + for move in self.generate_remove(diff): yield move + # Handle replacements next + for move in self.generate_replace(diff): + yield move + + # Finally handle additions + for move in self.generate_add(diff): + yield move + + def generate_remove(self, diff, min_moves: int = 2, restricted_only: bool = False): + self.diff = diff + tokens = [] + + for move in self.__traverse(OperationType.REMOVE, self.diff.current_config, self.diff.target_config, tokens, + min_moves, restricted_only): + yield move + + def generate_replace(self, diff, min_moves: int = 2, restricted_only: bool = False): + self.diff = diff + tokens = [] + + for move in self.__traverse(OperationType.REPLACE, self.diff.current_config, self.diff.target_config, tokens, + min_moves, restricted_only): + yield move + + def generate_add(self, diff, min_moves: int = 2, restricted_only: bool = False): + self.diff = diff + tokens = [] + + for move in self.__traverse(OperationType.ADD, self.diff.current_config, self.diff.target_config, tokens, + min_moves, restricted_only): + yield move + + def __restricted_key(self, tokens, key, invert: bool = False): + tokens.append(key) + rv = self.requiredval.target_in_required_pattern(tokens) + tokens.pop() + if invert: + if rv: + return False + return True + return rv + + def __traverse(self, op, current_ptr, target_ptr, tokens, min_moves, restricted_only: bool = False): + if isinstance(current_ptr, dict): + if self.__children_are_leafs(current_ptr) and self.__children_are_leafs(target_ptr): + for move in 
self.__output_bulk_move(op, current_ptr, target_ptr, tokens, min_moves, restricted_only): + yield move + return + + # If current and target are different types, skip + if not isinstance(target_ptr, dict): + return + + # Recurse across children, sorted by backlinks + reverse = True + if op == OperationType.REMOVE: + reverse = False + + for key in self.path_addressing.configdb_sorted_keys_by_backlinks( + self.path_addressing.create_path(tokens), current_ptr, reverse=reverse, configdb_relative=True): + + # Does not exist in target, skip + if target_ptr.get(key) is None: + continue + + tokens.append(key) + for move in self.__traverse(op, current_ptr[key], target_ptr[key], tokens, min_moves, restricted_only): + yield move + tokens.pop() + return + + # TODO: implement list support + return + + def __children_are_leafs(self, config_ptr): + for key in config_ptr: + if isinstance(config_ptr[key], dict) or isinstance(config_ptr[key], list): + return False + return True + + def __output_bulk_move(self, op, current_ptr, target_ptr, tokens, min_moves, restricted_only): + match op: + case OperationType.REMOVE: + for move in self.__output_bulk_remove(current_ptr, target_ptr, tokens, min_moves, restricted_only): + yield move + case OperationType.REPLACE: + for move in self.__output_bulk_replace(current_ptr, target_ptr, tokens, min_moves, restricted_only): + yield move + case OperationType.ADD: + for move in self.__output_bulk_add(current_ptr, target_ptr, tokens, min_moves, restricted_only): + yield move + + def __output_bulk_add(self, current_ptr, target_ptr, tokens, min_moves, restricted_only): + group = JsonMoveGroup() + for key in target_ptr: + if current_ptr.get(key) is None and not self.__restricted_key(tokens, key, invert=restricted_only): + tokens.append(key) + group.append(JsonMove(self.diff, OperationType.ADD, tokens, tokens)) + tokens.pop() + + # Not a bulk move if there's not more than one action to take + if len(group) >= min_moves: + yield group + + def 
__output_bulk_remove(self, current_ptr, target_ptr, tokens, min_moves, restricted_only): + group = JsonMoveGroup() + for key in current_ptr: + if target_ptr.get(key) is None and not self.__restricted_key(tokens, key, invert=restricted_only): + tokens.append(key) + group.append(JsonMove(self.diff, OperationType.REMOVE, tokens)) + tokens.pop() + + # Not a bulk move if there's not more than one action to take + if len(group) >= min_moves: + yield group + + def __output_bulk_replace(self, current_ptr, target_ptr, tokens, min_moves, restricted_only): + group = JsonMoveGroup() + for key in current_ptr: + target_val = target_ptr.get(key) + if (target_val is not None and target_val != current_ptr.get(key) and + not self.__restricted_key(tokens, key, invert=restricted_only)): + tokens.append(key) + group.append(JsonMove(self.diff, OperationType.REPLACE, tokens, tokens)) + tokens.pop() + + # Not a bulk move if there's not more than one action to take + if len(group) >= min_moves: + yield group + + class RemoveCreateOnlyDependencyMoveGenerator: """ A class to generate the create-only fields' dependency removing moves @@ -1113,6 +1573,7 @@ def __init__(self, path_addressing): def generate(self, diff): current_config = diff.current_config target_config = diff.target_config # Final config after applying whole patch + reload_config = True processed_tables = set() for path in self.create_only_filter.get_paths(current_config): @@ -1152,24 +1613,30 @@ def generate(self, diff): member_path = f"/{table_to_check}/{member_name}" - for ref_path in self.path_addressing.find_ref_paths(member_path, current_config): - yield JsonMove(diff, OperationType.REMOVE, - self.path_addressing.get_path_tokens(ref_path)) + for ref_path in self.path_addressing.find_ref_paths(member_path, current_config, + reload_config=reload_config): + yield JsonMoveGroup(JsonMove(diff, OperationType.REMOVE, + self.path_addressing.get_path_tokens(ref_path))) + + # No need to reload config after first call + 
reload_config = False def _get_create_only_field(self, config, table_to_check, member_name, create_only_field): return config[table_to_check][member_name].get(create_only_field, None) -class SingleRunLowLevelMoveGenerator: +class LowLevelMoveGenerator: """ - A class that can only run once to assist LowLevelMoveGenerator with generating the moves. + A class to generate the low level moves i.e. moves corresponding to differences between current/target config + where the path of the move does not have children. """ - def __init__(self, diff, path_addressing): - self.diff = diff + def __init__(self, path_addressing): + self.diff = None self.path_addressing = path_addressing - def generate(self): + def generate(self, diff): + self.diff = diff current_ptr = self.diff.current_config target_ptr = self.diff.target_config current_tokens = [] @@ -1276,7 +1743,7 @@ def _traverse_value(self, current_value, target_value, current_tokens, target_to if current_value == target_value: return - yield JsonMove(self.diff, OperationType.REPLACE, current_tokens, target_tokens) + yield JsonMoveGroup(JsonMove(self.diff, OperationType.REPLACE, current_tokens, target_tokens)) def _traverse_current(self, ptr, current_tokens): if isinstance(ptr, list): @@ -1286,7 +1753,7 @@ def _traverse_current(self, ptr, current_tokens): if isinstance(ptr, dict): if len(ptr) == 0: - yield JsonMove(self.diff, OperationType.REMOVE, current_tokens) + yield JsonMoveGroup(JsonMove(self.diff, OperationType.REMOVE, current_tokens)) return for key in ptr: @@ -1303,7 +1770,7 @@ def _traverse_current(self, ptr, current_tokens): def _traverse_current_list(self, ptr, current_tokens): if len(ptr) == 0: - yield JsonMove(self.diff, OperationType.REMOVE, current_tokens) + yield JsonMoveGroup(JsonMove(self.diff, OperationType.REMOVE, current_tokens)) return for index, val in enumerate(ptr): @@ -1313,7 +1780,7 @@ def _traverse_current_list(self, ptr, current_tokens): current_tokens.pop() def _traverse_current_value(self, val, 
current_tokens): - yield JsonMove(self.diff, OperationType.REMOVE, current_tokens) + yield JsonMoveGroup(JsonMove(self.diff, OperationType.REMOVE, current_tokens)) def _traverse_target(self, ptr, current_tokens, target_tokens): if isinstance(ptr, list): @@ -1323,7 +1790,7 @@ def _traverse_target(self, ptr, current_tokens, target_tokens): if isinstance(ptr, dict): if len(ptr) == 0: - yield JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens) + yield JsonMoveGroup(JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens)) return for key in ptr: @@ -1342,7 +1809,7 @@ def _traverse_target(self, ptr, current_tokens, target_tokens): def _traverse_target_list(self, ptr, current_tokens, target_tokens): if len(ptr) == 0: - yield JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens) + yield JsonMoveGroup(JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens)) return for index, val in enumerate(ptr): @@ -1356,7 +1823,7 @@ def _traverse_target_list(self, ptr, current_tokens, target_tokens): current_tokens.pop() def _traverse_target_value(self, val, current_tokens, target_tokens): - yield JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens) + yield JsonMoveGroup(JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens)) def _list_to_dict_with_count(self, items): counts = dict() @@ -1369,6 +1836,7 @@ def _list_to_dict_with_count(self, items): return counts + class RequiredValueMoveExtender: """ Check RequiredValueIdentifier class description first. 
@@ -1388,7 +1856,7 @@ def __init__(self, path_addressing, operation_wrapper): self.identifier = RequiredValueIdentifier(path_addressing) self.operation_wrapper = operation_wrapper - def extend(self, move, diff): + def extend(self, move: JsonMove, diff): # ignore full config removal because it is not possible by JsonPatch lib if move.op_type == OperationType.REMOVE and move.path == "": return @@ -1441,7 +1909,7 @@ def extend(self, move, diff): extended_move = self._flip(move, flip_path_value_tuples) yield extended_move - def _flip(self, move, flip_path_value_tuples): + def _flip(self, move: JsonMove, flip_path_value_tuples): new_value = copy.deepcopy(move.value) move_tokens = self.path_addressing.get_path_tokens(move.path) for field_path, field_value in flip_path_value_tuples: @@ -1471,7 +1939,7 @@ class UpperLevelMoveExtender: 2) If parent was in current but not target, then delete the parent 3) If parent was in target but not current, then add the parent """ - def extend(self, move, diff): + def extend(self, move: JsonMove, diff): # if no tokens i.e. whole config if not move.current_config_tokens: return @@ -1504,7 +1972,7 @@ class DeleteInsteadOfReplaceMoveExtender: """ A class to extend the given REPLACE move by adding a REMOVE move. """ - def extend(self, move, diff): + def extend(self, move: JsonMove, diff): operation_type = move.op_type if operation_type != OperationType.REPLACE: @@ -1518,6 +1986,7 @@ def extend(self, move, diff): yield new_move + class DeleteRefsMoveExtender: """ A class to extend the given DELETE move by adding DELETE moves to configs referring to the path in the move. 
@@ -1525,15 +1994,16 @@ class DeleteRefsMoveExtender: def __init__(self, path_addressing): self.path_addressing = path_addressing - def extend(self, move, diff): + def extend(self, move: JsonMove, diff): operation_type = move.op_type if operation_type != OperationType.REMOVE: return - for ref_path in self.path_addressing.find_ref_paths(move.path, diff.current_config): + for ref_path in self.path_addressing.find_ref_paths(move.path, diff.current_config, reload_config=True): yield JsonMove(diff, OperationType.REMOVE, self.path_addressing.get_path_tokens(ref_path)) + class DfsSorter: def __init__(self, move_wrapper): self.visited = {} @@ -1552,13 +2022,16 @@ def sort(self, diff): for move in moves: if self.move_wrapper.validate(move, diff): - new_diff = self.move_wrapper.simulate(move, diff) + # NOTE: due to the recursive nature, we can't modify in-place as on error we will + # receive "RuntimeError: dictionary changed size during iteration" + new_diff = self.move_wrapper.simulate(move, diff, in_place=False) new_moves = self.sort(new_diff) if new_moves is not None: return [move] + new_moves return None + class BfsSorter: def __init__(self, move_wrapper): self.visited = {} @@ -1594,6 +2067,7 @@ def sort(self, diff): return None + class MemoizationSorter: def __init__(self, move_wrapper): self.visited = {} @@ -1624,11 +2098,13 @@ def sort(self, diff): self.mem[diff_hash] = bst_moves return bst_moves + class Algorithm(Enum): DFS = 1 BFS = 2 MEMOIZATION = 3 + class SortAlgorithmFactory: def __init__(self, operation_wrapper, config_wrapper, path_addressing): self.operation_wrapper = operation_wrapper @@ -1639,7 +2115,10 @@ def create(self, algorithm=Algorithm.DFS): move_generators = [RemoveCreateOnlyDependencyMoveGenerator(self.path_addressing), LowLevelMoveGenerator(self.path_addressing)] # TODO: Enable TableLevelMoveGenerator once it is confirmed whole table can be updated at the same time - move_non_extendable_generators = [KeyLevelMoveGenerator()] + 
move_non_extendable_generators = [BulkKeyLevelMoveGenerator(self.path_addressing), + KeyLevelMoveGenerator(self.path_addressing), + BulkKeyGroupLowLevelMoveGenerator(self.path_addressing), + BulkLowLevelMoveGenerator(self.path_addressing)] move_extenders = [RequiredValueMoveExtender(self.path_addressing, self.operation_wrapper), UpperLevelMoveExtender(), DeleteInsteadOfReplaceMoveExtender(), @@ -1665,6 +2144,7 @@ def create(self, algorithm=Algorithm.DFS): return sorter + class StrictPatchSorter: def __init__(self, config_wrapper, patch_wrapper, inner_patch_sorter=None): self.logger = genericUpdaterLogging.get_logger(title="Patch Sorter - Strict", print_all_to_console=True) @@ -1727,11 +2207,12 @@ def split_yang_non_yang_distinct_field_path(self, config): # Add to config_without_yang from config_with_yang tokens = self.path_addressing.get_path_tokens(path) - add_move = JsonMove(Diff(config_without_yang, config_with_yang), OperationType.ADD, tokens, tokens) + add_move = JsonMoveGroup(JsonMove(Diff(config_without_yang, config_with_yang), OperationType.ADD, tokens, + tokens)) config_without_yang = add_move.apply(config_without_yang) # Remove from config_with_yang - remove_move = JsonMove(Diff(config_with_yang, {}), OperationType.REMOVE, tokens) + remove_move = JsonMoveGroup(JsonMove(Diff(config_with_yang, {}), OperationType.REMOVE, tokens)) config_with_yang = remove_move.apply(config_with_yang) # Splitting the config based on 'ignore_paths_from_yang_list' can result in empty tables. 
@@ -1927,7 +2408,7 @@ def sort(self, patch, algorithm=Algorithm.DFS, preloaded_current_config=None): current_config = preloaded_current_config if preloaded_current_config else self.config_wrapper.get_config_db_as_json() target_config = self.patch_wrapper.simulate_patch(patch, current_config) - diff = Diff(current_config, target_config) + diff = Diff(copy.deepcopy(current_config), target_config) sort_algorithm = self.sort_algorithm_factory.create(algorithm) moves = sort_algorithm.sort(diff) @@ -1935,6 +2416,6 @@ def sort(self, patch, algorithm=Algorithm.DFS, preloaded_current_config=None): if moves is None: raise GenericConfigUpdaterError("There is no possible sorting") - changes = [JsonChange(move.patch) for move in moves] + changes = [JsonChange(move.get_jsonpatch()) for move in moves] return changes diff --git a/tests/generic_config_updater/change_applier_test.py b/tests/generic_config_updater/change_applier_test.py index 6a8926f01..b5cd761bc 100644 --- a/tests/generic_config_updater/change_applier_test.py +++ b/tests/generic_config_updater/change_applier_test.py @@ -128,7 +128,8 @@ def set_entry(config_db, tbl, key, data): # mimics JsonChange.apply # class mock_obj: - def apply(self, config): + def apply(self, config, in_place): + config = copy.deepcopy(config) json_change = json_changes[json_change_index] update = copy.deepcopy(json_change["update"]) @@ -255,7 +256,8 @@ def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): debug_print("main: json_change_index={}".format(json_change_index)) - applier.apply(mock_obj()) + current_config = copy.deepcopy(start_running_config) + current_config = applier.apply(current_config, mock_obj()) debug_print(f"Testing json_change {json_change_index}") @@ -290,8 +292,9 @@ def test_apply__calls_apply_change_to_config_db(self): applier = generic_config_updater.change_applier.DryRunChangeApplier(config_wrapper) # Act - applier.apply(change) - applier.remove_backend_tables_from_config(change) + current_config = 
copy.deepcopy(running_config) + current_config = applier.apply(current_config, change) + current_config = applier.remove_backend_tables_from_config(current_config) # Assert - applier.config_wrapper.apply_change_to_config_db.assert_has_calls([call(change)]) + applier.config_wrapper.apply_change_to_config_db.assert_called() diff --git a/tests/generic_config_updater/files/patch_sorter_test_success.json b/tests/generic_config_updater/files/patch_sorter_test_success.json index 217737e41..4d8cb8a5a 100644 --- a/tests/generic_config_updater/files/patch_sorter_test_success.json +++ b/tests/generic_config_updater/files/patch_sorter_test_success.json @@ -410,9 +410,7 @@ "stage": "ingress", "type": "L3" } - } - ], - [ + }, { "op": "add", "path": "/ACL_TABLE/EVERFLOW", @@ -424,9 +422,7 @@ "stage": "ingress", "type": "MIRROR" } - } - ], - [ + }, { "op": "add", "path": "/ACL_TABLE/EVERFLOWV6", @@ -606,9 +602,7 @@ "op": "add", "path": "/LOOPBACK_INTERFACE/Loopback0|10.1.0.32~132", "value": {} - } - ], - [ + }, { "op": "add", "path": "/LOOPBACK_INTERFACE/Loopback0|1100:1::32~1128", @@ -806,9 +800,7 @@ "description": "", "speed": "10000" } - } - ], - [ + }, { "op": "add", "path": "/PORT/Ethernet2", @@ -818,9 +810,7 @@ "description": "", "speed": "10000" } - } - ], - [ + }, { "op": "add", "path": "/PORT/Ethernet1", @@ -850,18 +840,14 @@ "value": { "tagging_mode": "untagged" } - } - ], - [ + }, { "op": "add", "path": "/VLAN_MEMBER/Vlan100|Ethernet3", "value": { "tagging_mode": "untagged" } - } - ], - [ + }, { "op": "add", "path": "/VLAN_MEMBER/Vlan100|Ethernet2", @@ -1032,15 +1018,11 @@ { "op": "remove", "path": "/VLAN_MEMBER/Vlan100|Ethernet1" - } - ], - [ + }, { "op": "remove", "path": "/VLAN_MEMBER/Vlan100|Ethernet2" - } - ], - [ + }, { "op": "remove", "path": "/VLAN_MEMBER/Vlan100|Ethernet3" @@ -1411,9 +1393,7 @@ "op": "add", "path": "/VLAN_INTERFACE/Vlan1000|fc02:1000::1~164", "value": {} - } - ], - [ + }, { "op": "add", "path": "/VLAN_INTERFACE/Vlan1000|192.168.0.1~121", @@ 
-1451,9 +1431,7 @@ "op": "replace", "path": "/CRM/Config/acl_counter_high_threshold", "value": "80" - } - ], - [ + }, { "op": "replace", "path": "/CRM/Config/acl_counter_low_threshold", @@ -2146,21 +2124,19 @@ { "op": "remove", "path": "/ACL_TABLE/NO-NSW-PACL-V4" - } - ], - [ + }, { "op": "remove", "path": "/ACL_TABLE/DATAACL" - } - ], - [ + }, { "op": "remove", "path": "/ACL_TABLE/EVERFLOW" - } - ], - [ + }, + { + "op": "remove", + "path": "/ACL_TABLE/EVERFLOWV6" + }, { "op": "remove", "path": "/ACL_TABLE" @@ -2223,9 +2199,7 @@ "op": "add", "path": "/LOOPBACK_INTERFACE/Loopback1|20.2.0.32~132", "value": {} - } - ], - [ + }, { "op": "add", "path": "/LOOPBACK_INTERFACE/Loopback1|2200:2::32~1128", @@ -2512,9 +2486,7 @@ "op": "add", "path": "/LOOPBACK_INTERFACE/Loopback0|10.1.0.32~132", "value": {} - } - ], - [ + }, { "op": "add", "path": "/LOOPBACK_INTERFACE/Loopback0|1100:1::32~1128", @@ -2583,9 +2555,7 @@ "nhopself": "0", "rrclient": "0" } - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.61", @@ -3923,9 +3893,7 @@ "nhopself": "0", "rrclient": "0" } - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::42", @@ -3948,9 +3916,7 @@ "value": { "profile": "pg_lossless_40000_40m_profile" } - } - ], - [ + }, { "op": "add", "path": "/BUFFER_PG/Ethernet64|0", @@ -3966,18 +3932,14 @@ "value": { "profile": "egress_lossy_profile" } - } - ], - [ + }, { "op": "add", "path": "/BUFFER_QUEUE/Ethernet64|3-4", "value": { "profile": "egress_lossless_profile" } - } - ], - [ + }, { "op": "add", "path": "/BUFFER_QUEUE/Ethernet64|5-6", @@ -3996,27 +3958,6 @@ } } ], - [ - { - "op": "add", - "path": "/INTERFACE/Ethernet64", - "value": {} - } - ], - [ - { - "op": "add", - "path": "/INTERFACE/Ethernet64|10.0.0.32~131", - "value": {} - } - ], - [ - { - "op": "add", - "path": "/INTERFACE/Ethernet64|FC00::41~1126", - "value": {} - } - ], [ { "op": "add", @@ -4033,15 +3974,20 @@ [ { "op": "add", - "path": "/ACL_TABLE/EVERFLOW/ports/0", - "value": "Ethernet64" + "path": 
"/INTERFACE/Ethernet64", + "value": {} } ], [ { "op": "add", - "path": "/ACL_TABLE/EVERFLOWV6/ports/0", - "value": "Ethernet64" + "path": "/INTERFACE/Ethernet64|10.0.0.32~131", + "value": {} + }, + { + "op": "add", + "path": "/INTERFACE/Ethernet64|FC00::41~1126", + "value": {} } ], [ @@ -4049,321 +3995,245 @@ "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.1/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.5/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.9/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.13/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.17/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.21/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.25/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.29/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.35/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.37/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.39/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.41/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.43/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.45/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.47/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.49/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.51/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.53/admin_status", "value": "up" - 
} - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.55/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.57/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.59/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.61/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/10.0.0.63/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::1a/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::2/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::2a/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::3a/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::4a/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::4e/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::5a/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::5e/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::6a/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::6e/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::7a/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::7e/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::12/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::22/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::32/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::46/admin_status", "value": "up" - } - ], - [ + }, { 
"op": "add", "path": "/BGP_NEIGHBOR/fc00::52/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::56/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::62/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::66/admin_status", "value": "up" - } - ], - [ + }, { "op": "add", "path": "/BGP_NEIGHBOR/fc00::72/admin_status", "value": "up" + }, + { + "op": "add", + "path": "/BGP_NEIGHBOR/fc00::76/admin_status", + "value": "up" + }, + { + "op": "add", + "path": "/BGP_NEIGHBOR/fc00::a/admin_status", + "value": "up" } ], [ { "op": "add", - "path": "/BGP_NEIGHBOR/fc00::76/admin_status", - "value": "up" + "path": "/ACL_TABLE/EVERFLOW/ports/0", + "value": "Ethernet64" } ], [ { "op": "add", - "path": "/BGP_NEIGHBOR/fc00::a/admin_status", - "value": "up" + "path": "/ACL_TABLE/EVERFLOWV6/ports/0", + "value": "Ethernet64" } ], [ @@ -5542,31 +5412,11 @@ } ], "expected_changes": [ - [ - { - "op": "remove", - "path": "/BGP_NEIGHBOR/10.0.0.33" - } - ], - [ - { - "op": "remove", - "path": "/BGP_NEIGHBOR/fc00::42" - } - ], - [ - { - "op": "remove", - "path": "/DEVICE_NEIGHBOR/Ethernet64" - } - ], [ { "op": "remove", "path": "/INTERFACE/Ethernet64|10.0.0.32~131" - } - ], - [ + }, { "op": "remove", "path": "/INTERFACE/Ethernet64|FC00::41~1126" @@ -5581,289 +5431,215 @@ [ { "op": "remove", - "path": "/ACL_TABLE/EVERFLOW/ports/0" + "path": "/DEVICE_NEIGHBOR/Ethernet64" } ], [ { "op": "remove", - "path": "/ACL_TABLE/EVERFLOWV6/ports/0" + "path": "/BGP_NEIGHBOR/10.0.0.33" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::42" } ], [ { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.1/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.5/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.9/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.13/admin_status" - } - ], - [ + }, { "op": "remove", 
"path": "/BGP_NEIGHBOR/10.0.0.17/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.21/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.25/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.29/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.35/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.37/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.39/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.41/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.43/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.45/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.47/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.49/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.51/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.53/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.55/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.57/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.59/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.61/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/10.0.0.63/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::1a/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::2/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::2a/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::3a/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::4a/admin_status" - } - ], - [ + }, { "op": "remove", "path": 
"/BGP_NEIGHBOR/fc00::4e/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::5a/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::5e/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::6a/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::6e/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::7a/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::7e/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::12/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::22/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::32/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::46/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::52/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::56/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::62/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::66/admin_status" - } - ], - [ + }, { "op": "remove", "path": "/BGP_NEIGHBOR/fc00::72/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::76/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::a/admin_status" } ], [ { "op": "remove", - "path": "/BGP_NEIGHBOR/fc00::76/admin_status" + "path": "/ACL_TABLE/EVERFLOW/ports/0" } ], [ { "op": "remove", - "path": "/BGP_NEIGHBOR/fc00::a/admin_status" + "path": "/ACL_TABLE/EVERFLOWV6/ports/0" } ], [ @@ -5889,9 +5665,7 @@ { "op": "remove", "path": "/BUFFER_PG/Ethernet64|3-4" - } - ], - [ + }, { "op": "remove", "path": "/BUFFER_PG/Ethernet64|0" @@ -5901,15 +5675,11 @@ { "op": "remove", "path": "/BUFFER_QUEUE/Ethernet64|0-2" - } - ], - [ + }, { "op": "remove", "path": "/BUFFER_QUEUE/Ethernet64|3-4" - } - ], - [ + }, { "op": "remove", "path": 
"/BUFFER_QUEUE/Ethernet64|5-6" @@ -5923,4 +5693,4 @@ ] ] } -} \ No newline at end of file +} diff --git a/tests/generic_config_updater/gcu_feature_patch_application_test.py b/tests/generic_config_updater/gcu_feature_patch_application_test.py index 27d9ebf21..48b5b1ca6 100644 --- a/tests/generic_config_updater/gcu_feature_patch_application_test.py +++ b/tests/generic_config_updater/gcu_feature_patch_application_test.py @@ -94,10 +94,9 @@ def create_patch_applier(self, config): patch_wrapper = PatchWrapper(config_wrapper) return gu.PatchApplier(config_wrapper=config_wrapper, patch_wrapper=patch_wrapper, changeapplier=change_applier) - @patch('generic_config_updater.change_applier.get_config_db_as_json', side_effect=get_running_config) @patch("generic_config_updater.change_applier.get_config_db") @patch("generic_config_updater.change_applier.set_config") - def run_single_success_case_applier(self, data, mock_set, mock_db, mock_get_config_db_as_json): + def run_single_success_case_applier(self, data, mock_set, mock_db): current_config = data["current_config"] expected_config = data["expected_config"] patch = jsonpatch.JsonPatch(data["patch"]) @@ -125,8 +124,7 @@ def run_single_success_case_applier(self, data, mock_set, mock_db, mock_get_conf self.assertEqual(simulated_config, expected_config) @patch("generic_config_updater.change_applier.get_config_db") - @patch('generic_config_updater.change_applier.get_config_db_as_json', side_effect=get_running_config) - def run_single_failure_case_applier(self, data, mock_db, mock_get_config_db_as_json): + def run_single_failure_case_applier(self, data, mock_db): current_config = data["current_config"] patch = jsonpatch.JsonPatch(data["patch"]) expected_error_substrings = data["expected_error_substrings"] diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py index 8480dc23b..d86930062 100644 --- a/tests/generic_config_updater/generic_updater_test.py +++ 
b/tests/generic_config_updater/generic_updater_test.py @@ -3,7 +3,7 @@ import shutil import unittest from unittest.mock import MagicMock, Mock, call, patch -from .gutest_helpers import create_side_effect_dict, Files +from .gutest_helpers import create_side_effect_dict, create_side_effect_skipfirstarg_dict, Files import generic_config_updater.generic_updater as gu import generic_config_updater.patch_sorter as ps @@ -41,7 +41,7 @@ def test_apply__no_errors__update_successful(self): patch_applier.patch_wrapper.simulate_patch.assert_has_calls( [call(Files.MULTI_OPERATION_CONFIG_DB_PATCH, Files.CONFIG_DB_AS_JSON)]) patch_applier.patchsorter.sort.assert_has_calls([call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) - patch_applier.changeapplier.apply.assert_has_calls([call(changes[0]), call(changes[1])]) + patch_applier.changeapplier.apply.assert_called() patch_applier.patch_wrapper.verify_same_json.assert_has_calls( [call(Files.CONFIG_DB_AFTER_MULTI_PATCH, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) @@ -72,7 +72,9 @@ def __create_patch_applier(self, create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): changes}) changeapplier = Mock() - changeapplier.apply.side_effect = create_side_effect_dict({(str(changes[0]),): 0, (str(changes[1]),): 0}) + changeapplier.apply.side_effect = create_side_effect_skipfirstarg_dict({ + (str(changes[0]),): Files.CONFIG_DB_AFTER_MULTI_PATCH, + (str(changes[1]),): Files.CONFIG_DB_AFTER_MULTI_PATCH}) return gu.PatchApplier(patchsorter, changeapplier, config_wrapper, patch_wrapper) diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index 4a16a5ca4..a2df2a148 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -56,11 +56,10 @@ def test_apply_change_to_config_db__multiple_calls__changes_imitated_config_db(s ] expected = imitated_config_db + actual = config_wrapper.get_config_db_as_json() for change in changes: # Act - 
config_wrapper.apply_change_to_config_db(change) - - actual = config_wrapper.get_config_db_as_json() + actual = config_wrapper.apply_change_to_config_db(actual, change) expected = change.apply(expected) # Assert @@ -451,48 +450,6 @@ def test_remove_empty_tables__multiple_empty_tables__returns_config_without_empt # Assert self.assertDictEqual({"any_table": {"key": "value"}}, actual) - def test_create_sonic_yang_with_loaded_models__creates_new_sonic_yang_every_call(self): - # check yang models fields are the same or None, non-yang model fields are different - def check(sy1, sy2): - # instances are different - self.assertNotEqual(sy1, sy2) - - # yang models fields are same or None - self.assertTrue(sy1.confDbYangMap is sy2.confDbYangMap) - self.assertTrue(sy1.ctx is sy2.ctx) - self.assertTrue(sy1.DEBUG is sy2.DEBUG) - self.assertTrue(sy1.preProcessedYang is sy2.preProcessedYang) - self.assertTrue(sy1.SYSLOG_IDENTIFIER is sy2.SYSLOG_IDENTIFIER) - self.assertTrue(sy1.yang_dir is sy2.yang_dir) - self.assertTrue(sy1.yangFiles is sy2.yangFiles) - self.assertTrue(sy1.yJson is sy2.yJson) - self.assertTrue(not(hasattr(sy1, 'module')) or sy1.module is None) # module is unused, might get deleted - self.assertTrue(not(hasattr(sy2, 'module')) or sy2.module is None) - - # non yang models fields are different - self.assertFalse(sy1.root is sy2.root) - self.assertFalse(sy1.jIn is sy2.jIn) - self.assertFalse(sy1.tablesWithOutYang is sy2.tablesWithOutYang) - self.assertFalse(sy1.xlateJson is sy2.xlateJson) - self.assertFalse(sy1.revXlateJson is sy2.revXlateJson) - - config_wrapper = gu_common.ConfigWrapper() - self.assertTrue(config_wrapper.sonic_yang_with_loaded_models is None) - - sy1 = config_wrapper.create_sonic_yang_with_loaded_models() - sy2 = config_wrapper.create_sonic_yang_with_loaded_models() - - # Simulating loading non-yang model fields - sy1.loadData(Files.ANY_CONFIG_DB) - sy1.getData() - - # Simulating loading non-yang model fields - sy2.loadData(Files.ANY_CONFIG_DB) - 
sy2.getData() - - check(sy1, sy2) - check(sy1, config_wrapper.sonic_yang_with_loaded_models) - check(sy2, config_wrapper.sonic_yang_with_loaded_models) class TestPatchWrapper(unittest.TestCase): def setUp(self): @@ -690,7 +647,7 @@ def check(path, tokens): self.assertEqual(expected, actual) check("", []) - check("/", [""]) + check("/", []) check("/token", ["token"]) check("/more/than/one/token", ["more", "than", "one", "token"]) check("/has/numbers/0/and/symbols/^", ["has", "numbers", "0", "and", "symbols", "^"]) @@ -744,33 +701,6 @@ def check(path, tokens): # Not validating no double-quotes within double-quoted string check('/a/mix["of""quotes\'does"]/not/work/well', ["a", 'mix["of""quotes\'does"]', "not", "work", "well"]) - def test_create_xpath(self): - def check(tokens, xpath): - expected=xpath - actual=self.path_addressing.create_xpath(tokens) - self.assertEqual(expected, actual) - - check([], "/") - check(["token"], "/token") - check(["more", "than", "one", "token"], "/more/than/one/token") - check(["multi", "tokens", "with", "empty", "last", "token", ""], "/multi/tokens/with/empty/last/token/") - check(["has", "numbers", "0", "and", "symbols", "^"], "/has/numbers/0/and/symbols/^") - check(["has[a='predicate']", "in", "the", "beginning"], "/has[a='predicate']/in/the/beginning") - check(["ha", "s[a='predicate']", "in", "the", "middle"], "/ha/s[a='predicate']/in/the/middle") - check(["ha", "s[a='predicate-in-the-end']"], "/ha/s[a='predicate-in-the-end']") - check(["it", "has[more='than'][one='predicate']", "somewhere"], "/it/has[more='than'][one='predicate']/somewhere") - check(["ha", "s[a='predicate\"with']", "double-quotes", "inside"], "/ha/s[a='predicate\"with']/double-quotes/inside") - check(["a", 'predicate[with="double"]', "quotes"], '/a/predicate[with="double"]/quotes') - check(['multiple["predicate"][with="double"]', "quotes"], '/multiple["predicate"][with="double"]/quotes') - check(['multiple["predicate"][with="double"]', "quotes"], 
'/multiple["predicate"][with="double"]/quotes') - check(["ha", 's[a="predicate\'with"]', "single-quote", "inside"], '/ha/s[a="predicate\'with"]/single-quote/inside') - # XPATH 1.0 does not support single-quote within single-quoted string. str literal can be '[^']*' - # Not validating no single-quote within single-quoted string - check(["a", "mix['of''quotes\"does']", "not", "work", "well"], "/a/mix['of''quotes\"does']/not/work/well", ) - # XPATH 1.0 does not support double-quotes within double-quoted string. str literal can be "[^"]*" - # Not validating no double-quotes within double-quoted string - check(["a", 'mix["of""quotes\'does"]', "not", "work", "well"], '/a/mix["of""quotes\'does"]/not/work/well') - def test_find_ref_paths__ref_is_the_whole_key__returns_ref_paths(self): # Arrange path = "/PORT/Ethernet0" diff --git a/tests/generic_config_updater/gutest_helpers.py b/tests/generic_config_updater/gutest_helpers.py index 2e8984ad6..b95812d96 100644 --- a/tests/generic_config_updater/gutest_helpers.py +++ b/tests/generic_config_updater/gutest_helpers.py @@ -5,6 +5,7 @@ import sys import unittest from unittest.mock import MagicMock, Mock, call +from generic_config_updater.patch_sorter import JsonMoveGroup class MockSideEffectDict: def __init__(self, map): @@ -19,9 +20,53 @@ def side_effect_func(self, *args): return value + def side_effect_skiplastarg_func(self, *args): + arglist = [str(args[i]) for i in range(len(args)-1)] + key = tuple(arglist) + value = self.map.get(key) + if value is None: + raise ValueError(f"Given arguments were not found in arguments map.\n Arguments: {key}\n Map: {self.map}") + + return value + + def side_effect_skipfirstarg_func(self, *args): + arglist = [str(args[i+1]) for i in range(len(args)-1)] + key = tuple(arglist) + value = self.map.get(key) + if value is None: + raise ValueError(f"Given arguments were not found in arguments map.\n Arguments: {key}\n Map: {self.map}") + + return value + + def side_effect_jsonmovegroup_func(self, 
*args): + arglist = [str(arg) for arg in args] + key = tuple(arglist) + value = self.map.get(key) + if value is None: + raise ValueError(f"Given arguments were not found in arguments map.\n Arguments: {key}\n Map: {self.map}") + + rv = [] + for val in value: + rv.append(JsonMoveGroup(val)) + return rv + + def create_side_effect_dict(map): return MockSideEffectDict(map).side_effect_func + +def create_side_effect_skiplastarg_dict(map): + return MockSideEffectDict(map).side_effect_skiplastarg_func + + +def create_side_effect_skipfirstarg_dict(map): + return MockSideEffectDict(map).side_effect_skipfirstarg_func + + +def create_side_effect_jsonmovegroup_dict(map): + return MockSideEffectDict(map).side_effect_jsonmovegroup_func + + class FilesLoader: def __init__(self): self.files_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files") diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py index 743969737..c7effd257 100644 --- a/tests/generic_config_updater/multiasic_change_applier_test.py +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -1,5 +1,6 @@ import jsonpointer import unittest +import copy from importlib import reload from unittest.mock import patch, MagicMock from generic_config_updater.generic_updater import extract_scope @@ -177,7 +178,7 @@ def test_extract_scope_singleasic(self, mock_is_multi_asic): except jsonpointer.JsonPointerException: assert(not result) - @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) + @patch('generic_config_updater.gu_common.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_change_default_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector @@ -194,12 +195,13 @@ def test_apply_change_default_scope(self, mock_ConfigDBConnector, mock_get_runni change = 
MagicMock() # Call the apply method with the change object - applier.apply(change) + current_config = copy.deepcopy(generic_config_updater.gu_common.get_config_db_as_json(scope=self)) + current_config = applier.apply(current_config, change) # Assert ConfigDBConnector called with the correct namespace mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="") - @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) + @patch('generic_config_updater.gu_common.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_change_given_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector @@ -214,12 +216,13 @@ def test_apply_change_given_scope(self, mock_ConfigDBConnector, mock_get_running change = MagicMock() # Call the apply method with the change object - applier.apply(change) + current_config = copy.deepcopy(generic_config_updater.gu_common.get_config_db_as_json(scope="asic0")) + current_config = applier.apply(current_config, change) # Assert ConfigDBConnector called with the correct scope mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="asic0") - @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) + @patch('generic_config_updater.gu_common.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector @@ -237,11 +240,12 @@ def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_con # Test the behavior when os.system fails with self.assertRaises(Exception) as context: - applier.apply(change) + current_config = copy.deepcopy(generic_config_updater.gu_common.get_config_db_as_json()) + current_config = applier.apply(current_config, 
change) self.assertTrue('Failed to get running config' in str(context.exception)) - @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) + @patch('generic_config_updater.gu_common.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector @@ -267,8 +271,10 @@ def mock_get_empty_running_config_side_effect(): # Prepare a change object or data that applier.apply would use, simulating a patch that requires non-empty tables change = MagicMock() + current_config = copy.deepcopy(generic_config_updater.gu_common.get_config_db_as_json()) + # Apply the patch try: - assert(applier.apply(change) != 0) + assert(applier.apply(current_config, change) is not None) except Exception: pass diff --git a/tests/generic_config_updater/patch_sorter_test.py b/tests/generic_config_updater/patch_sorter_test.py index baeab6fdd..d33cf5644 100644 --- a/tests/generic_config_updater/patch_sorter_test.py +++ b/tests/generic_config_updater/patch_sorter_test.py @@ -2,11 +2,12 @@ import jsonpatch import unittest from unittest.mock import MagicMock, Mock - import generic_config_updater.patch_sorter as ps -from .gutest_helpers import Files, create_side_effect_dict +from .gutest_helpers import Files, create_side_effect_dict, create_side_effect_jsonmovegroup_dict, \ + create_side_effect_skiplastarg_dict from generic_config_updater.gu_common import ConfigWrapper, PatchWrapper, OperationWrapper, \ GenericConfigUpdaterError, OperationType, JsonChange, PathAddressing +from generic_config_updater.patch_sorter import JsonMoveGroup class TestDiff(unittest.TestCase): def test_apply_move__updates_current_config(self): @@ -438,14 +439,14 @@ def setUp(self): self.single_move_generator = Mock() self.single_move_generator.generate.side_effect = \ - 
create_side_effect_dict({(str(self.any_diff),): [self.any_move]}) + create_side_effect_jsonmovegroup_dict({(str(self.any_diff),): [self.any_move]}) self.another_single_move_generator = Mock() self.another_single_move_generator.generate.side_effect = \ - create_side_effect_dict({(str(self.any_diff),): [self.any_other_move1]}) + create_side_effect_jsonmovegroup_dict({(str(self.any_diff),): [self.any_other_move1]}) self.multiple_move_generator = Mock() - self.multiple_move_generator.generate.side_effect = create_side_effect_dict( + self.multiple_move_generator.generate.side_effect = create_side_effect_jsonmovegroup_dict( {(str(self.any_diff),): [self.any_move, self.any_other_move1, self.any_other_move2]}) self.single_move_extender = Mock() @@ -488,11 +489,11 @@ def setUp(self): }) self.fail_move_validator = Mock() - self.fail_move_validator.validate.side_effect = create_side_effect_dict( + self.fail_move_validator.validate.side_effect = create_side_effect_skiplastarg_dict( {(str(self.any_move), str(self.any_diff)): False}) self.success_move_validator = Mock() - self.success_move_validator.validate.side_effect = create_side_effect_dict( + self.success_move_validator.validate.side_effect = create_side_effect_skiplastarg_dict( {(str(self.any_move), str(self.any_diff)): True}) def test_ctor__assigns_values_correctly(self): @@ -515,7 +516,7 @@ def test_generate__single_move_generator__single_move_returned(self): # Arrange move_generators = [self.single_move_generator] move_wrapper = ps.MoveWrapper(move_generators, [], [], []) - expected = [self.any_move] + expected = [JsonMoveGroup(self.any_move)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -527,7 +528,8 @@ def test_generate__multiple_move_generator__multiple_move_returned(self): # Arrange move_generators = [self.multiple_move_generator] move_wrapper = ps.MoveWrapper(move_generators, [], [], []) - expected = [self.any_move, self.any_other_move1, self.any_other_move2] + expected = 
[JsonMoveGroup(self.any_move), JsonMoveGroup(self.any_other_move1), + JsonMoveGroup(self.any_other_move2)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -539,7 +541,7 @@ def test_generate__different_move_generators__different_moves_returned(self): # Arrange move_generators = [self.single_move_generator, self.another_single_move_generator] move_wrapper = ps.MoveWrapper(move_generators, [], [], []) - expected = [self.any_move, self.any_other_move1] + expected = [JsonMoveGroup(self.any_move), JsonMoveGroup(self.any_other_move1)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -551,7 +553,7 @@ def test_generate__duplicate_generated_moves__unique_moves_returned(self): # Arrange move_generators = [self.single_move_generator, self.single_move_generator] move_wrapper = ps.MoveWrapper(move_generators, [], [], []) - expected = [self.any_move] + expected = [JsonMoveGroup(self.any_move)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -563,7 +565,7 @@ def test_generate__different_move_non_extendable_generators__different_moves_ret # Arrange move_non_extendable_generators = [self.single_move_generator, self.another_single_move_generator] move_wrapper = ps.MoveWrapper([], move_non_extendable_generators, [], []) - expected = [self.any_move, self.any_other_move1] + expected = [JsonMoveGroup(self.any_move), JsonMoveGroup(self.any_other_move1)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -575,7 +577,7 @@ def test_generate__duplicate_generated_non_extendable_moves__unique_moves_return # Arrange move_non_extendable_generators = [self.single_move_generator, self.single_move_generator] move_wrapper = ps.MoveWrapper([], move_non_extendable_generators, [], []) - expected = [self.any_move] + expected = [JsonMoveGroup(self.any_move)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -588,7 +590,7 @@ def test_generate__duplicate_move_between_extendable_and_non_extendable_generato move_generators = 
[self.single_move_generator] move_non_extendable_generators = [self.single_move_generator] move_wrapper = ps.MoveWrapper(move_generators, move_non_extendable_generators, [], []) - expected = [self.any_move] + expected = [JsonMoveGroup(self.any_move)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -601,7 +603,7 @@ def test_generate__single_move_extender__one_extended_move_returned(self): move_generators = [self.single_move_generator] move_extenders = [self.single_move_extender] move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) - expected = [self.any_move, self.any_extended_move] + expected = [JsonMoveGroup(self.any_move), JsonMoveGroup(self.any_extended_move)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -614,7 +616,8 @@ def test_generate__multiple_move_extender__multiple_extended_move_returned(self) move_generators = [self.single_move_generator] move_extenders = [self.multiple_move_extender] move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) - expected = [self.any_move, self.any_extended_move, self.any_other_extended_move1, self.any_other_extended_move2] + expected = [JsonMoveGroup(self.any_move), JsonMoveGroup(self.any_extended_move), + JsonMoveGroup(self.any_other_extended_move1), JsonMoveGroup(self.any_other_extended_move2)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -627,7 +630,8 @@ def test_generate__different_move_extenders__different_extended_moves_returned(s move_generators = [self.single_move_generator] move_extenders = [self.single_move_extender, self.another_single_move_extender] move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) - expected = [self.any_move, self.any_extended_move, self.any_other_extended_move1] + expected = [JsonMoveGroup(self.any_move), JsonMoveGroup(self.any_extended_move), + JsonMoveGroup(self.any_other_extended_move1)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -640,7 +644,7 @@ def 
test_generate__duplicate_extended_moves__unique_moves_returned(self): move_generators = [self.single_move_generator] move_extenders = [self.single_move_extender, self.single_move_extender] move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) - expected = [self.any_move, self.any_extended_move] + expected = [JsonMoveGroup(self.any_move), JsonMoveGroup(self.any_extended_move)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -653,11 +657,11 @@ def test_generate__mixed_extended_moves__unique_moves_returned(self): move_generators = [self.single_move_generator, self.another_single_move_generator] move_extenders = [self.mixed_move_extender] move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) - expected = [self.any_move, - self.any_other_move1, - self.any_extended_move, - self.any_other_extended_move1, - self.any_other_extended_move2] + expected = [JsonMoveGroup(self.any_move), + JsonMoveGroup(self.any_other_move1), + JsonMoveGroup(self.any_extended_move), + JsonMoveGroup(self.any_other_extended_move1), + JsonMoveGroup(self.any_other_extended_move2)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -670,7 +674,7 @@ def test_generate__multiple_non_extendable_moves__no_moves_extended(self): move_non_extendable_generators = [self.single_move_generator, self.another_single_move_generator] move_extenders = [self.mixed_move_extender] move_wrapper = ps.MoveWrapper([], move_non_extendable_generators, move_extenders, []) - expected = [self.any_move, self.any_other_move1] + expected = [JsonMoveGroup(self.any_move), JsonMoveGroup(self.any_other_move1)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -684,9 +688,9 @@ def test_generate__mixed_extendable_non_extendable_moves__only_extendable_moves_ move_non_extendable_generators = [self.single_move_generator] # generates: any_move move_extenders = [self.mixed_move_extender] move_wrapper = ps.MoveWrapper(move_generators, move_non_extendable_generators, 
move_extenders, []) - expected = [self.any_move, - self.any_other_move1, - self.any_other_extended_move1] + expected = [JsonMoveGroup(self.any_move), + JsonMoveGroup(self.any_other_move1), + JsonMoveGroup(self.any_other_extended_move1)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -700,8 +704,8 @@ def test_generate__move_is_extendable_and_non_extendable__move_is_extended(self) move_non_extendable_generators = [self.single_move_generator] move_extenders = [self.single_move_extender] move_wrapper = ps.MoveWrapper(move_generators, move_non_extendable_generators, move_extenders, []) - expected = [self.any_move, - self.any_extended_move] + expected = [JsonMoveGroup(self.any_move), + JsonMoveGroup(self.any_extended_move)] # Act actual = list(move_wrapper.generate(self.any_diff)) @@ -739,12 +743,12 @@ def test_validate__multiple_validators_succeed___true_returned(self): move_wrapper = ps.MoveWrapper([], [], [], move_validators) # Act and assert - self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) + self.assertTrue(move_wrapper.validate(JsonMoveGroup(self.any_move), self.any_diff)) def test_simulate__applies_move(self): # Arrange diff = Mock() - diff.apply_move.side_effect = create_side_effect_dict({(str(self.any_move), ): self.any_diff}) + diff.apply_move.side_effect = create_side_effect_skiplastarg_dict({(str(self.any_move), ): self.any_diff}) move_wrapper = ps.MoveWrapper(None, None, None, None) # Act @@ -870,7 +874,9 @@ class TestDeleteWholeConfigMoveValidator(unittest.TestCase): def setUp(self): self.operation_wrapper = OperationWrapper() self.validator = ps.DeleteWholeConfigMoveValidator() - self.any_diff = Mock() + self.any_current_config = Mock() + self.any_target_config = Mock() + self.any_diff = ps.Diff(self.any_current_config, self.any_target_config) self.any_non_whole_config_path = "/table1" self.whole_config_path = "" @@ -898,7 +904,7 @@ def verify(self, operation_type, path, expected): move = 
ps.JsonMove.from_operation(operation) # Act - actual = self.validator.validate(move, self.any_diff) + actual = self.validator.validate(JsonMoveGroup(move), self.any_diff, self.any_target_config) # Assert self.assertEqual(expected, actual) @@ -921,7 +927,7 @@ def test_validate__invalid_config_db_after_applying_move__failure(self): validator = ps.FullConfigMoveValidator(config_wrapper) # Act and assert - self.assertFalse(validator.validate(self.any_move, self.any_diff)) + self.assertFalse(validator.validate(JsonMoveGroup(self.any_move), self.any_diff, self.any_simulated_config)) def test_validate__valid_config_db_after_applying_move__success(self): # Arrange @@ -931,7 +937,7 @@ def test_validate__valid_config_db_after_applying_move__success(self): validator = ps.FullConfigMoveValidator(config_wrapper) # Act and assert - self.assertTrue(validator.validate(self.any_move, self.any_diff)) + self.assertTrue(validator.validate(JsonMoveGroup(self.any_move), self.any_diff, self.any_simulated_config)) class TestCreateOnlyMoveValidator(unittest.TestCase): def setUp(self): @@ -1193,7 +1199,7 @@ def verify_parent_adding(self, added_parent_value, expected): diff = ps.Diff(current_config, target_config) move = ps.JsonMove.from_operation({"op":"add", "path":"/BGP_NEIGHBOR/10.0.0.57", "value": added_parent_value}) - actual = self.validator.validate(move, diff) + actual = self.validator.validate(move, diff, move.apply(diff.current_config)) self.assertEqual(expected, actual) @@ -1205,7 +1211,7 @@ def verify_diff(self, current_config, target_config, current_config_tokens=None, move = ps.JsonMove(diff, OperationType.REPLACE, current_config_tokens, target_config_tokens) # Act - actual = self.validator.validate(move, diff) + actual = self.validator.validate(move, diff, move.apply(diff.current_config)) # Assert self.assertEqual(expected, actual) @@ -1220,18 +1226,17 @@ def test_validate__add_full_config_has_dependencies__failure(self): # Arrange # CROPPED_CONFIG_DB_AS_JSON has dependencies 
between PORT and ACL_TABLE diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CROPPED_CONFIG_DB_AS_JSON) - move = ps.JsonMove(diff, OperationType.ADD, [], []) - + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.ADD, [], [])) # Act and assert - self.assertFalse(self.validator.validate(move, diff)) + self.assertFalse(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__add_full_config_no_dependencies__success(self): # Arrange diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) - move = ps.JsonMove(diff, OperationType.ADD, [], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.ADD, [], [])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__add_table_has_no_dependencies__success(self): # Arrange @@ -1241,27 +1246,10 @@ def test_validate__add_table_has_no_dependencies__success(self): {"op": "remove", "path":"/ACL_TABLE"} ])) diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.ADD, ["ACL_TABLE"], ["ACL_TABLE"]) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.ADD, ["ACL_TABLE"], ["ACL_TABLE"])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__remove_full_config_has_dependencies__failure(self): - # Arrange - # CROPPED_CONFIG_DB_AS_JSON has dependencies between PORT and ACL_TABLE - diff = ps.Diff(Files.CROPPED_CONFIG_DB_AS_JSON, Files.EMPTY_CONFIG_DB) - move = ps.JsonMove(diff, OperationType.REMOVE, [], []) - - # Act and assert - self.assertFalse(self.validator.validate(move, diff)) - - def test_validate__remove_full_config_no_dependencies__success(self): - # Arrange - diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) - move = ps.JsonMove(diff, OperationType.REMOVE, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + 
self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__remove_table_has_no_dependencies__success(self): # Arrange @@ -1270,10 +1258,10 @@ def test_validate__remove_table_has_no_dependencies__success(self): {"op": "remove", "path":"/ACL_TABLE"} ])) diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REMOVE, ["ACL_TABLE"]) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REMOVE, ["ACL_TABLE"])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__replace_whole_config_item_added_ref_added__failure(self): # Arrange @@ -1285,10 +1273,10 @@ def test_validate__replace_whole_config_item_added_ref_added__failure(self): ])) diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, [], [])) # Act and assert - self.assertFalse(self.validator.validate(move, diff)) + self.assertFalse(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__replace_whole_config_item_removed_ref_removed__false(self): # Arrange @@ -1300,10 +1288,10 @@ def test_validate__replace_whole_config_item_removed_ref_removed__false(self): ])) diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, [], [])) # Act and assert - self.assertFalse(self.validator.validate(move, diff)) + self.assertFalse(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__replace_whole_config_item_same_ref_added__true(self): # Arrange @@ -1314,10 +1302,10 @@ def test_validate__replace_whole_config_item_same_ref_added__true(self): ])) diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], 
[]) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, [], [])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__replace_whole_config_item_same_ref_removed__true(self): # Arrange @@ -1328,10 +1316,10 @@ def test_validate__replace_whole_config_item_same_ref_removed__true(self): ])) diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, [], [])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__replace_whole_config_item_same_ref_same__true(self): # Arrange @@ -1340,10 +1328,10 @@ def test_validate__replace_whole_config_item_same_ref_same__true(self): target_config = current_config diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, [], [])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__replace_list_item_different_location_than_target_and_no_deps__true(self): # Arrange @@ -1371,10 +1359,11 @@ def test_validate__replace_list_item_different_location_than_target_and_no_deps_ diff = ps.Diff(current_config, target_config) # the target tokens point to location 0 which exist in target_config # but the replace operation is operating on location 1 in current_config - move = ps.JsonMove(diff, OperationType.REPLACE, ["VLAN", "Vlan100", "dhcp_servers", 1], ["VLAN", "Vlan100", "dhcp_servers", 0]) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, ["VLAN", "Vlan100", "dhcp_servers", 1], + ["VLAN", "Vlan100", "dhcp_servers", 0])) # Act and assert - 
self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def prepare_config(self, config, patch): return patch.apply(config) @@ -1389,110 +1378,110 @@ def test_validate__no_changes__success(self): current_config = {"some_table":{"key1":"value1", "key2":"value2"}} target_config = {"some_table":{"key1":"value1", "key2":"value22"}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, ["some_table", "key1"], ["some_table", "key1"]) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, ["some_table", "key1"], ["some_table", "key1"])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__change_but_no_empty_table__success(self): # Arrange current_config = {"some_table":{"key1":"value1", "key2":"value2"}} target_config = {"some_table":{"key1":"value1", "key2":"value22"}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, ["some_table", "key2"], ["some_table", "key2"]) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, ["some_table", "key2"], ["some_table", "key2"])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__single_empty_table__failure(self): # Arrange current_config = {"some_table":{"key1":"value1", "key2":"value2"}} target_config = {"some_table":{}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, ["some_table"], ["some_table"]) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, ["some_table"], ["some_table"])) # Act and assert - self.assertFalse(self.validator.validate(move, diff)) + self.assertFalse(self.validator.validate(move, diff, 
move.apply(diff.current_config))) def test_validate__whole_config_replace_single_empty_table__failure(self): # Arrange current_config = {"some_table":{"key1":"value1", "key2":"value2"}} target_config = {"some_table":{}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, [], [])) # Act and assert - self.assertFalse(self.validator.validate(move, diff)) + self.assertFalse(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__whole_config_replace_mix_of_empty_and_non_empty__failure(self): # Arrange current_config = {"some_table":{"key1":"value1"}, "other_table":{"key2":"value2"}} target_config = {"some_table":{"key1":"value1"}, "other_table":{}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, [], [])) # Act and assert - self.assertFalse(self.validator.validate(move, diff)) + self.assertFalse(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__whole_config_multiple_empty_tables__failure(self): # Arrange current_config = {"some_table":{"key1":"value1"}, "other_table":{"key2":"value2"}} target_config = {"some_table":{}, "other_table":{}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, [], [])) # Act and assert - self.assertFalse(self.validator.validate(move, diff)) + self.assertFalse(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__remove_key_empties_a_table__failure(self): # Arrange current_config = {"some_table":{"key1":"value1"}, "other_table":{"key2":"value2"}} target_config = {"some_table":{"key1":"value1"}, "other_table":{}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, 
OperationType.REMOVE, ["other_table", "key2"], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REMOVE, ["other_table", "key2"], [])) # Act and assert - self.assertFalse(self.validator.validate(move, diff)) + self.assertFalse(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__remove_key_but_table_has_other_keys__success(self): # Arrange current_config = {"some_table":{"key1":"value1"}, "other_table":{"key2":"value2", "key3":"value3"}} target_config = {"some_table":{"key1":"value1"}, "other_table":{"key3":"value3"}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REMOVE, ["other_table", "key2"], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REMOVE, ["other_table", "key2"], [])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__remove_whole_table__success(self): # Arrange current_config = {"some_table":{"key1":"value1"}, "other_table":{"key2":"value2"}} target_config = {"some_table":{"key1":"value1"}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REMOVE, ["other_table"], []) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.REMOVE, ["other_table"], [])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__add_empty_table__failure(self): # Arrange current_config = {"some_table":{"key1":"value1"}, "other_table":{"key2":"value2"}} target_config = {"new_table":{}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.ADD, ["new_table"], ["new_table"]) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.ADD, ["new_table"], ["new_table"])) # Act and assert - self.assertFalse(self.validator.validate(move, diff)) + 
self.assertFalse(self.validator.validate(move, diff, move.apply(diff.current_config))) def test_validate__add_non_empty_table__success(self): # Arrange current_config = {"some_table":{"key1":"value1"}, "other_table":{"key2":"value2"}} target_config = {"new_table":{"key3":"value3"}} diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.ADD, ["new_table"], ["new_table"]) + move = JsonMoveGroup(ps.JsonMove(diff, OperationType.ADD, ["new_table"], ["new_table"])) # Act and assert - self.assertTrue(self.validator.validate(move, diff)) + self.assertTrue(self.validator.validate(move, diff, move.apply(diff.current_config))) class TestRequiredValueMoveValidator(unittest.TestCase): def setUp(self): @@ -1527,12 +1516,12 @@ def _run_single_test(self, test_case): # Arrange expected = test_case['expected'] current_config = test_case['config'] - move = test_case['move'] + move = JsonMoveGroup(test_case['move']) target_config = test_case.get('target_config', move.apply(current_config)) diff = ps.Diff(current_config, target_config) # Act and Assert - self.assertEqual(expected, self.validator.validate(move, diff)) + self.assertEqual(expected, self.validator.validate(move, diff, move.apply(diff.current_config))) def _get_critical_port_change_test_cases(self): # port-up status-changing under-port port-exist verdict @@ -1991,12 +1980,12 @@ def _run_single_test(self, test_case): # Arrange expected = test_case['expected'] current_config = test_case['config'] - move = test_case['move'] + move = JsonMoveGroup(test_case['move']) target_config = test_case.get('target_config', move.apply(current_config)) diff = ps.Diff(current_config, target_config) # Act and Assert - self.assertEqual(expected, self.validator.validate(move, diff)) + self.assertEqual(expected, self.validator.validate(move, diff, move.apply(diff.current_config))) def _apply_operations(self, config, operations): return jsonpatch.JsonPatch(operations).apply(config) @@ -2078,7 +2067,8 @@ def 
_get_lane_replacement_change_test_cases(self): class TestTableLevelMoveGenerator(unittest.TestCase): def setUp(self): - self.generator = ps.TableLevelMoveGenerator() + path_addressing = PathAddressing() + self.generator = ps.TableLevelMoveGenerator(path_addressing) def test_generate__tables_in_current_but_not_target__tables_deleted_moves(self): self.verify(current = {"ExistingTable": {}, "NonExistingTable1": {}, "NonExistingTable2": {}}, @@ -2115,12 +2105,15 @@ def verify(self, current, target, ex_ops): moves) def verify_moves(self, ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] + moves_ops = [] + for move in moves: + moves_ops.extend(move.get_jsonpatch()) self.assertCountEqual(ops, moves_ops) class TestKeyLevelMoveGenerator(unittest.TestCase): def setUp(self): - self.generator = ps.KeyLevelMoveGenerator() + path_addressing = PathAddressing() + self.generator = ps.KeyLevelMoveGenerator(path_addressing) def test_generate__keys_in_current_but_not_target__keys_deleted_moves(self): self.verify(current = { @@ -2197,7 +2190,9 @@ def verify(self, current, target, ex_ops): moves) def verify_moves(self, ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] + moves_ops = [] + for move in moves: + moves_ops.extend(move.get_jsonpatch()) self.assertCountEqual(ops, moves_ops) class TestLowLevelMoveGenerator(unittest.TestCase): @@ -2436,7 +2431,9 @@ def verify(self, tc_ops=None, cc_ops=None, ex_ops=None): self.verify_moves(expected, actual) def verify_moves(self, ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] + moves_ops = [] + for move in moves: + moves_ops.extend(move.get_jsonpatch()) self.assertCountEqual(ops, moves_ops) def get_diff(self, target_config_ops = None, current_config_ops = None): @@ -2528,7 +2525,9 @@ def test_generate__dpb_1_to_4_example(self): moves) def verify_moves(self, ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] + moves_ops = [] + for move in moves: + 
moves_ops.extend(move.get_jsonpatch()) self.assertCountEqual(ops, moves_ops) class TestRequiredValueMoveExtender(unittest.TestCase): @@ -2782,7 +2781,9 @@ def test_extend__port_deletion__no_extension(self): self._verify_moves(expected, actual) def _verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] + moves_ops = [] + for move in moves: + moves_ops.extend(move.patch) self.assertCountEqual(ex_ops, moves_ops) def _apply_operations(self, config, operations): @@ -3113,7 +3114,9 @@ def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[] self.verify_moves(ex_ops, moves) def verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] + moves_ops = [] + for move in moves: + moves_ops.extend(move.patch) self.assertCountEqual(ex_ops, moves_ops) class TestDeleteInsteadOfReplaceMoveExtender(unittest.TestCase): @@ -3178,7 +3181,9 @@ def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[] self.verify_moves(ex_ops, moves) def verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] + moves_ops = [] + for move in moves: + moves_ops.extend(move.patch) self.assertCountEqual(ex_ops, moves_ops) class DeleteRefsMoveExtender(unittest.TestCase): @@ -3242,7 +3247,9 @@ def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[] self.verify_moves(ex_ops, moves) def verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] + moves_ops = [] + for move in moves: + moves_ops.extend(move.patch) self.assertCountEqual(ex_ops, moves_ops) class TestSortAlgorithmFactory(unittest.TestCase): @@ -3261,7 +3268,10 @@ def verify(self, algo, algo_class): factory = ps.SortAlgorithmFactory(OperationWrapper(), config_wrapper, PathAddressing(config_wrapper)) expected_generators = [ps.RemoveCreateOnlyDependencyMoveGenerator, ps.LowLevelMoveGenerator] - expected_non_extendable_generators = [ps.KeyLevelMoveGenerator] 
+ expected_non_extendable_generators = [ps.BulkKeyLevelMoveGenerator, + ps.KeyLevelMoveGenerator, + ps.BulkKeyGroupLowLevelMoveGenerator, + ps.BulkLowLevelMoveGenerator] expected_extenders = [ps.RequiredValueMoveExtender, ps.UpperLevelMoveExtender, ps.DeleteInsteadOfReplaceMoveExtender, @@ -3332,6 +3342,7 @@ def run_single_success_case(self, data, skip_exact_change_list_match): simulated_config = change.apply(simulated_config) is_valid, error = self.config_wrapper.validate_config_db_config(simulated_config) self.assertTrue(is_valid, f"Change will produce invalid config. Error: {error}") + self.assertEqual(target_config, simulated_config) def test_patch_sorter_failure(self): @@ -3378,7 +3389,7 @@ def test_sort__does_not_remove_tables_without_yang_unintentionally_if_generated_ any_patch = Files.SINGLE_OPERATION_CONFIG_DB_PATCH target_config = any_patch.apply(current_config) sort_algorithm = Mock() - sort_algorithm.sort = lambda diff: [ps.JsonMove(diff, OperationType.REPLACE, [], [])] + sort_algorithm.sort = lambda diff: [JsonMoveGroup(ps.JsonMove(diff, OperationType.REPLACE, [], []))] patch_sorter = self.create_patch_sorter(current_config, sort_algorithm) expected = [JsonChange(jsonpatch.JsonPatch([OperationWrapper().create(OperationType.REPLACE, "", target_config)]))]