diff --git a/runbot/__manifest__.py b/runbot/__manifest__.py index 177b5d4db..f166684a5 100644 --- a/runbot/__manifest__.py +++ b/runbot/__manifest__.py @@ -6,7 +6,7 @@ 'author': "Odoo SA", 'website': "http://runbot.odoo.com", 'category': 'Website', - 'version': '5.14', + 'version': '5.17', 'application': True, 'depends': ['base', 'base_automation', 'website', 'auth_oauth'], 'data': [ @@ -57,6 +57,7 @@ 'views/oauth_provider_views.xml', 'views/repo_views.xml', 'views/res_config_settings_views.xml', + 'views/semgrep_rules.xml', 'views/stat_views.xml', 'views/upgrade.xml', 'views/upgrade_matrix_views.xml', diff --git a/runbot/common.py b/runbot/common.py index de9f5b333..fe34c2d44 100644 --- a/runbot/common.py +++ b/runbot/common.py @@ -24,6 +24,13 @@ dest_reg = re.compile(r'^\d{5,}-.+$') +try: + from odoo.addons.saas_worker.util import from_role +except ImportError: + def from_role(*_, **__): + return lambda _: None + + def transactioncache(method): @functools.wraps(method) def wrapper(self, *args, **kwargs): @@ -322,9 +329,35 @@ class TestTagsParser: (?:\[(.*)\])? 
# parameters $''', re.VERBOSE) # [-][tag][/module][:class][.method][[params]] - def __init__(self, test_tags): - parts = re.split(r',(?![^\[]*\])', test_tags) # split on all comma not inside [] (not followed by ]) + def __init__(self, test_tags, keep_escape=True): + parts = [''] + bracket_level = 0 + escape_next = False + for char in test_tags: + if char == ',' and bracket_level == 0: + parts.append('') + continue + + if char == '\\': + if not escape_next: + escape_next = True + if keep_escape: + parts[-1] += '\\' # not as the TagsSelector, we keep the escape character + continue + elif char == '[': + if not escape_next: + bracket_level += 1 + elif char == ']': + if not escape_next: + bracket_level -= 1 + elif not keep_escape and escape_next: # the previous \ was not escaping anything, put it back + parts[-1] += '\\' + + escape_next = False + parts[-1] += char + filter_specs = [t.strip() for t in parts if t.strip()] + self.filter_specs = filter_specs self.exclude = set() self.include = set() self.parameters = OrderedSet() @@ -332,8 +365,7 @@ def __init__(self, test_tags): for filter_spec in filter_specs: match = self.filter_spec_re.match(filter_spec) if not match: - _logger.error('Invalid tag %s', filter_spec) - continue + raise ValueError('Invalid tag %s' % filter_spec) sign, tag, file_path, module, klass, method, parameters = match.groups() is_include = sign != '-' @@ -362,6 +394,7 @@ def __init__(self, test_tags): def test_tags_to_search_domain(self, exclude_error_id=None): search_domains = [] + params_by_spec = dict(self.parameters) for include in self.include: _, test_module, test_class, test_method, file_path = include module_path = file_path or ((test_module or '') + '%') @@ -369,6 +402,10 @@ def test_tags_to_search_domain(self, exclude_error_id=None): test_method = test_method or '%' search_pattern = f'{module_path}:{test_class}.{test_method}' tag_domain = [('canonical_tags', 'like', f'{search_pattern}')] + params = params_by_spec.get(include) + if params: 
+ _sign, parameters = params + tag_domain.append(('canonical_tags', 'like', f'%[{parameters}%]%')) if exclude_error_id: tag_domain.append(('id', '!=', exclude_error_id)) search_domains.append(tag_domain) diff --git a/runbot/container.py b/runbot/container.py index d2f2a1624..f52456c1e 100644 --- a/runbot/container.py +++ b/runbot/container.py @@ -116,7 +116,7 @@ def _docker_build(build_dir, image_tag, pull=False): """Build the docker image :param build_dir: the build directory that contains Dockerfile. :param image_tag: name used to tag the resulting docker image - :return: tuple(success, msg) where success is a boolean and msg is the error message or None + :return: dict """ with DockerManager(image_tag) as dm: @@ -259,7 +259,10 @@ def _docker_run(cmd=False, log_path=False, build_dir=False, container_name=False else: run_cmd = cmd run_cmd = f'cd /data/build;touch start-{container_name};{run_cmd};cd /data/build;touch end-{container_name}' - _logger.info('Docker run command: %s', run_cmd) + run_cmd_repr = str(run_cmd) + if len(run_cmd_repr) > 250: + run_cmd_repr = run_cmd_repr[:250] + '...' 
+ _logger.info('Docker run command: %s', run_cmd_repr) docker_clear_state(container_name, build_dir) # ensure that no state are remaining build_dir = file_path(build_dir) diff --git a/runbot/controllers/badge.py b/runbot/controllers/badge.py index fa6e031ee..662163682 100644 --- a/runbot/controllers/badge.py +++ b/runbot/controllers/badge.py @@ -44,13 +44,9 @@ def badge(self, name, repo_id=False, trigger_id=False, theme='default'): if not builds: state = 'testing' else: - result = builds._result_multi() - if result == 'ok': + state = 'failed' + if all(build.global_result == 'ok' for build in builds): state = 'success' - elif result == 'warn': - state = 'warning' - else: - state = 'failed' etag = request.httprequest.headers.get('If-None-Match') retag = hashlib.md5(state.encode()).hexdigest() diff --git a/runbot/controllers/frontend.py b/runbot/controllers/frontend.py index 27075fc1a..6735fe586 100644 --- a/runbot/controllers/frontend.py +++ b/runbot/controllers/frontend.py @@ -80,7 +80,7 @@ def _pending(self): '/runbot/', '/runbot//search/'], website=True, auth='public', type='http') def bundles(self, project=None, search='', refresh=False, limit=40, has_pr=None, **kwargs): - search = search if len(search) < 60 else search[:60] + search = search if len(search) < 60 else search[:200] env = request.env categories = env['runbot.category'].search([]) projects = self.env['runbot.project'].search([('hidden', '=', False)]) @@ -119,13 +119,11 @@ def bundles(self, project=None, search='', refresh=False, limit=40, has_pr=None, pr_numbers = [] for search_elem in search.split("|"): if search_elem.isnumeric(): - pr_numbers.append(int(search_elem)) + search_domains.append([('branch_ids', 'any', [('name', '=', search_elem)])]) + if ':' in search_elem: + search_domains.append([('branch_ids', 'any', [('pull_head_name', '=', search_elem)])]) operator = '=ilike' if '%' in search_elem else 'ilike' search_domains.append([('name', operator, search_elem)]) - if pr_numbers: - res = 
request.env['runbot.branch'].search([('name', 'in', pr_numbers)]) - if res: - search_domains.append([('id', 'in', res.mapped('bundle_id').ids)]) search_domain = Domain.OR(search_domains) domain = Domain.AND([domain, search_domain]) @@ -166,7 +164,7 @@ def bundles(self, project=None, search='', refresh=False, limit=40, has_pr=None, '/runbot/bundle//page/', '/runbot/bundle/', ], website=True, auth='public', type='http', sitemap=False) - def bundle(self, bundle=None, page=1, limit=50, **kwargs): + def bundle(self, bundle=None, page=1, limit=50, expand_custom=False, **kwargs): if isinstance(bundle, str): bundle = request.env['runbot.bundle'].search([('name', '=', bundle)], limit=1, order='id') if not bundle: @@ -183,6 +181,7 @@ def bundle(self, bundle=None, page=1, limit=50, **kwargs): ) batchs = request.env['runbot.batch'].search(domain, limit=limit, offset=pager.get('offset', 0), order='id desc') + # compute if we should display the new batch button context = { 'bundle': bundle, 'batchs': batchs, @@ -190,6 +189,8 @@ def bundle(self, bundle=None, page=1, limit=50, **kwargs): 'project': bundle.project_id, 'title': 'Bundle %s' % bundle.name, 'page_info_state': bundle.last_batch._get_global_result(), + 'expand_custom': expand_custom, + 'needs_update': bundle.last_batch and bundle.last_batch.sudo().needs_update(), } return request.render('runbot.bundle', context) @@ -199,7 +200,7 @@ def bundle(self, bundle=None, page=1, limit=50, **kwargs): '/runbot/bundle//force/', ], type='http', auth="user", methods=['GET', 'POST'], csrf=False) def force_bundle(self, bundle, auto_rebase=False, use_base_commits=False, **_post): - if not request.env.user.has_group('runbot.group_runbot_advanced_user') and ':' not in bundle.name: + if not request.env.user.has_group('runbot.group_runbot_advanced_user') and ':' not in bundle.name and not bundle.last_batch.needs_update(): message = "Only users with a specific group can do that. 
Please contact runbot administrators" raise Forbidden(message) _logger.info('user %s forcing bundle %s', request.env.user.name, bundle.name) # user must be able to read bundle @@ -220,6 +221,12 @@ def batch(self, batch_id=None, **kwargs): } return request.render('runbot.batch', context) + @route(['/runbot/batch//prioritize'], website=True, auth='user', type='http', sitemap=False) + def batch_priority(self, batch_id=None, **kwargs): + batch = request.env['runbot.batch'].browse(batch_id) + batch.sudo().priority_level = int(batch.create_date.timestamp() - 3600) + return werkzeug.utils.redirect('/runbot/batch/%s' % batch_id) + @route(['/runbot/batch/slot//build'], auth='user', type='http') def slot_create_build(self, slot=None, **kwargs): build = slot.sudo()._create_missing_build() @@ -269,6 +276,7 @@ def resend_status(self, status_id=None, **kwargs): ], type='http', auth="user", methods=['POST'], csrf=False) def build_operations(self, build_id, operation, **post): build = request.env['runbot.build'].sudo().browse(build_id) + build.check_access('read') if operation == 'rebuild': build = build._rebuild() elif operation == 'kill': @@ -283,18 +291,17 @@ def build_operations(self, build_id, operation, **post): '/runbot/batch//build/', ], type='http', auth="public", website=True, sitemap=False) def build(self, build_id, search=None, from_batch=None, **post): - """Events/Logs""" - + build = request.env['runbot.build'].browse(build_id) if from_batch: from_batch = request.env['runbot.batch'].browse(int(from_batch)) - if build_id not in from_batch.with_context(active_test=False).slot_ids.build_id.ids: + if build.top_parent not in from_batch.with_context(active_test=False).slot_ids.build_id and build.create_batch_id != from_batch: # the url may have been forged replacing the build id, redirect to hide the batch return werkzeug.utils.redirect('/runbot/build/%s' % build_id) from_batch = from_batch.with_context(batch=from_batch) Build = 
request.env['runbot.build'].with_context(batch=from_batch) - build = Build.browse([build_id])[0] + build = Build.browse(build_id) if not build.exists(): return request.not_found() siblings = (build.parent_id.children_ids if build.parent_id else from_batch.slot_ids.build_id if from_batch else build).sorted('id') @@ -316,7 +323,8 @@ def build(self, build_id, search=None, from_batch=None, **post): @route([ '/runbot/build/search', ], website=True, auth='public', type='http', sitemap=False) - def builds(self, **kwargs): + def builds(self, limit=100, **kwargs): + limit = min(int(limit), 1000) domain = [] for key in ('config_id', 'version_id', 'project_id', 'trigger_id', 'create_batch_id.bundle_id', 'create_batch_id'): # allowed params value = kwargs.get(key) @@ -330,10 +338,12 @@ def builds(self, **kwargs): for key in ('description',): if key in kwargs: - domain.append((f'{key}', 'ilike', kwargs.get(key))) + value = kwargs.get(key) + operator = 'ilike' if '%' in value else '=' + domain.append((f'{key}', operator, value)) context = { - 'builds': request.env['runbot.build'].search(domain, limit=100), + 'builds': request.env['runbot.build'].search(domain, limit=limit), } return request.render('runbot.build_search', context) @@ -444,8 +454,8 @@ def build_errors(self, sort=None, page=1, limit=20, **kwargs): 'build_count asc': 'Number seen: Low to High', 'responsible asc': 'Assignee: A - Z', 'responsible desc': 'Assignee: Z - A', - 'module_name asc': 'Module name: A - Z', - 'module_name desc': 'Module name: Z -A', + 'team_id asc': 'Team', + 'name asc': 'Name', } sort_order = sort if sort in sort_order_choices else 'last_seen_date desc' @@ -663,19 +673,40 @@ def parse_log(self, ir_log, **kwargs): request.env['runbot.build.error']._parse_logs(ir_log) return werkzeug.utils.redirect('/runbot/build/%s' % ir_log.build_id.id) - @route(['/runbot/bundle/toggle_no_build//'], type='http', auth='user', sitemap=False) - def toggle_no_build(self, bundle_id, value, **kwargs): - if not 
request.env.user.has_group('base.group_user'): - return 'Forbidden' - bundle = request.env['runbot.bundle'].browse(bundle_id).exists() - if bundle.sticky or bundle.is_base: - return 'Forbidden' - if bundle.project_id.tmp_prefix and bundle.name.startswith(bundle.project_id.tmp_prefix): - return 'Forbidden' - bundle.sudo().no_build = bool(value) - _logger.info('Bundle %s no_build set to %s by %s', bundle.name, bool(value), request.env.user.name) + @route(['/runbot/bundle//triggers/'], type='http', auth='user', sitemap=False) + def configure_bundle_triggers(self, bundle_id, action, expand_custom=False, **kwargs): + if not request.env.user.has_group('runbot.group_user'): + raise NotFound() + + bundle = request.env['runbot.bundle'].browse(bundle_id) + if bundle.is_base or bundle.is_staging: + raise NotFound() + if action == 'disable_all': + bundle.sudo()._configure_custom_trigger_start_mode('disabled') + elif action == 'force_all': + bundle.sudo()._configure_custom_trigger_start_mode('force') + elif action == 'auto_all': + bundle.sudo()._configure_custom_trigger_start_mode('auto') + elif action == 'light_all': + bundle.sudo()._configure_custom_trigger_start_mode('light') + else: + raise NotFound() + if expand_custom: + return werkzeug.utils.redirect(f'/runbot/bundle/{bundle_id}?expand_custom=1') return werkzeug.utils.redirect(f'/runbot/bundle/{bundle_id}') + @route(['/runbot/trigger_custom//set_mode/'], type='http', auth='user', sitemap=False) + def configure_custom_trigger(self, trigger_custom_id, mode, **kwargs): + if not request.env.user.has_group('runbot.group_user'): + raise NotFound() + trigger_custom = request.env['runbot.bundle.trigger.custom'].browse(trigger_custom_id) + bundle = trigger_custom.bundle_id + if bundle.is_base or bundle.is_staging: + raise NotFound() + + trigger_custom.sudo().start_mode = mode + return werkzeug.utils.redirect(f'/runbot/bundle/{trigger_custom.bundle_id.id}?expand_custom=1') + @route(['/runbot/trigger/report/'], type='http', 
auth='user', website=True, sitemap=False) def report_view(self, trigger_id=None, **kwargs): return request.render("runbot.trigger_report", { @@ -852,21 +883,20 @@ def repos_heads(self, project_id=None, bundle_name=None, **kwargs): else: domain = Domain.AND([domain, [('sticky', '=', True)]]) bundles = request.env['runbot.bundle'].search(domain, order='id desc, name') - - last_batches_infos = { - bundle.name: { + last_batches_infos = dict() + for bundle in bundles: + batch = bundle.last_batch if bundle.last_batch.state != 'preparing' else bundle.last_done_batch + last_batches_infos[bundle.name] = { "commits": [ { "repo": commit_link.commit_id.repo_id.name, "head": commit_link.commit_id.name, "match_type": commit_link.match_type, } - for commit_link in bundle.last_batch.commit_link_ids + for commit_link in batch.commit_link_ids ], - "autotags": request.env["runbot.build.error"].sudo()._disabling_tags(build_id=bundle.last_batch.slot_ids.build_id[0]), + "autotags": request.env["runbot.build.error"].sudo()._disabling_tags(build_id=batch.slot_ids.build_id[0]), } - for bundle in bundles - } return request.make_json_response(last_batches_infos) @route([ diff --git a/runbot/controllers/hook.py b/runbot/controllers/hook.py index d46c7f699..4bd36a20d 100644 --- a/runbot/controllers/hook.py +++ b/runbot/controllers/hook.py @@ -4,8 +4,9 @@ import json import logging -from odoo import http +from odoo import http, fields from odoo.http import request +from ..common import from_role _logger = logging.getLogger(__name__) @@ -50,3 +51,18 @@ def hook(self, remote_id=None, **_post): branch = request.env['runbot.branch'].sudo().search([('remote_id', '=', remote.id), ('name', '=', branch_ref)]) branch.alive = False return "" + + @from_role('mergebot', signed=True) + @http.route(['/runbot/request_ci'], type='http', methods=["POST"], auth="public", website=True, csrf=False, sitemap=False) + def force_ci(self): + pull_request_names = request.get_json_data().get('pull_requests', []) + 
pull_domains = [] + for pull_request_name in pull_request_names: + remote_short_name, name = pull_request_name.split('#') + owner, repo_name = remote_short_name.split('/') + pull_domains.append([('remote_id.owner', '=', owner), ('remote_id.repo_name', '=', repo_name), ('name', '=', name)]) + pull_domains = fields.Domain.OR(pull_domains) + pull_requests = request.env['runbot.branch'].sudo().search([('is_pr', '=', True)] + pull_domains) + bundles = pull_requests.bundle_id + _logger.info('Received CI request for bundles: %s', bundles.mapped('name')) + bundles._force_ci() diff --git a/runbot/data/dockerfile_data.xml b/runbot/data/dockerfile_data.xml index 1671663d4..558e8f3d4 100644 --- a/runbot/data/dockerfile_data.xml +++ b/runbot/data/dockerfile_data.xml @@ -120,6 +120,7 @@ Install branch debian/control with latest postgresql-client # This layer updates the repository list to get the latest postgresql-client, mainly needed if the host postgresql version is higher than the default version of the docker os +# CACHE 60 ADD https://raw.githubusercontent.com/odoo/odoo/{odoo_branch}/debian/control /tmp/control.txt RUN curl -sSL https://www.postgresql.org/media/keys/ACCC4CF8.asc -o /etc/apt/trusted.gpg.d/psql_client.asc \ && echo "deb http://apt.postgresql.org/pub/repos/apt/ {os_release_name}-pgdg main" > /etc/apt/sources.list.d/pgclient.list \ @@ -136,7 +137,7 @@ RUN curl -sSL https://www.postgresql.org/media/keys/ACCC4CF8.asc -o /etc/apt/tru template Install chrome - + RUN curl -sSL https://dl.google.com/linux/chrome/deb/pool/main/g/google-chrome-stable/google-chrome-stable_{chrome_version}_amd64.deb -o /tmp/chrome.deb \ && apt-get update \ && apt-get -y install --no-install-recommends /tmp/chrome.deb \ @@ -195,7 +196,8 @@ ENV PIP_BREAK_SYSTEM_PACKAGES=1 template Install branch requirements - ADD --chown={USERNAME} https://raw.githubusercontent.com/odoo/odoo/{odoo_branch}/requirements.txt /tmp/requirements.txt + # CACHE 60 +ADD --chown={USERNAME} 
https://raw.githubusercontent.com/odoo/odoo/{odoo_branch}/requirements.txt /tmp/requirements.txt RUN python3 -m pip install --no-cache-dir -r /tmp/requirements.txt diff --git a/runbot/documentation/dynamic_config.md b/runbot/documentation/dynamic_config.md index acff2c71d..9282bd816 100644 --- a/runbot/documentation/dynamic_config.md +++ b/runbot/documentation/dynamic_config.md @@ -75,7 +75,7 @@ The config steps are mainly defined by their `job_type`. The `name` key is also ``` The `db_name` is optionnal, usually set to all as a convention on runbot for databases that contains *almost* all modules. If not defined the sanitized version of the name will be used. -`install_modules` and `install_default_modules` behave the same way except that `install_modules` will consider that we start with no module (prepends `.*` filter) while `install_default_modules` will be based on the runbot default module list (all available modules minus the repo blacklist) +`install_modules` and `install_default_modules` behave the same way except that `install_modules` will consider that we start with no module (prepends `-*` filter) while `install_default_modules` will be based on the runbot default module list (all available modules minus the repo blacklist) Both entries will use the value as a runbot module filter, and then passed as the -i, [see corresponding section](#module-selection) for more info. @@ -344,21 +344,43 @@ Filters are a way to transform dynamic values before using them. 
They are define For example, to transform a module filter into test tags: +#### filter_all_modules, make_module_test_tags + ```json {"test_tags": "-at_install,{{test_module_filter|filter_all_modules|make_module_test_tags}}", ``` In this example, the `filter_all_modules` filters will first transform the `test_module_filter` variable (which is a module filter) into a list of modules, and then the `make_module_test_tags` filters will transform this list of modules into test tags by prepending each module with a `/` to indicate that we want to run all tests from these modules. -Note that `filter_all_modules` is actually equivalent to `filter_default_modules`, but prepending a `*` at the begining of the filter. +#### filter_default_modules + +`filter_all_modules` is actually equivalent to `filter_default_modules`, but prepending a `*` at the begining of the filter. Without that a runbot defined filter is applied, returning a default list of modules per repo. `*,mail -> !web|filter_default_modules` is the same as `mail -> !web|filter_all_modules` + +#### prepend, append In some case we also want to combine the test-tags module with another tag or test method, this can be done using prepend and append `"{{-*,web*|filter_all_modules|make_module_test_tags|append('.test_method')}}` `{{-*,web*|filter_all_modules|make_module_test_tags|prepend('custom_tag')}}` -It is also possible to filter modules based on the one modified in the current bundle. +#### modified_modules + +It is possible to filter modules based on the one modified in the current bundle. `{{*|filter_all_modules|modified_modules}}"` + +#### select_existing_modules + +`select_existing_modules` is equivalent to `filter_default_modules` but with a -* at the beginning of the filter, meaning that we start with an empty selection and only add modules that are explicitly selected. 
+ +This is a solution to keep only existing modules from a specific list, when we are not sure modules exist: +`{{*|filter_all_modules|modified_modules|prepend('test_')|select_existing_modules|make_module_test_tags}}` + +- `*|filter_all_modules` will select all existing modules +- `|modified_modules` will only keep the modified ones +- `prepend('test_')` will prepend test_ to have the test equivalent name of the modified modules (mail-> test_mail, base -> test_base) +- `select_existing_modules` will only keep modules that exist (test_mail) +- `make_module_test_tags` makes the module test tags by prepending a / to each module. + diff --git a/runbot/migrations/19.0.5.15/post-migration.py b/runbot/migrations/19.0.5.15/post-migration.py new file mode 100644 index 000000000..efba09b23 --- /dev/null +++ b/runbot/migrations/19.0.5.15/post-migration.py @@ -0,0 +1,42 @@ +import logging + +_logger = logging.getLogger(__name__) + + +def migrate(cr, version): + cr.execute(""" + SELECT to_regclass('public.x_runbot_semgrep_rules'); + """) + if not cr.fetchone()[0]: + return + + cr.execute("""SELECT "x_checker", "x_language", "x_maxver", "x_message", "x_minver", "x_name", "x_rule", "x_severity" FROM x_runbot_semgrep_rules""") + results = cr.dictfetchall() + _logger.info('Migrating %d semgrep rules', len(results)) + categories = [] + for result in results: + categories.append(result['x_checker']) + + category_map = {} + for category in sorted(set(categories)): + cr.execute(""" + INSERT INTO runbot_checker_category (name) + VALUES (%s) + RETURNING id + """, (category,)) + category_map[category] = cr.fetchone()[0] + + for result in results: + cr.execute(""" + INSERT INTO runbot_semgrep_rule (name, category_id, language, max_version_number, min_version_number, message, rule, severity) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s) + """, ( + result['x_name'], + category_map[result['x_checker']], + result['x_language'], + result['x_maxver'], + result['x_minver'], + result['x_message'], 
result['x_rule'], + result['x_severity'], + )) diff --git a/runbot/migrations/19.0.5.16/pre-migration.py b/runbot/migrations/19.0.5.16/pre-migration.py new file mode 100644 index 000000000..bffcd9582 --- /dev/null +++ b/runbot/migrations/19.0.5.16/pre-migration.py @@ -0,0 +1,8 @@ +import logging + +_logger = logging.getLogger(__name__) + + +def migrate(cr, version): + cr.execute("""ALTER TABLE runbot_batch ADD COLUMN priority_level integer""") + cr.execute("""ALTER TABLE runbot_build ADD COLUMN priority_level integer""") diff --git a/runbot/migrations/19.0.5.17/pre-migration.py b/runbot/migrations/19.0.5.17/pre-migration.py new file mode 100644 index 000000000..c21faba28 --- /dev/null +++ b/runbot/migrations/19.0.5.17/pre-migration.py @@ -0,0 +1,3 @@ +def migrate(cr, version): + cr.execute("""UPDATE runbot_build set local_result = 'killed' where local_result = 'manually_killed'""") + cr.execute("""UPDATE runbot_build set global_result = 'killed' where global_result = 'manually_killed'""") diff --git a/runbot/models/__init__.py b/runbot/models/__init__.py index 1551bf611..7041959e3 100644 --- a/runbot/models/__init__.py +++ b/runbot/models/__init__.py @@ -14,6 +14,7 @@ from . import database from . import docker from . import host +from . import ir_action from . import ir_cron from . import ir_http from . import ir_model_fields_converter @@ -25,6 +26,7 @@ from . import res_config_settings from . import res_users from . import runbot +from . import semgrep_rule from . import team from . import upgrade from . 
import user diff --git a/runbot/models/batch.py b/runbot/models/batch.py index 81806b1a8..cad114cb7 100644 --- a/runbot/models/batch.py +++ b/runbot/models/batch.py @@ -15,11 +15,13 @@ class Batch(models.Model): last_update = fields.Datetime('Last ref update') bundle_id = fields.Many2one('runbot.bundle', required=True, index=True, ondelete='cascade') + build_all = fields.Boolean('Force all triggers') commit_link_ids = fields.Many2many('runbot.commit.link') commit_ids = fields.Many2many('runbot.commit', compute='_compute_commit_ids') slot_ids = fields.One2many('runbot.batch.slot', 'batch_id') all_build_ids = fields.Many2many('runbot.build', compute='_compute_all_build_ids', help="Recursive builds") state = fields.Selection([('preparing', 'Preparing'), ('ready', 'Ready'), ('done', 'Done'), ('skipped', 'Skipped')]) + priority_level = fields.Integer("Priority level", help="Priority level of the batch, determined from the create date and the bundle priority offset. The lower, the higher priority.") hidden = fields.Boolean('Hidden', default=False) age = fields.Integer(compute='_compute_age', string='Build age') category_id = fields.Many2one('runbot.category', index=True, default=lambda self: self.env.ref('runbot.default_category', raise_if_not_found=False)) @@ -152,7 +154,7 @@ def _create_build(self, params, slot): build = self.env['runbot.build'].search(domain, limit=1, order='id desc') link_type = 'matched' - killed_states = ('skipped', 'killed', 'manually_killed') + killed_states = ('skipped', 'killed') if build and build.local_result not in killed_states and build.global_result not in killed_states: if build.killable: build.killable = False @@ -163,7 +165,7 @@ def _create_build(self, params, slot): build_type = 'normal' if self.category_id != self.env.ref('runbot.default_category'): build_type = 'scheduled' - elif self.bundle_id.priority: + elif self.bundle_id.priority or params.trigger_id.use_extra_slot: build_type = 'priority' build = 
self.env['runbot.build'].create({ @@ -182,6 +184,12 @@ def _create_build(self, params, slot): def _prepare(self, auto_rebase=False, use_base_commits=False): _logger.info('Preparing batch %s', self.id) + + priority_offset = self.bundle_id.priority_offset + if not priority_offset and self.bundle_id.branch_ids.forwardport_of_id and self.bundle_id.last_batchs == self: # this is the only batch of a forwardported pr. + priority_offset = - 3600 * 5 + self.build_all = True # for normal pr, mergebot will request all ci on r+ if needed, for forward port, we need to ensure they are all created or the chain could be blocked + self.priority_level = int(self.create_date.timestamp() - priority_offset) if use_base_commits: self._warning('This batch will use base commits instead of bundle commits') if not self.bundle_id.base_id: @@ -364,7 +372,7 @@ def _fill_missing(branch_commits, match_type): base_commit_link_by_repos = {commit_link.commit_id.repo_id.id: commit_link for commit_link in self.base_reference_batch_id.commit_link_ids} if use_base_commits: commit_link_by_repos = base_commit_link_by_repos - version_id = self.bundle_id.version_id.id + bundle_version_id = self.bundle_id.version_id.id project_id = self.bundle_id.project_id.id trigger_customs = {} for trigger_custom in self.bundle_id.all_trigger_custom_ids: @@ -376,7 +384,22 @@ def _fill_missing(branch_commits, match_type): self._warning('Missing commit for repo %s for trigger %s', (trigger_repos & missing_repos).mapped('name'), trigger.name) continue # in any case, search for an existing build - config = trigger_custom.config_id or trigger.config_id + config = trigger.config_id + if not trigger_custom and trigger.light_config_id and not bundle.build_all and not self.build_all and not bundle.is_staging and not bundle.is_base: + if (project.use_light_default + or + project.use_light_draft and any(branch.draft for branch in self.bundle_id.branch_ids) + or + project.use_light_no_pr and not any(branch.is_pr for branch in 
self.bundle_id.branch_ids) + ): + config = trigger.light_config_id + + if trigger_custom.config_id: + config = trigger_custom.config_id + elif trigger_custom.start_mode == 'light' and trigger.light_config_id: + config = trigger.light_config_id + + extra_params = trigger_custom.extra_params or '' config_data = dict(trigger.config_data or {}) | dict(trigger_custom.config_data or {}) trigger_commit_link_by_repos = commit_link_by_repos @@ -384,6 +407,7 @@ def _fill_missing(branch_commits, match_type): self._warning(f'This batch will use base commits instead of bundle commits for trigger {trigger.name}') trigger_commit_link_by_repos = base_commit_link_by_repos commits_links = [trigger_commit_link_by_repos[repo.id].id for repo in trigger_repos] + version_id = bundle_version_id if (trigger.version_dependent or trigger.batch_dependent) else False params_value = { 'version_id': version_id, 'extra_params': extra_params, @@ -395,7 +419,7 @@ def _fill_missing(branch_commits, match_type): 'modules': bundle.modules, 'dockerfile_id': dockerfile_id, 'create_batch_id': self.id, - 'used_custom_trigger': bool(trigger_custom), + 'used_custom_trigger': bool(trigger_custom.config_id or trigger_custom.extra_params or trigger_custom.config_data or trigger_custom.use_base_commits), } params = self.env['runbot.build.params'].create(params_value) @@ -433,16 +457,22 @@ def _start_builds(self): is_dev = not bundle.is_staging and not bundle.is_base for trigger in self.slot_ids.trigger_id: enable_on_bundle = (trigger.on_staging and bundle.is_staging) or (trigger.on_base and bundle.is_base) or (trigger.on_dev and is_dev) - if ((trigger.repo_ids & bundle_repos) or bundle.build_all or bundle.sticky) and enable_on_bundle: + common_repo = (trigger.repo_ids & bundle_repos) + if self.build_all and not common_repo: + common_repo = (trigger.dependency_ids & bundle_repos) + if (common_repo or bundle.build_all or bundle.sticky) and enable_on_bundle: should_start_triggers_ids.add(trigger.id) + 
disabled_triggers = self.bundle_id.all_trigger_custom_ids.filtered(lambda tc: tc.start_mode == 'disabled').trigger_id for slot in self.slot_ids: if slot.build_id: continue trigger = slot.trigger_id - if trigger.starts_after_ids - success_trigger: # some required triggers are missing - continue trigger_custom = trigger_customs.get(trigger, self.env['runbot.bundle.trigger.custom']) + missing_triggers = trigger.starts_after_ids - success_trigger + if missing_triggers: + if not trigger_custom or (missing_triggers - disabled_triggers): + continue force_trigger = trigger_custom and trigger_custom.start_mode == 'force' skip_trigger = (trigger_custom and trigger_custom.start_mode == 'disabled') or trigger.manual should_start = slot.trigger_id.id in should_start_triggers_ids @@ -507,6 +537,23 @@ def _log(self, message, *args, level='INFO'): 'level': level, }) + def needs_update(self): + bundle = self.bundle_id + custom_trigger_per_trigger = {ct.trigger_id: ct for ct in bundle.trigger_custom_ids} + for slot in self.slot_ids: + trigger = slot.trigger_id + custom_trigger = custom_trigger_per_trigger.get(trigger) + if not custom_trigger: + continue + expected_config = trigger.config_id + if custom_trigger.config_id: + expected_config = custom_trigger.config_id + elif trigger.light_config_id and custom_trigger.start_mode == 'light': + expected_config = trigger.light_config_id + if slot.params_id.config_id != expected_config: + return True + return False + class BatchLog(models.Model): _name = 'runbot.batch.log' _description = 'Batch log' diff --git a/runbot/models/branch.py b/runbot/models/branch.py index f2cf6edf0..8deb9ec5c 100644 --- a/runbot/models/branch.py +++ b/runbot/models/branch.py @@ -101,7 +101,8 @@ def _compute_reference_name(self): # branch.reference_name = '%s~%s' % (branch.pull_head_name, branch.name) else: reference_name = branch.name - forced_version = branch.remote_id.repo_id.single_version # we don't add a depend on repo.single_version to avoid mass recompute 
of existing branches + repo = branch.remote_id.repo_id + forced_version = repo.enforce_version and repo.single_version # we don't add a depend on repo.single_version to avoid mass recompute of existing branches if forced_version and not (reference_name.startswith(f'{forced_version.name}-') or reference_name == forced_version.name): reference_name = f'{forced_version.name}---{reference_name}' branch.reference_name = reference_name diff --git a/runbot/models/build.py b/runbot/models/build.py index b3f10c962..947458008 100644 --- a/runbot/models/build.py +++ b/runbot/models/build.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- - +import ast import datetime import getpass import hashlib @@ -10,27 +10,41 @@ import shutil import time import uuid - from collections import defaultdict -from dateutil import parser from pathlib import Path + +from dateutil import parser +from markupsafe import Markup from psycopg2 import sql from psycopg2.extensions import TransactionRollbackError -from ..common import dt2time, now, grep, local_pgadmin_cursor, dest_reg, os, list_local_dbs, pseudo_markdown, RunbotException, findall, sanitize, markdown_escape, tail -from ..container import docker_stop, docker_state, Command, docker_run, docker_pull -from ..fields import JsonDictField - -from odoo import models, fields, api - +from odoo import api, fields, models from odoo.exceptions import ValidationError -from odoo.tools import file_open, file_path +from odoo.tools import file_open, file_path, html_escape from odoo.tools.safe_eval import safe_eval +from ..common import ( + RunbotException, + dest_reg, + dt2time, + findall, + grep, + list_local_dbs, + local_pgadmin_cursor, + markdown_escape, + now, + os, + pseudo_markdown, + sanitize, + tail, + transactioncache, +) +from ..container import Command, docker_pull, docker_run, docker_state, docker_stop +from ..fields import JsonDictField _logger = logging.getLogger(__name__) -result_order = ['ok', 'warn', 'ko', 'skipped', 'killed', 'manually_killed'] 
+result_order = ['ok', 'warn', 'ko', 'skipped', 'killed', 'manually_killed'] # TODO remove manually_killed state_order = ['pending', 'testing', 'waiting', 'running', 'done'] COPY_WHITELIST = [ @@ -61,7 +75,6 @@ def remove_readonly(func, path_str, exinfo): def make_selection(array): return [(elem, elem.replace('_', ' ').capitalize()) if isinstance(elem, str) else elem for elem in array] - class BuildParameters(models.Model): _name = 'runbot.build.params' _description = "Build parameters" @@ -70,7 +83,7 @@ class BuildParameters(models.Model): # execution parametter commit_link_ids = fields.Many2many('runbot.commit.link', copy=True) commit_ids = fields.Many2many('runbot.commit', compute='_compute_commit_ids') - version_id = fields.Many2one('runbot.version', required=True, index=True) + version_id = fields.Many2one('runbot.version', index=True) project_id = fields.Many2one('runbot.project', required=True, index=True) # for access rights trigger_id = fields.Many2one('runbot.trigger', index=True) # for access rights create_batch_id = fields.Many2one('runbot.batch', index=True) @@ -88,6 +101,7 @@ class BuildParameters(models.Model): build_ids = fields.One2many('runbot.build', 'params_id') builds_reference_ids = fields.Many2many('runbot.build', relation='runbot_build_params_references', copy=True) + reference_build_id = fields.Many2one('runbot.build', 'Reference Build', index=True) modules = fields.Char('Modules') upgrade_to_build_id = fields.Many2one('runbot.build', index=True) # use to define sources to use with upgrade script @@ -105,8 +119,18 @@ class BuildParameters(models.Model): # @api.depends('version_id', 'project_id', 'extra_params', 'config_id', 'config_data', 'modules', 'commit_link_ids', 'builds_reference_ids') def _compute_fingerprint(self): + def get_commit_links_ident(commit_link): + commit_idents = [] + for c in commit_link.commit_id: + commit_ident = c.tree_hash or c.name + if c.rebase_on_id: + commit_ident += (c.rebase_on_id.tree_hash or 
c.rebase_on_id.name) + # in a ideal world, we would be able to determine what the real threehash would be + commit_idents.append(commit_ident) + return sorted(commit_idents) + for param in self: - commit_ident = sorted([c.tree_hash or '' for c in param.commit_link_ids.commit_id]) + commit_ident = get_commit_links_ident(param.commit_link_ids) if param.trigger_id.batch_dependent: commit_ident = sorted(param.commit_link_ids.commit_id.ids) cleaned_vals = { @@ -123,9 +147,11 @@ def _compute_fingerprint(self): 'dockerfile_id': param.dockerfile_id.id, 'skip_requirements': param.skip_requirements, } + if param.reference_build_id: + cleaned_vals['reference_build_id'] = param.reference_build_id.id if param.upgrade_to_build_id: cleaned_vals['upgrade_to_build_dockerfile_id'] = param.upgrade_to_build_id.params_id.dockerfile_id.id - cleaned_vals['upgrade_to_build_commits'] = sorted([c.tree_hash or c.id for c in param.upgrade_to_build_id.params_id.commit_link_ids.commit_id]) + cleaned_vals['upgrade_to_build_commits'] = get_commit_links_ident(param.upgrade_to_build_id.params_id.commit_link_ids) if param.upgrade_from_build_id: cleaned_vals['upgrade_from_build_id'] = param.upgrade_from_build_id.id if param.trigger_id.batch_dependent: @@ -266,6 +292,7 @@ class BuildResult(models.Model): create_batch_id = fields.Many2one('runbot.batch', related='params_id.create_batch_id', store=True, index=True) create_bundle_id = fields.Many2one('runbot.bundle', related='params_id.create_batch_id.bundle_id', index=True) dynamic_config = JsonDictField('Dynamic Config', related='params_id.dynamic_config') + priority_level = fields.Integer('Priority', related='create_batch_id.priority_level', store=True, index=True) # state machine global_state = fields.Selection(make_selection(state_order), string='Status', compute='_compute_global_state', store=True, recursive=True) @@ -274,6 +301,8 @@ class BuildResult(models.Model): local_result = fields.Selection(make_selection(result_order), string='Build 
Result', default='ok') requested_action = fields.Selection([('wake_up', 'To wake up'), ('deathrow', 'To kill')], string='Action requested', index=True) + to_kill = fields.Boolean('To kill', compute='_compute_to_kill') + message_ids = fields.One2many('runbot.host.message', 'build_id', string='Messages') # web infos host = fields.Char('Host name') host_id = fields.Many2one('runbot.host', string="Host", compute='_compute_host_id') @@ -371,6 +400,11 @@ def _compute_global_state(self): else: record.global_state = record.local_state + @api.depends('message_ids') + def _compute_to_kill(self): + for record in self: + record.to_kill = any(message.message == 'kill' for message in record.message_ids) + @api.depends('gc_delay', 'job_end') def _compute_gc_date(self): icp = self.env['ir.config_parameter'].sudo() @@ -495,7 +529,7 @@ def write(self, values): return res - def _add_child(self, param_values, orphan=False, description=False, additionnal_commit_links=False): + def _add_child(self, param_values, orphan=False, description=False, additionnal_commit_links=False, use_parent=...): build_values = {key: value for key, value in param_values.items() if key not in self.params_id._fields} param_values = {key: value for key, value in param_values.items() if key in self.params_id._fields} @@ -508,10 +542,20 @@ def _add_child(self, param_values, orphan=False, description=False, additionnal_ commit_link_ids |= additionnal_commit_links param_values['commit_link_ids'] = commit_link_ids + config = param_values.get('config_id') or self.params_id.config_id + if isinstance(config, int): + config = self.env['runbot.build.config'].browse(config) + + if use_parent == ...: + use_parent = config._default_uses_parent(param_values) + if use_parent: + param_values['reference_build_id'] = self.id + return self.create({ 'params_id': self.params_id.copy(param_values).id, 'parent_id': self.id, 'build_type': self.build_type, + 'priority_level': self.priority_level, 'description': description, 
'orphan_result': orphan, 'keep_host': self.keep_host, @@ -519,22 +563,11 @@ def _add_child(self, param_values, orphan=False, description=False, additionnal_ **build_values, }) - def _result_multi(self): - if all(build.global_result == 'ok' or not build.global_result for build in self): - return 'ok' - if any(build.global_result in ('skipped', 'killed', 'manually_killed') for build in self): - return 'killed' - if any(build.global_result == 'ko' for build in self): - return 'ko' - if any(build.global_result == 'warning' for build in self): - return 'warning' - return 'ko' # ? - @api.depends('params_id.version_id.name') def _compute_dest(self): for build in self: if build.id: - nickname = build.params_id.version_id.name + nickname = build.params_id.version_id.name or 'build' nickname = re.sub(r'"|\'|~|\:', '', nickname) nickname = re.sub(r'_|/|\.', '-', nickname) build.dest = ("%05d-%s" % (build.id or 0, nickname[:32])).lower() @@ -802,11 +835,13 @@ def _init_pendings(self): def _process_requested_actions(self): self.ensure_one() build = self + # TODO remove, replaced by queue if build.requested_action == 'deathrow': result = None if build.local_state != 'running' and build.global_result not in ('warn', 'ko'): - result = 'manually_killed' + result = 'killed' build._kill(result=result) + build.requested_action = False return if build.requested_action == 'wake_up': @@ -982,7 +1017,7 @@ def _docker_run(self, step, cmd=None, ro_volumes=None, env_variables=None, **kwa for dest, source in _ro_volumes.items(): ro_volumes[f'/data/build/{dest}'] = source if 'image_tag' not in kwargs: - kwargs.update({'image_tag': self.params_id.dockerfile_id.image_tag}) + kwargs.update({'image_tag': step.dockerfile_id.image_tag or self.params_id.dockerfile_id.image_tag}) dockerfile_variant = self.params_id.config_data.get('dockerfile_variant', step.dockerfile_variant) if dockerfile_variant and f'.{dockerfile_variant.lower()}' not in kwargs['image_tag']: kwargs['image_tag'] += 
f'.{dockerfile_variant.lower()}' @@ -1079,25 +1114,28 @@ def _checkout(self): return exports + def _list_available_modules(self): + for commit in self.env.context.get('defined_commit_ids') or self.params_id.commit_ids: + for (addons_path, module, manifest_file_name) in commit._list_available_modules(): + yield commit, addons_path, module, manifest_file_name + def _get_available_modules(self): all_modules = dict() available_modules = defaultdict(list) # repo_modules = [] - for commit in self.env.context.get('defined_commit_ids') or self.params_id.commit_ids: - for (addons_path, module, manifest_file_name) in commit._get_available_modules(): - if module in all_modules: - self._log( - 'Building environment', - '%s is a duplicated modules (found in "%s", already defined in %s)' % ( - module, - commit._source_path(addons_path, module, manifest_file_name), - all_modules[module]._source_path(addons_path, module, manifest_file_name)), - level='WARNING', - ) - else: - available_modules[commit.repo_id].append(module) - all_modules[module] = commit - # return repo_modules, available_modules + for commit, addons_path, module, manifest_file_name in self._list_available_modules(): + if module in all_modules: + self._log( + 'Building environment', + '%s is a duplicated modules (found in "%s", already defined in %s)' % ( + module, + commit._source_path(addons_path, module, manifest_file_name), + all_modules[module]._source_path(addons_path, module, manifest_file_name)), + level='WARNING', + ) + else: + available_modules[commit.repo_id].append(module) + all_modules[module] = commit return available_modules def _get_modules_to_test(self, modules_patterns=''): @@ -1108,13 +1146,56 @@ def _get_modules_to_test(self, modules_patterns=''): modules_patterns = (modules_patterns or '').split(',') return trigger._filter_modules_to_test(modules, params_patterns + modules_patterns) # we may switch params_patterns and modules_patterns order + @transactioncache + def _dependency_graph(self): + 
dependency_graph = defaultdict(set) + dependant_graph = defaultdict(set) + for commit in self.env.context.get('defined_commit_ids') or self.params_id.commit_ids: + file_paths = [] + modules = [] + for (addons_path, module, manifest_file_name) in commit._list_available_modules(): + file_paths.append(os.path.join(addons_path, module, manifest_file_name)) + modules.append(module) + contents = commit._git_show_files(file_paths) + for module, manifest in zip(modules, contents): + manifest_content = ast.literal_eval(manifest) + depends = manifest_content.get('depends', []) + if not depends and module != 'base': + depends = ['base'] + for dep in depends: + dependency_graph[module].add(dep) + dependant_graph[dep].add(module) + return dependency_graph, dependant_graph + + def search_modules_graph(self, modules, graph, depth=None): + def search(modules, depth=None, visited=None): + visited = visited or set() + modules = set(modules) - visited + visited |= modules + dependencies = set(modules) + if depth == 0 or not modules: + return dependencies + for module in modules: + dependencies |= search(graph[module], depth - 1 if depth is not None else None, visited) + return dependencies + return sorted(search(modules, depth)) + + def _get_modules_dependencies(self, modules, depth=None): + self.ensure_one() + dependency_graph, _ = self._dependency_graph() + return self.search_modules_graph(modules, dependency_graph, depth) + + def _get_dependant_modules(self, modules, depth=None): + _, dependant_graph = self._dependency_graph() + return self.search_modules_graph(modules, dependant_graph, depth) + def _local_pg_dropdb(self, dbname): msg = '' try: with local_pgadmin_cursor() as local_cr: query = 'SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname=%s' local_cr.execute(query, [dbname]) - local_cr.execute('SET LOCAL statement_timeout=10000') # avoid to be stuck if the dropdb is locked + local_cr.execute('SET statement_timeout=10000') # avoid to be stuck if the dropdb 
is locked local_cr.execute('DROP DATABASE IF EXISTS "%s"' % dbname) except Exception as e: msg = f"Failed to drop local logs database : {dbname} with exception: {e}" @@ -1162,7 +1243,7 @@ def truncate(message, maxlenght=300000): 'line': '0', }) - def _kill(self, result=None): + def _kill(self, result='killed'): host_name = self.env['runbot.host']._get_current_name() self.ensure_one() build = self @@ -1170,30 +1251,38 @@ def _kill(self, result=None): return build._log('kill', 'Kill build %s' % build.dest) docker_stop(build._get_docker_name(), build._path()) - v = {'local_state': 'done', 'requested_action': False, 'active_step': False, 'job_end': now()} + build.local_state = 'done' + build.active_step = False + build.job_end = now() if not build.build_end: - v['build_end'] = now() + build.build_end = now() if result: - v['local_result'] = result - build.write(v) - - def _ask_kill(self, lock=True, message=None): - # if build remains in same bundle, it's ok like that - # if build can be cross bundle, need to check number of ref to build - if lock: - self.env.cr.execute("""SELECT id FROM runbot_build WHERE parent_path like %s FOR UPDATE""", ['%s%%' % self.parent_path]) + build.local_result = result + + def _ask_kill(self, message=None): self.ensure_one() user = self.env.user uid = user.id build = self message = message or 'Killing build %s, requested by %s (user #%s)' % (build.dest, user.name, uid) build._log('_ask_kill', message) - if build.local_state == 'pending': - build._skip() - elif build.local_state in ['testing', 'running']: - build.requested_action = 'deathrow' - for child in build.children_ids: - child._ask_kill(lock=False) + + self.env.cr.execute("""SELECT id, local_state FROM runbot_build WHERE parent_path like %s""", ['%s%%' % self.parent_path]) + builds = self.browse([b[0] for b in self.env.cr.fetchall()]) + pending = builds.filtered(lambda b: b.local_state == 'pending') + killable = builds.filtered(lambda b: b.local_state in ('running', 'testing')) + if 
pending: + pending.local_state = 'done' + pending.local_result = 'killed' + pending.flush_recordset() # faster concurrent error or lock row + + values = [{ + 'host_id': b.host_id.id, + 'build_id': b.id, + 'message': 'kill', + } for b in killable] + + self.env['runbot.host.message'].sudo().create(values) def _wake_up(self): user = self.env.user @@ -1230,19 +1319,24 @@ def _modified_files(self, commit_link_links=None): commit_link_links = self.params_id.commit_link_ids for commit_link in commit_link_links: commit = commit_link.commit_id - modified = commit.repo_id._git(['diff', '--name-only', '%s..%s' % (commit_link.merge_base_commit_id.name, commit.name)]) + commit._fetch() + modified = commit.repo_id._git(['diff', '--name-only', '%s..%s' % (commit_link.merge_base_commit_id.tree_hash, commit.tree_hash)]) if modified: files = [os.sep.join([self._docker_source_folder(commit), file]) for file in modified.split('\n') if file] modified_files[commit_link] = files return modified_files - def _modified_modules(self, commit_link_links=None): + def _modified_modules(self, commit_link_links=None, defaults=None): modified_files = self._modified_files(commit_link_links) modified_modules = set() for commit_link, files in modified_files.items(): commit = commit_link.commit_id for file in files: - modified_modules.add(commit.repo_id._get_module(file)) + module = commit.repo_id._get_module(file) + if module: + modified_modules.add(module) + elif defaults: + modified_modules |= set(defaults) return modified_modules def _get_upgrade_path(self): @@ -1315,8 +1409,9 @@ def _cmd(self, python_params=None, py_version=None, local_only=True, sub_command faketime = [] if faketime_params := self.params_id.config_data.get('faketime'): - if self.parent_id: - parent_time_offset = (self.parent_id.build_end or self.create_date) - self.parent_id.build_start + reference_build = self.params_id.reference_build_id or self.parent_id # TODO cleanup parent_id + if reference_build: + parent_time_offset = 
(reference_build.build_end or self.create_date) - reference_build.build_start faketime_params = (parser.parse(faketime_params) + parent_time_offset).strftime('%Y-%m-%d %H:%M %Z') faketime = ['faketime', faketime_params] @@ -1443,10 +1538,62 @@ def _get_color_class(self): if self.global_result == 'ok': return 'success' - if self.global_result in ('skipped', 'killed', 'manually_killed'): + if self.global_result in ('skipped', 'killed'): return 'secondary' return 'default' + def _get_file_url(self, path, line=None): + repo_name = path.replace('/data/build/', '').split('/')[0] + for commit_link in self.params_id.commit_link_ids: + if commit_link.commit_id.repo_id.name == repo_name: + repo_base_url = commit_link.branch_id.remote_id.base_url + commit_hash = commit_link.commit_id.name + path = path.replace('/data/build/%s/' % repo_name, '') + url = f'https://{repo_base_url}/blob/{commit_hash}/{path}' + if line: + url = f'{url}#L{line}' + return url + return '' + + def _format_message(self, log): + text = log.message + if not "\n" in text and 'in: /data/build/' in text: + parts = text.split('in: /data/build/') + text = parts[0] + url = f'http://{self.host}/runbot/static/build/{self.dest}/{parts[-1]}' + template = Markup('%s') + return template % (url, text) + text = text.strip('\n') + text = html_escape(text) + + def get_link(match): + path = match.group(1) + line = match.group(2) + url = self._get_file_url(path, line) + if url: + if line: + return Markup('%s", line %s') % (url, path, line) + return Markup('%s') % (url, path) + return match.group(0) + regex = r''' + (/data/build/[\w\-\./]+\.(?:py|xml|js|css)) # Path in /data/build ending with a common extension + (?: + \&\#34;,\sline\s(\d+) # Optional line number (escaped quote) + )? 
+ ''' + text = Markup(re.sub(regex, get_link, text, flags=re.VERBOSE)) + + return text + + def _log_details(self, log): + title = f"Logger: {log.name}\nFunc: {log.func}" + test_data = log.metadata.dict.get('test') + if test_data: + title += '\n' + for test_line in test_data: + title += f'\n{test_line}: {test_data[test_line]}' + return title + def _github_status(self): """Notify github of failed/successful builds""" for build in self: @@ -1458,14 +1605,18 @@ def _github_status(self): build.parent_id._github_status() else: trigger = build.params_id.trigger_id - if not trigger.ci_context: + ci_context = trigger.ci_context + if not ci_context: continue desc = trigger.ci_description or " (runtime %ss)" % (build.job_time,) if build.params_id.used_custom_trigger: - state = 'error' + ci_context += " (custom)" desc = "This build used custom config. Remove custom trigger to restore default ci" - elif build.global_result in ('ko', 'warn'): + if build.params_id.config_id == build.trigger_id.light_config_id: + ci_context += " (light)" + desc = "This build used a light config. 
Enable default build configuration to restore default ci" + if build.global_result in ('ko', 'warn'): state = 'error' elif build.global_state in ('pending', 'testing'): state = 'pending' @@ -1518,7 +1669,7 @@ def _github_status(self): else: target_url = f"{self.get_base_url()}/runbot/build/{build.id}" - commit._github_status(build, trigger.ci_context, state, target_url, desc, ci_strategy=trigger.ci_strategy) + commit._github_status(build, ci_context, state, target_url, desc, ci_strategy=trigger.ci_strategy) def _parse_config(self): return set(findall(self._server("tools/config.py"), r'--[\w-]+', )) diff --git a/runbot/models/build_config.py b/runbot/models/build_config.py index d369eca19..40bc993a9 100644 --- a/runbot/models/build_config.py +++ b/runbot/models/build_config.py @@ -1,35 +1,46 @@ import base64 +import fnmatch import glob import json import logging -import fnmatch -import psutil import re import shlex import time -from unidiff import PatchSet -from ..common import now, grep, time2str, rfind, s2human, os, RunbotException, ReProxy, markdown_escape -from ..container import docker_get_gateway_ip, Command -from odoo import models, fields, api, tools -from odoo.exceptions import UserError, ValidationError -from odoo.tools.misc import file_open -from odoo.tools.safe_eval import safe_eval, test_python_expr, _SAFE_OPCODES, to_opcodes -# adding some additionnal optcode to safe_eval. 
This is not 100% needed and won't be done in standard but will help -# to simplify some python step by wraping the content in a function to allow return statement and get closer to other -# steps +import psutil +from unidiff import VERSION, PatchSet, patch +from odoo import api, fields, models, tools +from odoo.exceptions import UserError, ValidationError +from odoo.tools.misc import file_open +from odoo.tools.safe_eval import _SAFE_OPCODES, safe_eval, test_python_expr, to_opcodes + +from ..common import ( + ReProxy, + RunbotException, + TestTagsParser, + grep, + markdown_escape, + now, + os, + rfind, + s2human, + time2str, +) +from ..container import Command, docker_get_gateway_ip # There is an issue in unidiff 0.7.3 fixed in 0.7.4 # https://github.com/matiasb/python-unidiff/commit/a3faffc54e5aacaee3ded4565c534482d5cc3465 # Since the unidiff packaged version in noble is 0.7.3 # patching it looks like the easiest solution -from unidiff import patch, VERSION if VERSION == '0.7.3': patch.RE_DIFF_GIT_DELETED_FILE = re.compile(r'^deleted file mode \d+$') patch.RE_DIFF_GIT_NEW_FILE = re.compile(r'^new file mode \d+$') +# adding some additionnal optcode to safe_eval. 
This is not 100% needed and won't be done in standard but will help +# to simplify some python step by wraping the content in a function to allow return statement and get closer to other +# steps _SAFE_OPCODES |= set(to_opcodes(['LOAD_DEREF', 'STORE_DEREF', 'LOAD_CLOSURE', 'MAKE_CELL', 'COPY_FREE_VARS'])) @@ -46,28 +57,41 @@ def filter_all_modules(selector, build, dynamic_vars): return filter_default_modules(selector, build, dynamic_vars) +def get_dependencies(modules, build, dynamic_vars, depth=None): + depth = int(depth) if depth else None + modules = modules.split(',') + dependant = set(build._get_modules_dependencies(modules, depth)) - set(modules) + return ','.join(sorted(dependant)) + + +def get_dependant(modules, build, dynamic_vars, depth=None): + depth = int(depth) if depth else None + modules = modules.split(',') + dependant = set(build._get_dependant_modules(modules, depth)) - set(modules) + return ','.join(sorted(dependant)) + + def filter_default_modules(selector, build, dynamic_vars): - build._checkout() # we need to ensure source are exported before _get_modules_to_test modules = build._get_modules_to_test(selector) return ','.join(modules) -def keep_modified_modules(modules, build, dynamic_vars): +def select_existing_modules(selector, build, dynamic_vars): + selector = f'-*,{selector}' + return filter_default_modules(selector, build, dynamic_vars) + + +def keep_modified_modules(modules, build, dynamic_vars, *defaults): if build.params_id.config_data.get('skip_modified_modules_filter', False): return modules - modified_modules = build._modified_modules() + if defaults: + defaults = [d[1:-1] if re.match(r'^[\'"].*[\'"]$', d) else d for d in defaults] + modified_modules = build._modified_modules(defaults=defaults) modules = modules.split(',') filtered_modules = [module for module in modules if module in modified_modules] return ','.join(filtered_modules) -def keep_modified_modules_or_base(modules, build, dynamic_vars): - bundle = 
build.params_id.create_batch_id.bundle_id - if bundle.is_base or bundle.is_staging: - return modules - return keep_modified_modules(modules, build, dynamic_vars) - - def make_module_test_tags(modules, build, dynamic_vars): return ','.join([f'/{module}' for module in modules.split(',')]) @@ -88,6 +112,17 @@ def append_string(modules, build, dynamic_vars, element): return ','.join([f'{module}{element}' for module in modules.split(',')]) +def union(modules, build, dynamic_vars, element): + if re.match(r'^[\'"].*[\'"]$', element): + element = element[1:-1] + else: + element = dynamic_vars.get(element, element) + element = element.strip() + modules = set(modules.split(',')) if modules else set() + new_modules = set(element.split(',')) if element else set() + return ','.join(sorted(modules | new_modules)) + + class Config(models.Model): _name = 'runbot.build.config' _description = "Build config" @@ -222,14 +257,18 @@ def wrapper(value, path): return wrapper def VARS(vars, path): - if not isinstance(vars, dict): - raise ValidationError(f'{path} ({vars}) should be a dict') - for key, val in vars.items(): - TECHNICAL_NAME(key, f'{path}.{key}') - STR(val, f'{path}.{key}') + if isinstance(vars, list): + for item in vars: + VARS(item, path) + else: + if not isinstance(vars, dict): + raise ValidationError(f'{path} ({vars}) should be a dict') + for key, val in vars.items(): + TECHNICAL_NAME(key, f'{path}.{key}') + STR(val, f'{path}.{key}') NAME = str_checker(r'^[\w \-]+$') - STR = str_checker(r'.+') + STR = str_checker(r'.*') DYNAMIC_VALUE = STR TECHNICAL_NAME = str_checker(r'^[a-z0-9_\-]+$') BOOL = type_checker(bool) @@ -241,6 +280,7 @@ def VARS(vars, path): 'vars': OPTIONAL(VARS), 'steps': REQUIRED(LIST(STEP)), 'description': OPTIONAL(DYNAMIC_VALUE), + 'log': OPTIONAL(DYNAMIC_VALUE), } valid_steps['odoo'] = { 'name': REQUIRED(NAME), @@ -251,9 +291,11 @@ def VARS(vars, path): 'test_tags': OPTIONAL(DYNAMIC_VALUE), 'demo_mode': OPTIONAL(IN(['default', 'with_demo', 
'without_demo'])), 'enable_auto_tags': OPTIONAL(BOOL), + 'extra_params': OPTIONAL(DYNAMIC_VALUE), 'cpu_limit': OPTIONAL(INT), 'export_database': OPTIONAL(BOOL), 'make_stats': OPTIONAL(BOOL), + 'log': OPTIONAL(DYNAMIC_VALUE), } valid_steps['create_build'] = { 'name': REQUIRED(NAME), @@ -262,6 +304,9 @@ def VARS(vars, path): 'for_each_vars': OPTIONAL(LIST(VARS)), 'for_each_module': OPTIONAL(DYNAMIC_VALUE), 'max_builds': OPTIONAL(INT), + 'if': OPTIONAL(DYNAMIC_VALUE), + 'log': OPTIONAL(DYNAMIC_VALUE), + 'use_parent': OPTIONAL(BOOL), } valid_steps['restore'] = { 'name': REQUIRED(NAME), @@ -271,6 +316,7 @@ def VARS(vars, path): 'trigger_id': OPTIONAL(INT), 'use_current_batch': OPTIONAL(BOOL), 'zip_url': OPTIONAL(STR), + 'log': OPTIONAL(DYNAMIC_VALUE), } valid_steps['command'] = { 'name': REQUIRED(NAME), @@ -283,6 +329,7 @@ def VARS(vars, path): 'check_logs': OPTIONAL(LIST(STR)), 'expected_logs': OPTIONAL(LIST(STR)), 'make_stats': OPTIONAL(BOOL), + 'log': OPTIONAL(DYNAMIC_VALUE), } validate(config_schema, config, 'config') @@ -336,6 +383,18 @@ def _check_recursion(self, visited=None): for create_config in step.create_config_ids: create_config._check_recursion(visited[:]) + def _default_uses_parent(self, param_values): + if param_values.get('dump_db'): + return False + config_data = param_values.get('config_data', {}) or {} + if config_data.get('dump_url'): + return False + if config_data.get('restore_build_id'): + return False + if config_data.get('dump_trigger_id'): + return False + return any(step.job_type == 'restore' for step in self.step_ids) + class ConfigStepUpgradeDb(models.Model): _name = 'runbot.config.step.upgrade.db' @@ -355,6 +414,7 @@ class ConfigStepUpgradeDb(models.Model): ('test_upgrade', 'Test Upgrade'), ('restore', 'Restore'), ('dynamic', 'Dynamic'), + ('semgrep', 'Semgrep'), ] @@ -377,6 +437,7 @@ class ConfigStep(models.Model): group_name = fields.Char('Group name', related='group.name') make_stats = fields.Boolean('Make stats', default=False) 
build_stat_regex_ids = fields.Many2many('runbot.build.stat.regex', string='Stats Regexes') + dockerfile_id = fields.Many2one('runbot.dockerfile', string='Dockerfile') dockerfile_variant = fields.Char('Docker Variant') # install_odoo create_db = fields.Boolean('Create Db', default=True, tracking=True) # future @@ -389,11 +450,11 @@ class ConfigStep(models.Model): paths_to_omit = fields.Char('Paths to omit from coverage', tracking=True) flamegraph = fields.Boolean('Allow Flamegraph', default=False, tracking=True) test_enable = fields.Boolean('Test enable', default=True, tracking=True) - test_tags = fields.Char('Test tags', help="comma separated list of test tags", tracking=True) + test_tags = fields.Char('Test tags', help="new line (or comma) separated list of test tags", tracking=True) enable_auto_tags = fields.Boolean('Allow auto tag', default=True, tracking=True) sub_command = fields.Char('Subcommand', tracking=True) extra_params = fields.Char('Extra cmd args', tracking=True) - additionnal_env = fields.Char('Extra env', help='Example: foo="bar";bar="foo". Cannot contains \' ', tracking=True) + additionnal_env = fields.Char('Extra env', help='Example: foo=bar;bar=foo. 
Cannot contains \' ', tracking=True) enable_log_db = fields.Boolean("Enable log db", default=True) demo_mode = fields.Selection( [('default', 'Default'), ('without_demo', 'Without Demo'), ('with_demo', 'With Demo')], @@ -427,6 +488,10 @@ class ConfigStep(models.Model): restore_download_db_suffix = fields.Char('Download db suffix') restore_rename_db_suffix = fields.Char('Rename db suffix') + semgrep_category = fields.Many2one('runbot.checker_category', string='Semgrep Category', tracking=True) + custom_link = fields.Char('Custom link for semgrep codes', tracking=True) + disable_nosem = fields.Boolean('Disable nosem', default=False, tracking=True) + commit_limit = fields.Integer('Commit limit', default=50) file_limit = fields.Integer('File limit', default=450) break_before_if_ko = fields.Boolean('Break before this step if build is ko') @@ -548,7 +613,7 @@ def _run_step(self, build, **kwargs): return build._docker_run(self, **docker_params) return True - def _run_create_build(self, build, config_data=None, max_build=200): + def _run_create_build(self, build, config_data=None, max_build=200, use_parent=...): if config_data: config_data = {**config_data, **build.params_id.config_data} else: @@ -572,7 +637,7 @@ def _run_create_build(self, build, config_data=None, max_build=200): build._log('create_build', f'More than {max_build} build created, stopping', level='WARNING') return config_name = config_name or create_config.name - child = build._add_child(child_data_values, orphan=self.make_orphan, description=description or config_name) + child = build._add_child(child_data_values, orphan=self.make_orphan, description=description or config_name, use_parent=use_parent) build._log('create_build', 'created with config %s' % config_name, log_type='subbuild', path=str(child.id)) def _make_python_ctx(self, build): @@ -596,6 +661,7 @@ def _make_python_ctx(self, build): 'json_loads': json.loads, 'PatchSet': PatchSet, 'markdown_escape': markdown_escape, + 'TestTagsParser': 
TestTagsParser, } def _run_python(self, build, force=False): @@ -718,8 +784,8 @@ def _run_install_odoo(self, build, config_data=None): elif demo_mode == 'without_demo' and demo_installed_by_default: cmd.append('--without-demo=true') + extra_params = config_data.get('extra_params', build.params_id.extra_params or self.extra_params or '') # list module to install - extra_params = build.params_id.extra_params or self.extra_params or '' if mods and '-i' not in extra_params: cmd += ['-i', mods] config_path = build._server("tools/config.py") @@ -733,7 +799,7 @@ def _run_install_odoo(self, build, config_data=None): test_tags_in_extra = '--test-tags' in extra_params if (test_enable or test_tags) and "--test-tags" in available_options and not test_tags_in_extra: - test_tags = [t.strip() for t in (test_tags or '').split(',')] + test_tags = [t.strip() for t in TestTagsParser(test_tags or '').filter_specs] if enable_auto_tags and not config_data.get('disable_auto_tags', False): if grep(config_path, "[/module][:class]"): auto_tags = self.env['runbot.build.error']._disabling_tags(build) @@ -986,7 +1052,9 @@ def get_reference_builds_for_versions(versions): ) if self.allow_similar_build_quick_result: - existing_done_build = next((build for build in child.params_id.build_ids.sorted('id') if build.global_state == 'done' and build.local_result not in ('skipped', 'killed', 'manually_killed')), None) + existing_done_build = next((build for build in child.params_id.build_ids.sorted('id') if build.global_state == 'done' and build.global_result == 'ok'), None) + if not existing_done_build: + existing_done_build = next((build for build in child.params_id.build_ids.sorted('id') if build.global_state == 'done' and build.local_result not in ('skipped', 'killed')), None) if existing_done_build: child._log('', 'A similar [build](%s) has been found, marking as done directly', existing_done_build.build_url, log_type='markdown') child.local_state = 'done' @@ -1081,7 +1149,7 @@ def 
_run_restore(self, build, config_data=None): dump_build = dump_db.build_id else: download_db_suffix = config_data.get('dump_suffix', self.restore_download_db_suffix or 'all') - dump_build = build.parent_id + dump_build = params.reference_build_id or build.parent_id # TODO cleanup parent_id assert download_db_suffix and dump_build download_db_name = '%s-%s' % (dump_build.dest, download_db_suffix) zip_name = '%s.zip' % download_db_name @@ -1180,7 +1248,7 @@ def _coverage_params(self, build, modules_to_install): docker_source_folder = build._docker_source_folder(commit) for manifest_file in commit.repo_id.manifest_files.split(','): pattern_to_omit.add('*%s' % manifest_file) - for (addons_path, module, _) in commit._get_available_modules(): + for (addons_path, module, _) in commit._list_available_modules(): if module not in modules_to_install: # we want to omit docker_source_folder/[addons/path/]module/* module_path_in_docker = os.sep.join([docker_source_folder, addons_path, module]) @@ -1232,6 +1300,8 @@ def _make_results(self, build): self._make_upgrade_results(build) elif active_job_type == 'restore': self._make_restore_results(build) + elif active_job_type == 'semgrep': + self._make_semgrep_results(build) def _make_python_results(self, build): eval_ctx = self._make_python_ctx(build) @@ -1491,46 +1561,85 @@ def _run_dynamic(self, build): raise RunbotException('Too many ancestors builds, possible cyclic dynamic build creation') if build.parent_id and build.dynamic_config == build.parent_id.dynamic_config: raise RunbotException('A child build cannot load the same dynamic config if parent, recursion detected') + + config_vars_list = build.dynamic_config.get('vars', {}) + if not isinstance(config_vars_list, list): + config_vars_list = [config_vars_list] + raw_vars = {} + for config_vars in config_vars_list: + raw_vars.update(config_vars) + + raw_vars.update(build.params_id.config_data.get('dynamic_vars', {})) + dynamic_vars = {} + # dynamic_vars can either be raw value 
like 'account', value to evaluate lazily in anothed dynamic value like 'account->!mail' + # or dynamic value that we want to evaluate early like '{{*|filter_all_modules|modified_modules}}' (between {{}}) + # this loop will evalute the third category + # this alows to evaluate only once an expression that could be expensive to use it in multiple dynamic values + # this also allow to clarify the config by chaining vars definition + # TODO check ordering + for key, value in raw_vars.items(): + dynamic_vars[key] = self._parse_dynamic_entry(value, build, dynamic_vars=dynamic_vars) + current_step = self._get_dynamic_step(build) if not current_step: build._log('Dynamic Step', 'No dynamic config or steps found, skipping', level="WARNING") return + if current_step.get('log'): + text = self._parse_dynamic_entry(current_step['log'], build, dynamic_vars=dynamic_vars) + build._log('_run_dynamic', text) if current_step['job_type'] == 'create_build': for_each_vars_list = current_step.get('for_each_vars', [{}]) if 'for_each_module' in current_step: modules_vars = [] for for_each_vars in for_each_vars_list: - modules_entry = self._parse_dynamic_entry(current_step['for_each_module'], build, additional_dynamic_vars=for_each_vars) + modules_entry = self._parse_dynamic_entry(current_step['for_each_module'], build, dynamic_vars={**dynamic_vars, **for_each_vars}) modules = [m.strip() for m in modules_entry.split(',') if m.strip()] for module in modules: module_vars = {**for_each_vars, 'module': module} modules_vars.append(module_vars) for_each_vars_list = modules_vars - parent_vars = {**build.dynamic_config.get('vars', {}), **build.params_id.config_data.get('dynamic_vars', {})} + child_data_list = [] for child_index, child in enumerate(current_step.get('children', [])): child_vars = child.get('vars', {}) for for_each_vars in for_each_vars_list: config_name = child.get('name', build.params_id.config_id.name) - dynamic_vars = {**parent_vars, **child_vars, **for_each_vars} + 
raw_dynamic_vars = {**dynamic_vars, **for_each_vars, **child_vars} + child_dynamic_vars = {} + # evaluate for_each_vars + for key, value in raw_dynamic_vars.items(): + child_dynamic_vars[key] = self._parse_dynamic_entry(value, build, dynamic_vars=child_dynamic_vars) + if 'if' in current_step: + condition = self._parse_dynamic_entry(current_step['if'], build, dynamic_vars=child_dynamic_vars) + if not condition: + continue if 'description' in child: - description = self._parse_dynamic_entry(child['description'], build, additional_dynamic_vars=dynamic_vars) + description = self._parse_dynamic_entry(child['description'], build, dynamic_vars=child_dynamic_vars) # note: we mainly need to provide additional_dynamic_vars because the child is not created yet at this point else: description = config_name + # filter vars not prefixed with _ to simplify child values + if child.get('log'): + text = self._parse_dynamic_entry(child['log'], build, dynamic_vars=child_dynamic_vars) + build._log('_run_dynamic', text) + public_child_dynamic_vars = {key: value for key, value in child_dynamic_vars.items() if not key.startswith('_')} child_data = { - 'config_data': {**build.params_id.config_data.dict, "dynamic_vars": dynamic_vars}, + 'config_data': {**build.params_id.config_data.dict, "dynamic_vars": public_child_dynamic_vars}, 'config_id': build.params_id.config_id.id, 'dynamic_active_step_index': 0, 'dynamic_config_position': f'{build.params_id.dynamic_config_position or ""}/{build.dynamic_active_step_index}.{child_index}', 'config_name': config_name, 'description': description, } + if current_step.get('use_parent') or (current_step.get('use_parent') is None and any(step.get('job_type') == 'restore' for step in child.get('steps', []))): + # TODO improve, not needed is other restore params are given + child_data['reference_build_id'] = build.id child_data_list.append(child_data) return self._run_create_build( build, {'child_data': child_data_list, 'number_build': 
current_step.get('number_builds', 1)}, max_build=min(current_step.get('max_builds', 20), 200), + use_parent=current_step.get('use_parent', ...) ) if current_step['job_type'] == 'restore': @@ -1550,12 +1659,15 @@ def _run_dynamic(self, build): install_modules_pattern = current_step.get('install_modules', '') if install_modules_pattern.split(',', 1)[0] not in ('*', '-*'): install_modules_pattern = '-*,' + install_modules_pattern - config_data['install_module_pattern'] = self._parse_dynamic_entry(install_modules_pattern, build) + config_data['install_module_pattern'] = self._parse_dynamic_entry(install_modules_pattern, build, dynamic_vars) if 'test_tags' in current_step: - config_data['test_tags'] = self._parse_dynamic_entry(current_step.get('test_tags'), build) + config_data['test_tags'] = self._parse_dynamic_entry(current_step.get('test_tags'), build, dynamic_vars) config_data['test_enable'] = bool(current_step.get('test_enable') or current_step.get('test_tags')) + if 'extra_params' in current_step: + config_data['extra_params'] = self._parse_dynamic_entry(current_step.get('extra_params'), build, dynamic_vars) + for key in ('screencast', 'demo_mode', 'enable_auto_tags'): if key in current_step: value = current_step[key] @@ -1576,6 +1688,7 @@ def _run_dynamic(self, build): 'addons_path': ",".join(build._get_addons_path()), 'exports': ",".join(exports.keys()), 'exports_paths': ",".join(exports.values()), + **dynamic_vars, } command = [shlex.quote(self._parse_dynamic_entry(part, build, values)) for part in command] pres = [] @@ -1597,22 +1710,23 @@ def _get_dynamic_db_suffix(self, step): db_suffix = re.sub(r'[^a-z0-9_\-]', '_', db_suffix.lower()) return db_suffix - def _parse_dynamic_entry(self, entry, build, additional_dynamic_vars=None): + def _parse_dynamic_entry(self, entry, build, dynamic_vars): """ transforms a module/test-tags entry dynamically """ - dynamic_config = build.dynamic_config - expression_filters = { 'filter_all_modules': filter_all_modules, 
'filter_default_modules': filter_default_modules, 'make_module_test_tags': make_module_test_tags, + 'select_existing_modules': select_existing_modules, + 'get_dependencies': get_dependencies, + 'get_dependant': get_dependant, 'prepend': prepend_string, 'append': append_string, 'modified_modules': keep_modified_modules, - 'modified_modules_or_base': keep_modified_modules_or_base, + 'union': union, } - dynamic_vars = {**dynamic_config.get('vars', {}), **build.params_id.config_data.get('dynamic_vars', {}), **(additional_dynamic_vars or {})} + dynamic_vars = dynamic_vars or {} def parse_expression(match): # inspired by jinja but with limited features @@ -1645,6 +1759,139 @@ def consume_remaining_tasks(self, build): return next_index < len(steps) return False + def _run_semgrep(self, build): + if not self._check_limits(build): + return + + rules = self.env['runbot.semgrep_rule'].search([ + ("category_id", '=', self.semgrep_category.id), + '|', ("min_version_number", '=', False), ("min_version_number", "<=", build.params_id.version_id.number), + '|', ('max_version_number', '=', False), ('max_version_number', '>', build.params_id.version_id.number), + ]) + if not rules: + return + + for rule in rules: + build._write_file(f"rules/{rule.name}.yaml", "rules:\n" + rule.rule_text) + + exports = build._checkout() + + files = [] + targets = [] + for link in build.params_id.commit_link_ids: + # filtering section for progressive CI (style & security) + modified = link.commit_id.repo_id._git([ + 'diff', + '%s..%s' % (link.merge_base_commit_id.name, link.commit_id.name), + '--', + '*.py', + '*.js', + ]) + for patched_file in PatchSet(modified.splitlines(keepends=True)): + target = patched_file.target_file.removeprefix('b/') + if target.startswith(('setup/',)): + continue + target = link.commit_id.repo_id.name + '/' + target + + before = len(targets) + targets.extend( + f"{target}:{line.target_line_no}" + for hunk in patched_file + for line in hunk + if line.is_added + ) + # only 
look at file if it has additions + if len(targets) > before: + files.append(target) + + if not files: + build._log("", "Nothing to scan.") + return + + build._log("", f"checking {len(targets)} lines in {len(files)} files") + + # add empty ignore file, otherwise semgrep ignores test directories by default + build._write_file(".semgrepignore", "") + build._write_file(f"logs/{self.name}-files_list.txt", "\n".join(files)) + build._write_file("targets", "\n".join(targets)) + + cmd = f"semgrep scan {'--disable-nosem' if self.disable_nosem else ''} -c /data/build/rules --json --timeout=0 --verbose $(cat logs/{self.name}-files_list.txt) > /data/build/results.json" + + return { + "cmd": cmd, + "container_name": build._get_docker_name(), + "ro_volumes": exports, + } + + def _make_semgrep_results(self, build): + step_result = "ok" + if build._is_file("targets"): + targets = set(build._read_file("targets").splitlines(keepends=False)) + f = build._read_file("results.json") + semgrep_result = json.loads(f) if f else {} + else: + targets = set() + semgrep_result = {} + + repo = { + link.commit_id.repo_id.name: (link.branch_id.remote_id.base_url, link.commit_id) + for link in build.params_id.commit_link_ids + } + + # some of the lints can catch the same issue multiple times on the same line, and semgrep does not dedup + seen = set() + + # rules results + for result in semgrep_result.get('results', ()): + _, _, code = result['check_id'].rpartition('.') + start = result['start']['line'] + matches = targets & { + f"{result['path']}:{start}" + for line in range(result['start']['line'], result['end']['line'] + 1) + } + if not matches: + continue + + if all((target, code) in seen for target in matches): + continue + seen.update((target, code) for target in matches) + + repo_name, path = result['path'].split('/', 1) + filename = f"{path}:{start}" + repo_base_url, commit = repo[repo_name] + commit_hash = commit.name + + # FIXME: should be a code block :( + extra = result['extra'] + # 
snippet = extra['lines'] #"\n".join(f'{line}' for line in extra['lines'].splitlines(keepends=False)) + file = commit._read_source(path, mode='rb') + snippet = file[result['start']['offset']:result['end']['offset']].decode() + + codelink = f"{code}: {extra['message']}\n" + if self.custom_link: + # message may be sensitive, do not display, show snippet on same line if single line, otherwise block below + codelink = f"[{code} 🔗]({self.custom_link}#{code}): " + if '\n' in snippet: + snippet = '\n' + snippet + + build._log( + "semgrep", + f"""\ + [%s](https://%s/blob/%s/%s#L%s-L%s) + {codelink}`%s` + """, filename, repo_base_url, commit_hash, path, result['start']['line'], result['end']['line'], snippet, + level=extra['severity'], + log_type='markdown', + ) + if extra['severity'] != 'INFO': + step_result = "ko" + + # internal semgrep errors + for err in semgrep_result.get('errors', ()): + build._log("semgrep", err.get('message') or str(err), log_type='markdown') + + build['local_result'] = build._get_worst_result([build.local_result, step_result]) + class ConfigStepOrder(models.Model): _name = 'runbot.build.config.step.order' diff --git a/runbot/models/build_config_codeowner.py b/runbot/models/build_config_codeowner.py index 9a07cc502..807043568 100644 --- a/runbot/models/build_config_codeowner.py +++ b/runbot/models/build_config_codeowner.py @@ -34,7 +34,7 @@ def _codeowners_regexes(self, codeowners, version_id): team_set |= set(t.strip() for t in github_teams) return list(regexes.items()) - def _reviewer_per_file(self, files, regexes, ownerships, repo): + def _reviewer_per_file(self, files, regexes, ownerships, repo, build): reviewer_per_file = {} for file in files: file_reviewers = set() @@ -42,7 +42,7 @@ def _reviewer_per_file(self, files, regexes, ownerships, repo): if re.match(regex, file): if not teams or 'none' in teams: file_reviewers = None - break # blacklisted, break + break # blacklisted, break file_reviewers |= teams if file_reviewers is None: continue @@ 
-56,8 +56,11 @@ def _reviewer_per_file(self, files, regexes, ownerships, repo): for ownership in ownerships: if file_module == ownership.module_id.name: file_reviewers.add(ownership.team_id.github_team) - if not file_reviewers and self.fallback_reviewer: - file_reviewers.add(self.fallback_reviewer) + if not file_reviewers: + if len(file.split('/')) <= 2: + build._log('', 'File %s is at the root level and it looks like it could be a mistake, remove it or ensure that a codeowner rule is added for this file', file, log_type='markdown', level="ERROR") + elif self.fallback_reviewer: + file_reviewers.add(self.fallback_reviewer) reviewer_per_file[file] = file_reviewers return reviewer_per_file @@ -121,7 +124,7 @@ def _run_codeowner(self, build): for commit_link, files in modified_files.items(): build._log('', 'Checking %s codeowner regexed on %s files' % (len(regexes), len(files))) reviewers = set() - reviewer_per_file = self._reviewer_per_file(files, regexes, ownerships, commit_link.commit_id.repo_id) + reviewer_per_file = self._reviewer_per_file(files, regexes, ownerships, commit_link.commit_id.repo_id, build) for file, file_reviewers in reviewer_per_file.items(): href = 'https://%s/blob/%s/%s' % (commit_link.branch_id.remote_id.base_url, commit_link.commit_id.name, file.split('/', 1)[-1]) if file_reviewers: diff --git a/runbot/models/build_error.py b/runbot/models/build_error.py index c5b4fdb1e..470e64774 100644 --- a/runbot/models/build_error.py +++ b/runbot/models/build_error.py @@ -9,16 +9,15 @@ from dateutil import rrule from dateutil.relativedelta import relativedelta from markupsafe import Markup - from werkzeug.urls import url_join from odoo import api, fields, models -from odoo.exceptions import UserError, ValidationError -from odoo.tools import SQL, lazy, ormcache +from odoo.exceptions import AccessError, UserError, ValidationError from odoo.fields import Domain +from odoo.tools import SQL, lazy, ormcache +from ..common import TestTagsParser, transactioncache 
from ..fields import JsonDictField -from ..common import transactioncache, TestTagsParser _logger = logging.getLogger(__name__) @@ -215,7 +214,6 @@ class BuildError(models.Model): _inherit = ('mail.thread', 'mail.activity.mixin', 'runbot.build.error.seen.mixin') _mail_post_access = 'read' - name = fields.Char("Name") active = fields.Boolean('Open (not fixed)', default=True, tracking=True) description = fields.Text("Description", store=True, compute='_compute_description') @@ -242,9 +240,10 @@ class BuildError(models.Model): breaking_bundle_id = fields.Many2one('runbot.bundle', 'Breaking bundle', tracking=True, help="Bundle that introduced the error", related='breaking_pr_id.bundle_id') breaking_bundle_url = fields.Char('Breaking bundle url', related='breaking_bundle_id.frontend_url') breaking_pr_date = fields.Datetime('Breaking date', related="breaking_pr_id.close_date", help="Date of the merge of the first pr") + duplicate_breaking_pr_count = fields.Integer('Same Breaking PR', compute='_compute_duplicate_breaking_pr_count', help='Other errors with same breaking PR') - test_tags = fields.Char(string='Test tags', help="Comma separated list of test_tags to use to reproduce/remove this error", tracking=True) - canonical_tags = fields.Char('Canonical tag', compute='_compute_canonical_tags', store=True) + test_tags = fields.Text(string='Test tags', help="Comma separated list of test_tags to use to reproduce/remove this error", tracking=True) + canonical_tags = fields.Text('Canonical tag', compute='_compute_canonical_tags', store=True) tags_match_count = fields.Integer('Nb errors matching the test_tags', compute='_compute_tags_match_count') tags_min_version_excluded_id = fields.Many2one('runbot.version', 'Tag min version (excluded)') tags_min_version_id = fields.Many2one('runbot.version', 'Tags Min version', compute="_compute_tags_min_version_id", inverse="_inverse_tags_min_version_id", help="Minimal version where the test tags will be applied.", tracking=True) @@ -262,7 
+261,7 @@ class BuildError(models.Model): unique_build_error_link_ids = fields.Many2many('runbot.build.error.link', compute='_compute_unique_build_error_link_ids') build_ids = fields.Many2many('runbot.build', compute=_compute_related_error_content_ids('build_ids'), search=_search_related_error_content_ids('build_ids')) bundle_ids = fields.Many2many('runbot.bundle', compute=_compute_related_error_content_ids('bundle_ids'), search=_search_related_error_content_ids('bundle_ids')) - version_ids = fields.Many2many('runbot.version', string='Versions', compute=_compute_related_error_content_ids('version_ids'), search=_search_related_error_content_ids('version_ids')) + version_ids = fields.Many2many('runbot.version', string='Versions', compute='_compute_version_ids', search=_search_related_error_content_ids('version_ids')) trigger_ids = fields.Many2many('runbot.trigger', string='Triggers', compute=_compute_related_error_content_ids('trigger_ids'), store=True) tag_ids = fields.Many2many('runbot.build.error.tag', string='Tags', compute=_compute_related_error_content_ids('tag_ids'), search=_search_related_error_content_ids('tag_ids')) random = fields.Boolean('Random', compute="_compute_random", store=True) @@ -289,7 +288,7 @@ def _inverse_tags_min_version_id(self): def _compute_canonical_tags(self): for record in self: canonical_tags = sorted(set(record.error_content_ids.filtered('canonical_tag').mapped('canonical_tag'))) - record.canonical_tags = ','.join(canonical_tags) + record.canonical_tags = '\n'.join(canonical_tags) @api.depends('tags_min_version_id') def _compute_tags_min_version_id(self): @@ -322,6 +321,30 @@ def _compute_fixing_bundle_id(self): for record in self: record.fixing_bundle_id = record.fixing_pr_id.bundle_id if record.fixing_pr_id else False + @api.depends('breaking_pr_id') + def _compute_duplicate_breaking_pr_count(self): + breaking_counts = self.env["runbot.build.error"]._read_group( + domain=[ + ("breaking_pr_id", "in", self.breaking_pr_id.ids), + 
("active", "=", True), + ], + groupby=["breaking_pr_id"], + aggregates=["id:count"], + having=[('id:count', '>', 1)], + ) + + count_by_pr = {pr_count[0]: pr_count[1] for pr_count in breaking_counts} + + for record in self: + # remove 1 to not count the current error + record.duplicate_breaking_pr_count = count_by_pr.get(record.breaking_pr_id, 1) - 1 + + + @api.depends('error_content_ids.version_ids') + def _compute_version_ids(self): + for record in self: + record['version_ids'] = record.error_content_ids['version_ids'].sorted('number') + def _compute_disappearing_batch_ids(self): # this is really inefficient but should only be used in form view # One search per version where it appeared @@ -396,7 +419,7 @@ def _compute_count(self): for record in self: record.error_count = len(record.error_content_ids) - @api.depends('error_content_ids') + @api.depends('error_content_ids.random') def _compute_random(self): for record in self: record.random = any(error.random for error in record.error_content_ids) @@ -426,10 +449,10 @@ def _compute_unique_qualifiers(self): @api.depends('common_qualifiers') def _compute_similar_ids(self): for record in self: - if record.common_qualifiers: + if record.common_qualifiers and (record.id or record.id.origin): query = SQL( r"""SELECT id FROM runbot_build_error WHERE id != %s AND common_qualifiers @> %s""", - record.id, + record.id or record.id.origin, json.dumps(record.common_qualifiers.dict), ) self.env.cr.execute(query) @@ -440,10 +463,10 @@ def _compute_similar_ids(self): @api.depends('common_qualifiers') def _compute_similar_content_ids(self): for record in self: - if record.common_qualifiers: + if record.common_qualifiers and (record.id or record.id.origin): query = SQL( r"""SELECT id FROM runbot_build_error_content WHERE error_id != %s AND qualifiers @> %s""", - record.id, + record.id or record.id.origin, json.dumps(record.common_qualifiers.dict), ) self.env.cr.execute(query) @@ -454,10 +477,10 @@ def 
_compute_similar_content_ids(self): @api.depends('common_qualifiers') def _compute_analogous_ids(self): for record in self: - if record.common_qualifiers: + if record.common_qualifiers and (record.id or record.id.origin): query = SQL( r"""SELECT id FROM runbot_build_error WHERE id != %s AND unique_qualifiers @> %s""", - record.id, + record.id or record.id.origin, json.dumps(record.unique_qualifiers.dict), ) self.env.cr.execute(query) @@ -468,10 +491,10 @@ def _compute_analogous_ids(self): @api.depends('common_qualifiers') def _compute_analogous_content_ids(self): for record in self: - if record.common_qualifiers: + if record.common_qualifiers and (record.id or record.id.origin): query = SQL( r"""SELECT id FROM runbot_build_error_content WHERE error_id != %s AND qualifiers @> %s""", - record.id, + record.id or record.id.origin, json.dumps(record.unique_qualifiers.dict), ) self.env.cr.execute(query) @@ -484,16 +507,20 @@ def _compute_tags_match_count(self): for record in self: record.tags_match_count = 0 if record.test_tags: - tags_parser = TestTagsParser(record.test_tags) - search_domain = tags_parser.test_tags_to_search_domain(exclude_error_id=record.id) - if search_domain: - record.tags_match_count = self.env['runbot.build.error'].with_context(active_test=True).search_count(search_domain) + try: + tags_parser = TestTagsParser(record.test_tags.replace('\n', ',')) + search_domain = tags_parser.test_tags_to_search_domain(exclude_error_id=record.id or record.id.origin) + if search_domain: + record.tags_match_count = self.env['runbot.build.error'].with_context(active_test=True).search_count(search_domain) + except Exception as e: # noqa: BLE001 + record.tags_match_count = -1 + _logger.warning("Error while computing tags_match_count for error %s with test_tags %s: %s", record.id, record.test_tags, e) def action_view_impacted_by_tag(self): self.ensure_one() if not self.test_tags: return - tags_parser = TestTagsParser(self.test_tags) + tags_parser = 
TestTagsParser(self.test_tags.replace('\n', ',')) return { 'type': 'ir.actions.act_window', 'views': [(False, 'list'), (False, 'form')], @@ -506,8 +533,15 @@ def action_view_impacted_by_tag(self): @api.constrains('test_tags') def _check_test_tags(self): for build_error in self: - if build_error.test_tags and '-' in build_error.test_tags: - raise ValidationError('Build error test_tags should not be negated') + if build_error.test_tags: + try: + test_tags = build_error.test_tags.replace('\n', ',') + tags_parser = TestTagsParser(test_tags) + tags_parser = TestTagsParser(test_tags, keep_escape=False) + except Exception as e: # noqa: BLE001 + raise ValidationError(f'Invalid test_tags format: {e}') + if tags_parser.exclude or any(params[0] == '-' for p, params in tags_parser.parameters): + raise ValidationError('Build error test_tags should not be negated') @api.onchange('test_tags') def _onchange_test_tags(self): @@ -534,6 +568,12 @@ def write(self, vals): if not vals['active'] and build_error.active and build_error.last_seen_date and build_error.last_seen_date + relativedelta(days=1) > datetime.datetime.now(): raise UserError("This error broke less than one day ago can only be deactivated by admin") + writable_fields = ['responsible', 'fixing_pr_id', 'breaking_pr_id', 'customer', 'random', 'team_id', 'manual_team_id'] + if not self.env.su and not self.env.user.has_groups('runbot.group_runbot_admin,runbot.group_runbot_error_manager'): + no_access_fields = vals.keys() - writable_fields + if no_access_fields != set(): + raise AccessError(f"You are not allowed to modify the following field(s): {','.join(no_access_fields)}") + if (responsible_id := vals.get('responsible')) and vals.get('active', True): responsible = self.env['res.users'].browse(responsible_id) for build_error in self: @@ -567,6 +607,8 @@ def _merge(self, others): # TODO xdo split the error id change and other params merge in order to avoid the merge in write and write in merge recursion self.ensure_one 
error = self + fields_to_merge = ['responsible', 'fixing_pr_id', 'breaking_pr_id'] + fields_to_copy = ['manual_team_id'] for previous_error in others: # todo, check that all relevant fields are checked and transfered/logged if previous_error.test_tags and error.test_tags != previous_error.test_tags: @@ -574,21 +616,19 @@ def _merge(self, others): error.sudo().test_tags = previous_error.test_tags previous_error.sudo().test_tags = False elif self.env.su: - test_tags = error.test_tags.split(',') - previous_error - for tag in previous_error.test_tags.split(','): + test_tags = TestTagsParser(error.test_tags.replace('\n', ',')).filter_specs + previous_error_tags = TestTagsParser(previous_error.test_tags.replace('\n', ',')).filter_specs + for tag in previous_error_tags: if tag not in test_tags: test_tags.append(tag) - error.test_tags = ','.join(test_tags) + error.test_tags = '\n'.join(test_tags) previous_error.test_tags = False - if previous_error.responsible: - if error.responsible and error.responsible != previous_error.responsible and not self.env.su: - raise UserError(f"error {error.id} as already a responsible ({error.responsible}) cannot assign {previous_error.responsible}") - if not error.responsible: - error.responsible = previous_error.responsible - if previous_error.team_id: - if not error.team_id: - error.team_id = previous_error.team_id + for field in fields_to_merge + fields_to_copy: + if previous_error[field]: + if field in fields_to_merge and error[field] and error[field] != previous_error[field] and not self.env.su: + raise UserError(f"error {error.id} as already a {field} ({error[field]}) cannot assign {previous_error[field]}") + if not error[field]: + error[field] = previous_error[field] previous_error.error_content_ids.with_context(merging=True).write({'error_id': self}) previous_error.common_qualifiers = dict() previous_error.unique_qualifiers = dict() @@ -612,7 +652,16 @@ def filter_tags(e): return True test_tag_list = self.search([('test_tags', '!=', 
False)]).filtered(filter_tags).mapped('test_tags') - return [test_tag for error_tags in test_tag_list for test_tag in (error_tags).split(',')] + parsed_test_tags = [] + for error_tags in test_tag_list: + try: + # we cannot rely only on '\n' since old test-tags or user defined ones could be comma separated + error_tags = error_tags.replace('\n', ',') + tags_parser = TestTagsParser(error_tags) + parsed_test_tags.extend(tags_parser.filter_specs) + except Exception as e: # noqa: BLE001 + _logger.warning('Error while parsing test_tags for error with id %s: %s', self.id, e) + return parsed_test_tags @api.model def _disabling_tags(self, build_id=False): @@ -690,6 +739,19 @@ def action_view_analogous_qualified_contents(self): 'name': 'Similary Qualified Contents' } + def action_view_duplicate_breaking_pr(self): + self.ensure_one() + return { + 'type': 'ir.actions.act_window', + 'res_model': 'runbot.build.error', + 'domain': [ + ('breaking_pr_id', '=', self.breaking_pr_id.id), + ('active', '=', True), + ], + 'view_mode': 'list,form', + 'name': 'Errors with same breaking PR', + } + @api.depends('manual_team_id', 'auto_team_id') def _compute_team_id(self): for error in self: @@ -814,6 +876,7 @@ class BuildErrorContent(models.Model): error_active = fields.Boolean('Active', related='error_id.active') error_id = fields.Many2one('runbot.build.error', 'Linked to', index=True, required=True, tracking=True, ondelete='cascade') + create_error_id = fields.Many2one('runbot.build.error', 'Original error', index=True) error_display_id = fields.Integer(compute='_compute_error_display_id', string="Error id") content = fields.Text('Error message', required=True) cleaned_content = fields.Text('Cleaned error message') @@ -842,7 +905,7 @@ class BuildErrorContent(models.Model): breaking_pr_id = fields.Many2one(related='error_id.breaking_pr_id') fixing_pr_alive = fields.Boolean(related='error_id.fixing_pr_alive') fixing_pr_url = fields.Char(related='error_id.fixing_pr_url') - test_tags = 
fields.Char(related='error_id.test_tags') + test_tags = fields.Text(related='error_id.test_tags') tags_min_version_id = fields.Many2one(related='error_id.tags_min_version_id') tags_max_version_id = fields.Many2one(related='error_id.tags_max_version_id') @@ -890,6 +953,7 @@ def create(self, vals_list): 'name': name, }) vals['error_id'] = error.id + vals['create_error_id'] = vals['error_id'] content = vals.get('content') cleaned_content = cleaners._r_sub(content) vals.update({ @@ -958,7 +1022,7 @@ def _compute_version_ids(self): res = dict(self.env.cr.fetchall()) for build_error_content in self: - build_error_content.version_ids = self.env['runbot.version'].browse([v for v in res.get(build_error_content.id, []) if v]) + build_error_content.version_ids = self.env['runbot.version'].browse([v for v in res.get(build_error_content.id, []) if v]).sorted('number') @api.depends('build_ids') def _compute_trigger_ids(self): @@ -979,10 +1043,10 @@ def _compute_error_display_id(self): def _compute_similar_ids(self): """error contents having the exactly the same qualifiers""" for record in self: - if record.qualifiers: + if record.qualifiers and (record.id or record.id.origin): query = SQL( r"""SELECT id FROM runbot_build_error_content WHERE id != %s AND qualifiers @> %s AND qualifiers <@ %s""", - record.id, + record.id or record.id.origin, json.dumps(record.qualifiers.dict), json.dumps(record.qualifiers.dict), ) diff --git a/runbot/models/bundle.py b/runbot/models/bundle.py index 1c54d9d53..3e8073e89 100644 --- a/runbot/models/bundle.py +++ b/runbot/models/bundle.py @@ -57,6 +57,8 @@ class Bundle(models.Model): tag_ids = fields.Many2many('runbot.bundle.tag', string='Tags') team_id = fields.Many2one('runbot.team', compute='_compute_team_id', store=True, readonly=False) + priority_offset = fields.Integer("Priority offset", help="Offset in seconds to remove from the create date of a batch to define priority, positive value means higher priority, negative value means lower 
priority.") + def _compute_frontend_url(self): for bundle in self: bundle.frontend_url = f'/runbot/bundle/{bundle.id}' @@ -275,7 +277,7 @@ def _consistency_warning(self): warnings.append(('info', 'PR %s targeting a non base branch: %s' % (branch.dname, branch.target_branch_name))) else: warnings.append(('warning' if branch.alive else 'info', 'PR %s targeting wrong version: %s (expecting %s)' % (branch.dname, branch.target_branch_name, self.base_id.name))) - elif not branch.is_pr and not branch.name.startswith(self.base_id.name) and not self.defined_base_id: + elif not branch.is_pr and not branch.name.startswith(self.base_id.name) and not self.defined_base_id and branch.remote_id.repo_id.enforce_version: warnings.append(('warning', 'Branch %s not starting with version name (%s)' % (branch.dname, self.base_id.name))) return warnings @@ -320,7 +322,12 @@ def action_generate_custom_trigger_restore_action(self): return self._generate_custom_trigger_action(context) def action_disable_all_triggers(self): - triggers_to_disable = ( + self._configure_custom_trigger_start_mode('disable') + + def _configure_custom_trigger_start_mode(self, mode): + self.ensure_one() + + triggers_to_create = ( self.env["runbot.trigger"] .search([ ("id", "not in", self.trigger_custom_ids.trigger_id.ids), @@ -333,13 +340,26 @@ def action_disable_all_triggers(self): ) ) vals = [] - for trigger in triggers_to_disable: - vals.append({ - 'bundle_id': self.id, - 'trigger_id': trigger.id, - 'start_mode': 'disabled', - }) + bundle_repos = self.branch_ids.remote_id.repo_id + for trigger in triggers_to_create: + if trigger.repo_ids & bundle_repos or trigger.dependency_ids & bundle_repos: + vals.append({ + 'bundle_id': self.id, + 'trigger_id': trigger.id, + }) self.env['runbot.bundle.trigger.custom'].create(vals) + for custom_trigger in self.trigger_custom_ids: + trigger_mode = mode + if mode == 'light' and not custom_trigger.trigger_id.light_config_id: + trigger_mode = 'auto' + custom_trigger.start_mode = 
trigger_mode + + def _force_ci(self): + for bundle in self: + bundle._configure_custom_trigger_start_mode('force') + # we need to create a new batch in case some of the triggers were in minimal mode + batch = bundle._force() or bundle.last_batch + batch._log("Batch was requested for ci") class BundleTag(models.Model): diff --git a/runbot/models/commit.py b/runbot/models/commit.py index 02973cfd3..13fa32bf4 100644 --- a/runbot/models/commit.py +++ b/runbot/models/commit.py @@ -3,7 +3,6 @@ import subprocess from ..common import os, RunbotException, make_github_session, transactioncache -import glob import shutil from odoo import models, fields, api @@ -66,22 +65,14 @@ def _rebase_on(self, commit): return self return self._get(self.name, self.repo_id.id, self.read()[0], commit.id) - def _get_available_modules(self): - for manifest_file_name in self.repo_id.manifest_files.split(','): # '__manifest__.py' '__openerp__.py' - for addons_path in (self.repo_id.addons_paths or '').split(','): # '' 'addons' 'odoo/addons' - sep = os.path.join(addons_path, '*') - for manifest_path in glob.glob(self._source_path(sep, manifest_file_name)): - module = os.path.basename(os.path.dirname(manifest_path)) - yield (addons_path, module, manifest_file_name) - def _list_files(self, patterns): #example: git ls-files --with-tree=abcf390f90dbdd39fd61abc53f8516e7278e0931 ':(glob)addons/*/*.py' ':(glob)odoo/addons/*/*.py' # note that glob is needed to avoid the star matching ** self.ensure_one() - return self.repo_id._git(['ls-files', '--with-tree', self.name, *patterns]).split('\n') + self._fetch() + return self.repo_id._git(['ls-files', '--with-tree', self.tree_hash, *patterns]).split('\n') def _list_available_modules(self): - # beta version, may replace _get_available_modules latter addons_paths = (self.repo_id.addons_paths or '').split(',') patterns = [] for manifest_file_name in self.repo_id.manifest_files.split(','): # '__manifest__.py' '__openerp__.py' @@ -98,12 +89,18 @@ def 
_list_available_modules(self): module, manifest_file_name = elems yield (addons_path, module, manifest_file_name) + @transactioncache # hack to avoid to fetch two time the same commit inside the same transaction + def _fetch(self): + try: + self.repo_id._fetch(self.name) + except RunbotException: + self.repo_id._fetch(self.tree_hash) def _export(self, build): """Export a git repo into a sources""" # TODO add automated tests self.ensure_one() - self.repo_id._fetch(self.name) + self._fetch() if not self.env['runbot.commit.export'].search([('build_id', '=', build.id), ('commit_id', '=', self.id)]): self.env['runbot.commit.export'].create({'commit_id': self.id, 'build_id': build.id}) export_path = self._source_path() @@ -166,12 +163,43 @@ def _read_source(self, file, mode='r'): @transactioncache def _git_show_file(self, file): + return self._git_show_files([file])[0] + + def _git_show_files(self, files): self.ensure_one() + if not files: + return [] + self.repo_id._fetch(self.name) + + queries = "\n".join([f"{self.name}:{f}" for f in files]) + "\n" + try: - return self.repo_id._git(['show', '%s:%s' % (self.name, file)]) + buffer = self.repo_id._git( + ['cat-file', '--batch'], + input_data=queries, + raw=True, + ) except subprocess.CalledProcessError: - return False + return [False] * len(files) + + results = [] + offset = 0 + buffer_len = len(buffer) + while offset < buffer_len: + newline_idx = buffer.find(b'\n', offset) + if newline_idx == -1: + break + header = buffer[offset:newline_idx].decode('utf-8') + offset = newline_idx + 1 + try: + size_in_bytes = int(header.rsplit(' ', 1)[-1]) + except ValueError: # most likely missing + results.append(False) + continue + results.append(buffer[offset : offset + size_in_bytes].decode('utf-8', errors='replace')) + offset += size_in_bytes + 1 + return results def _source_path(self, *paths): if not self.tree_hash: diff --git a/runbot/models/custom_trigger.py b/runbot/models/custom_trigger.py index 53cc74281..a31d7fa55 100644 --- 
a/runbot/models/custom_trigger.py +++ b/runbot/models/custom_trigger.py @@ -8,7 +8,7 @@ class BundleTriggerCustomization(models.Model): _description = 'Custom trigger' trigger_id = fields.Many2one('runbot.trigger') - start_mode = fields.Selection([('disabled', 'Disabled'), ('auto', 'Auto'), ('force', 'Force')], required=True, default='auto') + start_mode = fields.Selection([('disabled', 'Disabled'), ('auto', 'Auto'), ('light', 'Light'), ('force', 'Force')], required=True, default='auto') use_base_commits = fields.Boolean("Use base commits", help="Allow to test a trigger without the branch changes", default=False) bundle_id = fields.Many2one('runbot.bundle') config_id = fields.Many2one('runbot.build.config') diff --git a/runbot/models/docker.py b/runbot/models/docker.py index cd4816ad9..547a3c6e0 100644 --- a/runbot/models/docker.py +++ b/runbot/models/docker.py @@ -2,8 +2,13 @@ import logging import os import re +import time +from pathlib import Path + import docker -from odoo import api, fields, models, exceptions +import requests + +from odoo import api, exceptions, fields, models from ..container import docker_build from ..fields import JsonDictField @@ -330,17 +335,62 @@ def _get_docker_metadata(self, image_id): return {'error': str(e)} return metadata + def _get_cached_content(self, docker_build_path): + self.ensure_one() + cache_dir = Path(self.env['runbot.runbot']._path('docker', 'cache')) + cache_dir.mkdir(exist_ok=True) + cache_re = re.compile(r'^#\s?CACHE\s(?P<duration>\d+)$') + add_re = re.compile(r'^ADD\s(?P<url>http.+)\s(?P<destination>.+)$') + lines = self.dockerfile.split('\n') + for i, line in enumerate(lines): + if cache_match := cache_re.match(line): + if add_match := add_re.match(lines[i + 1]): + cache_duration = int(cache_match.group('duration')) + url = add_match.group('url') + filename = re.sub(r'[^a-zA-Z0-9]', '_', url)[:255] + destination = add_match.group('destination') + # Use the destination name as hardlink name to avoid rebuild if file content is the same but not
the url + hardlink_name = re.sub(r'[^a-zA-Z0-9]', '_', destination) + lines[i + 1] = f'COPY {hardlink_name} {destination}' + cache_file_path = cache_dir / filename + if not cache_file_path.exists() or time.time() - cache_file_path.lstat().st_mtime > cache_duration: + try: + with requests.get(url, stream=True) as response: + response.raise_for_status() + with cache_file_path.open('wb') as cache_file: + for chunk in response.iter_content(chunk_size=8192): + cache_file.write(chunk) + except (requests.exceptions.HTTPError, requests.exceptions.RequestException): + if cache_file_path.exists(): + cache_file_path.touch() # to avoid spamming in case of failures + self.env['runbot.runbot']._warning(f'Dockerfile {self.name} failed to fetch "{url}"') + else: + raise + hardlink_path = Path(docker_build_path) / hardlink_name + hardlink_path.unlink(missing_ok=True) + hardlink_path.hardlink_to(cache_file_path) + return '\n'.join(lines) + def _build(self, host=None): tag_dir = re.sub(r'[^\w]', '_', self.image_tag) docker_build_path = self.env['runbot.runbot']._path('docker', tag_dir) os.makedirs(docker_build_path, exist_ok=True) - content = self.dockerfile - with open(self.env['runbot.runbot']._path('docker', tag_dir, 'Dockerfile'), 'w') as Dockerfile: - Dockerfile.write(content) - result = docker_build(docker_build_path, self.image_future_tag, self.pull_on_build) - duration = result['duration'] - msg = result['msg'] - success = image_id = result.get('image_id') + + duration = 0 + content = '' + image_id = None + try: + content = self._get_cached_content(docker_build_path) + with open(self.env['runbot.runbot']._path('docker', tag_dir, 'Dockerfile'), 'w', encoding="utf-8") as Dockerfile: + Dockerfile.write(content) + result = docker_build(docker_build_path, self.image_future_tag, self.pull_on_build) + duration = result['duration'] + msg = result['msg'] + success = image_id = result.get('image_id') + except Exception as e: + success = False + msg = f'Exception during Docker build: 
"{e}"' + docker_build_result_values = {'dockerfile_id': self.id, 'output': msg, 'duration': duration, 'content': content, 'host_id': host and host.id} if success: docker_build_result_values['result'] = 'success' diff --git a/runbot/models/host.py b/runbot/models/host.py index 7dea8fba1..6d657d90f 100644 --- a/runbot/models/host.py +++ b/runbot/models/host.py @@ -341,7 +341,7 @@ def _get_builds(self, domain, order=None): return self.env['runbot.build'].search(self._get_build_domain(domain), order=order) def _process_messages(self): - self.host_message_ids._process() + return self.host_message_ids._process() class MessageQueue(models.Model): @@ -351,14 +351,24 @@ class MessageQueue(models.Model): _log_access = False create_date = fields.Datetime('Create date', default=fields.Datetime.now) - host_id = fields.Many2one('runbot.host', required=True, ondelete='cascade') - build_id = fields.Many2one('runbot.build') + host_id = fields.Many2one('runbot.host', required=True, ondelete='cascade', index=True) + build_id = fields.Many2one('runbot.build', index=True) message = fields.Char('Message') def _process(self): records = self + processed = False # todo consume messages here if records: + processed = True for record in records: + if record.message == 'kill': + if record.build_id: + build = record.build_id + result = None + if build.local_state != 'running' and build.global_result not in ('warn', 'ko'): + result = 'killed' + build._kill(result=result) self.env['runbot.runbot']._warning(f'Host {record.host_id.name} got an unexpected message {record.message}') self.unlink() + return processed diff --git a/runbot/models/ir_action.py b/runbot/models/ir_action.py new file mode 100644 index 000000000..95b82fc2e --- /dev/null +++ b/runbot/models/ir_action.py @@ -0,0 +1,12 @@ +import requests + +from odoo import models + + +class ExtendedServerActionContext(models.Model): + _inherit = 'ir.actions.server' + + def _get_eval_context(self, action=None): + ctx = 
super()._get_eval_context(action=action) + ctx.update(requests=requests.Session()) + return ctx diff --git a/runbot/models/ir_qweb.py b/runbot/models/ir_qweb.py index e9fa061ce..c8ea0eb37 100644 --- a/runbot/models/ir_qweb.py +++ b/runbot/models/ir_qweb.py @@ -1,5 +1,5 @@ -from ..common import s2human, s2human_long, precise_s2human -from odoo import models +from ..common import s2human, s2human_long, precise_s2human, transactioncache +from odoo import models, tools from odoo.http import request from odoo.addons.website.controllers.main import QueryURL @@ -12,3 +12,10 @@ def _prepare_frontend_environment(self, values): values['s2human_long'] = s2human_long values['precise_s2human'] = precise_s2human return response + + @tools.conditional( + 'xml' in tools.config['dev_mode'], + transactioncache, + ) # replace ormcache by transaction cache to avoid reading the same template multiple times in the same requests. Context is ignored but should be the same for each call in the same request + def _generate_code_cached(self, ref: int): + return super()._generate_code_cached(ref) diff --git a/runbot/models/project.py b/runbot/models/project.py index 5e9ee46d1..7032754c4 100644 --- a/runbot/models/project.py +++ b/runbot/models/project.py @@ -24,6 +24,10 @@ class Project(models.Model): hidden = fields.Boolean('Hidden', help='Hide this project from the main page') active = fields.Boolean("Active", default=True) process_delay = fields.Integer('Process delay', default=60, required=True, help="Delay between a push and a batch starting its process.") + next_freeze_tag_id = fields.Many2one('runbot.bundle.tag', string="Next freeze tag") + use_light_default = fields.Boolean('Use light config by default', help="Use the light config when possible for all triggers") + use_light_draft = fields.Boolean('Use light config for draft PRs', help="Use the light config when possible for bundle having draft pr") + use_light_no_pr = fields.Boolean('Use light config when no PR', help="Use the light 
config when possible for all bundles not having any pr") @api.constrains('process_delay') def _constraint_process_delay(self): diff --git a/runbot/models/repo.py b/runbot/models/repo.py index 14edd3760..175721603 100644 --- a/runbot/models/repo.py +++ b/runbot/models/repo.py @@ -55,6 +55,7 @@ class Trigger(models.Model): project_id = fields.Many2one('runbot.project', string="Project id", required=True) repo_ids = fields.Many2many('runbot.repo', relation='runbot_trigger_triggers', string="Triggers", domain="[('project_id', '=', project_id)]") dependency_ids = fields.Many2many('runbot.repo', relation='runbot_trigger_dependencies', string="Dependencies") + use_extra_slot = fields.Boolean('Use extra slot', help="If checked, builds from this trigger can use an extra slot on the builders (for light and fast triggers)") starts_before_ids = fields.Many2many( 'runbot.trigger', @@ -72,9 +73,11 @@ class Trigger(models.Model): ) module_filters = fields.One2many('runbot.module.filter', 'trigger_id', string="Module filters", help='Will be combined with repo module filters when used with this trigger') config_id = fields.Many2one('runbot.build.config', string="Config", required=True) + light_config_id = fields.Many2one('runbot.build.config', string="Light config", help="Alternative config to use when light mode is enabled") config_data = JsonDictField('Config Data') network_enabled = fields.Boolean('Network Enabled') batch_dependent = fields.Boolean('Batch Dependent', help="Force adding batch in build parameters to make it unique and give access to bundle") + version_dependent = fields.Boolean('Version Dependent', default=True, help="Add the version in build parameters. 
Uncheck if the version is not needed to determine the build result") ci_context = fields.Char("CI context", tracking=True) category_id = fields.Many2one('runbot.category', default=lambda self: self.env.ref('runbot.default_category', raise_if_not_found=False)) @@ -427,8 +430,10 @@ class Repo(models.Model): get_ref_time = fields.Float('Last refs db update', compute='_compute_get_ref_time') trigger_ids = fields.Many2many('runbot.trigger', relation='runbot_trigger_triggers', readonly=True) single_version = fields.Many2one('runbot.version', "Single version", help="Limit the repo to a single version for non versionned repo") + enforce_version = fields.Boolean('Force version', help="Force all bundle containing branch from this repo to be prefixed with the correct version", default=True) forbidden_regex = fields.Char('Forbidden regex', help="Regex that forid bundle creation if branch name is matching", tracking=True) invalid_branch_message = fields.Char('Forbidden branch message', tracking=True) + allow_slashes = fields.Boolean('Allow slashes in branch names', help="Allow branches with slashes in their name (e.g. odoo/tests/my_branch). If unchecked, only one level of branches is allowed (e.g. 
odoo/my_branch)", default=True) def _compute_get_ref_time(self): self.env.cr.execute(""" @@ -498,11 +503,19 @@ def _get_git_command(self, cmd, errors='strict'): cmd = ['git', '-C', self.path] + config_args + cmd return cmd - def _git(self, cmd, errors='strict', quiet=False): + def _git(self, cmd, errors='strict', quiet=False, input_data=None, raw=False): cmd = self._get_git_command(cmd, errors) if not quiet: _logger.info("git command: %s", shlex.join(cmd)) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(errors=errors) + kwargs = {'stderr': subprocess.STDOUT} + if input_data is not None: + if isinstance(input_data, str): + input_data = input_data.encode('utf-8') + kwargs['input'] = input_data + output = subprocess.check_output(cmd, **kwargs) + if raw: + return output + return output.decode(errors=errors) def _fetch(self, sha): if not self._hash_exists(sha): @@ -553,7 +566,10 @@ def _get_refs(self, max_age=30, ignore=None): self._set_ref_time(get_ref_time) fields = ['refname', 'objectname', 'committerdate:unix', 'authorname', 'authoremail', 'subject', 'committername', 'committeremail', 'tree'] fmt = "%00".join(["%(" + field + ")" for field in fields]) - cmd = ['for-each-ref', '--format', fmt, '--sort=-committerdate', 'refs/*/heads/*'] + refs_desc = 'refs/*/heads/*' + if self.allow_slashes: + refs_desc = 'refs/*/heads/**' + cmd = ['for-each-ref', '--format', fmt, '--sort=-committerdate', refs_desc] if any(remote.fetch_pull for remote in self.remote_ids): cmd.append('refs/*/pull/*') git_refs = self._git(cmd) @@ -580,7 +596,7 @@ def _find_or_create_branches(self, refs): """ # FIXME WIP - names = [r[0].split('/')[-1] for r in refs] + names = [r[0].split('/', 3)[-1] for r in refs] branches = self.env['runbot.branch'].search([('name', 'in', names), ('remote_id', 'in', self.remote_ids.ids)]) ref_branches = {branch._ref(): branch for branch in branches} new_branch_values = [] @@ -589,7 +605,7 @@ def _find_or_create_branches(self, refs): # format 
example: # refs/ruodoo-dev/heads/12.0-must-fail # refs/ruodoo/pull/1 - _, remote_name, branch_type, name = ref_name.split('/') + _, remote_name, branch_type, name = ref_name.split('/', 3) remote_id = self.remote_ids.filtered(lambda r: r.remote_name == remote_name).id if not remote_id: _logger.warning('Remote %s not found', remote_name) diff --git a/runbot/models/runbot.py b/runbot/models/runbot.py index f750ef8fc..b470e1df9 100644 --- a/runbot/models/runbot.py +++ b/runbot/models/runbot.py @@ -53,10 +53,10 @@ def _scheduler(self, host): processed += 1 build._process_requested_actions() self._commit() + if host._process_messages(): + self._commit() host._process_logs() self._commit() - host._process_messages() - self._commit() for build in host._get_builds([('local_state', 'in', ['testing', 'running'])]) | self._get_builds_to_init(host): build = build.browse(build.id) # remove preftech ids, manage build one by one result = build._schedule() @@ -113,14 +113,14 @@ def _gc_running(self, host): ][:running_max] build_ids = host._get_builds([('local_state', '=', 'running'), ('id', 'not in', cannot_be_killed_ids)], order='job_start desc').ids for build in Build.browse(build_ids)[running_max:]: - build._kill() + build._kill(None) def _gc_testing(self, host): """garbage collect builds that could be killed""" # decide if we need room Build = self.env['runbot.build'] domain_host = host._get_build_domain() - testing_builds = Build.search(domain_host + [('local_state', 'in', ['testing', 'pending']), ('requested_action', '!=', 'deathrow')]) + testing_builds = Build.search(domain_host + [('local_state', 'in', ['testing', 'pending']), ('message_ids', '=', False)]) used_slots = len(testing_builds) available_slots = host.nb_worker - used_slots nb_pending = Build.search_count([('local_state', '=', 'pending'), ('host', '=', False)]) @@ -138,7 +138,7 @@ def _allocate_builds(self, host, nb_slots, domain=None): if domain: non_allocated_domain = Domain.AND([non_allocated_domain, domain]) 
query = self.env['runbot.build']._search(non_allocated_domain) - query.order = 'runbot_build.create_batch_id' + query.order = 'runbot_build.priority_level' self.env.execute_query(SQL("""UPDATE runbot_build SET diff --git a/runbot/models/semgrep_rule.py b/runbot/models/semgrep_rule.py new file mode 100644 index 000000000..c8dcffef0 --- /dev/null +++ b/runbot/models/semgrep_rule.py @@ -0,0 +1,69 @@ +from odoo import api, fields, models + + +class SemgrepRule(models.Model): + _name = 'runbot.semgrep_rule' + _description = 'Semgrep Rule' + _inherit = ['mail.thread'] + + name = fields.Char(string='Rule Name', required=True) + category_id = fields.Many2one('runbot.checker_category', string='Category', required=True, index=True) + language = fields.Selection([('python', 'Python'), ('javascript', 'JavaScript'), ('generic', 'Generic')], required=True) + max_version_number = fields.Char(string='Max Odoo Version', help='Maximum exclusive Odoo version this rule applies to') + min_version_number = fields.Char(string='Min Odoo Version', help='Minimum inclusive Odoo version this rule applies to') + message = fields.Char(string='Error message', help='Message to display when the rule is triggered', required=True) + rule = fields.Text("Rule", required=True) + rule_text = fields.Text("Rule Text", compute='_compute_rule_text') + severity = fields.Selection([('INFO', 'INFO'), ('WARNING', 'WARNING'), ('ERROR', 'ERROR')], string='Severity', required=True) + + @api.depends('name', 'message', 'severity', 'language', 'rule') + def _compute_rule_text(self): + def indent_by(s, by=2): + indent = " " * by + return ''.join( + l if l.isspace() else indent + l + for l in s.splitlines(keepends=True) + ) + + def count_indent(s): + for line in s.splitlines(keepends=False): + if line.isspace(): + continue + return len(line) - len(line.lstrip()) + return None + + self.rule_text = '' + for r in self: + rule = r.rule + if not rule: + continue + + indent = count_indent(rule) + if indent is None: + 
continue + + if indent < 2: + rule = indent_by(rule, 2 - indent) + indent = 2 + + i_indent = " " * (indent - 2) + s_indent = " " * indent + r.rule_text = f"""\ +{i_indent}- id: {r.name} +{s_indent}languages: [{r.language}] +{s_indent}severity: {r.severity} +{s_indent}message: {r.message!r} +{rule} + """ + + +class CheckerCategory(models.Model): + _name = 'runbot.checker_category' + _description = 'Checker Category' + + name = fields.Char(string='Category Name', required=True) + + _unique_name = models.Constraint( + 'unique (name)', + "avoid duplicate Category", + ) diff --git a/runbot/security/ir.model.access.csv b/runbot/security/ir.model.access.csv index 8e4c178bf..f0ca84182 100644 --- a/runbot/security/ir.model.access.csv +++ b/runbot/security/ir.model.access.csv @@ -22,7 +22,7 @@ access_runbot_build_config_step_order_manager,runbot_build_config_step_order_man access_runbot_config_step_upgrade_db_user,runbot_config_step_upgrade_db_user,runbot.model_runbot_config_step_upgrade_db,group_user,1,0,0,0 access_runbot_config_step_upgrade_db_manager,runbot_config_step_upgrade_db_manager,runbot.model_runbot_config_step_upgrade_db,runbot.group_build_config_user,1,1,1,1 -access_runbot_build_error_user,runbot_build_error_user,runbot.model_runbot_build_error,group_user,1,0,0,0 +access_runbot_build_error_user,runbot_build_error_user,runbot.model_runbot_build_error,group_user,1,1,0,0 access_runbot_build_error_admin,runbot_build_error_admin,runbot.model_runbot_build_error,runbot.group_runbot_admin,1,1,1,1 access_runbot_build_error_manager,runbot_build_error_manager,runbot.model_runbot_build_error,runbot.group_runbot_error_manager,1,1,1,1 @@ -68,6 +68,9 @@ access_runbot_error_regex_manager,runbot_error_regex_manager,runbot.model_runbot access_runbot_host_public,runbot_host_public,runbot.model_runbot_host,runbot.base_runbot_model_access,1,0,0,0 access_runbot_host_manager,runbot_host_manager,runbot.model_runbot_host,runbot.group_runbot_admin,1,1,1,1 
+access_runbot_host_message_public,runbot_host_message_public,runbot.model_runbot_host_message,runbot.base_runbot_model_access,1,0,0,0 +access_runbot_host_message_admin,runbot_host_message_admin,runbot.model_runbot_host_message,runbot.group_runbot_admin,1,1,1,1 + access_runbot_repo_hooktime,runbot_repo_hooktime,runbot.model_runbot_repo_hooktime,group_user,1,0,0,0 access_runbot_repo_referencetime,runbot_repo_referencetime,runbot.model_runbot_repo_reftime,group_user,1,0,0,0 access_runbot_build_stat_admin,runbot_build_stat_admin,runbot.model_runbot_build_stat,runbot.group_runbot_admin,1,1,1,1 @@ -106,7 +109,6 @@ access_runbot_bundle_public,access_runbot_bundle_public,runbot.model_runbot_bund access_runbot_bundle_runbot_bundle_manager,access_runbot_bundle_runbot_manager,runbot.model_runbot_bundle,runbot.group_runbot_bundle_manager,1,1,0,0 access_runbot_bundle_runbot_admin,access_runbot_bundle_runbot_admin,runbot.model_runbot_bundle,runbot.group_runbot_admin,1,1,1,1 - access_runbot_batch_public,access_runbot_batch_public,runbot.model_runbot_batch,runbot.base_runbot_model_access,1,0,0,0 access_runbot_batch_runbot_admin,access_runbot_batch_runbot_admin,runbot.model_runbot_batch,runbot.group_runbot_admin,1,1,1,1 @@ -178,3 +180,8 @@ access_runbot_build_error_merge_filters_user,access_runbot_build_error_merge_fil access_runbot_bundle_tag_admin,access_runbot_bundle_tag_admin,runbot.model_runbot_bundle_tag,runbot.group_runbot_admin,1,1,1,1 access_runbot_bundle_tag_user,access_runbot_bundle_tag_user,runbot.model_runbot_bundle_tag,group_user,1,0,0,0 + +runbot.access_runbot_semgrep_rule,access_runbot_semgrep_rule,runbot.model_runbot_semgrep_rule,base.group_user,1,0,0,0 +runbot.access_runbot_semgrep_rule_admin,access_runbot_semgrep_rule_admin,runbot.model_runbot_semgrep_rule,runbot.group_runbot_admin,1,1,1,1 +runbot.access_runbot_checker_category,access_runbot_checker_category,runbot.model_runbot_checker_category,base.group_user,1,0,0,0 
+runbot.access_runbot_checker_category_admin,access_runbot_checker_category_admin,runbot.model_runbot_checker_category,runbot.group_runbot_admin,1,1,1,1 diff --git a/runbot/static/src/css/runbot.css b/runbot/static/src/css/runbot.css index 46415f421..c49d822fa 100644 --- a/runbot/static/src/css/runbot.css +++ b/runbot/static/src/css/runbot.css @@ -1,5 +1,9 @@ :root { --gray: #6c757d; /* used for batch limitation */ + --btn-default-color: var(--bs-body-color); + --btn-default-bg: var(--bs-body-bg); + --btn-default-border: #ccc; + --active-project-color: #777; } /* @@ -13,7 +17,8 @@ --bs-info-bg-subtle: #d9edf7; --bs-info-rgb: 23, 162, 184; } -:root[data-bs-theme=red404] { + +:root[data-bs-theme=red404] { --bs-success-bg-subtle: #cdffb9; --bs-danger-bg-subtle: #e67ecf; --bs-warning-bg-subtle: #fae9b1; @@ -21,6 +26,11 @@ --bs-info-rgb: 23, 162, 184; } +:root[data-bs-theme=dark] { + --btn-default-border: #333; + --active-project-color: #CCC; +} + [data-bs-theme=legacy] .text-bg-info { color: #fff !important; /* black by default, changes from previous version, color forced to fit with --bs-info-rgb*/ } @@ -59,37 +69,28 @@ --bs-btn-disabled-border-color: #b90e6c; } - -:root { - --alternative:#ccc; - --btn-default-color: var(--bs-body-color); - --btn-default-border:#ccc; - --bs-default-rgb: var(--bs-body-color-rgb); - --active-project-color: #777; - -} - -:root[data-bs-theme=dark] { - --btn-default-border:#333; - --btn-default-color: var(--bs-body-color); - --active-project-color: #CCC; -} - .btn-default { --bs-btn-color: var(--btn-default-color); - --bs-btn-bg: var(--bs-body-bg); + --bs-btn-bg: var(--btn-default-bg); --bs-btn-border-color: var(--btn-default-border); --bs-btn-hover-color: var(--btn-default-color); - --bs-btn-hover-bg: var(--btn-default-border); - --bs-btn-hover-border-color: var(--btn-default-border); + --bs-btn-hover-bg: color-mix(in lab, var(--btn-default-bg), black 15%); + --bs-btn-hover-border-color: color-mix(in lab, var(--btn-default-border), black 
10%); --bs-btn-focus-shadow-rgb: 60, 153, 110; --bs-btn-active-color: var(--btn-default-color); - --bs-btn-active-bg: var(--bs-body-bg); - --bs-btn-active-border-color: var(--bs-body-bg); + --bs-btn-active-bg: color-mix(in lab, var(--btn-default-bg), black 20%); + --bs-btn-active-border-color: color-mix(in lab, var(--btn-default-border), black 15%); --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); --bs-btn-disabled-color: var(--btn-default-color); - --bs-btn-disabled-bg: var(--bs-body-bg); - --bs-btn-disabled-border-color: var(--btn-default-border);; + --bs-btn-disabled-bg: var(--btn-default-bg); + --bs-btn-disabled-border-color: var(--btn-default-border); +} + +[data-bs-theme=dark] .btn-default { + --bs-btn-hover-bg: color-mix(in lab, var(--btn-default-bg), white 15%); + --bs-btn-hover-border-color: color-mix(in lab, var(--btn-default-border), white 10%); + --bs-btn-active-bg: color-mix(in lab, var(--btn-default-bg), white 20%); + --bs-btn-active-border-color: color-mix(in lab, var(--btn-default-border), white 15%); } .btn-info { @@ -184,7 +185,7 @@ a.slots_infos:hover { } .separator { - border-top: 2px solid #666; + border-top: 0.2em solid #666; } body, .table { @@ -426,3 +427,31 @@ code { .hide-success tr.bg-success-subtle { display: none; } + +.pre { + display: block; + font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; + white-space: pre; + overflow: auto; + font-size: 0.875em; + margin:0; + padding:0; + margin-top: 0.2em; + border: none; + +} + +.subtle_link { + color: var(--bs-body-color); + text-decoration: underline; +} + +.table-condensed .log-server td, .table-condensed .log-details td { + padding-top: 0; + padding-bottom: 0; + border: none; +} + +.log-details td { + padding-left: 20px; +} diff --git a/runbot/static/src/js/fields/fields.js b/runbot/static/src/js/fields/fields.js index 9db40bfeb..9d36fb7ff 100644 --- a/runbot/static/src/js/fields/fields.js +++ 
b/runbot/static/src/js/fields/fields.js @@ -1,44 +1,41 @@ -/** @odoo-module **/ - import { TextField } from "@web/views/fields/text/text_field"; import { CharField } from "@web/views/fields/char/char_field"; import { Many2OneField } from "@web/views/fields/many2one/many2one_field"; -import { _lt } from "@web/core/l10n/translation"; import { formatDateTime } from "@web/core/l10n/dates"; import { registry } from "@web/core/registry"; import { useInputField } from "@web/views/fields/input_field_hook"; -import { useRef, xml, Component, markup} from "@odoo/owl"; +import { useRef, xml, Component, markup } from "@odoo/owl"; import { useAutoresize } from "@web/core/utils/autoresize"; import { getFormattedValue } from "@web/views/utils"; import { UrlField } from "@web/views/fields/url/url_field"; -import { X2ManyField , x2ManyField} from "@web/views/fields/x2many/x2many_field"; +import { X2ManyField , x2ManyField } from "@web/views/fields/x2many/x2many_field"; import { BooleanToggleField } from "@web/views/fields/boolean_toggle/boolean_toggle_field"; - // https://stackoverflow.com/questions/4810841/pretty-print-json-using-javascript function colorizeJson(json) { - json = json.replace(/&/g, '&').replace(//g, '>'); - return json.replace(/("(\\u[a-zA-Z0-9]{4}|\\[^u]|[^\\"])*"(\s*:)?|\b(true|false|null)\b|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?)/g, function (match) { - var cls = ''; + json = json.replace(/&/g, "&").replace(//g, ">"); + return json.replace(/("(\\u[a-zA-Z0-9]{4}|\\[^u]|[^\\"])*"(\s*:)?|\b(true|false|null)\b|-?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?)/g, function (match) { + let cls = ""; if (/^"/.test(match)) { if (/:$/.test(match)) { - cls = 'o_runbot_json_key'; + cls = "o_runbot_json_key"; } else { - cls = 'o_runbot_json_value'; + cls = "o_runbot_json_value"; } } - return '' + match + ''; + return `${match}`; }); } function stringify(obj) { - return JSON.stringify(obj, null, '\t'); - } + return JSON.stringify(obj, null, "\t"); +} + export class JsonField extends TextField { 
static template = xml` - + @@ -54,6 +51,7 @@ export class JsonField extends TextField { `; + setup() { this.divRef = useRef("div"); this.textareaRef = useRef("textarea"); @@ -84,11 +82,9 @@ registry.category("fields").add("runbotjsonb", { export class FrontendUrl extends Component { static template = xml` -
+
`; - static components = { Many2OneField }; - static props = { ...Many2OneField.props, linkField: { type: String, optional: true }, @@ -96,21 +92,21 @@ export class FrontendUrl extends Component { get displayValue() { if (this.props.record.data[this.props.name].isLuxonDateTime){ - return formatDateTime(this.props.record.data[this.props.name]) + return formatDateTime(this.props.record.data[this.props.name]); } else { - return this.props.record.data[this.props.name] ? getFormattedValue(this.props.record, this.props.name) : '' + return this.props.record.data[this.props.name] ? getFormattedValue(this.props.record, this.props.name) : ""; } } get route() { - return this._route(this.props.linkField || this.props.name) + return this._route(this.props.linkField || this.props.name); } _route(fieldName) { const model = this.props.record.fields[fieldName].relation || "runbot.unknown"; const { id } = this.props.record.data[fieldName]; - if (model.startsWith('runbot.')){ - return '/runbot/' + model.split('.')[1] + '/' + id; + if (model.startsWith("runbot.")){ + return `/runbot/${model.split(".")[1]}/${id}`; } else { return false; } @@ -127,22 +123,20 @@ registry.category("fields").add("frontend_url", { }, }); - export class FieldCharFrontendUrl extends Component { - static template = xml` -
-
-
-
`; - - static components = { CharField } +
+
+
+
+ `; + static components = { CharField }; get route() { const model = this.props.record.resModel; const id = this.props.record.resId; - if (model.startsWith('runbot.')) { - return '/runbot/' + model.split('.')[1] + '/' + id; + if (model.startsWith("runbot.")) { + return `/runbot/${model.split(".")[1]}/${id}`; } else { return false; } @@ -154,36 +148,33 @@ registry.category("fields").add("char_frontend_url", { component: FieldCharFrontendUrl, }); - // Pull Request URL Widget const pullRequestRegex = /\/([a-zA-Z-_]+\/[a-zA-Z-_]+)\/pull\/(\d+)/; class PullRequestUrlField extends UrlField { static template = xml` `; - static components = { UrlField } + static components = { UrlField }; + get fieldProps() { - const props = {...this.props }; - const parts = pullRequestRegex.exec(this.props.record.data[props.name]) + const props = { ...this.props }; + const parts = pullRequestRegex.exec(this.props.record.data[props.name]); if (parts) { props.text = `${parts[1]}#${parts[2]}`; } - return props + return props; } } PullRequestUrlField.supportedTypes = ["char"]; - registry.category("fields").add("pull_request_url", { supportedTypes: ["char"], component: PullRequestUrlField, }); - export class Matrixx2ManyField extends X2ManyField { - static template = 'runbot.Matrixx2ManyField'; - + static template = "runbot.Matrixx2ManyField"; static components = { BooleanToggleField }; getEntry(from, to) { @@ -200,11 +191,11 @@ export class Matrixx2ManyField extends X2ManyField { return [...new Set(versions)].sort().reverse(); } } + export const matrixx2ManyField = { ...x2ManyField, component: Matrixx2ManyField, useSubView: false, }; - registry.category("fields").add("version_matrix", matrixx2ManyField); diff --git a/runbot/static/src/js/fields/history_graph.js b/runbot/static/src/js/fields/history_graph.js index e285d963c..6f6a3370c 100644 --- a/runbot/static/src/js/fields/history_graph.js +++ b/runbot/static/src/js/fields/history_graph.js @@ -1,5 +1,3 @@ -/** @odoo-module **/ -import { _lt 
} from "@web/core/l10n/translation"; import { registry } from "@web/core/registry"; import { useRef, xml, Component, useEffect } from "@odoo/owl"; @@ -9,21 +7,20 @@ export class HistoryGraph extends Component { `; + setup() { this.canvasRef = useRef("canvas"); useEffect(() => this.renderErrorGraph()); } renderErrorGraph(activeCell) { - const data = this.props.record.data[this.props.name] || {}; const errorId = data.error_id; const projectId = data.project_id; const categoryId = data.category_id; const breaking_pr_close_dates = data.breaking_pr_close_dates; const fixing_pr_close_dates = data.fixing_pr_close_dates; - - const canvas = this.canvasRef.el + const canvas = this.canvasRef.el; const ctx = canvas.getContext("2d"); const maxValue = data.max_count; const canvasBorder = 1; @@ -37,26 +34,25 @@ export class HistoryGraph extends Component { canvas.width = canvasWidth; canvas.height = canvasHeight; - function getColor(value, opacity) { if (value >= 10) { return `rgba(255, 0, 0, ${opacity})`; // red } else if (value >= 5) { return `rgba(255, 165, 0, ${opacity})`; // orange } - return `rgba(0, 170, 0, ${opacity})` // green + return `rgba(0, 170, 0, ${opacity})`; // green } ctx.clearRect(0, 0, canvasWidth, canvasHeight); ctx.fillStyle = "#EEE"; ctx.fillRect(0, 0, canvasWidth, canvasHeight); ctx.strokeStyle = "#333"; - ctx.lineWidth = canvasBorder * 2; // * 2 to account for each side, not only inner width - ctx.strokeRect(0, 0, canvasWidth, canvasHeight,); + ctx.lineWidth = canvasBorder * 2; // * 2 to account for each side, not only inner width + ctx.strokeRect(0, 0, canvasWidth, canvasHeight); data.date_labels.forEach((dateLabel, idx) => { data.version_labels.forEach((versionLabel, idy) => { - let version_id = data.versions_ids[idy] + const version_id = data.versions_ids[idy]; let value = data.daily_version_freq[idx][idy] || 0; let cellColor = "white"; let cellOpacity = 0; @@ -70,13 +66,13 @@ export class HistoryGraph extends Component { ctx.fillStyle = cellColor; 
ctx.fillRect(posX, posY, cellWidth, cellHeight); + if (activeCell && activeCell.col === idx && activeCell.row === idy) { ctx.strokeStyle = "black"; ctx.lineWidth = 2; ctx.strokeRect(posX, posY, cellWidth, cellHeight); } - if (fixing_pr_close_dates[version_id] == dateLabel) { ctx.fillStyle = "black"; ctx.font = "12px Arial"; @@ -87,13 +83,11 @@ export class HistoryGraph extends Component { ctx.font = "12px Arial"; ctx.fillText("✗", posX + cellWidth / 2 - 4, posY + cellHeight / 2 + 4); } - - }); }); if (mouseActions) { canvas.onmousemove = (event) => { - let tooltip = canvas.parentElement.querySelector('.history-graph-tooltip'); + let tooltip = canvas.parentElement.querySelector(".history-graph-tooltip"); if (tooltip) { tooltip.remove(); } @@ -101,16 +95,16 @@ export class HistoryGraph extends Component { const { col, row, value, dateLabel, versionLabel } = this.getCellFromEvent(event); if ( col >= 0 && row >= 0) { - tooltip = document.createElement('div'); - tooltip.className = 'history-graph-tooltip'; - tooltip.style.position = 'absolute'; + tooltip = document.createElement("div"); + tooltip.className = "history-graph-tooltip"; + tooltip.style.position = "absolute"; tooltip.style.left = `${canvas.offsetLeft}px`; tooltip.style.top = `${canvas.offsetTop + canvas.height}px`; - tooltip.style.background = '#fff'; - tooltip.style.border = '1px solid #333'; - tooltip.style.padding = '4px 8px'; - tooltip.style.fontSize = '12px'; - tooltip.style.pointerEvents = 'none'; + tooltip.style.background = "#fff"; + tooltip.style.border = "1px solid #333"; + tooltip.style.padding = "4px 8px"; + tooltip.style.fontSize = "12px"; + tooltip.style.pointerEvents = "none"; tooltip.style.zIndex = 1000; tooltip.innerHTML = ` Date: ${dateLabel} @@ -125,23 +119,23 @@ export class HistoryGraph extends Component { }; canvas.onmouseleave = () => { - const tooltip = canvas.parentElement.querySelector('.history-graph-tooltip'); + const tooltip = 
canvas.parentElement.querySelector(".history-graph-tooltip"); if (tooltip) { tooltip.remove(); - this.renderErrorGraph() + this.renderErrorGraph(); } }; canvas.onclick = (event) => { - const { col, row, value, dateLabel, versionLabel } = this.getCellFromEvent(event); + const { col, row, dateLabel } = this.getCellFromEvent(event); if (col >= 0 && row >= 0) { const url = `/runbot/batches/${projectId}/${categoryId}/${dateLabel}/${errorId}`; - window.open(url, '_blank'); + window.open(url, "_blank"); } - } + }; } - } + getCellFromEvent(event) { const data = this.props.record.data[this.props.name] || {}; const rect = this.canvasRef.el.getBoundingClientRect(); @@ -149,13 +143,13 @@ export class HistoryGraph extends Component { const y = event.clientY - rect.top - 1; // Adjust for canvas border const col = Math.floor(x / this.props.cellSize); const row = Math.floor(y / this.props.cellSize); - if ( col >= 0 && col < data.date_labels.length && row >= 0 && row < data.version_labels.length) { + if ( col >= 0 && col < data.date_labels.length && row >= 0 && row < data.version_labels.length) { const value = data.daily_version_freq[col][row] || 0; const dateLabel = data.date_labels[col]; const versionLabel = data.version_labels[row]; return { col, row, value, dateLabel, versionLabel }; } else { - return { col: -1, row: -1, value: 0, dateLabel: '', versionLabel: '' }; + return { col: -1, row: -1, value: 0, dateLabel: "", versionLabel: "" }; } } } diff --git a/runbot/static/src/js/fields/tracking_value.js b/runbot/static/src/js/fields/tracking_value.js index 14058b1cd..b08647d1b 100644 --- a/runbot/static/src/js/fields/tracking_value.js +++ b/runbot/static/src/js/fields/tracking_value.js @@ -1,4 +1,3 @@ -/** @odoo-module **/ import { patch } from "@web/core/utils/patch"; import { Message } from "@mail/core/common/message"; @@ -7,22 +6,27 @@ patch(Message.prototype, { super.setup(...arguments); this.kept = false; }, + isMultiline(trackingValue) { const oldValue = 
trackingValue.oldValue; const newValue = trackingValue.newValue; - return ((oldValue && typeof oldValue=== 'string' && oldValue.includes('\n')) && (newValue && typeof oldValue=== 'string' && newValue.includes('\n'))) + return ((oldValue && typeof oldValue=== "string" && oldValue.includes("\n")) && (newValue && typeof oldValue=== "string" && newValue.includes("\n"))); }, + formatTracking(trackingFieldInfo, trackingValue) { - return super.formatTracking(trackingFieldInfo, trackingValue) + return super.formatTracking(trackingFieldInfo, trackingValue); }, + toggleKept() { this.kept = !this.kept; }, + copyToClipboard(trackingValue) { return function () { navigator.clipboard.writeText(trackingValue); }; }, + lines(trackingValue) { const oldValue = trackingValue.oldValue; const newValue = trackingValue.newValue; @@ -30,45 +34,47 @@ patch(Message.prototype, { const lines = this.prepareForRendering(diff); return lines; }, + makeDiff(text1, text2) { - var dmp = new diff_match_patch(); - var a = dmp.diff_linesToChars_(text1, text2); - var lineText1 = a.chars1; - var lineText2 = a.chars2; - var lineArray = a.lineArray; - var diffs = dmp.diff_main(lineText1, lineText2, false); + const dmp = new diff_match_patch(); + const a = dmp.diff_linesToChars_(text1, text2); + const lineText1 = a.chars1; + const lineText2 = a.chars2; + const lineArray = a.lineArray; + const diffs = dmp.diff_main(lineText1, lineText2, false); dmp.diff_charsToLines_(diffs, lineArray); dmp.diff_cleanupSemantic(diffs); return diffs; }, + prepareForRendering(diffs) { - var lines = []; - var pre_line_counter = 0 - var post_line_counter = 0 - for (var x = 0; x < diffs.length; x++) { - var diff_type = diffs[x][0]; - var data = diffs[x][1]; - var data_lines = data.split('\n'); - for (var line_index in data_lines) { - var line = data_lines[line_index]; - line = line.replace(/&/g, '&'); - line = line.replace(//g, '>'); + const lines = []; + let pre_line_counter = 0; + let post_line_counter = 0; + for (let x = 0; x < 
diffs.length; x++) { + const diff_type = diffs[x][0]; + const data = diffs[x][1]; + const data_lines = data.split("\n"); + for (const line_index in data_lines) { + let line = data_lines[line_index]; + line = line.replace(/&/g, "&"); + line = line.replace(//g, ">"); //text = text.replace(/\n/g, '
'); //text = text.replace(/ /g, '  '); if (diff_type == -1) { - lines.push({type:'removed', pre_line_counter: pre_line_counter, post_line_counter: '-', line: line}) - pre_line_counter += 1 + lines.push({ type: "removed", pre_line_counter: pre_line_counter, post_line_counter: "-", line: line }); + pre_line_counter += 1; } else if (diff_type == 0) { - lines.push({type:'kept', pre_line_counter: '', post_line_counter: post_line_counter, line: line}) - pre_line_counter += 1 - post_line_counter +=1 + lines.push({ type: "kept", pre_line_counter: "", post_line_counter: post_line_counter, line: line }); + pre_line_counter += 1; + post_line_counter += 1; } else if (diff_type == 1) { - lines.push({type:'added', pre_line_counter: '+', post_line_counter: post_line_counter, line: line}) - post_line_counter +=1 + lines.push({ type: "added", pre_line_counter: "+", post_line_counter: post_line_counter, line: line }); + post_line_counter += 1; } } } return lines; - }, + }, }); diff --git a/runbot/static/src/js/runbot.js b/runbot/static/src/js/runbot.js index 58a902976..76b2a129b 100644 --- a/runbot/static/src/js/runbot.js +++ b/runbot/static/src/js/runbot.js @@ -38,3 +38,19 @@ function copyToClipboard(text) { } navigator.clipboard.writeText(text); } + +document.addEventListener('DOMContentLoaded', function() { + const collapseElement = document.getElementById('customTriggers'); + if (collapseElement) { + collapseElement.addEventListener('show.bs.collapse', function () { + const url = new URL(window.location); + url.searchParams.set('expand_custom', '1'); + window.history.replaceState({}, '', url); + }); + collapseElement.addEventListener('hide.bs.collapse', function () { + const url = new URL(window.location); + url.searchParams.delete('expand_custom'); + window.history.replaceState({}, '', url); + }); + } +}); diff --git a/runbot/static/src/js/views/form_controller.js b/runbot/static/src/js/views/form_controller.js index f7a1ea72c..4fc4d83a2 100644 --- 
a/runbot/static/src/js/views/form_controller.js +++ b/runbot/static/src/js/views/form_controller.js @@ -1,19 +1,18 @@ -/** @odoo-module **/ - -import { FormController } from '@web/views/form/form_controller'; -import { patch } from '@web/core/utils/patch'; +import { FormController } from "@web/views/form/form_controller"; +import { patch } from "@web/core/utils/patch"; patch(FormController.prototype, { // Prevent saving on tab switching beforeVisibilityChange: () => {}, + // Prevent closing page with dirty fields async beforeUnload(ev) { if (await this.model.root.isDirty()) { ev.preventDefault(); - ev.returnValue = 'Unsaved changes'; + ev.returnValue = "Unsaved changes"; } else { super.beforeUnload(ev); } - } -}) + }, +}); diff --git a/runbot/templates/batch.xml b/runbot/templates/batch.xml index 54a25393e..0896e8a1b 100644 --- a/runbot/templates/batch.xml +++ b/runbot/templates/batch.xml @@ -13,11 +13,28 @@ &emsp; + t-attf-href="/odoo/batch/{{batch.id}}" class="btn btn-default btn-sm" target="_blank" title="View Batch in Backend"> + + Priority + + + High + + + + Low + + + Normal + + Set to high + + + Category diff --git a/runbot/templates/branch.xml b/runbot/templates/branch.xml index 78e0948e0..611f06e77 100644 --- a/runbot/templates/branch.xml +++ b/runbot/templates/branch.xml @@ -12,7 +12,7 @@ diff --git a/runbot/templates/build.xml b/runbot/templates/build.xml index d9428a0d3..9f57f309c 100644 --- a/runbot/templates/build.xml +++ b/runbot/templates/build.xml @@ -44,7 +44,7 @@
  • - +
  • @@ -68,7 +68,7 @@
    -
    +
    This build is referenced in bundles
    @@ -114,6 +114,9 @@
    + + Mode: Light Configure
    +
    Version: @@ -222,7 +225,7 @@ - + Build @@ -234,7 +237,7 @@ with config - ... + ... @@ -265,25 +268,27 @@
    - + - - - + - + - - + @@ -291,54 +296,22 @@ - - + diff --git a/runbot/templates/build_error.xml b/runbot/templates/build_error.xml index e64fc4706..87ad07804 100644 --- a/runbot/templates/build_error.xml +++ b/runbot/templates/build_error.xml @@ -36,7 +36,7 @@
    + t-attf-href="/odoo/error/{{build_error.id}}" target="_blank" title="View in Backend"> @@ -113,7 +113,7 @@

    Team + t-attf-href="/odoo/team/{{team.id}}" target="_blank" title="View in Backend">

    diff --git a/runbot/templates/build_stats.xml b/runbot/templates/build_stats.xml index 8f36b6850..7721b8490 100644 --- a/runbot/templates/build_stats.xml +++ b/runbot/templates/build_stats.xml @@ -4,7 +4,7 @@ @@ -314,7 +330,7 @@ default - + killed @@ -349,12 +365,12 @@ Database selector - + Rebuild - + Kill @@ -399,8 +415,7 @@ Find similar builds - + View in backend diff --git a/runbot/tests/common.py b/runbot/tests/common.py index 9f6ba86c8..81cc8e784 100644 --- a/runbot/tests/common.py +++ b/runbot/tests/common.py @@ -12,7 +12,7 @@ class RunbotCase(TransactionCase): - def mock_git_helper(self, repo, cmd): + def mock_git_helper(self, repo, cmd, input_data=None, raw=False): """Helper that returns a mock for repo._git()""" if cmd[:2] == ['show', '-s'] or cmd[:3] == ['show', '--pretty="%H -- %s"', '-s']: return 'commit message for %s' % cmd[-1] @@ -82,7 +82,9 @@ def setUp(self): self.repo_odoo: [ ('odoo/addons', 'base', '__manifest__.py'), ('odoo/addons', 'test_lint', '__manifest__.py'), + ('addons', 'account', '__manifest__.py'), ('addons', 'mail', '__manifest__.py'), + ('addons', 'test_mail', '__manifest__.py'), ('addons', 'web', '__manifest__.py'), ('addons', 'crm', '__manifest__.py'), ('addons', 'project', '__manifest__.py'), @@ -194,8 +196,8 @@ def setUp(self): self.docker_run_calls = [] self.diff = '' - def mock_git(repo, cmd, quiet=False): - return self.mock_git_helper(repo, cmd) + def mock_git(repo, cmd, quiet=False, input_data=None, raw=False): + return self.mock_git_helper(repo, cmd, input_data=input_data, raw=raw) self.start_patcher('git_patcher', 'odoo.addons.runbot.models.repo.Repo._git', new=mock_git) self.start_patcher('hostname_patcher', 'odoo.addons.runbot.common.socket.gethostname', 'host.runbot.com') @@ -232,10 +234,10 @@ def mock_git(repo, cmd, quiet=False): self.start_patcher('_write_file', 'odoo.addons.runbot.models.build.BuildResult._write_file', None) self.start_patcher('_parse_config', 
'odoo.addons.runbot.models.build.BuildResult._parse_config', {'--test-enable', '--test-tags', '--with-demo'}) - def get_available_modules(self_commit): + def _list_available_modules(self_commit): return self.addons_per_repo.get(self_commit.repo_id, []) - self.start_patcher('_get_available_modules', 'odoo.addons.runbot.models.commit.Commit._get_available_modules', new=get_available_modules) + self.start_patcher('_list_available_modules', 'odoo.addons.runbot.models.commit.Commit._list_available_modules', new=_list_available_modules) def no_commit(*_args, **_kwargs): _logger.info('Skipping commit') diff --git a/runbot/tests/test_build.py b/runbot/tests/test_build.py index e55aab30a..2cf84f65e 100644 --- a/runbot/tests/test_build.py +++ b/runbot/tests/test_build.py @@ -705,6 +705,32 @@ def test_build_cmd_faketime(self): second_child_cmd = second_child._cmd(py_version=3) self.assertIn('faketime "2024-02-04 04:42 UTC" python3 odoo/server.py', str(second_child_cmd)) + def test_format_message(self): + def get_log(message): + return self.env['ir.logging'].create({ + 'build_id': build.id, + 'type': 'server', + 'message': message, + 'level': 'INFO', + 'name': 'odoo.addons.web.tests.test_web', + 'path': '', + 'func': '', + 'line': '', + }) + build = self.Build.create({ + 'params_id': self.server_params.id, + 'description': 'A nice **description** with a link to odoo.com', + }) + self.assertEqual( + build._format_message(get_log('File "/data/build/odoo/addons/web/tests/test_web.py", line 42, in test_web')), + 'File "/data/build/odoo/addons/web/tests/test_web.py", line 42, in test_web' + ) + + self.assertEqual( + build._format_message(get_log('File "/data/build/odoo/addons/web/tests/test_web.py", in test_web')), + 'File "/data/build/odoo/addons/web/tests/test_web.py", in test_web' + ) + class TestGc(RunbotCaseMinimalSetup): def test_repo_gc_testing(self): @@ -744,7 +770,6 @@ def test_repo_gc_testing(self): bundle_b = self.env['runbot.bundle'].search([('name', '=', 
branch_b_name)]) bundle_b.last_batch._process() - build_b = bundle_b.last_batch.slot_ids[0].build_id # the two builds are starting tests on two different hosts @@ -754,8 +779,8 @@ def test_repo_gc_testing(self): # no room needed, verify that nobody got killed self.Runbot._gc_testing(host) - self.assertFalse(build_a.requested_action) - self.assertFalse(build_b.requested_action) + self.assertFalse(build_a.to_kill) + self.assertFalse(build_b.to_kill) # a new commit is pushed on branch_a self.push_commit(self.remote_odoo_dev, branch_a_name, 'new subject', sha='d0cad0ca') @@ -775,10 +800,10 @@ def test_repo_gc_testing(self): # no room needed, verify that nobody got killed self.Runbot._gc_testing(host) - self.assertFalse(build_a.requested_action) - self.assertFalse(build_b.requested_action) - self.assertFalse(build_a_last.requested_action) - self.assertFalse(children_b.requested_action) + self.assertFalse(build_a.to_kill) + self.assertFalse(build_b.to_kill) + self.assertFalse(build_a_last.to_kill) + self.assertFalse(children_b.to_kill) # now children_b starts on runbot_xxx children_b.write({'local_state': 'testing', 'host': host.name}) @@ -789,10 +814,10 @@ def test_repo_gc_testing(self): self.Runbot._gc_testing(host) # the killable build should have been marked to be killed - self.assertEqual(build_a.requested_action, 'deathrow') - self.assertFalse(build_b.requested_action) - self.assertFalse(build_a_last.requested_action) - self.assertFalse(children_b.requested_action) + self.assertTrue(build_a.to_kill) + self.assertFalse(build_b.to_kill) + self.assertFalse(build_a_last.to_kill) + self.assertFalse(children_b.to_kill) class TestGithubStatus(RunbotCase): diff --git a/runbot/tests/test_build_config_step.py b/runbot/tests/test_build_config_step.py index 9035c6af0..2e52171a3 100644 --- a/runbot/tests/test_build_config_step.py +++ b/runbot/tests/test_build_config_step.py @@ -117,33 +117,48 @@ def test_get_module(self): self.assertEqual('module_addons', 
self.repo_enterprise._get_module('enterprise/module_addons/some/file.py')) self.assertEqual(None, self.repo_odoo._get_module('odoo/core/module1/some/file.py')) self.assertEqual(None, self.repo_odoo._get_module('odoo/core/module/some/file.py')) + def test_codeowner_regex_multiple(self): - self.diff = 'file.js\nfile.py\nfile.xml' + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml' self.config_step._run_codeowner(self.parent_build) messages = self.parent_build.log_ids.mapped('message') self.assertEqual(messages[1], 'Checking 2 codeowner regexed on 3 files') - self.assertEqual(markdown_unescape(messages[2]), 'Adding team_js to reviewers for file [odoo/file.js](https://False/blob/dfdfcfcf/file.js)') - self.assertEqual(markdown_unescape(messages[3]), 'Adding team_py to reviewers for file [odoo/file.py](https://False/blob/dfdfcfcf/file.py)') - self.assertEqual(markdown_unescape(messages[4]), 'Adding codeowner-team to reviewers for file [odoo/file.xml](https://False/blob/dfdfcfcf/file.xml)') + self.assertEqual(markdown_unescape(messages[2]), 'Adding team_js to reviewers for file [odoo/addons/module/file.js](https://False/blob/dfdfcfcf/addons/module/file.js)') + self.assertEqual(markdown_unescape(messages[3]), 'Adding team_py to reviewers for file [odoo/addons/module/file.py](https://False/blob/dfdfcfcf/addons/module/file.py)') + self.assertEqual(markdown_unescape(messages[4]), 'Adding codeowner-team to reviewers for file [odoo/addons/module/file.xml](https://False/blob/dfdfcfcf/addons/module/file.xml)') self.assertEqual(markdown_unescape(messages[5]), 'Requesting review for pull request [base/odoo:1234](https://example.com/base/odoo/pull/1234): codeowner-team, team_js, team_py') self.assertEqual(self.dev_pr.reviewers, 'codeowner-team,team_js,team_py') + def test_codeowner_root_file(self): + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml\ntest_file' + self.config_step._run_codeowner(self.parent_build) + 
messages = self.parent_build.log_ids.mapped('message') + self.assertEqual(messages[1], 'Checking 2 codeowner regexed on 4 files') + self.assertEqual(markdown_unescape(messages[2]), 'File odoo/test_file is at the root level and it looks like it could be a mistake, remove it or ensure that a codeowner rule is added for this file') + self.assertEqual(markdown_unescape(messages[3]), 'Adding team_js to reviewers for file [odoo/addons/module/file.js](https://False/blob/dfdfcfcf/addons/module/file.js)') + self.assertEqual(markdown_unescape(messages[4]), 'Adding team_py to reviewers for file [odoo/addons/module/file.py](https://False/blob/dfdfcfcf/addons/module/file.py)') + self.assertEqual(markdown_unescape(messages[5]), 'Adding codeowner-team to reviewers for file [odoo/addons/module/file.xml](https://False/blob/dfdfcfcf/addons/module/file.xml)') + self.assertEqual(markdown_unescape(messages[6]), 'No reviewer for file [odoo/test_file](https://False/blob/dfdfcfcf/test_file)') + self.assertEqual(markdown_unescape(messages[7]), 'Requesting review for pull request [base/odoo:1234](https://example.com/base/odoo/pull/1234): codeowner-team, team_js, team_py') + self.assertEqual(self.dev_pr.reviewers, 'codeowner-team,team_js,team_py') + self.assertEqual(self.parent_build.local_result, 'ko') + def test_codeowner_regex_some_already_on(self): - self.diff = 'file.js\nfile.py\nfile.xml' + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml' self.dev_pr.reviewers = 'codeowner-team,team_js' self.config_step._run_codeowner(self.parent_build) messages = self.parent_build.log_ids.mapped('message') self.assertEqual(markdown_unescape(messages[5]), 'Requesting review for pull request [base/odoo:1234](https://example.com/base/odoo/pull/1234): team_py') def test_codeowner_regex_all_already_on(self): - self.diff = 'file.js\nfile.py\nfile.xml' + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml' self.dev_pr.reviewers = 
'codeowner-team,team_js,team_py' self.config_step._run_codeowner(self.parent_build) messages = self.parent_build.log_ids.mapped('message') self.assertEqual(messages[5], 'All reviewers are already on pull request [base/odoo:1234](https://example.com/base/odoo/pull/1234)') def test_codeowner_author_in_team(self): - self.diff = 'file.js\nfile.py\nfile.xml' + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml' self.team1.github_team = 'team_py' self.team1.github_logins = 'some_member,another_member' self.team1.skip_team_pr = True @@ -424,25 +439,58 @@ def setUp(self): }).id, 'local_result': 'ok', }) + self.module_dependencies = { + "test_mail": ["mail"], + "mail": ["web"], + "account": ["web"], + "crm": ["web"], + "project": ["web"], + "test_l10n": ["l10n_be", "l10n_in"], + "l10n_be": ["account"], + "l10n_in": ["account"], + "web_enterprise": ["web"], + } + + def mock_git_helper(self, repo, cmd, input_data=None, raw=False): + def make_catfile_output(commit, content): + content_bytes = content.encode('utf-8') + header = f"{commit} blob {len(content_bytes)}\n".encode() + result = header + content_bytes + b"\n" + return result + + if cmd == ['cat-file', '--batch']: + if repo == self.repo_odoo and input_data == 'dfdfcfcf0000ffffffffffffffffffffffffffff:odoo/tests/.runbot/parallel_testing.json\n': + return make_catfile_output('dfdfcfcf0000ffffffffffffffffffffffffffff', self.config_file) + if repo == self.repo_odoo and input_data == 'dfdfcfcf0000ffffffffffffffffffffffffffff:odoo/tests/.runbot/l10n_standalone_testing.json\n': + return make_catfile_output('dfdfcfcf0000ffffffffffffffffffffffffffff', self.l10n_standalone_testing_file) + + if "__manifest__.py" in input_data: + modules_info = [ + (line, line.split(':')[-1].split('/')[-2]) + for line in input_data.splitlines() + if line.endswith('__manifest__.py') + ] + result = b"" + for original_query, module in modules_info: + content = '''{'name': '%s', 'depends': %s}''' % (module, 
self.module_dependencies.get(module, [])) + result += make_catfile_output(original_query.split(':')[0], content) + return result - def mock_git_helper(self, repo, cmd): - if repo == self.repo_odoo and cmd == ['show', 'dfdfcfcf0000ffffffffffffffffffffffffffff:odoo/tests/.runbot/parallel_testing.json']: - return self.config_file - elif repo == self.repo_odoo and cmd == ['show', 'dfdfcfcf0000ffffffffffffffffffffffffffff:odoo/tests/.runbot/l10n_standalone_testing.json']: - return self.l10n_standalone_testing_file - elif 'show' in cmd: + if cmd == ['cat-file', '--batch']: raise subprocess.CalledProcessError(cmd, 128) - return super().mock_git_helper(repo, cmd) + elif 'diff' in cmd: + return 'odoo/addons/crm/some/file.py\nodoo/addons/project/some/file.py' + return super().mock_git_helper(repo, cmd, input_data, raw) def test_module_filters(self): - self.assertEqual(self.build._get_modules_to_test('-> !mail'), ['base', 'crm', 'documents']) - self.assertEqual(self.build._get_modules_to_test('mail -> !web'), ['mail', 'project', 'test_l10n', 'test_lint']) + self.assertEqual(self.build._get_modules_to_test('-> !mail'), ['account', 'base', 'crm', 'documents']) + self.assertEqual(self.build._get_modules_to_test('mail -> !web'), ['mail', 'project', 'test_l10n', 'test_lint', 'test_mail']) self.assertEqual(self.build._get_modules_to_test('web -> web'), ['web']) self.assertEqual(self.build._get_modules_to_test('!web ->'), ['web_enterprise']) - self.assertEqual(self.build._get_modules_to_test('-> !mail, -crm'), ['base', 'documents']) - self.assertEqual(self.build._get_modules_to_test('mail -> !web, !project'), ['mail', 'test_l10n', 'test_lint']) - self.assertEqual(self.build._get_modules_to_test('-*,odoo/*'), ['base', 'crm', 'hw_drivers', 'mail', 'project', 'test_l10n', 'test_lint', 'web']) - self.assertEqual(self.build._get_modules_to_test('-*,odoo/test_*'), ['test_l10n', 'test_lint']) + self.assertEqual(self.build._get_modules_to_test('-> !mail, -crm'), ['account', 'base', 
'documents']) + self.assertEqual(self.build._get_modules_to_test('mail -> !web, !project'), ['mail', 'test_l10n', 'test_lint', 'test_mail']) + self.assertEqual(self.build._get_modules_to_test('-*,odoo/*'), ['account', 'base', 'crm', 'hw_drivers', 'mail', 'project', 'test_l10n', 'test_lint', 'test_mail', 'web']) + self.assertEqual(self.build._get_modules_to_test('-*,odoo/test_*'), ['test_l10n', 'test_lint', 'test_mail']) self.assertEqual(self.build._get_modules_to_test('-*,enterprise/*'), ['documents', 'l10n_be', 'l10n_in', 'web_enterprise']) self.assertEqual(self.build._get_modules_to_test('-*,web*'), ['web', 'web_enterprise']) self.assertEqual(self.build._get_modules_to_test('-*,web*,-enterprise/web*'), ['web']) @@ -452,6 +500,35 @@ def test_config_extension(self): self.assertEqual(json.loads(self.config.default_dynamic_config)['vars']['module_filter'], '*,-hw_*') self.assertEqual(self.build.dynamic_config['vars']['module_filter'], '*,-hw_*,-l10n_*') + def test_parse_dynamic_entry(self): + Step = self.env['runbot.build.config.step'] + + def check_parse(entry, expected): + res = Step._parse_dynamic_entry(entry, self.build, {'key': 'value', 'test_method': '.test_method'}) + self.assertEqual(res, expected) + check_parse('{{-test_*|filter_all_modules}}', 'account,base,crm,documents,hw_drivers,l10n_be,l10n_in,mail,project,web,web_enterprise') + check_parse('{{-*,web*|filter_all_modules}}', 'web,web_enterprise') + check_parse('{{-*,web*|filter_all_modules|make_module_test_tags}}', '/web,/web_enterprise') + check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|prepend("some_tag")}}', 'some_tag/web,some_tag/web_enterprise') + check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|prepend(key)}}', 'value/web,value/web_enterprise') + check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|append(".test_method")}}', '/web.test_method,/web_enterprise.test_method') + 
check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|append(test_method)}}', '/web.test_method,/web_enterprise.test_method') + + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm'}) + + check_parse('{{*|filter_all_modules|modified_modules}}', 'crm') + + def test_modules_dependencies(self): + self.assertEqual(self.build._get_modules_dependencies(['test_mail'], 1), ['mail', 'test_mail']) + self.assertEqual(self.build._get_modules_dependencies(['test_mail']), ['base', 'mail', 'test_mail', 'web']) + self.assertEqual(self.build._get_modules_dependencies(['test_l10n']), ['account', 'base', 'l10n_be', 'l10n_in', 'test_l10n', 'web']) + self.assertEqual(self.build._get_modules_dependencies(['test_mail', 'test_l10n']), ['account', 'base', 'l10n_be', 'l10n_in', 'mail', 'test_l10n', 'test_mail', 'web']) + self.assertEqual(self.build._get_modules_dependencies(['test_mail', 'test_l10n'], 1), ['l10n_be', 'l10n_in', 'mail', 'test_l10n', 'test_mail']) + + self.assertEqual(self.build._get_dependant_modules(['account'], 1), ['account', 'l10n_be', 'l10n_in']) + self.assertEqual(self.build._get_dependant_modules(['account']), ['account', 'l10n_be', 'l10n_in', 'test_l10n']) + self.assertEqual(self.build._get_dependant_modules(['base']), ['account', 'base', 'crm', 'documents', 'hw_drivers', 'l10n_be', 'l10n_in', 'mail', 'project', 'test_l10n', 'test_lint', 'test_mail', 'web', 'web_enterprise']) + def check_server_cmd(self, cmd, install, test_enable, test_tags, db=None): self.assertIn('odoo/server.py', cmd) if install: @@ -522,7 +599,7 @@ def test_dynamic_step_parallel_testing(self): cmd = self.docker_run_calls[0][0] odoo_cmd = cmd.cmd self.check_server_cmd(odoo_cmd, - install=['base', 'crm', 'documents', 'mail', 'project', 'test_l10n', 'test_lint', 'web', 'web_enterprise'], + install=['account', 'base', 'crm', 'documents', 'mail', 'project', 'test_l10n', 'test_lint', 'test_mail', 'web', 'web_enterprise'], test_enable=False, test_tags=None, 
db=f'{build.dest}-all', @@ -557,7 +634,7 @@ def test_dynamic_step_parallel_testing(self): cmd = self.docker_run_calls[0][0] odoo_cmd = cmd.cmd self.check_server_cmd(odoo_cmd, - install=['base', 'crm', 'documents', 'mail', 'project', 'test_l10n', 'test_lint', 'web', 'web_enterprise'], + install=['account', 'base', 'crm', 'documents', 'mail', 'project', 'test_l10n', 'test_lint', 'test_mail', 'web', 'web_enterprise'], test_enable=True, test_tags='-post_install,-/test_lint', ) @@ -574,8 +651,8 @@ def test_dynamic_step_parallel_testing(self): ) for post_install, expected_tags in [ - (post_install_1, '-at_install,/base,/crm,/documents,/hw_drivers,/l10n_be,/l10n_in'), # we need the blacklisted modules here - (post_install_2, '-at_install,/mail,/project,/test_l10n,/test_lint'), + (post_install_1, '-at_install,/account,/base,/crm,/documents,/hw_drivers,/l10n_be,/l10n_in'), # we need the blacklisted modules here + (post_install_2, '-at_install,/mail,/project,/test_l10n,/test_lint,/test_mail'), (post_install_3, '-at_install,/web'), (post_install_4, '-at_install,/web_enterprise'), ]: @@ -679,7 +756,7 @@ def test_dynamic_step_l10n_standalone(self): (post_install_1, '-external,-external_l10n,post_install_l10n/l10n_hr_payroll_be,post_install_l10n/l10n_hr_payroll_in'), # we need the blacklisted modules here (post_install_2, '-external,-external_l10n,post_install_l10n/l10n_edi_be,post_install_l10n/l10n_edi_in'), (post_install_3, '-external,-external_l10n,post_install_l10n/l10n_reports_be,post_install_l10n/l10n_reports_in'), - (post_install_4, Like('-external,-external_l10n,post_install_l10n/base,post_install_l10n/crm,...')), + (post_install_4, Like('-external,-external_l10n,post_install_l10n/account,post_install_l10n/base,post_install_l10n/crm,...')), ]: with self.subTest(post_install=expected_tags): # 4.1 post install restore @@ -723,6 +800,7 @@ def test_foreach_module(self): self.config.step_ids[0]._run_dynamic(self.build) 
self.assertEqual(self.build.children_ids.mapped('description'), [ + 'Post install tests for **account**', 'Post install tests for **base**', 'Post install tests for **crm**', 'Post install tests for **documents**', @@ -752,31 +830,154 @@ def test_foreach_modified_module(self): }] }''' - self.patch(type(self.build), '_modified_modules', lambda cl: {'crm'}) + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm'}) self.config.default_dynamic_config = dynamic_config self.config.step_ids[0]._run_dynamic(self.build) self.assertEqual(self.build.children_ids.mapped('description'), - [ - 'Post install tests for **crm**', + [ + 'Post install tests for **crm**', ]) - def test_parse_dynamic_entry(self): - Step = self.env['runbot.build.config.step'] + def test_modified_existing_module(self): + dynamic_config = '''{ + "vars": { + "modified_modules": "{{*|filter_all_modules|modified_modules}}", + "test_modules": "{{modified_modules|prepend('test_')|select_existing_modules}}", + "modules_to_test": "{{modified_modules|union(test_modules)}}" + }, + "name": "Foreach module testing", + "steps": [{ + "name": "Create module builds", + "job_type": "create_build", + "children": [{ + "name": "Test single module", + "description": "Post install tests for **{{modules_to_test}}**", + "steps": [{ + "name": "Start single module test", + "job_type": "odoo", + "install_modules": "{{modules_to_test}}", + "test_tags": "{{modules_to_test|make_module_test_tags}}" + }] + }] + }] + }''' - def check_parse(entry, expected): - res = Step._parse_dynamic_entry(entry, self.build, {'key': 'value', 'test_method': '.test_method'}) - self.assertEqual(res, expected) - check_parse('{{-test_*|filter_all_modules}}', 'base,crm,documents,hw_drivers,l10n_be,l10n_in,mail,project,web,web_enterprise') - check_parse('{{-*,web*|filter_all_modules}}', 'web,web_enterprise') - check_parse('{{-*,web*|filter_all_modules|make_module_test_tags}}', '/web,/web_enterprise') - 
check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|prepend("some_tag")}}', 'some_tag/web,some_tag/web_enterprise') - check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|prepend(key)}}', 'value/web,value/web_enterprise') - check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|append(".test_method")}}', '/web.test_method,/web_enterprise.test_method') - check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|append(test_method)}}', '/web.test_method,/web_enterprise.test_method') + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm', 'mail'}) + self.config.default_dynamic_config = dynamic_config + self.config.step_ids[0]._run_dynamic(self.build) + self.assertEqual(self.build.children_ids.mapped('description'), + [ + 'Post install tests for **crm,mail,test_mail**', + ]) + child_dynamic_vars = self.build.children_ids.params_id.config_data['dynamic_vars'] + self.assertEqual(child_dynamic_vars, { + 'modified_modules': 'crm,mail', + 'test_modules': 'test_mail', + 'modules_to_test': 'crm,mail,test_mail', + }) - self.patch(type(self.build), '_modified_modules', lambda cl: {'crm'}) + def test_modified_existing_module_parallel(self): + dynamic_config = '''{ + "vars": { + "modified_modules": "{{*|filter_all_modules|modified_modules}}", + "modules_to_test": "{{modified_modules|prepend('test_')|select_existing_modules|union(modified_modules)}}" + }, + "name": "Parallel split modified", + "steps": [{ + "name": "Create module builds", + "job_type": "create_build", + "for_each_vars": [{ + "test_module_filter": "{{modules_to_test}},->!mail" + }, + { + "test_module_filter": "{{modules_to_test}},mail->!website" + }, + { + "test_module_filter": "{{modules_to_test}},website->" + } + ], + "if": "{{child_modules_to_test}}", + "children": [{ + "vars": { + "child_modules_to_test": "{{test_module_filter|select_existing_modules}}" + }, + "name": "Test single module", + "description": "Post install tests for 
**{{child_modules_to_test}}**", + "steps": [{ + "name": "Start single module test", + "job_type": "odoo", + "install_modules": "{{child_modules_to_test}}", + "test_tags": "{{child_modules_to_test|make_module_test_tags}}" + }] + }] + }] + }''' - check_parse('{{*|filter_all_modules|modified_modules}}', 'crm') + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm', 'mail'}) + self.config.default_dynamic_config = dynamic_config + self.config.step_ids[0]._run_dynamic(self.build) + self.assertEqual(self.build.children_ids.mapped('description'), + [ + 'Post install tests for **crm**', + 'Post install tests for **mail,test_mail**', + ]) + + self.assertEqual(self.build.children_ids[0].params_id.config_data['dynamic_vars']['child_modules_to_test'], 'crm') + self.assertEqual(self.build.children_ids[1].params_id.config_data['dynamic_vars']['child_modules_to_test'], 'mail,test_mail') + + def test_modified_existing_module_parallel_relations(self): + dynamic_config = '''{ + "vars": [ + {"module_filter": "*,-hw_*,-*l10n_*,-theme_*,-account_bacs,-account_reports_cash_basis,-auth_ldap,-base_gengo,-document_ftp,-iot_drivers,-note_pad,-odoo_referral,-odoo_referral_portal,-pad,-pad_project,-pos_blackbox_be,-pos_cache,-pos_six,-social_demo,-website_gengo,-website_instantclick,test_l10n_be_hr_payroll_account,test_l10n_us_hr_payroll_account"}, + {"_modified_modules": "{{module_filter|filter_all_modules|modified_modules}}"}, + {"_modules_dependencies": "{{_modified_modules|get_dependencies(1)}}"}, + {"_dependant_modules": "{{_modified_modules|get_dependant(1)}}"}, + {"_test_modules": "{{_modified_modules|prepend('test_')|select_existing_modules}}"}, + {"_modules_to_test": "{{_modified_modules|union(_test_modules)|union(_dependant_modules)|union(_modules_dependencies)}}"} + ], + "name": "Parallel split modified", + "steps": [{ + "name": "Create module builds", + "job_type": "create_build", + "for_each_vars": [{ + "_test_module_filter": 
"{{_modules_to_test}},->!mail" + }, + { + "_test_module_filter": "{{_modules_to_test}},mail->!website" + }, + { + "_test_module_filter": "{{_modules_to_test}},website->" + } + ], + "if": "{{child_modules_to_test}}", + "log": "Modified modules: {{_modified_modules}}\\nDepenencies: {{_modules_dependencies}}\\nDependant: {{_dependant_modules}}\\nTest modules: {{_test_modules}}", + "children": [{ + "vars": { + "child_modules_to_test": "{{_test_module_filter|select_existing_modules}}" + }, + "name": "Test single module", + "description": "Post install tests for **{{child_modules_to_test}}**", + "steps": [{ + "name": "Start single module test", + "job_type": "odoo", + "install_modules": "{{child_modules_to_test}}", + "test_tags": "{{child_modules_to_test|make_module_test_tags}}" + }] + }] + }] + }''' + + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm', 'mail'}) + self.config.default_dynamic_config = dynamic_config + self.config.step_ids[0]._run_dynamic(self.build) + self.assertEqual(self.build.children_ids.mapped('description'), + [ + 'Post install tests for **crm**', + 'Post install tests for **mail,test_mail,web**', + ]) + self.assertEqual(self.build.children_ids[0].params_id.config_data['dynamic_vars']['child_modules_to_test'], 'crm') + self.assertEqual(self.build.children_ids[1].params_id.config_data['dynamic_vars']['child_modules_to_test'], 'mail,test_mail,web') + self.assertEqual(list(self.build.children_ids[0].params_id.config_data['dynamic_vars'].keys()), ['module_filter', 'child_modules_to_test']) class TestBuildConfigStep(TestBuildConfigStepCommon): diff --git a/runbot/tests/test_build_error.py b/runbot/tests/test_build_error.py index 7368d088a..0217e3182 100644 --- a/runbot/tests/test_build_error.py +++ b/runbot/tests/test_build_error.py @@ -2,7 +2,7 @@ from unittest.mock import patch from odoo import fields -from odoo.exceptions import ValidationError +from odoo.exceptions import AccessError, ValidationError from odoo.tests 
import new_test_user from .common import RunbotCase @@ -204,6 +204,59 @@ def test_merge_test_tags(self): self.assertEqual(error_b.test_tags, False) self.assertEqual(error_b.active, False) + def test_merge_pr_ids(self): + error_a = self.BuildError.create({ + 'content': 'foo', + }) + error_b = self.BuildError.create({ + 'content': 'bar', + 'breaking_pr_id': self.dev_pr.id, + 'fixing_pr_id': self.dev_pr.id, + }) + + error_a._merge(error_b) + + self.assertEqual(error_a.fixing_pr_id, self.dev_pr) + self.assertEqual(error_a.breaking_pr_id, self.dev_pr) + + def test_duplicate_breaking_pr(self): + pr_branch = self.Branch.create({ + 'name': '242', + 'is_pr': True, + 'alive': True, + 'remote_id': self.remote_odoo.id, + 'target_branch_name': self.branch_odoo.name, + 'pull_head_name': f'{self.remote_odoo.owner}:{self.dev_branch.name}', + }) + + error_a = self.BuildError.create({'content': 'error A', 'breaking_pr_id': pr_branch.id}) + error_b = self.BuildError.create({'content': 'error B', 'breaking_pr_id': pr_branch.id}) + error_c = self.BuildError.create({'content': 'error C', 'breaking_pr_id': pr_branch.id}) + error_d = self.BuildError.create({'content': 'error D'}) + + # Test compute count (excludes self) + self.assertEqual(error_a.duplicate_breaking_pr_count, 2) + self.assertEqual(error_b.duplicate_breaking_pr_count, 2) + self.assertEqual(error_c.duplicate_breaking_pr_count, 2) + self.assertEqual(error_d.duplicate_breaking_pr_count, 0) + + # Test action returns all errors including self + action = error_a.action_view_duplicate_breaking_pr() + self.assertEqual(action['type'], 'ir.actions.act_window') + self.assertEqual(action['res_model'], 'runbot.build.error') + errors_in_action = self.BuildError.search(action['domain']) + self.assertIn(error_a, errors_in_action) + self.assertEqual(len(errors_in_action), 3) + + # Test count update on deactivation + error_b.active = False + error_a.invalidate_recordset(['duplicate_breaking_pr_count']) + 
self.assertEqual(error_a.duplicate_breaking_pr_count, 1) + + # Test count update on breaking PR removal + error_a.breaking_pr_id = False + self.assertEqual(error_a.duplicate_breaking_pr_count, 0) + def test_relink_contents(self): build_a = self.create_test_build({'local_result': 'ko', 'local_state': 'done'}) error_content_a = self.BuildErrorContent.create({'content': 'foo bar'}) @@ -251,7 +304,7 @@ def test_relink_simple(self): build_b = self.create_test_build({'local_result': 'ko', 'local_state': 'done'}) error_content_b = self.BuildErrorContent.create({'content': 'foo bar'}) error_b = error_content_b.error_id - error_b.test_tags = 'footag' + error_b.test_tags = 'footag[@test, comma]\nfootag2[@test, comma],footag3[@test, comma]' self.BuildErrorLink.create({'build_id': build_b.id, 'error_content_id': error_content_b.id}) self.assertEqual(self.BuildErrorContent.search([('fingerprint', '=', error_content_a.fingerprint)]), error_content_a | error_content_b) @@ -260,7 +313,7 @@ def test_relink_simple(self): self.assertFalse(error_b.error_content_ids) self.assertTrue(error_a.active, 'The merged error without test tags should have been deactivated') - self.assertEqual(error_a.test_tags, 'footag', 'Tags should have been transfered from b to a') + self.assertEqual(error_a.test_tags, 'footag[@test, comma]\nfootag2[@test, comma],footag3[@test, comma]', 'Tags should have been transfered from b to a') self.assertFalse(error_b.active, 'The merged error with test tags should remain active') self.assertIn(build_a, error_content_a.build_ids) self.assertIn(build_b, error_content_a.build_ids) @@ -269,9 +322,9 @@ def test_relink_simple(self): tagged_error_content = self.BuildErrorContent.create({'content': 'foo bar'}) tagged_error = tagged_error_content.error_id - tagged_error.test_tags = 'bartag' + tagged_error.test_tags = 'bartag[@test, comma]\nbartag2[@test, comma],bartag3[@test, comma]' (error_content_a | tagged_error_content)._relink() - self.assertEqual(error_a.test_tags, 
'footag,bartag') + self.assertEqual(error_a.test_tags, 'footag[@test, comma]\nfootag2[@test, comma]\nfootag3[@test, comma]\nbartag[@test, comma]\nbartag2[@test, comma]\nbartag3[@test, comma]') self.assertTrue(error_a.active) self.assertFalse(tagged_error.active) @@ -712,6 +765,40 @@ def test_build_error_notification(self): innactive_error.responsible = responsible message_notify.assert_not_called() + def test_build_error_acl(self): + self.stop_patcher('isfile') # prevent user creation + self.start_patcher('message_notify', 'odoo.addons.mail.models.mail_thread.MailThread.message_notify') + test_team = self.env['runbot.team'].create({ + 'name': 'test-team', + 'project_id': self.project.id, + }) + responsible = new_test_user(self.env, login='fixman', name='fixman', groups='base.group_user') + user_lambda = new_test_user(self.env, login='lambda', name='lambda', groups='base.group_user') + error_manager = new_test_user(self.env, login='errorman', name='errorman', groups='base.group_user,runbot.group_runbot_error_manager') + runbot_manager = new_test_user(self.env, login='runbotman', name='runbotman', groups='base.group_user,runbot.group_runbot_admin') + + error = self.BuildError.create({}) + + # check writable fields by any user + error.with_user(user_lambda).write({ + 'responsible': responsible.id, + 'customer': error_manager.id, + 'fixing_pr_id': self.dev_pr.id, + 'breaking_pr_id': self.dev_pr.id, + 'random': True, + 'team_id': test_team.id, + }) + + # check other fields for a user lambda + with self.assertRaises(AccessError): + error.with_user(user_lambda).description = 'test description' + + # now check that an error manager can set a test_tag + error.with_user(error_manager).test_tags = 'brol' + + # and the runbot admin user can change it back + error.with_user(runbot_manager).test_tags = False + class TestErrorMerge(TestBuildErrorCommon): @@ -820,7 +907,38 @@ def test_error_content_multiple_canonical_tags(self): self.assertEqual(error_content_2.canonical_tag, 
'/web/tests/test_file.py:TestUi.TestUi') self.assertNotEqual(error_content_1, error_content_2) self.assertEqual(error_content_1.error_id, error_content_2.error_id) - self.assertEqual(error_content_1.error_id.canonical_tags, '/base/tests/test_file.py:TestUi.TestUi,/web/tests/test_file.py:TestUi.TestUi') + self.assertEqual(error_content_1.error_id.canonical_tags, '/base/tests/test_file.py:TestUi.TestUi\n/web/tests/test_file.py:TestUi.TestUi') + error_content_1.error_id.test_tags = error_content_1.error_id.canonical_tags + self.assertEqual(error_content_1.error_id._disabling_tags(), [ + '-/base/tests/test_file.py:TestUi.TestUi', + '-/web/tests/test_file.py:TestUi.TestUi', + ]) + error_content_3 = self.env['runbot.build.error.content'].create({ + 'content': 'Tour foobar_tour failed at step step_14 in mode mode', + 'metadata': {'test': {'canonical_tag': '/web/tests/test_file.py:TestJs.test_unit_desktop[@web/some , comma\\[\\] and brackets]'}}, + }) + self.assertEqual(error_content_3.canonical_tag, '/web/tests/test_file.py:TestJs.test_unit_desktop[@web/some , comma\\[\\] and brackets]') + self.assertNotEqual(error_content_1, error_content_2) + self.assertEqual(error_content_1.error_id, error_content_2.error_id) + self.assertEqual(error_content_1.error_id.canonical_tags, """/base/tests/test_file.py:TestUi.TestUi\n/web/tests/test_file.py:TestJs.test_unit_desktop[@web/some , comma\\[\\] and brackets]\n/web/tests/test_file.py:TestUi.TestUi""") + error_content_1.error_id.test_tags = error_content_1.error_id.canonical_tags + self.assertEqual(error_content_1.error_id._disabling_tags(), [ + '-/base/tests/test_file.py:TestUi.TestUi', + '-/web/tests/test_file.py:TestJs.test_unit_desktop[@web/some , comma\\[\\] and brackets]', + '-/web/tests/test_file.py:TestUi.TestUi', + ], "disabling tags must keep the escaping and correct format") + error_content_1.error_id.test_tags = error_content_1.error_id.test_tags.replace('\n', ',') + 
self.assertEqual(error_content_1.error_id._disabling_tags(), [ + '-/base/tests/test_file.py:TestUi.TestUi', + '-/web/tests/test_file.py:TestJs.test_unit_desktop[@web/some , comma\\[\\] and brackets]', + '-/web/tests/test_file.py:TestUi.TestUi', + ], "_disabling_tags shoudl also work fine with comma separted tags") + error_content_1.error_id.test_tags = error_content_1.error_id.test_tags.replace('\\', '') + self.assertEqual(error_content_1.error_id._disabling_tags(), [ + '-/base/tests/test_file.py:TestUi.TestUi', + '-/web/tests/test_file.py:TestJs.test_unit_desktop[@web/some , comma[] and brackets]', + '-/web/tests/test_file.py:TestUi.TestUi', + ]) class TestCodeOwner(RunbotCase): diff --git a/runbot/tests/test_dockerfile.py b/runbot/tests/test_dockerfile.py index 213310872..cb7539bc2 100644 --- a/runbot/tests/test_dockerfile.py +++ b/runbot/tests/test_dockerfile.py @@ -3,10 +3,12 @@ import logging import os import re +import time from psycopg2.errors import UniqueViolation +from requests.exceptions import HTTPError from odoo import Command, exceptions -from unittest.mock import patch, mock_open +from unittest.mock import patch, mock_open, MagicMock from odoo.tests.common import tagged, HttpCase, mute_logger from .common import RunbotCase @@ -149,3 +151,154 @@ def test_dockerfile_variant_unique(self): 'name': 'Documentation2', 'parent_id': default_dockerfile.id, }) + + +@tagged('-at_install', 'post_install') +class TestDockerfileCache(RunbotCase, HttpCase): + def test_dockerfile_get_cached_content(self): + dockerfile = self.env['runbot.dockerfile'].create({ + 'name': 'TestsAddCache', + 'to_build': True, + 'layer_ids': [ + Command.create({ + 'name': 'CacheAddTest', + 'layer_type': 'raw', + 'content': 'some useless content', + }), + ], + }) + + self.start_patcher('docker_username', 'odoo.addons.runbot.models.docker.USERNAME', new='TestUser') + + expected_content = """# CacheAddTest +some useless content + +USER TestUser +""" + + self.start_patcher('hardlink_to', 
'odoo.addons.runbot.models.docker.Path.hardlink_to') + self.start_patcher('path_unlink', 'odoo.addons.runbot.models.docker.Path.unlink') + content = dockerfile._get_cached_content('/tmp/fake_build_path') + self.assertEqual(content, expected_content, 'Dockerfile without "ADD" should be left unchanged') + + raw_layer = """FROM ubuntu:noble +ADD https://nowhere.example.org/nothing.txt /data/nothing.txt +""" + + expected_content = """# CacheAddTest +FROM ubuntu:noble +ADD https://nowhere.example.org/nothing.txt /data/nothing.txt + + +USER TestUser +""" + dockerfile.layer_ids[0].content = raw_layer + content = dockerfile._get_cached_content('/tmp/fake_build_path') + self.assertEqual(content, expected_content, 'Dockerfile without "#CACHE" directive should be left unchanged') + + # Here we start the useful cache tests + raw_layer = """FROM ubuntu:noble +# CACHE 60 +ADD https://nowhere.example.org/nothing.txt /data/nothing.txt +""" + + expected_content = """# CacheAddTest +FROM ubuntu:noble +# CACHE 60 +COPY _data_nothing_txt /data/nothing.txt + + +USER TestUser +""" + mock_response = MagicMock() + mock_response.iter_content.return_value = [b'small file content'] + self.start_patcher('docker_requests_get', 'odoo.addons.runbot.models.docker.requests.get', return_value=mock_response) + + # 1 - The cache file does not exists yet + self.start_patcher('docker_path_exists', 'odoo.addons.runbot.models.docker.Path.exists', return_value=False) + dockerfile.layer_ids[0].content = raw_layer + with patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + content = dockerfile._get_cached_content('/tmp/fake_build_path') + cache_file_mock.assert_called_once_with('wb') + self.assertEqual(content, expected_content, 'Dockerfile with "#CACHE" should change the ADD directive to COPY') + + # 2 - The cache file exists but the cache duration is expired + self.patchers['docker_path_exists'].return_value = True + self.start_patcher('docker_path_lstat', 
'odoo.addons.runbot.models.docker.Path.lstat') + self.patchers['docker_path_lstat'].return_value.st_mtime = time.time() - 100 + with patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + content = dockerfile._get_cached_content('/tmp/fake_build_path') + cache_file_mock.assert_called_once_with('wb') + self.assertEqual(content, expected_content, 'Dockerfile with "#CACHE" should change the ADD directive to COPY') + + # 3 - The cache file exists but the cache duration is not expired + self.start_patcher('docker_path_touch', 'odoo.addons.runbot.models.docker.Path.touch', return_value=True) + self.patchers['docker_path_lstat'].return_value.st_mtime = time.time() - 2 + with patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + content = dockerfile._get_cached_content('/tmp/fake_build_path') + cache_file_mock.assert_not_called() + self.assertEqual(content, expected_content, 'Dockerfile with "#CACHE" should change the ADD directive to COPY') + self.patchers['docker_path_touch'].assert_not_called() + + # 4 - The cache file does not exists yet but the there is an error while downloading + self.patchers['docker_path_exists'].return_value = False + self.patchers['docker_requests_get'].side_effect = HTTPError + + dockerfile.layer_ids[0].content = raw_layer + with patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + with self.assertRaises(HTTPError, msg='HTTPError Exception should be reraised during cache download'): + content = dockerfile._get_cached_content('/tmp/fake_build_path') + + def test_dockerfile_build_with_cached_content(self): + dockerfile = self.env['runbot.dockerfile'].create({ + 'name': 'TestsAddCache', + 'to_build': True, + 'layer_ids': [ + Command.create({ + 'name': 'CacheAddTest', + 'layer_type': 'raw', + 'content': 'some useless content', + }), + ], + }) + + dockerfile.layer_ids[0].content = """# Cache Test +FROM ubuntu:noble +# CACHE 60 +ADD 
https://nowhere.example.org/nothing.txt /data/nothing.txt +""" + + expected_content = """# Cache Test +FROM ubuntu:noble +# CACHE 60 +COPY _data_nothing_txt /data/nothing.txt + + +USER TestUser +""" + + self.start_patcher('docker_username', 'odoo.addons.runbot.models.docker.USERNAME', new='TestUser') + self.start_patcher('docker_path_exists', 'odoo.addons.runbot.models.docker.Path.exists', return_value=False) + self.start_patcher('docker_path_hardlink_to', 'odoo.addons.runbot.models.docker.Path.hardlink_to') + self.start_patcher('docker_get_docker_metadata', 'odoo.addons.runbot.models.docker.Dockerfile._get_docker_metadata') + + mock_response = MagicMock() + mock_response.iter_content.return_value = [b'small file content'] + self.start_patcher('docker_requests_get', 'odoo.addons.runbot.models.docker.requests.get', return_value=mock_response) + + self.patchers['docker_build'].return_value = { + 'image_id': 'xxx', + 'success': True, + 'duration': 69, + 'image': 'd0d0caca', + 'msg': '', + } + + with patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + with patch('builtins.open', mock_open()) as dockerfile_file: + dockerfile._build() + cache_file_mock.assert_called_once_with('wb') + dockerfile_file_handle = dockerfile_file() + dockerfile_file_handle.write.assert_called_once_with(expected_content) + self.patchers['docker_path_hardlink_to'].assert_called() + self.patchers['docker_get_docker_metadata'].assert_called() diff --git a/runbot/tests/test_repo.py b/runbot/tests/test_repo.py index 81c940ab0..cb8195a05 100644 --- a/runbot/tests/test_repo.py +++ b/runbot/tests/test_repo.py @@ -372,7 +372,7 @@ def setUp(self): self.fetch_count = 0 self.force_failure = False - def mock_git_helper(self, repo, cmd): + def mock_git_helper(self, repo, cmd, input_data=None, raw=False): self.assertIn('fetch', cmd) self.fetch_count += 1 if self.fetch_count < 3 or self.force_failure: @@ -457,7 +457,7 @@ def setUp(self): super().setUp() self.test_refs = [] - 
def mock_git_helper(self, repo, cmd): + def mock_git_helper(self, repo, cmd, input_data=None, raw=False): self.assertIn('for-each-ref', cmd) self.assertIn('refs/*/pull/*', cmd) return '\n'.join(['\x00'.join(ref_data) for ref_data in self.test_refs]) diff --git a/runbot/views/branch_views.xml b/runbot/views/branch_views.xml index 943ef6e30..3791f562e 100644 --- a/runbot/views/branch_views.xml +++ b/runbot/views/branch_views.xml @@ -46,6 +46,7 @@ Branches runbot.branch list,form + branch diff --git a/runbot/views/build_error_views.xml b/runbot/views/build_error_views.xml index d9a560cc6..e521b5eb4 100644 --- a/runbot/views/build_error_views.xml +++ b/runbot/views/build_error_views.xml @@ -10,6 +10,9 @@ +
    @@ -434,6 +442,7 @@ Errors regex runbot.error.regex list,form + error-regex runbot.error.qualify.regex.list @@ -509,6 +518,7 @@ Build Errors Qualifying Regexes runbot.error.qualify.regex list,form + error-qualify diff --git a/runbot/views/build_views.xml b/runbot/views/build_views.xml index 722201d09..0d60ab89b 100644 --- a/runbot/views/build_views.xml +++ b/runbot/views/build_views.xml @@ -12,6 +12,9 @@ + + + @@ -63,7 +66,7 @@ - + @@ -83,6 +86,8 @@ + + @@ -148,12 +153,14 @@ ir.actions.act_window runbot.build list,form,graph,pivot + build Builds Params ir.actions.act_window runbot.build.params list,form + build-param - \ No newline at end of file + diff --git a/runbot/views/bundle_views.xml b/runbot/views/bundle_views.xml index 2ebd0e45e..ff76a3fb4 100644 --- a/runbot/views/bundle_views.xml +++ b/runbot/views/bundle_views.xml @@ -10,6 +10,7 @@ + @@ -17,6 +18,9 @@ + + + @@ -29,6 +33,7 @@ + @@ -54,6 +59,7 @@ + @@ -257,36 +263,42 @@ ir.actions.act_window runbot.bundle.trigger.custom list,form + custom-trigger Bundles ir.actions.act_window runbot.bundle list,form + bundle Projects ir.actions.act_window runbot.project list,form + project Versions ir.actions.act_window runbot.version list,form + version Batches ir.actions.act_window runbot.batch list,form + batch Bundle Tags ir.actions.act_window runbot.bundle.tag list,form + bundle-tag diff --git a/runbot/views/codeowner_views.xml b/runbot/views/codeowner_views.xml index afb81aaed..e7ccdfacf 100644 --- a/runbot/views/codeowner_views.xml +++ b/runbot/views/codeowner_views.xml @@ -39,6 +39,7 @@ Codeowner runbot.codeowner list,form + codeowner diff --git a/runbot/views/commit_views.xml b/runbot/views/commit_views.xml index 8bc09c10c..fed406ef9 100644 --- a/runbot/views/commit_views.xml +++ b/runbot/views/commit_views.xml @@ -59,6 +59,7 @@ Commits runbot.commit list,form + commit @@ -93,6 +94,7 @@ Commit Links runbot.commit.link list + commit-link diff --git a/runbot/views/config_views.xml 
b/runbot/views/config_views.xml index 9992b7ab8..11637d564 100644 --- a/runbot/views/config_views.xml +++ b/runbot/views/config_views.xml @@ -47,6 +47,7 @@ + @@ -120,6 +121,12 @@ + + + + + + @@ -200,12 +207,14 @@ Build Configs runbot.build.config list,form + config Build Config Steps runbot.build.config.step list,form + config-step diff --git a/runbot/views/dashboard_views.xml b/runbot/views/dashboard_views.xml index fa0f4ad8e..3a127a766 100644 --- a/runbot/views/dashboard_views.xml +++ b/runbot/views/dashboard_views.xml @@ -215,30 +215,35 @@ Runbot Dashboards Tiles runbot.dashboard.tile list,form + dashboard-tile Runbot teams runbot.team list,form + team Runbot Dashboards runbot.dashboard list,form + dashboard Runbot modules runbot.module list,form + module Runbot modules ownership runbot.module.ownership list,form + module-ownership diff --git a/runbot/views/dockerfile_views.xml b/runbot/views/dockerfile_views.xml index 79e9f58af..dab232b01 100644 --- a/runbot/views/dockerfile_views.xml +++ b/runbot/views/dockerfile_views.xml @@ -268,12 +268,14 @@ Docker build results runbot.docker_build_result list,form + docker-build Docker Layers runbot.docker_layer list,form + docker-layer diff --git a/runbot/views/host_views.xml b/runbot/views/host_views.xml index f22f34e2b..726aeff48 100644 --- a/runbot/views/host_views.xml +++ b/runbot/views/host_views.xml @@ -72,6 +72,7 @@ Host runbot.host list,form + host diff --git a/runbot/views/menus.xml b/runbot/views/menus.xml index 720226c82..8a71193d9 100644 --- a/runbot/views/menus.xml +++ b/runbot/views/menus.xml @@ -26,6 +26,7 @@ + @@ -62,6 +63,7 @@ + diff --git a/runbot/views/repo_views.xml b/runbot/views/repo_views.xml index c854ccdb2..41b620f6e 100644 --- a/runbot/views/repo_views.xml +++ b/runbot/views/repo_views.xml @@ -17,14 +17,17 @@ + + + @@ -197,6 +200,7 @@ + @@ -263,30 +267,35 @@ Repositories runbot.repo list,form + repository Triggers runbot.trigger list,form + trigger Triggers dependency runbot.trigger.dependency 
list,form + trigger-dependency Remotes runbot.remote list,form + remote Trigger Categories runbot.category list,form + trigger-category diff --git a/runbot/views/semgrep_rules.xml b/runbot/views/semgrep_rules.xml new file mode 100644 index 000000000..a325ea0ed --- /dev/null +++ b/runbot/views/semgrep_rules.xml @@ -0,0 +1,93 @@ + + + + + runbot.semgrep.rule.tree + runbot.semgrep_rule + + + + + + + + + + + + + + + runbot.semgrep.rule.form + runbot.semgrep_rule + +
    + +

    + + + + + + + + + + + + + +
    +

    + You will likely want to write and test rules in the semgrep playground +

    +

    + On the playground, # ruleid: {ruleid} can be used to make sure a rule matches a snippet, and # ok:{ruleid} + to make sure a rule does not match one. +

    +

    Pattern Documentation

    + +
    + + + +
    + + + + + +
    + + +
    +
    + + + runbot.checker.category.tree + runbot.checker_category + + + + + + + + + Semgrep Rules + runbot.semgrep_rule + list,form + semgrep-rule + + + + Checker Categories + runbot.checker_category + list,form + checker-category + + +
    +
    diff --git a/runbot/views/stat_views.xml b/runbot/views/stat_views.xml index 6da213540..1131525d3 100644 --- a/runbot/views/stat_views.xml +++ b/runbot/views/stat_views.xml @@ -37,6 +37,7 @@ Stat regex runbot.build.stat.regex list,form + stat-regex diff --git a/runbot/views/upgrade.xml b/runbot/views/upgrade.xml index 2338aa16e..680b978da 100644 --- a/runbot/views/upgrade.xml +++ b/runbot/views/upgrade.xml @@ -86,12 +86,14 @@ Upgrade Exceptions runbot.upgrade.exception list,form + upgrade-exception Upgrade Regexes runbot.upgrade.regex list,form + upgrade-regex diff --git a/runbot/views/upgrade_matrix_views.xml b/runbot/views/upgrade_matrix_views.xml index 594127b71..a692ea132 100644 --- a/runbot/views/upgrade_matrix_views.xml +++ b/runbot/views/upgrade_matrix_views.xml @@ -71,6 +71,7 @@ Upgrade matrix runbot.upgrade.matrix list,form + upgrade-matrix diff --git a/runbot/views/warning_views.xml b/runbot/views/warning_views.xml index be9a0ba0c..edccdf541 100644 --- a/runbot/views/warning_views.xml +++ b/runbot/views/warning_views.xml @@ -16,6 +16,7 @@ Warnings runbot.warning list + warning
    DateDate (UTC) LevelType Message
    - -
    - + - + + + + + + + + - - - - Build # - - - - - - - - - - - - - : - + + + + Build # + + + - - - - - - - - - - - - - - - - - - - - - -
    -
    + + +
    - + @@ -348,17 +321,16 @@ -
    +
    - This error is already . - - - - - () + + This error is already . + + + + () +