diff --git a/runbot/__manifest__.py b/runbot/__manifest__.py index 177b5d4db..38db70949 100644 --- a/runbot/__manifest__.py +++ b/runbot/__manifest__.py @@ -6,7 +6,7 @@ 'author': "Odoo SA", 'website': "http://runbot.odoo.com", 'category': 'Website', - 'version': '5.14', + 'version': '5.16', 'application': True, 'depends': ['base', 'base_automation', 'website', 'auth_oauth'], 'data': [ @@ -57,6 +57,7 @@ 'views/oauth_provider_views.xml', 'views/repo_views.xml', 'views/res_config_settings_views.xml', + 'views/semgrep_rules.xml', 'views/stat_views.xml', 'views/upgrade.xml', 'views/upgrade_matrix_views.xml', diff --git a/runbot/common.py b/runbot/common.py index de9f5b333..23938c84e 100644 --- a/runbot/common.py +++ b/runbot/common.py @@ -24,6 +24,13 @@ dest_reg = re.compile(r'^\d{5,}-.+$') +try: + from odoo.addons.saas_worker.util import from_role +except ImportError: + def from_role(*_, **__): + return lambda _: None + + def transactioncache(method): @functools.wraps(method) def wrapper(self, *args, **kwargs): diff --git a/runbot/container.py b/runbot/container.py index d2f2a1624..f52456c1e 100644 --- a/runbot/container.py +++ b/runbot/container.py @@ -116,7 +116,7 @@ def _docker_build(build_dir, image_tag, pull=False): """Build the docker image :param build_dir: the build directory that contains Dockerfile. :param image_tag: name used to tag the resulting docker image - :return: tuple(success, msg) where success is a boolean and msg is the error message or None + :return: dict """ with DockerManager(image_tag) as dm: @@ -259,7 +259,10 @@ def _docker_run(cmd=False, log_path=False, build_dir=False, container_name=False else: run_cmd = cmd run_cmd = f'cd /data/build;touch start-{container_name};{run_cmd};cd /data/build;touch end-{container_name}' - _logger.info('Docker run command: %s', run_cmd) + run_cmd_repr = str(run_cmd) + if len(run_cmd_repr) > 250: + run_cmd_repr = run_cmd_repr[:250] + '...' 
+ _logger.info('Docker run command: %s', run_cmd_repr) docker_clear_state(container_name, build_dir) # ensure that no state are remaining build_dir = file_path(build_dir) diff --git a/runbot/controllers/frontend.py b/runbot/controllers/frontend.py index 27075fc1a..a8f15e44a 100644 --- a/runbot/controllers/frontend.py +++ b/runbot/controllers/frontend.py @@ -80,7 +80,7 @@ def _pending(self): '/runbot/', '/runbot//search/'], website=True, auth='public', type='http') def bundles(self, project=None, search='', refresh=False, limit=40, has_pr=None, **kwargs): - search = search if len(search) < 60 else search[:60] + search = search if len(search) < 60 else search[:200] env = request.env categories = env['runbot.category'].search([]) projects = self.env['runbot.project'].search([('hidden', '=', False)]) @@ -119,13 +119,11 @@ def bundles(self, project=None, search='', refresh=False, limit=40, has_pr=None, pr_numbers = [] for search_elem in search.split("|"): if search_elem.isnumeric(): - pr_numbers.append(int(search_elem)) + search_domains.append([('branch_ids', 'any', [('name', '=', search_elem)])]) + if ':' in search_elem: + search_domains.append([('branch_ids', 'any', [('pull_head_name', '=', search_elem)])]) operator = '=ilike' if '%' in search_elem else 'ilike' search_domains.append([('name', operator, search_elem)]) - if pr_numbers: - res = request.env['runbot.branch'].search([('name', 'in', pr_numbers)]) - if res: - search_domains.append([('id', 'in', res.mapped('bundle_id').ids)]) search_domain = Domain.OR(search_domains) domain = Domain.AND([domain, search_domain]) @@ -166,7 +164,7 @@ def bundles(self, project=None, search='', refresh=False, limit=40, has_pr=None, '/runbot/bundle//page/', '/runbot/bundle/', ], website=True, auth='public', type='http', sitemap=False) - def bundle(self, bundle=None, page=1, limit=50, **kwargs): + def bundle(self, bundle=None, page=1, limit=50, expand_custom=False, **kwargs): if isinstance(bundle, str): bundle = 
request.env['runbot.bundle'].search([('name', '=', bundle)], limit=1, order='id') if not bundle: @@ -183,6 +181,7 @@ def bundle(self, bundle=None, page=1, limit=50, **kwargs): ) batchs = request.env['runbot.batch'].search(domain, limit=limit, offset=pager.get('offset', 0), order='id desc') + # compute if we should display the new batch button context = { 'bundle': bundle, 'batchs': batchs, @@ -190,6 +189,8 @@ def bundle(self, bundle=None, page=1, limit=50, **kwargs): 'project': bundle.project_id, 'title': 'Bundle %s' % bundle.name, 'page_info_state': bundle.last_batch._get_global_result(), + 'expand_custom': expand_custom, + 'needs_update': bundle.last_batch and bundle.last_batch.sudo().needs_update(), } return request.render('runbot.bundle', context) @@ -199,7 +200,7 @@ def bundle(self, bundle=None, page=1, limit=50, **kwargs): '/runbot/bundle//force/', ], type='http', auth="user", methods=['GET', 'POST'], csrf=False) def force_bundle(self, bundle, auto_rebase=False, use_base_commits=False, **_post): - if not request.env.user.has_group('runbot.group_runbot_advanced_user') and ':' not in bundle.name: + if not request.env.user.has_group('runbot.group_runbot_advanced_user') and ':' not in bundle.name and not bundle.last_batch.needs_update(): message = "Only users with a specific group can do that. 
Please contact runbot administrators" raise Forbidden(message) _logger.info('user %s forcing bundle %s', request.env.user.name, bundle.name) # user must be able to read bundle @@ -220,6 +221,12 @@ def batch(self, batch_id=None, **kwargs): } return request.render('runbot.batch', context) + @route(['/runbot/batch//prioritize'], website=True, auth='user', type='http', sitemap=False) + def batch_priority(self, batch_id=None, **kwargs): + batch = request.env['runbot.batch'].browse(batch_id) + batch.sudo().priority_level = int(batch.create_date.timestamp() - 3600) + return werkzeug.utils.redirect('/runbot/batch/%s' % batch_id) + @route(['/runbot/batch/slot//build'], auth='user', type='http') def slot_create_build(self, slot=None, **kwargs): build = slot.sudo()._create_missing_build() @@ -316,7 +323,8 @@ def build(self, build_id, search=None, from_batch=None, **post): @route([ '/runbot/build/search', ], website=True, auth='public', type='http', sitemap=False) - def builds(self, **kwargs): + def builds(self, limit=100, **kwargs): + limit = min(int(limit), 1000) domain = [] for key in ('config_id', 'version_id', 'project_id', 'trigger_id', 'create_batch_id.bundle_id', 'create_batch_id'): # allowed params value = kwargs.get(key) @@ -330,10 +338,12 @@ def builds(self, **kwargs): for key in ('description',): if key in kwargs: - domain.append((f'{key}', 'ilike', kwargs.get(key))) + value = kwargs.get(key) + operator = 'ilike' if '%' in value else '=' + domain.append((f'{key}', operator, value)) context = { - 'builds': request.env['runbot.build'].search(domain, limit=100), + 'builds': request.env['runbot.build'].search(domain, limit=limit), } return request.render('runbot.build_search', context) @@ -663,19 +673,40 @@ def parse_log(self, ir_log, **kwargs): request.env['runbot.build.error']._parse_logs(ir_log) return werkzeug.utils.redirect('/runbot/build/%s' % ir_log.build_id.id) - @route(['/runbot/bundle/toggle_no_build//'], type='http', auth='user', sitemap=False) - def 
toggle_no_build(self, bundle_id, value, **kwargs): - if not request.env.user.has_group('base.group_user'): - return 'Forbidden' - bundle = request.env['runbot.bundle'].browse(bundle_id).exists() - if bundle.sticky or bundle.is_base: - return 'Forbidden' - if bundle.project_id.tmp_prefix and bundle.name.startswith(bundle.project_id.tmp_prefix): - return 'Forbidden' - bundle.sudo().no_build = bool(value) - _logger.info('Bundle %s no_build set to %s by %s', bundle.name, bool(value), request.env.user.name) + @route(['/runbot/bundle//triggers/'], type='http', auth='user', sitemap=False) + def configure_bundle_triggers(self, bundle_id, action, expand_custom=False, **kwargs): + if not request.env.user.has_group('runbot.group_user'): + raise NotFound() + + bundle = request.env['runbot.bundle'].browse(bundle_id) + if bundle.is_base or bundle.is_staging: + raise NotFound() + if action == 'disable_all': + bundle.sudo()._configure_custom_trigger_start_mode('disabled') + elif action == 'force_all': + bundle.sudo()._configure_custom_trigger_start_mode('force') + elif action == 'auto_all': + bundle.sudo()._configure_custom_trigger_start_mode('auto') + elif action == 'light_all': + bundle.sudo()._configure_custom_trigger_start_mode('light') + else: + raise NotFound() + if expand_custom: + return werkzeug.utils.redirect(f'/runbot/bundle/{bundle_id}?expand_custom=1') return werkzeug.utils.redirect(f'/runbot/bundle/{bundle_id}') + @route(['/runbot/trigger_custom//set_mode/'], type='http', auth='user', sitemap=False) + def configure_custom_trigger(self, trigger_custom_id, mode, **kwargs): + if not request.env.user.has_group('runbot.group_user'): + raise NotFound() + trigger_custom = request.env['runbot.bundle.trigger.custom'].browse(trigger_custom_id) + bundle = trigger_custom.bundle_id + if bundle.is_base or bundle.is_staging: + raise NotFound() + + trigger_custom.sudo().start_mode = mode + return 
werkzeug.utils.redirect(f'/runbot/bundle/{trigger_custom.bundle_id.id}?expand_custom=1') + @route(['/runbot/trigger/report/'], type='http', auth='user', website=True, sitemap=False) def report_view(self, trigger_id=None, **kwargs): return request.render("runbot.trigger_report", { @@ -852,21 +883,20 @@ def repos_heads(self, project_id=None, bundle_name=None, **kwargs): else: domain = Domain.AND([domain, [('sticky', '=', True)]]) bundles = request.env['runbot.bundle'].search(domain, order='id desc, name') - - last_batches_infos = { - bundle.name: { + last_batches_infos = dict() + for bundle in bundles: + batch = bundle.last_batch if bundle.last_batch.state != 'preparing' else bundle.last_done_batch + last_batches_infos[bundle.name] = { "commits": [ { "repo": commit_link.commit_id.repo_id.name, "head": commit_link.commit_id.name, "match_type": commit_link.match_type, } - for commit_link in bundle.last_batch.commit_link_ids + for commit_link in batch.commit_link_ids ], - "autotags": request.env["runbot.build.error"].sudo()._disabling_tags(build_id=bundle.last_batch.slot_ids.build_id[0]), + "autotags": request.env["runbot.build.error"].sudo()._disabling_tags(build_id=batch.slot_ids.build_id[0]), } - for bundle in bundles - } return request.make_json_response(last_batches_infos) @route([ diff --git a/runbot/controllers/hook.py b/runbot/controllers/hook.py index d46c7f699..4bd36a20d 100644 --- a/runbot/controllers/hook.py +++ b/runbot/controllers/hook.py @@ -4,8 +4,9 @@ import json import logging -from odoo import http +from odoo import http, fields from odoo.http import request +from ..common import from_role _logger = logging.getLogger(__name__) @@ -50,3 +51,18 @@ def hook(self, remote_id=None, **_post): branch = request.env['runbot.branch'].sudo().search([('remote_id', '=', remote.id), ('name', '=', branch_ref)]) branch.alive = False return "" + + @from_role('mergebot', signed=True) + @http.route(['/runbot/request_ci'], type='http', methods=["POST"], auth="public", 
website=True, csrf=False, sitemap=False) + def force_ci(self): + pull_request_names = request.get_json_data().get('pull_requests', []) + pull_domains = [] + for pull_request_names in pull_request_names: + remote_short_name, name = pull_request_names.split('#') + owner, repo_name = remote_short_name.split('/') + pull_domains.append([('remote_id.owner', '=', owner), ('remote_id.repo_name', '=', repo_name), ('name', '=', name)]) + pull_domains = fields.Domain.OR(pull_domains) + pull_requests = request.env['runbot.branch'].sudo().search([('is_pr', '=', True)] + pull_domains) + bundles = pull_requests.bundle_id + _logger.info('Received CI request for bundles: %s', bundles.mapped('name')) + bundles._force_ci() diff --git a/runbot/data/dockerfile_data.xml b/runbot/data/dockerfile_data.xml index 1671663d4..558e8f3d4 100644 --- a/runbot/data/dockerfile_data.xml +++ b/runbot/data/dockerfile_data.xml @@ -120,6 +120,7 @@ Install branch debian/control with latest postgresql-client # This layer updates the repository list to get the latest postgresql-client, mainly needed if the host postgresql version is higher than the default version of the docker os +# CACHE 60 ADD https://raw.githubusercontent.com/odoo/odoo/{odoo_branch}/debian/control /tmp/control.txt RUN curl -sSL https://www.postgresql.org/media/keys/ACCC4CF8.asc -o /etc/apt/trusted.gpg.d/psql_client.asc \ && echo "deb http://apt.postgresql.org/pub/repos/apt/ {os_release_name}-pgdg main" > /etc/apt/sources.list.d/pgclient.list \ @@ -136,7 +137,7 @@ RUN curl -sSL https://www.postgresql.org/media/keys/ACCC4CF8.asc -o /etc/apt/tru template Install chrome - + RUN curl -sSL https://dl.google.com/linux/chrome/deb/pool/main/g/google-chrome-stable/google-chrome-stable_{chrome_version}_amd64.deb -o /tmp/chrome.deb \ && apt-get update \ && apt-get -y install --no-install-recommends /tmp/chrome.deb \ @@ -195,7 +196,8 @@ ENV PIP_BREAK_SYSTEM_PACKAGES=1 template Install branch requirements - ADD --chown={USERNAME} 
https://raw.githubusercontent.com/odoo/odoo/{odoo_branch}/requirements.txt /tmp/requirements.txt + # CACHE 60 +ADD --chown={USERNAME} https://raw.githubusercontent.com/odoo/odoo/{odoo_branch}/requirements.txt /tmp/requirements.txt RUN python3 -m pip install --no-cache-dir -r /tmp/requirements.txt diff --git a/runbot/documentation/dynamic_config.md b/runbot/documentation/dynamic_config.md index acff2c71d..9282bd816 100644 --- a/runbot/documentation/dynamic_config.md +++ b/runbot/documentation/dynamic_config.md @@ -75,7 +75,7 @@ The config steps are mainly defined by their `job_type`. The `name` key is also ``` The `db_name` is optionnal, usually set to all as a convention on runbot for databases that contains *almost* all modules. If not defined the sanitized version of the name will be used. -`install_modules` and `install_default_modules` behave the same way except that `install_modules` will consider that we start with no module (prepends `.*` filter) while `install_default_modules` will be based on the runbot default module list (all available modules minus the repo blacklist) +`install_modules` and `install_default_modules` behave the same way except that `install_modules` will consider that we start with no module (prepends `-*` filter) while `install_default_modules` will be based on the runbot default module list (all available modules minus the repo blacklist) Both entries will use the value as a runbot module filter, and then passed as the -i, [see corresponding section](#module-selection) for more info. @@ -344,21 +344,43 @@ Filters are a way to transform dynamic values before using them. 
They are define For example, to transform a module filter into test tags: +#### filter_all_modules, make_module_test_tags + ```json {"test_tags": "-at_install,{{test_module_filter|filter_all_modules|make_module_test_tags}}", ``` In this example, the `filter_all_modules` filters will first transform the `test_module_filter` variable (which is a module filter) into a list of modules, and then the `make_module_test_tags` filters will transform this list of modules into test tags by prepending each module with a `/` to indicate that we want to run all tests from these modules. -Note that `filter_all_modules` is actually equivalent to `filter_default_modules`, but prepending a `*` at the begining of the filter. +#### filter_default_modules + +`filter_all_modules` is actually equivalent to `filter_default_modules`, but prepending a `*` at the begining of the filter. Without that a runbot defined filter is applied, returning a default list of modules per repo. `*,mail -> !web|filter_default_modules` is the same as `mail -> !web|filter_all_modules` + +#### prepend, append In some case we also want to combine the test-tags module with another tag or test method, this can be done using prepend and append `"{{-*,web*|filter_all_modules|make_module_test_tags|append('.test_method')}}` `{{-*,web*|filter_all_modules|make_module_test_tags|prepend('custom_tag')}}` -It is also possible to filter modules based on the one modified in the current bundle. +#### modified_modules + +It is possible to filter modules based on the one modified in the current bundle. `{{*|filter_all_modules|modified_modules}}"` + +#### select_existing_modules + +`select_existing_modules` is equivalent to `filter_default_modules` but with a -* at the beginning of the filter, meaning that we start with an empty selection and only add modules that are explicitly selected. 
+ +This is a solution to keep only existing modules from a specific list, when we are not sure modules exists: +`{{*|filter_all_modules|modified_modules|prepend('test_')|select_existing_modules|make_module_test_tags}}` + +- `*|filter_all_modules` will select all existing modules +- `|modified_modules` will only keep the modified ones +- `prepend('test_')` will prepend test_ to have the test equivalent name of the modified modules (mail-> test_mail, base -> test_base) +- `select_existing_modules` will only keep modules that exists (test_mail) +- `make_module_test_tags` make the module test tags by prepending a / to each module. + diff --git a/runbot/migrations/19.0.5.15/post-migration.py b/runbot/migrations/19.0.5.15/post-migration.py new file mode 100644 index 000000000..efba09b23 --- /dev/null +++ b/runbot/migrations/19.0.5.15/post-migration.py @@ -0,0 +1,42 @@ +import logging + +_logger = logging.getLogger(__name__) + + +def migrate(cr, version): + cr.execute(""" + SELECT to_regclass('public.x_runbot_semgrep_rules'); + """) + if not cr.fetchone()[0]: + return + + cr.execute("""SELECT "x_checker", "x_language", "x_maxver", "x_message", "x_minver", "x_name", "x_rule", "x_severity" FROM x_runbot_semgrep_rules""") + results = cr.dictfetchall() + _logger.info('Migrating %d semgrep rules', len(results)) + categories = [] + for result in results: + categories.append(result['x_checker']) + + category_map = {} + for category in sorted(set(categories)): + cr.execute(""" + INSERT INTO runbot_checker_category (name) + VALUES (%s) + RETURNING id + """, (category,)) + category_map[category] = cr.fetchone()[0] + + for result in results: + cr.execute(""" + INSERT INTO runbot_semgrep_rule (name, category_id, language, max_version_number, min_version_number, message, rule, severity) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s) + """, ( + result['x_name'], + category_map[result['x_checker']], + result['x_language'], + result['x_maxver'], + result['x_minver'], + result['x_message'], + 
result['x_rule'], + result['x_severity'], + )) diff --git a/runbot/migrations/19.0.5.16/pre-migration.py b/runbot/migrations/19.0.5.16/pre-migration.py new file mode 100644 index 000000000..bffcd9582 --- /dev/null +++ b/runbot/migrations/19.0.5.16/pre-migration.py @@ -0,0 +1,8 @@ +import logging + +_logger = logging.getLogger(__name__) + + +def migrate(cr, version): + cr.execute("""ALTER TABLE runbot_batch ADD COLUMN priority_level integer""") + cr.execute("""ALTER TABLE runbot_build ADD COLUMN priority_level integer""") diff --git a/runbot/models/__init__.py b/runbot/models/__init__.py index 1551bf611..7041959e3 100644 --- a/runbot/models/__init__.py +++ b/runbot/models/__init__.py @@ -14,6 +14,7 @@ from . import database from . import docker from . import host +from . import ir_action from . import ir_cron from . import ir_http from . import ir_model_fields_converter @@ -25,6 +26,7 @@ from . import res_config_settings from . import res_users from . import runbot +from . import semgrep_rule from . import team from . import upgrade from . 
import user diff --git a/runbot/models/batch.py b/runbot/models/batch.py index 81806b1a8..6cd395827 100644 --- a/runbot/models/batch.py +++ b/runbot/models/batch.py @@ -15,11 +15,13 @@ class Batch(models.Model): last_update = fields.Datetime('Last ref update') bundle_id = fields.Many2one('runbot.bundle', required=True, index=True, ondelete='cascade') + build_all = fields.Boolean('Force all triggers') commit_link_ids = fields.Many2many('runbot.commit.link') commit_ids = fields.Many2many('runbot.commit', compute='_compute_commit_ids') slot_ids = fields.One2many('runbot.batch.slot', 'batch_id') all_build_ids = fields.Many2many('runbot.build', compute='_compute_all_build_ids', help="Recursive builds") state = fields.Selection([('preparing', 'Preparing'), ('ready', 'Ready'), ('done', 'Done'), ('skipped', 'Skipped')]) + priority_level = fields.Integer("Priority level", help="Priority level of the batch, determined from the create date and the bundle priority offset. The lower, the higher priority.") hidden = fields.Boolean('Hidden', default=False) age = fields.Integer(compute='_compute_age', string='Build age') category_id = fields.Many2one('runbot.category', index=True, default=lambda self: self.env.ref('runbot.default_category', raise_if_not_found=False)) @@ -163,7 +165,7 @@ def _create_build(self, params, slot): build_type = 'normal' if self.category_id != self.env.ref('runbot.default_category'): build_type = 'scheduled' - elif self.bundle_id.priority: + elif self.bundle_id.priority or params.trigger_id.use_extra_slot: build_type = 'priority' build = self.env['runbot.build'].create({ @@ -182,6 +184,12 @@ def _create_build(self, params, slot): def _prepare(self, auto_rebase=False, use_base_commits=False): _logger.info('Preparing batch %s', self.id) + + priority_offset = self.bundle_id.priority_offset + if not priority_offset and self.bundle_id.branch_ids.forwardport_of_id and self.bundle_id.last_batchs == self: # this is the only batch of a forwardported pr. 
+ priority_offset = - 3600 * 5 + self.build_all = True # for normal pr, mergebot will request all ci on r+ if needed, for forward port, we need to ensure they are all created or the chain could be blocked + self.priority_level = int(self.create_date.timestamp() - priority_offset) if use_base_commits: self._warning('This batch will use base commits instead of bundle commits') if not self.bundle_id.base_id: @@ -364,7 +372,7 @@ def _fill_missing(branch_commits, match_type): base_commit_link_by_repos = {commit_link.commit_id.repo_id.id: commit_link for commit_link in self.base_reference_batch_id.commit_link_ids} if use_base_commits: commit_link_by_repos = base_commit_link_by_repos - version_id = self.bundle_id.version_id.id + bundle_version_id = self.bundle_id.version_id.id project_id = self.bundle_id.project_id.id trigger_customs = {} for trigger_custom in self.bundle_id.all_trigger_custom_ids: @@ -376,7 +384,22 @@ def _fill_missing(branch_commits, match_type): self._warning('Missing commit for repo %s for trigger %s', (trigger_repos & missing_repos).mapped('name'), trigger.name) continue # in any case, search for an existing build - config = trigger_custom.config_id or trigger.config_id + config = trigger.config_id + if not trigger_custom and trigger.light_config_id and not bundle.build_all and not self.build_all and not bundle.is_staging and not bundle.is_base: + if (project.use_light_default + or + project.use_light_draft and any(branch.draft for branch in self.bundle_id.branch_ids) + or + project.use_light_no_pr and not any(branch.is_pr for branch in self.bundle_id.branch_ids) + ): + config = trigger.light_config_id + + if trigger_custom.config_id: + config = trigger_custom.config_id + elif trigger_custom.start_mode == 'light' and trigger.light_config_id: + config = trigger.light_config_id + + extra_params = trigger_custom.extra_params or '' config_data = dict(trigger.config_data or {}) | dict(trigger_custom.config_data or {}) trigger_commit_link_by_repos = 
commit_link_by_repos @@ -384,6 +407,7 @@ def _fill_missing(branch_commits, match_type): self._warning(f'This batch will use base commits instead of bundle commits for trigger {trigger.name}') trigger_commit_link_by_repos = base_commit_link_by_repos commits_links = [trigger_commit_link_by_repos[repo.id].id for repo in trigger_repos] + version_id = bundle_version_id if (trigger.version_dependent or trigger.batch_dependent) else False params_value = { 'version_id': version_id, 'extra_params': extra_params, @@ -395,7 +419,7 @@ def _fill_missing(branch_commits, match_type): 'modules': bundle.modules, 'dockerfile_id': dockerfile_id, 'create_batch_id': self.id, - 'used_custom_trigger': bool(trigger_custom), + 'used_custom_trigger': bool(trigger_custom.config_id or trigger_custom.extra_params or trigger_custom.config_data or trigger_custom.use_base_commits), } params = self.env['runbot.build.params'].create(params_value) @@ -433,16 +457,22 @@ def _start_builds(self): is_dev = not bundle.is_staging and not bundle.is_base for trigger in self.slot_ids.trigger_id: enable_on_bundle = (trigger.on_staging and bundle.is_staging) or (trigger.on_base and bundle.is_base) or (trigger.on_dev and is_dev) - if ((trigger.repo_ids & bundle_repos) or bundle.build_all or bundle.sticky) and enable_on_bundle: + common_repo = (trigger.repo_ids & bundle_repos) + if self.build_all and not common_repo: + common_repo = (trigger.dependency_ids & bundle_repos) + if (common_repo or bundle.build_all or bundle.sticky) and enable_on_bundle: should_start_triggers_ids.add(trigger.id) + disabled_triggers = self.bundle_id.all_trigger_custom_ids.filtered(lambda tc: tc.start_mode == 'disabled').trigger_id for slot in self.slot_ids: if slot.build_id: continue trigger = slot.trigger_id - if trigger.starts_after_ids - success_trigger: # some required triggers are missing - continue trigger_custom = trigger_customs.get(trigger, self.env['runbot.bundle.trigger.custom']) + missing_triggers = trigger.starts_after_ids 
- success_trigger + if missing_triggers: + if not trigger_custom or (missing_triggers - disabled_triggers): + continue force_trigger = trigger_custom and trigger_custom.start_mode == 'force' skip_trigger = (trigger_custom and trigger_custom.start_mode == 'disabled') or trigger.manual should_start = slot.trigger_id.id in should_start_triggers_ids @@ -507,6 +537,23 @@ def _log(self, message, *args, level='INFO'): 'level': level, }) + def needs_update(self): + bundle = self.bundle_id + custom_trigger_per_trigger = {ct.trigger_id: ct for ct in bundle.trigger_custom_ids} + for slot in self.slot_ids: + trigger = slot.trigger_id + custom_trigger = custom_trigger_per_trigger.get(trigger) + if not custom_trigger: + continue + expected_config = trigger.config_id + if custom_trigger.config_id: + expected_config = custom_trigger.config_id + elif trigger.light_config_id and custom_trigger.start_mode == 'light': + expected_config = trigger.light_config_id + if slot.params_id.config_id != expected_config: + return True + return False + class BatchLog(models.Model): _name = 'runbot.batch.log' _description = 'Batch log' diff --git a/runbot/models/branch.py b/runbot/models/branch.py index f2cf6edf0..8deb9ec5c 100644 --- a/runbot/models/branch.py +++ b/runbot/models/branch.py @@ -101,7 +101,8 @@ def _compute_reference_name(self): # branch.reference_name = '%s~%s' % (branch.pull_head_name, branch.name) else: reference_name = branch.name - forced_version = branch.remote_id.repo_id.single_version # we don't add a depend on repo.single_version to avoid mass recompute of existing branches + repo = branch.remote_id.repo_id + forced_version = repo.enforce_version and repo.single_version # we don't add a depend on repo.single_version to avoid mass recompute of existing branches if forced_version and not (reference_name.startswith(f'{forced_version.name}-') or reference_name == forced_version.name): reference_name = f'{forced_version.name}---{reference_name}' branch.reference_name = 
reference_name diff --git a/runbot/models/build.py b/runbot/models/build.py index b3f10c962..340aec464 100644 --- a/runbot/models/build.py +++ b/runbot/models/build.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- - +import ast import datetime import getpass import hashlib @@ -10,23 +10,36 @@ import shutil import time import uuid - from collections import defaultdict -from dateutil import parser from pathlib import Path + +from dateutil import parser from psycopg2 import sql from psycopg2.extensions import TransactionRollbackError -from ..common import dt2time, now, grep, local_pgadmin_cursor, dest_reg, os, list_local_dbs, pseudo_markdown, RunbotException, findall, sanitize, markdown_escape, tail -from ..container import docker_stop, docker_state, Command, docker_run, docker_pull -from ..fields import JsonDictField - -from odoo import models, fields, api - +from odoo import api, fields, models from odoo.exceptions import ValidationError from odoo.tools import file_open, file_path from odoo.tools.safe_eval import safe_eval +from ..common import ( + RunbotException, + dest_reg, + dt2time, + findall, + grep, + list_local_dbs, + local_pgadmin_cursor, + markdown_escape, + now, + os, + pseudo_markdown, + sanitize, + tail, + transactioncache, +) +from ..container import Command, docker_pull, docker_run, docker_state, docker_stop +from ..fields import JsonDictField _logger = logging.getLogger(__name__) @@ -61,7 +74,6 @@ def remove_readonly(func, path_str, exinfo): def make_selection(array): return [(elem, elem.replace('_', ' ').capitalize()) if isinstance(elem, str) else elem for elem in array] - class BuildParameters(models.Model): _name = 'runbot.build.params' _description = "Build parameters" @@ -70,7 +82,7 @@ class BuildParameters(models.Model): # execution parametter commit_link_ids = fields.Many2many('runbot.commit.link', copy=True) commit_ids = fields.Many2many('runbot.commit', compute='_compute_commit_ids') - version_id = fields.Many2one('runbot.version', required=True, 
index=True) + version_id = fields.Many2one('runbot.version', index=True) project_id = fields.Many2one('runbot.project', required=True, index=True) # for access rights trigger_id = fields.Many2one('runbot.trigger', index=True) # for access rights create_batch_id = fields.Many2one('runbot.batch', index=True) @@ -105,8 +117,18 @@ class BuildParameters(models.Model): # @api.depends('version_id', 'project_id', 'extra_params', 'config_id', 'config_data', 'modules', 'commit_link_ids', 'builds_reference_ids') def _compute_fingerprint(self): + def get_commit_links_ident(commit_link): + commit_idents = [] + for c in commit_link.commit_id: + commit_ident = c.tree_hash or c.name + if c.rebase_on_id: + commit_ident += (c.rebase_on_id.tree_hash or c.rebase_on_id.name) + # in a ideal world, we would be able to determine what the real threehash would be + commit_idents.append(commit_ident) + return sorted(commit_idents) + for param in self: - commit_ident = sorted([c.tree_hash or '' for c in param.commit_link_ids.commit_id]) + commit_ident = get_commit_links_ident(param.commit_link_ids) if param.trigger_id.batch_dependent: commit_ident = sorted(param.commit_link_ids.commit_id.ids) cleaned_vals = { @@ -125,7 +147,7 @@ def _compute_fingerprint(self): } if param.upgrade_to_build_id: cleaned_vals['upgrade_to_build_dockerfile_id'] = param.upgrade_to_build_id.params_id.dockerfile_id.id - cleaned_vals['upgrade_to_build_commits'] = sorted([c.tree_hash or c.id for c in param.upgrade_to_build_id.params_id.commit_link_ids.commit_id]) + cleaned_vals['upgrade_to_build_commits'] = get_commit_links_ident(param.upgrade_to_build_id.params_id.commit_link_ids) if param.upgrade_from_build_id: cleaned_vals['upgrade_from_build_id'] = param.upgrade_from_build_id.id if param.trigger_id.batch_dependent: @@ -266,6 +288,7 @@ class BuildResult(models.Model): create_batch_id = fields.Many2one('runbot.batch', related='params_id.create_batch_id', store=True, index=True) create_bundle_id = 
fields.Many2one('runbot.bundle', related='params_id.create_batch_id.bundle_id', index=True) dynamic_config = JsonDictField('Dynamic Config', related='params_id.dynamic_config') + priority_level = fields.Integer('Priority', related='create_batch_id.priority_level', store=True, index=True) # state machine global_state = fields.Selection(make_selection(state_order), string='Status', compute='_compute_global_state', store=True, recursive=True) @@ -512,6 +535,7 @@ def _add_child(self, param_values, orphan=False, description=False, additionnal_ 'params_id': self.params_id.copy(param_values).id, 'parent_id': self.id, 'build_type': self.build_type, + 'priority_level': self.priority_level, 'description': description, 'orphan_result': orphan, 'keep_host': self.keep_host, @@ -534,7 +558,7 @@ def _result_multi(self): def _compute_dest(self): for build in self: if build.id: - nickname = build.params_id.version_id.name + nickname = build.params_id.version_id.name or 'build' nickname = re.sub(r'"|\'|~|\:', '', nickname) nickname = re.sub(r'_|/|\.', '-', nickname) build.dest = ("%05d-%s" % (build.id or 0, nickname[:32])).lower() @@ -982,7 +1006,7 @@ def _docker_run(self, step, cmd=None, ro_volumes=None, env_variables=None, **kwa for dest, source in _ro_volumes.items(): ro_volumes[f'/data/build/{dest}'] = source if 'image_tag' not in kwargs: - kwargs.update({'image_tag': self.params_id.dockerfile_id.image_tag}) + kwargs.update({'image_tag': step.dockerfile_id.image_tag or self.params_id.dockerfile_id.image_tag}) dockerfile_variant = self.params_id.config_data.get('dockerfile_variant', step.dockerfile_variant) if dockerfile_variant and f'.{dockerfile_variant.lower()}' not in kwargs['image_tag']: kwargs['image_tag'] += f'.{dockerfile_variant.lower()}' @@ -1079,25 +1103,28 @@ def _checkout(self): return exports + def _list_available_modules(self): + for commit in self.env.context.get('defined_commit_ids') or self.params_id.commit_ids: + for (addons_path, module, manifest_file_name) in 
commit._list_available_modules(): + yield commit, addons_path, module, manifest_file_name + def _get_available_modules(self): all_modules = dict() available_modules = defaultdict(list) # repo_modules = [] - for commit in self.env.context.get('defined_commit_ids') or self.params_id.commit_ids: - for (addons_path, module, manifest_file_name) in commit._get_available_modules(): - if module in all_modules: - self._log( - 'Building environment', - '%s is a duplicated modules (found in "%s", already defined in %s)' % ( - module, - commit._source_path(addons_path, module, manifest_file_name), - all_modules[module]._source_path(addons_path, module, manifest_file_name)), - level='WARNING', - ) - else: - available_modules[commit.repo_id].append(module) - all_modules[module] = commit - # return repo_modules, available_modules + for commit, addons_path, module, manifest_file_name in self._list_available_modules(): + if module in all_modules: + self._log( + 'Building environment', + '%s is a duplicated modules (found in "%s", already defined in %s)' % ( + module, + commit._source_path(addons_path, module, manifest_file_name), + all_modules[module]._source_path(addons_path, module, manifest_file_name)), + level='WARNING', + ) + else: + available_modules[commit.repo_id].append(module) + all_modules[module] = commit return available_modules def _get_modules_to_test(self, modules_patterns=''): @@ -1108,13 +1135,56 @@ def _get_modules_to_test(self, modules_patterns=''): modules_patterns = (modules_patterns or '').split(',') return trigger._filter_modules_to_test(modules, params_patterns + modules_patterns) # we may switch params_patterns and modules_patterns order + @transactioncache + def _dependency_graph(self): + dependency_graph = defaultdict(set) + dependant_graph = defaultdict(set) + for commit in self.env.context.get('defined_commit_ids') or self.params_id.commit_ids: + file_paths = [] + modules = [] + for (addons_path, module, manifest_file_name) in 
commit._list_available_modules(): + file_paths.append(os.path.join(addons_path, module, manifest_file_name)) + modules.append(module) + contents = commit._git_show_files(file_paths) + for module, manifest in zip(modules, contents): + manifest_content = ast.literal_eval(manifest) + depends = manifest_content.get('depends', []) + if not depends and module != 'base': + depends = ['base'] + for dep in depends: + dependency_graph[module].add(dep) + dependant_graph[dep].add(module) + return dependency_graph, dependant_graph + + def search_modules_graph(self, modules, graph, depth=None): + def search(modules, depth=None, visited=None): + visited = visited or set() + modules = set(modules) - visited + visited |= modules + dependencies = set(modules) + if depth == 0 or not modules: + return dependencies + for module in modules: + dependencies |= search(graph[module], depth - 1 if depth is not None else None, visited) + return dependencies + return sorted(search(modules, depth)) + + def _get_modules_dependencies(self, modules, depth=None): + self.ensure_one() + dependency_graph, _ = self._dependency_graph() + return self.search_modules_graph(modules, dependency_graph, depth) + + def _get_dependant_modules(self, modules, depth=None): + _, dependant_graph = self._dependency_graph() + return self.search_modules_graph(modules, dependant_graph, depth) + def _local_pg_dropdb(self, dbname): msg = '' try: with local_pgadmin_cursor() as local_cr: query = 'SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname=%s' local_cr.execute(query, [dbname]) - local_cr.execute('SET LOCAL statement_timeout=10000') # avoid to be stuck if the dropdb is locked + local_cr.execute('SET statement_timeout=10000') # avoid to be stuck if the dropdb is locked local_cr.execute('DROP DATABASE IF EXISTS "%s"' % dbname) except Exception as e: msg = f"Failed to drop local logs database : {dbname} with exception: {e}" @@ -1230,19 +1300,24 @@ def _modified_files(self, commit_link_links=None): 
commit_link_links = self.params_id.commit_link_ids for commit_link in commit_link_links: commit = commit_link.commit_id - modified = commit.repo_id._git(['diff', '--name-only', '%s..%s' % (commit_link.merge_base_commit_id.name, commit.name)]) + commit._fetch() + modified = commit.repo_id._git(['diff', '--name-only', '%s..%s' % (commit_link.merge_base_commit_id.tree_hash, commit.tree_hash)]) if modified: files = [os.sep.join([self._docker_source_folder(commit), file]) for file in modified.split('\n') if file] modified_files[commit_link] = files return modified_files - def _modified_modules(self, commit_link_links=None): + def _modified_modules(self, commit_link_links=None, defaults=None): modified_files = self._modified_files(commit_link_links) modified_modules = set() for commit_link, files in modified_files.items(): commit = commit_link.commit_id for file in files: - modified_modules.add(commit.repo_id._get_module(file)) + module = commit.repo_id._get_module(file) + if module: + modified_modules.add(module) + elif defaults: + modified_modules |= set(defaults) return modified_modules def _get_upgrade_path(self): @@ -1458,14 +1533,18 @@ def _github_status(self): build.parent_id._github_status() else: trigger = build.params_id.trigger_id - if not trigger.ci_context: + ci_context = trigger.ci_context + if not ci_context: continue desc = trigger.ci_description or " (runtime %ss)" % (build.job_time,) if build.params_id.used_custom_trigger: - state = 'error' + ci_context += " (custom)" desc = "This build used custom config. Remove custom trigger to restore default ci" - elif build.global_result in ('ko', 'warn'): + if build.params_id.config_id == build.trigger_id.light_config_id: + ci_context += " (light)" + desc = "This build used a light config. 
Enable default build configuration to restore default ci" + if build.global_result in ('ko', 'warn'): state = 'error' elif build.global_state in ('pending', 'testing'): state = 'pending' @@ -1518,7 +1597,7 @@ def _github_status(self): else: target_url = f"{self.get_base_url()}/runbot/build/{build.id}" - commit._github_status(build, trigger.ci_context, state, target_url, desc, ci_strategy=trigger.ci_strategy) + commit._github_status(build, ci_context, state, target_url, desc, ci_strategy=trigger.ci_strategy) def _parse_config(self): return set(findall(self._server("tools/config.py"), r'--[\w-]+', )) diff --git a/runbot/models/build_config.py b/runbot/models/build_config.py index d369eca19..0c86e5eb8 100644 --- a/runbot/models/build_config.py +++ b/runbot/models/build_config.py @@ -46,28 +46,41 @@ def filter_all_modules(selector, build, dynamic_vars): return filter_default_modules(selector, build, dynamic_vars) +def get_dependencies(modules, build, dynamic_vars, depth=None): + depth = int(depth) if depth else None + modules = modules.split(',') + dependant = set(build._get_modules_dependencies(modules, depth)) - set(modules) + return ','.join(sorted(dependant)) + + +def get_dependant(modules, build, dynamic_vars, depth=None): + depth = int(depth) if depth else None + modules = modules.split(',') + dependant = set(build._get_dependant_modules(modules, depth)) - set(modules) + return ','.join(sorted(dependant)) + + def filter_default_modules(selector, build, dynamic_vars): - build._checkout() # we need to ensure source are exported before _get_modules_to_test modules = build._get_modules_to_test(selector) return ','.join(modules) -def keep_modified_modules(modules, build, dynamic_vars): +def select_existing_modules(selector, build, dynamic_vars): + selector = f'-*,{selector}' + return filter_default_modules(selector, build, dynamic_vars) + + +def keep_modified_modules(modules, build, dynamic_vars, *defaults): if 
build.params_id.config_data.get('skip_modified_modules_filter', False): return modules - modified_modules = build._modified_modules() + if defaults: + defaults = [d[1:-1] if re.match(r'^[\'"].*[\'"]$', d) else d for d in defaults] + modified_modules = build._modified_modules(defaults=defaults) modules = modules.split(',') filtered_modules = [module for module in modules if module in modified_modules] return ','.join(filtered_modules) -def keep_modified_modules_or_base(modules, build, dynamic_vars): - bundle = build.params_id.create_batch_id.bundle_id - if bundle.is_base or bundle.is_staging: - return modules - return keep_modified_modules(modules, build, dynamic_vars) - - def make_module_test_tags(modules, build, dynamic_vars): return ','.join([f'/{module}' for module in modules.split(',')]) @@ -88,6 +101,17 @@ def append_string(modules, build, dynamic_vars, element): return ','.join([f'{module}{element}' for module in modules.split(',')]) +def union(modules, build, dynamic_vars, element): + if re.match(r'^[\'"].*[\'"]$', element): + element = element[1:-1] + else: + element = dynamic_vars.get(element, element) + element = element.strip() + modules = set(modules.split(',')) if modules else set() + new_modules = set(element.split(',')) if element else set() + return ','.join(sorted(modules | new_modules)) + + class Config(models.Model): _name = 'runbot.build.config' _description = "Build config" @@ -222,14 +246,18 @@ def wrapper(value, path): return wrapper def VARS(vars, path): - if not isinstance(vars, dict): - raise ValidationError(f'{path} ({vars}) should be a dict') - for key, val in vars.items(): - TECHNICAL_NAME(key, f'{path}.{key}') - STR(val, f'{path}.{key}') + if isinstance(vars, list): + for item in vars: + VARS(item, path) + else: + if not isinstance(vars, dict): + raise ValidationError(f'{path} ({vars}) should be a dict') + for key, val in vars.items(): + TECHNICAL_NAME(key, f'{path}.{key}') + STR(val, f'{path}.{key}') NAME = str_checker(r'^[\w \-]+$') 
- STR = str_checker(r'.+') + STR = str_checker(r'.*') DYNAMIC_VALUE = STR TECHNICAL_NAME = str_checker(r'^[a-z0-9_\-]+$') BOOL = type_checker(bool) @@ -241,6 +269,7 @@ def VARS(vars, path): 'vars': OPTIONAL(VARS), 'steps': REQUIRED(LIST(STEP)), 'description': OPTIONAL(DYNAMIC_VALUE), + 'log': OPTIONAL(DYNAMIC_VALUE), } valid_steps['odoo'] = { 'name': REQUIRED(NAME), @@ -251,9 +280,11 @@ def VARS(vars, path): 'test_tags': OPTIONAL(DYNAMIC_VALUE), 'demo_mode': OPTIONAL(IN(['default', 'with_demo', 'without_demo'])), 'enable_auto_tags': OPTIONAL(BOOL), + 'extra_params': OPTIONAL(DYNAMIC_VALUE), 'cpu_limit': OPTIONAL(INT), 'export_database': OPTIONAL(BOOL), 'make_stats': OPTIONAL(BOOL), + 'log': OPTIONAL(DYNAMIC_VALUE), } valid_steps['create_build'] = { 'name': REQUIRED(NAME), @@ -262,6 +293,8 @@ def VARS(vars, path): 'for_each_vars': OPTIONAL(LIST(VARS)), 'for_each_module': OPTIONAL(DYNAMIC_VALUE), 'max_builds': OPTIONAL(INT), + 'if': OPTIONAL(DYNAMIC_VALUE), + 'log': OPTIONAL(DYNAMIC_VALUE), } valid_steps['restore'] = { 'name': REQUIRED(NAME), @@ -271,6 +304,7 @@ def VARS(vars, path): 'trigger_id': OPTIONAL(INT), 'use_current_batch': OPTIONAL(BOOL), 'zip_url': OPTIONAL(STR), + 'log': OPTIONAL(DYNAMIC_VALUE), } valid_steps['command'] = { 'name': REQUIRED(NAME), @@ -283,6 +317,7 @@ def VARS(vars, path): 'check_logs': OPTIONAL(LIST(STR)), 'expected_logs': OPTIONAL(LIST(STR)), 'make_stats': OPTIONAL(BOOL), + 'log': OPTIONAL(DYNAMIC_VALUE), } validate(config_schema, config, 'config') @@ -355,6 +390,7 @@ class ConfigStepUpgradeDb(models.Model): ('test_upgrade', 'Test Upgrade'), ('restore', 'Restore'), ('dynamic', 'Dynamic'), + ('semgrep', 'Semgrep'), ] @@ -377,6 +413,7 @@ class ConfigStep(models.Model): group_name = fields.Char('Group name', related='group.name') make_stats = fields.Boolean('Make stats', default=False) build_stat_regex_ids = fields.Many2many('runbot.build.stat.regex', string='Stats Regexes') + dockerfile_id = fields.Many2one('runbot.dockerfile', 
string='Dockerfile') dockerfile_variant = fields.Char('Docker Variant') # install_odoo create_db = fields.Boolean('Create Db', default=True, tracking=True) # future @@ -393,7 +430,7 @@ class ConfigStep(models.Model): enable_auto_tags = fields.Boolean('Allow auto tag', default=True, tracking=True) sub_command = fields.Char('Subcommand', tracking=True) extra_params = fields.Char('Extra cmd args', tracking=True) - additionnal_env = fields.Char('Extra env', help='Example: foo="bar";bar="foo". Cannot contains \' ', tracking=True) + additionnal_env = fields.Char('Extra env', help='Example: foo=bar;bar=foo. Cannot contains \' ', tracking=True) enable_log_db = fields.Boolean("Enable log db", default=True) demo_mode = fields.Selection( [('default', 'Default'), ('without_demo', 'Without Demo'), ('with_demo', 'With Demo')], @@ -427,6 +464,10 @@ class ConfigStep(models.Model): restore_download_db_suffix = fields.Char('Download db suffix') restore_rename_db_suffix = fields.Char('Rename db suffix') + semgrep_category = fields.Many2one('runbot.checker_category', string='Semgrep Category', tracking=True) + custom_link = fields.Char('Custom link for semgrep codes', tracking=True) + disable_nosem = fields.Boolean('Disable nosem', default=False, tracking=True) + commit_limit = fields.Integer('Commit limit', default=50) file_limit = fields.Integer('File limit', default=450) break_before_if_ko = fields.Boolean('Break before this step if build is ko') @@ -718,8 +759,8 @@ def _run_install_odoo(self, build, config_data=None): elif demo_mode == 'without_demo' and demo_installed_by_default: cmd.append('--without-demo=true') + extra_params = config_data.get('extra_params', build.params_id.extra_params or self.extra_params or '') # list module to install - extra_params = build.params_id.extra_params or self.extra_params or '' if mods and '-i' not in extra_params: cmd += ['-i', mods] config_path = build._server("tools/config.py") @@ -1180,7 +1221,7 @@ def _coverage_params(self, build, 
modules_to_install): docker_source_folder = build._docker_source_folder(commit) for manifest_file in commit.repo_id.manifest_files.split(','): pattern_to_omit.add('*%s' % manifest_file) - for (addons_path, module, _) in commit._get_available_modules(): + for (addons_path, module, _) in commit._list_available_modules(): if module not in modules_to_install: # we want to omit docker_source_folder/[addons/path/]module/* module_path_in_docker = os.sep.join([docker_source_folder, addons_path, module]) @@ -1232,6 +1273,8 @@ def _make_results(self, build): self._make_upgrade_results(build) elif active_job_type == 'restore': self._make_restore_results(build) + elif active_job_type == 'semgrep': + self._make_semgrep_results(build) def _make_python_results(self, build): eval_ctx = self._make_python_ctx(build) @@ -1491,35 +1534,70 @@ def _run_dynamic(self, build): raise RunbotException('Too many ancestors builds, possible cyclic dynamic build creation') if build.parent_id and build.dynamic_config == build.parent_id.dynamic_config: raise RunbotException('A child build cannot load the same dynamic config if parent, recursion detected') + + config_vars_list = build.dynamic_config.get('vars', {}) + if not isinstance(config_vars_list, list): + config_vars_list = [config_vars_list] + raw_vars = {} + for config_vars in config_vars_list: + raw_vars.update(config_vars) + + raw_vars.update(build.params_id.config_data.get('dynamic_vars', {})) + dynamic_vars = {} + # dynamic_vars can either be raw value like 'account', value to evaluate lazily in another dynamic value like 'account->!mail' + # or dynamic value that we want to evaluate early like '{{*|filter_all_modules|modified_modules}}' (between {{}}) + # this loop will evaluate the third category + # this allows to evaluate only once an expression that could be expensive to use it in multiple dynamic values + # this also allows to clarify the config by chaining vars definition + # TODO check ordering + for key, value in raw_vars.items():
+ dynamic_vars[key] = self._parse_dynamic_entry(value, build, dynamic_vars=dynamic_vars) + current_step = self._get_dynamic_step(build) if not current_step: build._log('Dynamic Step', 'No dynamic config or steps found, skipping', level="WARNING") return + if current_step.get('log'): + text = self._parse_dynamic_entry(current_step['log'], build, dynamic_vars=dynamic_vars) + build._log('_run_dynamic', text) if current_step['job_type'] == 'create_build': for_each_vars_list = current_step.get('for_each_vars', [{}]) if 'for_each_module' in current_step: modules_vars = [] for for_each_vars in for_each_vars_list: - modules_entry = self._parse_dynamic_entry(current_step['for_each_module'], build, additional_dynamic_vars=for_each_vars) + modules_entry = self._parse_dynamic_entry(current_step['for_each_module'], build, dynamic_vars={**dynamic_vars, **for_each_vars}) modules = [m.strip() for m in modules_entry.split(',') if m.strip()] for module in modules: module_vars = {**for_each_vars, 'module': module} modules_vars.append(module_vars) for_each_vars_list = modules_vars - parent_vars = {**build.dynamic_config.get('vars', {}), **build.params_id.config_data.get('dynamic_vars', {})} + child_data_list = [] for child_index, child in enumerate(current_step.get('children', [])): child_vars = child.get('vars', {}) for for_each_vars in for_each_vars_list: config_name = child.get('name', build.params_id.config_id.name) - dynamic_vars = {**parent_vars, **child_vars, **for_each_vars} + raw_dynamic_vars = {**dynamic_vars, **for_each_vars, **child_vars} + child_dynamic_vars = {} + # evaluate for_each_vars + for key, value in raw_dynamic_vars.items(): + child_dynamic_vars[key] = self._parse_dynamic_entry(value, build, dynamic_vars=child_dynamic_vars) + if 'if' in current_step: + condition = self._parse_dynamic_entry(current_step['if'], build, dynamic_vars=child_dynamic_vars) + if not condition: + continue if 'description' in child: - description = 
self._parse_dynamic_entry(child['description'], build, additional_dynamic_vars=dynamic_vars) + description = self._parse_dynamic_entry(child['description'], build, dynamic_vars=child_dynamic_vars) # note: we mainly need to provide additional_dynamic_vars because the child is not created yet at this point else: description = config_name + # filter vars not prefixed with _ to simplify child values + if child.get('log'): + text = self._parse_dynamic_entry(child['log'], build, dynamic_vars=child_dynamic_vars) + build._log('_run_dynamic', text) + public_child_dynamic_vars = {key: value for key, value in child_dynamic_vars.items() if not key.startswith('_')} child_data = { - 'config_data': {**build.params_id.config_data.dict, "dynamic_vars": dynamic_vars}, + 'config_data': {**build.params_id.config_data.dict, "dynamic_vars": public_child_dynamic_vars}, 'config_id': build.params_id.config_id.id, 'dynamic_active_step_index': 0, 'dynamic_config_position': f'{build.params_id.dynamic_config_position or ""}/{build.dynamic_active_step_index}.{child_index}', @@ -1550,12 +1628,15 @@ def _run_dynamic(self, build): install_modules_pattern = current_step.get('install_modules', '') if install_modules_pattern.split(',', 1)[0] not in ('*', '-*'): install_modules_pattern = '-*,' + install_modules_pattern - config_data['install_module_pattern'] = self._parse_dynamic_entry(install_modules_pattern, build) + config_data['install_module_pattern'] = self._parse_dynamic_entry(install_modules_pattern, build, dynamic_vars) if 'test_tags' in current_step: - config_data['test_tags'] = self._parse_dynamic_entry(current_step.get('test_tags'), build) + config_data['test_tags'] = self._parse_dynamic_entry(current_step.get('test_tags'), build, dynamic_vars) config_data['test_enable'] = bool(current_step.get('test_enable') or current_step.get('test_tags')) + if 'extra_params' in current_step: + config_data['extra_params'] = self._parse_dynamic_entry(current_step.get('extra_params'), build, dynamic_vars) 
+ for key in ('screencast', 'demo_mode', 'enable_auto_tags'): if key in current_step: value = current_step[key] @@ -1576,6 +1657,7 @@ def _run_dynamic(self, build): 'addons_path': ",".join(build._get_addons_path()), 'exports': ",".join(exports.keys()), 'exports_paths': ",".join(exports.values()), + **dynamic_vars, } command = [shlex.quote(self._parse_dynamic_entry(part, build, values)) for part in command] pres = [] @@ -1597,22 +1679,23 @@ def _get_dynamic_db_suffix(self, step): db_suffix = re.sub(r'[^a-z0-9_\-]', '_', db_suffix.lower()) return db_suffix - def _parse_dynamic_entry(self, entry, build, additional_dynamic_vars=None): + def _parse_dynamic_entry(self, entry, build, dynamic_vars): """ transforms a module/test-tags entry dynamically """ - dynamic_config = build.dynamic_config - expression_filters = { 'filter_all_modules': filter_all_modules, 'filter_default_modules': filter_default_modules, 'make_module_test_tags': make_module_test_tags, + 'select_existing_modules': select_existing_modules, + 'get_dependencies': get_dependencies, + 'get_dependant': get_dependant, 'prepend': prepend_string, 'append': append_string, 'modified_modules': keep_modified_modules, - 'modified_modules_or_base': keep_modified_modules_or_base, + 'union': union, } - dynamic_vars = {**dynamic_config.get('vars', {}), **build.params_id.config_data.get('dynamic_vars', {}), **(additional_dynamic_vars or {})} + dynamic_vars = dynamic_vars or {} def parse_expression(match): # inspired by jinja but with limited features @@ -1645,6 +1728,139 @@ def consume_remaining_tasks(self, build): return next_index < len(steps) return False + def _run_semgrep(self, build): + if not self._check_limits(build): + return + + rules = self.env['runbot.semgrep_rule'].search([ + ("category_id", '=', self.semgrep_category.id), + '|', ("min_version_number", '=', False), ("min_version_number", "<=", build.params_id.version_id.number), + '|', ('max_version_number', '=', False), ('max_version_number', '>', 
build.params_id.version_id.number), + ]) + if not rules: + return + + for rule in rules: + build._write_file(f"rules/{rule.name}.yaml", "rules:\n" + rule.rule_text) + + exports = build._checkout() + + files = [] + targets = [] + for link in build.params_id.commit_link_ids: + # filtering section for progressive CI (style & security) + modified = link.commit_id.repo_id._git([ + 'diff', + '%s..%s' % (link.merge_base_commit_id.name, link.commit_id.name), + '--', + '*.py', + '*.js', + ]) + for patched_file in PatchSet(modified.splitlines(keepends=True)): + target = patched_file.target_file.removeprefix('b/') + if target.startswith(('setup/',)): + continue + target = link.commit_id.repo_id.name + '/' + target + + before = len(targets) + targets.extend( + f"{target}:{line.target_line_no}" + for hunk in patched_file + for line in hunk + if line.is_added + ) + # only look at file if it has additions + if len(targets) > before: + files.append(target) + + if not files: + build._log("", "Nothing to scan.") + return + + build._log("", f"checking {len(targets)} lines in {len(files)} files") + + # add empty ignore file, otherwise semgrep ignores test directories by default + build._write_file(".semgrepignore", "") + build._write_file(f"logs/{self.name}-files_list.txt", "\n".join(files)) + build._write_file("targets", "\n".join(targets)) + + cmd = f"semgrep scan {'--disable-nosem' if self.disable_nosem else ''} -c /data/build/rules --json --timeout=0 --verbose $(cat logs/{self.name}-files_list.txt) > /data/build/results.json" + + return { + "cmd": cmd, + "container_name": build._get_docker_name(), + "ro_volumes": exports, + } + + def _make_semgrep_results(self, build): + step_result = "ok" + if build._is_file("targets"): + targets = set(build._read_file("targets").splitlines(keepends=False)) + f = build._read_file("results.json") + semgrep_result = json.loads(f) if f else {} + else: + targets = set() + semgrep_result = {} + + repo = { + link.commit_id.repo_id.name: 
(link.branch_id.remote_id.base_url, link.commit_id) + for link in build.params_id.commit_link_ids + } + + # some of the lints can catch the same issue multiple times on the same line, and semgrep does not dedup + seen = set() + + # rules results + for result in semgrep_result.get('results', ()): + _, _, code = result['check_id'].rpartition('.') + start = result['start']['line'] + matches = targets & { + f"{result['path']}:{line}" + for line in range(result['start']['line'], result['end']['line'] + 1) + } + if not matches: + continue + + if all((target, code) in seen for target in matches): + continue + seen.update((target, code) for target in matches) + + repo_name, path = result['path'].split('/', 1) + filename = f"{path}:{start}" + repo_base_url, commit = repo[repo_name] + commit_hash = commit.name + + # FIXME: should be a code block :( + extra = result['extra'] + # snippet = extra['lines'] #"\n".join(f'{line}' for line in extra['lines'].splitlines(keepends=False)) + file = commit._read_source(path, mode='rb') + snippet = file[result['start']['offset']:result['end']['offset']].decode() + + codelink = f"{code}: {extra['message']}\n" + if self.custom_link: + # message may be sensitive, do not display, show snippet on same line if single line, otherwise block below + codelink = f"[{code} 🔗]({self.custom_link}#{code}): " + if '\n' in snippet: + snippet = '\n' + snippet + + build._log( + "semgrep", + f"""\ + [%s](https://%s/blob/%s/%s#L%s-L%s) + {codelink}`%s` + """, filename, repo_base_url, commit_hash, path, result['start']['line'], result['end']['line'], snippet, + level=extra['severity'], + log_type='markdown', + ) + if extra['severity'] != 'INFO': + step_result = "ko" + + # internal semgrep errors + for err in semgrep_result.get('errors', ()): + build._log("semgrep", err.get('message') or str(err), log_type='markdown') + + build['local_result'] = build._get_worst_result([build.local_result, step_result]) + class ConfigStepOrder(models.Model): _name =
'runbot.build.config.step.order' diff --git a/runbot/models/build_config_codeowner.py b/runbot/models/build_config_codeowner.py index 9a07cc502..807043568 100644 --- a/runbot/models/build_config_codeowner.py +++ b/runbot/models/build_config_codeowner.py @@ -34,7 +34,7 @@ def _codeowners_regexes(self, codeowners, version_id): team_set |= set(t.strip() for t in github_teams) return list(regexes.items()) - def _reviewer_per_file(self, files, regexes, ownerships, repo): + def _reviewer_per_file(self, files, regexes, ownerships, repo, build): reviewer_per_file = {} for file in files: file_reviewers = set() @@ -42,7 +42,7 @@ def _reviewer_per_file(self, files, regexes, ownerships, repo): if re.match(regex, file): if not teams or 'none' in teams: file_reviewers = None - break # blacklisted, break + break # blacklisted, break file_reviewers |= teams if file_reviewers is None: continue @@ -56,8 +56,11 @@ def _reviewer_per_file(self, files, regexes, ownerships, repo): for ownership in ownerships: if file_module == ownership.module_id.name: file_reviewers.add(ownership.team_id.github_team) - if not file_reviewers and self.fallback_reviewer: - file_reviewers.add(self.fallback_reviewer) + if not file_reviewers: + if len(file.split('/')) <= 2: + build._log('', 'File %s is at the root level and it looks like it could be a mistake, remove it or ensure that a codeowner rule is added for this file', file, log_type='markdown', level="ERROR") + elif self.fallback_reviewer: + file_reviewers.add(self.fallback_reviewer) reviewer_per_file[file] = file_reviewers return reviewer_per_file @@ -121,7 +124,7 @@ def _run_codeowner(self, build): for commit_link, files in modified_files.items(): build._log('', 'Checking %s codeowner regexed on %s files' % (len(regexes), len(files))) reviewers = set() - reviewer_per_file = self._reviewer_per_file(files, regexes, ownerships, commit_link.commit_id.repo_id) + reviewer_per_file = self._reviewer_per_file(files, regexes, ownerships, 
commit_link.commit_id.repo_id, build) for file, file_reviewers in reviewer_per_file.items(): href = 'https://%s/blob/%s/%s' % (commit_link.branch_id.remote_id.base_url, commit_link.commit_id.name, file.split('/', 1)[-1]) if file_reviewers: diff --git a/runbot/models/build_error.py b/runbot/models/build_error.py index c5b4fdb1e..b165914d6 100644 --- a/runbot/models/build_error.py +++ b/runbot/models/build_error.py @@ -13,7 +13,7 @@ from werkzeug.urls import url_join from odoo import api, fields, models -from odoo.exceptions import UserError, ValidationError +from odoo.exceptions import AccessError, UserError, ValidationError from odoo.tools import SQL, lazy, ormcache from odoo.fields import Domain @@ -262,7 +262,7 @@ class BuildError(models.Model): unique_build_error_link_ids = fields.Many2many('runbot.build.error.link', compute='_compute_unique_build_error_link_ids') build_ids = fields.Many2many('runbot.build', compute=_compute_related_error_content_ids('build_ids'), search=_search_related_error_content_ids('build_ids')) bundle_ids = fields.Many2many('runbot.bundle', compute=_compute_related_error_content_ids('bundle_ids'), search=_search_related_error_content_ids('bundle_ids')) - version_ids = fields.Many2many('runbot.version', string='Versions', compute=_compute_related_error_content_ids('version_ids'), search=_search_related_error_content_ids('version_ids')) + version_ids = fields.Many2many('runbot.version', string='Versions', compute='_compute_version_ids', search=_search_related_error_content_ids('version_ids')) trigger_ids = fields.Many2many('runbot.trigger', string='Triggers', compute=_compute_related_error_content_ids('trigger_ids'), store=True) tag_ids = fields.Many2many('runbot.build.error.tag', string='Tags', compute=_compute_related_error_content_ids('tag_ids'), search=_search_related_error_content_ids('tag_ids')) random = fields.Boolean('Random', compute="_compute_random", store=True) @@ -322,6 +322,11 @@ def _compute_fixing_bundle_id(self): for record 
in self: record.fixing_bundle_id = record.fixing_pr_id.bundle_id if record.fixing_pr_id else False + @api.depends('error_content_ids.version_ids') + def _compute_version_ids(self): + for record in self: + record['version_ids'] = record.error_content_ids['version_ids'].sorted('number') + def _compute_disappearing_batch_ids(self): # this is really inefficient but should only be used in form view # One search per version where it appeared @@ -396,7 +401,7 @@ def _compute_count(self): for record in self: record.error_count = len(record.error_content_ids) - @api.depends('error_content_ids') + @api.depends('error_content_ids.random') def _compute_random(self): for record in self: record.random = any(error.random for error in record.error_content_ids) @@ -534,6 +539,12 @@ def write(self, vals): if not vals['active'] and build_error.active and build_error.last_seen_date and build_error.last_seen_date + relativedelta(days=1) > datetime.datetime.now(): raise UserError("This error broke less than one day ago can only be deactivated by admin") + writable_fields = ['responsible', 'fixing_pr_id', 'breaking_pr_id', 'customer', 'random', 'team_id', 'manual_team_id'] + if not self.env.su and not self.env.user.has_groups('runbot.group_runbot_admin,runbot.group_runbot_error_manager'): + no_access_fields = vals.keys() - writable_fields + if no_access_fields != set(): + raise AccessError(f"You are not allowed to modify the following field(s): {','.join(no_access_fields)}") + if (responsible_id := vals.get('responsible')) and vals.get('active', True): responsible = self.env['res.users'].browse(responsible_id) for build_error in self: @@ -567,6 +578,8 @@ def _merge(self, others): # TODO xdo split the error id change and other params merge in order to avoid the merge in write and write in merge recursion self.ensure_one error = self + fields_to_merge = ['responsible', 'fixing_pr_id', 'breaking_pr_id'] + fields_to_copy = ['manual_team_id'] for previous_error in others: # todo, check that 
all relevant fields are checked and transfered/logged if previous_error.test_tags and error.test_tags != previous_error.test_tags: @@ -581,14 +594,12 @@ def _merge(self, others): test_tags.append(tag) error.test_tags = ','.join(test_tags) previous_error.test_tags = False - if previous_error.responsible: - if error.responsible and error.responsible != previous_error.responsible and not self.env.su: - raise UserError(f"error {error.id} as already a responsible ({error.responsible}) cannot assign {previous_error.responsible}") - if not error.responsible: - error.responsible = previous_error.responsible - if previous_error.team_id: - if not error.team_id: - error.team_id = previous_error.team_id + for field in fields_to_merge + fields_to_copy: + if previous_error[field]: + if field in fields_to_merge and error[field] and error[field] != previous_error[field] and not self.env.su: + raise UserError(f"error {error.id} has already a {field} ({error[field]}) cannot assign {previous_error[field]}") + if not error[field]: + error[field] = previous_error[field] previous_error.error_content_ids.with_context(merging=True).write({'error_id': self}) previous_error.common_qualifiers = dict() previous_error.unique_qualifiers = dict() @@ -814,6 +825,7 @@ class BuildErrorContent(models.Model): error_active = fields.Boolean('Active', related='error_id.active') error_id = fields.Many2one('runbot.build.error', 'Linked to', index=True, required=True, tracking=True, ondelete='cascade') + create_error_id = fields.Many2one('runbot.build.error', 'Original error', index=True) error_display_id = fields.Integer(compute='_compute_error_display_id', string="Error id") content = fields.Text('Error message', required=True) cleaned_content = fields.Text('Cleaned error message') @@ -890,6 +902,7 @@ def create(self, vals_list): 'name': name, }) vals['error_id'] = error.id + vals['create_error_id'] = vals['error_id'] content = vals.get('content') cleaned_content = cleaners._r_sub(content) vals.update({
@@ -958,7 +971,7 @@ def _compute_version_ids(self): res = dict(self.env.cr.fetchall()) for build_error_content in self: - build_error_content.version_ids = self.env['runbot.version'].browse([v for v in res.get(build_error_content.id, []) if v]) + build_error_content.version_ids = self.env['runbot.version'].browse([v for v in res.get(build_error_content.id, []) if v]).sorted('number') @api.depends('build_ids') def _compute_trigger_ids(self): diff --git a/runbot/models/bundle.py b/runbot/models/bundle.py index 1c54d9d53..3e8073e89 100644 --- a/runbot/models/bundle.py +++ b/runbot/models/bundle.py @@ -57,6 +57,8 @@ class Bundle(models.Model): tag_ids = fields.Many2many('runbot.bundle.tag', string='Tags') team_id = fields.Many2one('runbot.team', compute='_compute_team_id', store=True, readonly=False) + priority_offset = fields.Integer("Priority offset", help="Offset in seconds to remove from the create date of a batch to define priority, positive value means higher priority, negative value means lower priority.") + def _compute_frontend_url(self): for bundle in self: bundle.frontend_url = f'/runbot/bundle/{bundle.id}' @@ -275,7 +277,7 @@ def _consistency_warning(self): warnings.append(('info', 'PR %s targeting a non base branch: %s' % (branch.dname, branch.target_branch_name))) else: warnings.append(('warning' if branch.alive else 'info', 'PR %s targeting wrong version: %s (expecting %s)' % (branch.dname, branch.target_branch_name, self.base_id.name))) - elif not branch.is_pr and not branch.name.startswith(self.base_id.name) and not self.defined_base_id: + elif not branch.is_pr and not branch.name.startswith(self.base_id.name) and not self.defined_base_id and branch.remote_id.repo_id.enforce_version: warnings.append(('warning', 'Branch %s not starting with version name (%s)' % (branch.dname, self.base_id.name))) return warnings @@ -320,7 +322,12 @@ def action_generate_custom_trigger_restore_action(self): return self._generate_custom_trigger_action(context) def 
action_disable_all_triggers(self): - triggers_to_disable = ( + self._configure_custom_trigger_start_mode('disable') + + def _configure_custom_trigger_start_mode(self, mode): + self.ensure_one() + + triggers_to_create = ( self.env["runbot.trigger"] .search([ ("id", "not in", self.trigger_custom_ids.trigger_id.ids), @@ -333,13 +340,26 @@ def action_disable_all_triggers(self): ) ) vals = [] - for trigger in triggers_to_disable: - vals.append({ - 'bundle_id': self.id, - 'trigger_id': trigger.id, - 'start_mode': 'disabled', - }) + bundle_repos = self.branch_ids.remote_id.repo_id + for trigger in triggers_to_create: + if trigger.repo_ids & bundle_repos or trigger.dependency_ids & bundle_repos: + vals.append({ + 'bundle_id': self.id, + 'trigger_id': trigger.id, + }) self.env['runbot.bundle.trigger.custom'].create(vals) + for custom_trigger in self.trigger_custom_ids: + trigger_mode = mode + if mode == 'light' and not custom_trigger.trigger_id.light_config_id: + trigger_mode = 'auto' + custom_trigger.start_mode = trigger_mode + + def _force_ci(self): + for bundle in self: + bundle._configure_custom_trigger_start_mode('force') + # we need to create a new batch in case some of the triggers were in minimal mode + batch = bundle._force() or bundle.last_batch + batch._log("Batch was requested for ci") class BundleTag(models.Model): diff --git a/runbot/models/commit.py b/runbot/models/commit.py index 02973cfd3..18d6922d8 100644 --- a/runbot/models/commit.py +++ b/runbot/models/commit.py @@ -3,7 +3,6 @@ import subprocess from ..common import os, RunbotException, make_github_session, transactioncache -import glob import shutil from odoo import models, fields, api @@ -66,22 +65,14 @@ def _rebase_on(self, commit): return self return self._get(self.name, self.repo_id.id, self.read()[0], commit.id) - def _get_available_modules(self): - for manifest_file_name in self.repo_id.manifest_files.split(','): # '__manifest__.py' '__openerp__.py' - for addons_path in (self.repo_id.addons_paths 
or '').split(','): # '' 'addons' 'odoo/addons' - sep = os.path.join(addons_path, '*') - for manifest_path in glob.glob(self._source_path(sep, manifest_file_name)): - module = os.path.basename(os.path.dirname(manifest_path)) - yield (addons_path, module, manifest_file_name) - def _list_files(self, patterns): #example: git ls-files --with-tree=abcf390f90dbdd39fd61abc53f8516e7278e0931 ':(glob)addons/*/*.py' ':(glob)odoo/addons/*/*.py' # note that glob is needed to avoid the star matching ** self.ensure_one() + self._fetch() return self.repo_id._git(['ls-files', '--with-tree', self.name, *patterns]).split('\n') def _list_available_modules(self): - # beta version, may replace _get_available_modules latter addons_paths = (self.repo_id.addons_paths or '').split(',') patterns = [] for manifest_file_name in self.repo_id.manifest_files.split(','): # '__manifest__.py' '__openerp__.py' @@ -98,12 +89,17 @@ def _list_available_modules(self): module, manifest_file_name = elems yield (addons_path, module, manifest_file_name) + @transactioncache # hack to avoid to fetch two time the same commit inside the same transaction + def _fetch(self): + self.repo_id._fetch(self.name) + if not self.repo_id._hash_exists(self.name): + self.repo_id._fetch(self.tree_hash) def _export(self, build): """Export a git repo into a sources""" # TODO add automated tests self.ensure_one() - self.repo_id._fetch(self.name) + self._fetch() if not self.env['runbot.commit.export'].search([('build_id', '=', build.id), ('commit_id', '=', self.id)]): self.env['runbot.commit.export'].create({'commit_id': self.id, 'build_id': build.id}) export_path = self._source_path() @@ -166,12 +162,43 @@ def _read_source(self, file, mode='r'): @transactioncache def _git_show_file(self, file): + return self._git_show_files([file])[0] + + def _git_show_files(self, files): self.ensure_one() + if not files: + return [] + self.repo_id._fetch(self.name) + + queries = "\n".join([f"{self.name}:{f}" for f in files]) + "\n" + try: - 
return self.repo_id._git(['show', '%s:%s' % (self.name, file)]) + buffer = self.repo_id._git( + ['cat-file', '--batch'], + input_data=queries, + raw=True, + ) except subprocess.CalledProcessError: - return False + return [False] * len(files) + + results = [] + offset = 0 + buffer_len = len(buffer) + while offset < buffer_len: + newline_idx = buffer.find(b'\n', offset) + if newline_idx == -1: + break + header = buffer[offset:newline_idx].decode('utf-8') + offset = newline_idx + 1 + try: + size_in_bytes = int(header.rsplit(' ', 1)[-1]) + except ValueError: # most likely missing + results.append(False) + continue + results.append(buffer[offset : offset + size_in_bytes].decode('utf-8', errors='replace')) + offset += size_in_bytes + 1 + return results def _source_path(self, *paths): if not self.tree_hash: diff --git a/runbot/models/custom_trigger.py b/runbot/models/custom_trigger.py index 53cc74281..a31d7fa55 100644 --- a/runbot/models/custom_trigger.py +++ b/runbot/models/custom_trigger.py @@ -8,7 +8,7 @@ class BundleTriggerCustomization(models.Model): _description = 'Custom trigger' trigger_id = fields.Many2one('runbot.trigger') - start_mode = fields.Selection([('disabled', 'Disabled'), ('auto', 'Auto'), ('force', 'Force')], required=True, default='auto') + start_mode = fields.Selection([('disabled', 'Disabled'), ('auto', 'Auto'), ('light', 'Light'), ('force', 'Force')], required=True, default='auto') use_base_commits = fields.Boolean("Use base commits", help="Allow to test a trigger without the branch changes", default=False) bundle_id = fields.Many2one('runbot.bundle') config_id = fields.Many2one('runbot.build.config') diff --git a/runbot/models/docker.py b/runbot/models/docker.py index cd4816ad9..547a3c6e0 100644 --- a/runbot/models/docker.py +++ b/runbot/models/docker.py @@ -2,8 +2,13 @@ import logging import os import re +import time +from pathlib import Path + import docker -from odoo import api, fields, models, exceptions +import requests + +from odoo import 
api, exceptions, fields, models from ..container import docker_build from ..fields import JsonDictField @@ -330,17 +335,62 @@ def _get_docker_metadata(self, image_id): return {'error': str(e)} return metadata + def _get_cached_content(self, docker_build_path): + self.ensure_one() + cache_dir = Path(self.env['runbot.runbot']._path('docker', 'cache')) + cache_dir.mkdir(exist_ok=True) + cache_re = re.compile(r'^#\s?CACHE\s(?P<duration>\d+)$') + add_re = re.compile(r'^ADD\s(?P<url>http.+)\s(?P<destination>.+)$') + lines = self.dockerfile.split('\n') + for i, line in enumerate(lines): + if cache_match := cache_re.match(line): + if add_match := add_re.match(lines[i + 1]): + cache_duration = int(cache_match.group('duration')) + url = add_match.group('url') + filename = re.sub(r'[^a-zA-Z0-9]', '_', url)[:255] + destination = add_match.group('destination') + # Use the destination name as hardlink name to avoid rebuild if file content is the same but not the url + hardlink_name = re.sub(r'[^a-zA-Z0-9]', '_', destination) + lines[i + 1] = f'COPY {hardlink_name} {destination}' + cache_file_path = cache_dir / filename + if not cache_file_path.exists() or time.time() - cache_file_path.lstat().st_mtime > cache_duration: + try: + with requests.get(url, stream=True) as response: + response.raise_for_status() + with cache_file_path.open('wb') as cache_file: + for chunk in response.iter_content(chunk_size=8192): + cache_file.write(chunk) + except (requests.exceptions.HTTPError, requests.exceptions.RequestException): + if cache_file_path.exists(): + cache_file_path.touch() # to avoid spamming in case of failures + self.env['runbot.runbot']._warning(f'Dockerfile {self.name} failed to fetch "{url}"') + else: + raise + hardlink_path = Path(docker_build_path) / hardlink_name + hardlink_path.unlink(missing_ok=True) + hardlink_path.hardlink_to(cache_file_path) + return '\n'.join(lines) + def _build(self, host=None): tag_dir = re.sub(r'[^\w]', '_', self.image_tag) docker_build_path =
self.env['runbot.runbot']._path('docker', tag_dir) os.makedirs(docker_build_path, exist_ok=True) - content = self.dockerfile - with open(self.env['runbot.runbot']._path('docker', tag_dir, 'Dockerfile'), 'w') as Dockerfile: - Dockerfile.write(content) - result = docker_build(docker_build_path, self.image_future_tag, self.pull_on_build) - duration = result['duration'] - msg = result['msg'] - success = image_id = result.get('image_id') + + duration = 0 + content = '' + image_id = None + try: + content = self._get_cached_content(docker_build_path) + with open(self.env['runbot.runbot']._path('docker', tag_dir, 'Dockerfile'), 'w', encoding="utf-8") as Dockerfile: + Dockerfile.write(content) + result = docker_build(docker_build_path, self.image_future_tag, self.pull_on_build) + duration = result['duration'] + msg = result['msg'] + success = image_id = result.get('image_id') + except Exception as e: + success = False + msg = f'Exception during Docker build: "{e}"' + docker_build_result_values = {'dockerfile_id': self.id, 'output': msg, 'duration': duration, 'content': content, 'host_id': host and host.id} if success: docker_build_result_values['result'] = 'success' diff --git a/runbot/models/ir_action.py b/runbot/models/ir_action.py new file mode 100644 index 000000000..95b82fc2e --- /dev/null +++ b/runbot/models/ir_action.py @@ -0,0 +1,12 @@ +import requests + +from odoo import models + + +class ExtendedServerActionContext(models.Model): + _inherit = 'ir.actions.server' + + def _get_eval_context(self, action=None): + ctx = super()._get_eval_context(action=action) + ctx.update(requests=requests.Session()) + return ctx diff --git a/runbot/models/ir_qweb.py b/runbot/models/ir_qweb.py index e9fa061ce..c8ea0eb37 100644 --- a/runbot/models/ir_qweb.py +++ b/runbot/models/ir_qweb.py @@ -1,5 +1,5 @@ -from ..common import s2human, s2human_long, precise_s2human -from odoo import models +from ..common import s2human, s2human_long, precise_s2human, transactioncache +from odoo import 
models, tools from odoo.http import request from odoo.addons.website.controllers.main import QueryURL @@ -12,3 +12,10 @@ def _prepare_frontend_environment(self, values): values['s2human_long'] = s2human_long values['precise_s2human'] = precise_s2human return response + + @tools.conditional( + 'xml' in tools.config['dev_mode'], + transactioncache, + ) # replace ormcache by transaction cache to avoid reading the same template multiple times in the same requests. Context is ignored but should be the same for each call in the same request + def _generate_code_cached(self, ref: int): + return super()._generate_code_cached(ref) diff --git a/runbot/models/project.py b/runbot/models/project.py index 5e9ee46d1..7032754c4 100644 --- a/runbot/models/project.py +++ b/runbot/models/project.py @@ -24,6 +24,10 @@ class Project(models.Model): hidden = fields.Boolean('Hidden', help='Hide this project from the main page') active = fields.Boolean("Active", default=True) process_delay = fields.Integer('Process delay', default=60, required=True, help="Delay between a push and a batch starting its process.") + next_freeze_tag_id = fields.Many2one('runbot.bundle.tag', string="Next freeze tag") + use_light_default = fields.Boolean('Use light config by default', help="Use the light config when possible for all triggers") + use_light_draft = fields.Boolean('Use light config for draft PRs', help="Use the light config when possible for bundle having draft pr") + use_light_no_pr = fields.Boolean('Use light config when no PR', help="Use the light config when possible for all bundles not having any pr") @api.constrains('process_delay') def _constraint_process_delay(self): diff --git a/runbot/models/repo.py b/runbot/models/repo.py index 14edd3760..175721603 100644 --- a/runbot/models/repo.py +++ b/runbot/models/repo.py @@ -55,6 +55,7 @@ class Trigger(models.Model): project_id = fields.Many2one('runbot.project', string="Project id", required=True) repo_ids = fields.Many2many('runbot.repo', 
relation='runbot_trigger_triggers', string="Triggers", domain="[('project_id', '=', project_id)]") dependency_ids = fields.Many2many('runbot.repo', relation='runbot_trigger_dependencies', string="Dependencies") + use_extra_slot = fields.Boolean('Use extra slot', help="If checked, builds from this trigger can use an extra slot on the builders (for light and fast triggers)") starts_before_ids = fields.Many2many( 'runbot.trigger', @@ -72,9 +73,11 @@ class Trigger(models.Model): ) module_filters = fields.One2many('runbot.module.filter', 'trigger_id', string="Module filters", help='Will be combined with repo module filters when used with this trigger') config_id = fields.Many2one('runbot.build.config', string="Config", required=True) + light_config_id = fields.Many2one('runbot.build.config', string="Light config", help="Alternative config to use when light mode is enabled") config_data = JsonDictField('Config Data') network_enabled = fields.Boolean('Network Enabled') batch_dependent = fields.Boolean('Batch Dependent', help="Force adding batch in build parameters to make it unique and give access to bundle") + version_dependent = fields.Boolean('Version Dependent', default=True, help="Add the version in build parameters. 
Uncheck if the version is not needed to determine the build result") ci_context = fields.Char("CI context", tracking=True) category_id = fields.Many2one('runbot.category', default=lambda self: self.env.ref('runbot.default_category', raise_if_not_found=False)) @@ -427,8 +430,10 @@ class Repo(models.Model): get_ref_time = fields.Float('Last refs db update', compute='_compute_get_ref_time') trigger_ids = fields.Many2many('runbot.trigger', relation='runbot_trigger_triggers', readonly=True) single_version = fields.Many2one('runbot.version', "Single version", help="Limit the repo to a single version for non versionned repo") + enforce_version = fields.Boolean('Force version', help="Force all bundle containing branch from this repo to be prefixed with the correct version", default=True) forbidden_regex = fields.Char('Forbidden regex', help="Regex that forid bundle creation if branch name is matching", tracking=True) invalid_branch_message = fields.Char('Forbidden branch message', tracking=True) + allow_slashes = fields.Boolean('Allow slashes in branch names', help="Allow branches with slashes in their name (e.g. odoo/tests/my_branch). If unchecked, only one level of branches is allowed (e.g. 
odoo/my_branch)", default=True) def _compute_get_ref_time(self): self.env.cr.execute(""" @@ -498,11 +503,19 @@ def _get_git_command(self, cmd, errors='strict'): cmd = ['git', '-C', self.path] + config_args + cmd return cmd - def _git(self, cmd, errors='strict', quiet=False): + def _git(self, cmd, errors='strict', quiet=False, input_data=None, raw=False): cmd = self._get_git_command(cmd, errors) if not quiet: _logger.info("git command: %s", shlex.join(cmd)) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(errors=errors) + kwargs = {'stderr': subprocess.STDOUT} + if input_data is not None: + if isinstance(input_data, str): + input_data = input_data.encode('utf-8') + kwargs['input'] = input_data + output = subprocess.check_output(cmd, **kwargs) + if raw: + return output + return output.decode(errors=errors) def _fetch(self, sha): if not self._hash_exists(sha): @@ -553,7 +566,10 @@ def _get_refs(self, max_age=30, ignore=None): self._set_ref_time(get_ref_time) fields = ['refname', 'objectname', 'committerdate:unix', 'authorname', 'authoremail', 'subject', 'committername', 'committeremail', 'tree'] fmt = "%00".join(["%(" + field + ")" for field in fields]) - cmd = ['for-each-ref', '--format', fmt, '--sort=-committerdate', 'refs/*/heads/*'] + refs_desc = 'refs/*/heads/*' + if self.allow_slashes: + refs_desc = 'refs/*/heads/**' + cmd = ['for-each-ref', '--format', fmt, '--sort=-committerdate', refs_desc] if any(remote.fetch_pull for remote in self.remote_ids): cmd.append('refs/*/pull/*') git_refs = self._git(cmd) @@ -580,7 +596,7 @@ def _find_or_create_branches(self, refs): """ # FIXME WIP - names = [r[0].split('/')[-1] for r in refs] + names = [r[0].split('/', 3)[-1] for r in refs] branches = self.env['runbot.branch'].search([('name', 'in', names), ('remote_id', 'in', self.remote_ids.ids)]) ref_branches = {branch._ref(): branch for branch in branches} new_branch_values = [] @@ -589,7 +605,7 @@ def _find_or_create_branches(self, refs): # format 
example: # refs/ruodoo-dev/heads/12.0-must-fail # refs/ruodoo/pull/1 - _, remote_name, branch_type, name = ref_name.split('/') + _, remote_name, branch_type, name = ref_name.split('/', 3) remote_id = self.remote_ids.filtered(lambda r: r.remote_name == remote_name).id if not remote_id: _logger.warning('Remote %s not found', remote_name) diff --git a/runbot/models/runbot.py b/runbot/models/runbot.py index f750ef8fc..047e3090b 100644 --- a/runbot/models/runbot.py +++ b/runbot/models/runbot.py @@ -138,7 +138,7 @@ def _allocate_builds(self, host, nb_slots, domain=None): if domain: non_allocated_domain = Domain.AND([non_allocated_domain, domain]) query = self.env['runbot.build']._search(non_allocated_domain) - query.order = 'runbot_build.create_batch_id' + query.order = 'runbot_build.priority_level' self.env.execute_query(SQL("""UPDATE runbot_build SET diff --git a/runbot/models/semgrep_rule.py b/runbot/models/semgrep_rule.py new file mode 100644 index 000000000..c8dcffef0 --- /dev/null +++ b/runbot/models/semgrep_rule.py @@ -0,0 +1,69 @@ +from odoo import api, fields, models + + +class SemgrepRule(models.Model): + _name = 'runbot.semgrep_rule' + _description = 'Semgrep Rule' + _inherit = ['mail.thread'] + + name = fields.Char(string='Rule Name', required=True) + category_id = fields.Many2one('runbot.checker_category', string='Category', required=True, index=True) + language = fields.Selection([('python', 'Python'), ('javascript', 'JavaScript'), ('generic', 'Generic')], required=True) + max_version_number = fields.Char(string='Max Odoo Version', help='Maximum exclusive Odoo version this rule applies to') + min_version_number = fields.Char(string='Min Odoo Version', help='Minimum inclusive Odoo version this rule applies to') + message = fields.Char(string='Error message', help='Message to display when the rule is triggered', required=True) + rule = fields.Text("Rule", required=True) + rule_text = fields.Text("Rule Text", compute='_compute_rule_text') + severity = 
fields.Selection([('INFO', 'INFO'), ('WARNING', 'WARNING'), ('ERROR', 'ERROR')], string='Severity', required=True) + + @api.depends('name', 'message', 'severity', 'language', 'rule') + def _compute_rule_text(self): + def indent_by(s, by=2): + indent = " " * by + return ''.join( + l if l.isspace() else indent + l + for l in s.splitlines(keepends=True) + ) + + def count_indent(s): + for line in s.splitlines(keepends=False): + if line.isspace(): + continue + return len(line) - len(line.lstrip()) + return None + + self.rule_text = '' + for r in self: + rule = r.rule + if not rule: + continue + + indent = count_indent(rule) + if indent is None: + continue + + if indent < 2: + rule = indent_by(rule, 2 - indent) + indent = 2 + + i_indent = " " * (indent - 2) + s_indent = " " * indent + r.rule_text = f"""\ +{i_indent}- id: {r.name} +{s_indent}languages: [{r.language}] +{s_indent}severity: {r.severity} +{s_indent}message: {r.message!r} +{rule} + """ + + +class CheckerCategory(models.Model): + _name = 'runbot.checker_category' + _description = 'Checker Category' + + name = fields.Char(string='Category Name', required=True) + + _unique_name = models.Constraint( + 'unique (name)', + "avoid duplicate Category", + ) diff --git a/runbot/security/ir.model.access.csv b/runbot/security/ir.model.access.csv index 8e4c178bf..6c233c654 100644 --- a/runbot/security/ir.model.access.csv +++ b/runbot/security/ir.model.access.csv @@ -22,7 +22,7 @@ access_runbot_build_config_step_order_manager,runbot_build_config_step_order_man access_runbot_config_step_upgrade_db_user,runbot_config_step_upgrade_db_user,runbot.model_runbot_config_step_upgrade_db,group_user,1,0,0,0 access_runbot_config_step_upgrade_db_manager,runbot_config_step_upgrade_db_manager,runbot.model_runbot_config_step_upgrade_db,runbot.group_build_config_user,1,1,1,1 -access_runbot_build_error_user,runbot_build_error_user,runbot.model_runbot_build_error,group_user,1,0,0,0 
+access_runbot_build_error_user,runbot_build_error_user,runbot.model_runbot_build_error,group_user,1,1,0,0 access_runbot_build_error_admin,runbot_build_error_admin,runbot.model_runbot_build_error,runbot.group_runbot_admin,1,1,1,1 access_runbot_build_error_manager,runbot_build_error_manager,runbot.model_runbot_build_error,runbot.group_runbot_error_manager,1,1,1,1 @@ -178,3 +178,8 @@ access_runbot_build_error_merge_filters_user,access_runbot_build_error_merge_fil access_runbot_bundle_tag_admin,access_runbot_bundle_tag_admin,runbot.model_runbot_bundle_tag,runbot.group_runbot_admin,1,1,1,1 access_runbot_bundle_tag_user,access_runbot_bundle_tag_user,runbot.model_runbot_bundle_tag,group_user,1,0,0,0 + +runbot.access_runbot_semgrep_rule,access_runbot_semgrep_rule,runbot.model_runbot_semgrep_rule,base.group_user,1,0,0,0 +runbot.access_runbot_semgrep_rule_admin,access_runbot_semgrep_rule_admin,runbot.model_runbot_semgrep_rule,runbot.group_runbot_admin,1,1,1,1 +runbot.access_runbot_checker_category,access_runbot_checker_category,runbot.model_runbot_checker_category,base.group_user,1,0,0,0 +runbot.access_runbot_checker_category_admin,access_runbot_checker_category_admin,runbot.model_runbot_checker_category,runbot.group_runbot_admin,1,1,1,1 diff --git a/runbot/static/src/css/runbot.css b/runbot/static/src/css/runbot.css index 46415f421..c275824a5 100644 --- a/runbot/static/src/css/runbot.css +++ b/runbot/static/src/css/runbot.css @@ -1,5 +1,9 @@ :root { --gray: #6c757d; /* used for batch limitation */ + --btn-default-color: var(--bs-body-color); + --btn-default-bg: var(--bs-body-bg); + --btn-default-border: #ccc; + --active-project-color: #777; } /* @@ -13,7 +17,8 @@ --bs-info-bg-subtle: #d9edf7; --bs-info-rgb: 23, 162, 184; } -:root[data-bs-theme=red404] { + +:root[data-bs-theme=red404] { --bs-success-bg-subtle: #cdffb9; --bs-danger-bg-subtle: #e67ecf; --bs-warning-bg-subtle: #fae9b1; @@ -21,6 +26,11 @@ --bs-info-rgb: 23, 162, 184; } +:root[data-bs-theme=dark] { + 
--btn-default-border: #333; + --active-project-color: #CCC; +} + [data-bs-theme=legacy] .text-bg-info { color: #fff !important; /* black by default, changes from previous version, color forced to fit with --bs-info-rgb*/ } @@ -59,37 +69,28 @@ --bs-btn-disabled-border-color: #b90e6c; } - -:root { - --alternative:#ccc; - --btn-default-color: var(--bs-body-color); - --btn-default-border:#ccc; - --bs-default-rgb: var(--bs-body-color-rgb); - --active-project-color: #777; - -} - -:root[data-bs-theme=dark] { - --btn-default-border:#333; - --btn-default-color: var(--bs-body-color); - --active-project-color: #CCC; -} - .btn-default { --bs-btn-color: var(--btn-default-color); - --bs-btn-bg: var(--bs-body-bg); + --bs-btn-bg: var(--btn-default-bg); --bs-btn-border-color: var(--btn-default-border); --bs-btn-hover-color: var(--btn-default-color); - --bs-btn-hover-bg: var(--btn-default-border); - --bs-btn-hover-border-color: var(--btn-default-border); + --bs-btn-hover-bg: color-mix(in lab, var(--btn-default-bg), black 15%); + --bs-btn-hover-border-color: color-mix(in lab, var(--btn-default-border), black 10%); --bs-btn-focus-shadow-rgb: 60, 153, 110; --bs-btn-active-color: var(--btn-default-color); - --bs-btn-active-bg: var(--bs-body-bg); - --bs-btn-active-border-color: var(--bs-body-bg); + --bs-btn-active-bg: color-mix(in lab, var(--btn-default-bg), black 20%); + --bs-btn-active-border-color: color-mix(in lab, var(--btn-default-border), black 15%); --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); --bs-btn-disabled-color: var(--btn-default-color); - --bs-btn-disabled-bg: var(--bs-body-bg); - --bs-btn-disabled-border-color: var(--btn-default-border);; + --bs-btn-disabled-bg: var(--btn-default-bg); + --bs-btn-disabled-border-color: var(--btn-default-border); +} + +[data-bs-theme=dark] .btn-default { + --bs-btn-hover-bg: color-mix(in lab, var(--btn-default-bg), white 15%); + --bs-btn-hover-border-color: color-mix(in lab, var(--btn-default-border), white 10%); + 
--bs-btn-active-bg: color-mix(in lab, var(--btn-default-bg), white 20%); + --bs-btn-active-border-color: color-mix(in lab, var(--btn-default-border), white 15%); } .btn-info { diff --git a/runbot/static/src/js/fields/fields.js b/runbot/static/src/js/fields/fields.js index 9db40bfeb..9d36fb7ff 100644 --- a/runbot/static/src/js/fields/fields.js +++ b/runbot/static/src/js/fields/fields.js @@ -1,44 +1,41 @@ -/** @odoo-module **/ - import { TextField } from "@web/views/fields/text/text_field"; import { CharField } from "@web/views/fields/char/char_field"; import { Many2OneField } from "@web/views/fields/many2one/many2one_field"; -import { _lt } from "@web/core/l10n/translation"; import { formatDateTime } from "@web/core/l10n/dates"; import { registry } from "@web/core/registry"; import { useInputField } from "@web/views/fields/input_field_hook"; -import { useRef, xml, Component, markup} from "@odoo/owl"; +import { useRef, xml, Component, markup } from "@odoo/owl"; import { useAutoresize } from "@web/core/utils/autoresize"; import { getFormattedValue } from "@web/views/utils"; import { UrlField } from "@web/views/fields/url/url_field"; -import { X2ManyField , x2ManyField} from "@web/views/fields/x2many/x2many_field"; +import { X2ManyField , x2ManyField } from "@web/views/fields/x2many/x2many_field"; import { BooleanToggleField } from "@web/views/fields/boolean_toggle/boolean_toggle_field"; - // https://stackoverflow.com/questions/4810841/pretty-print-json-using-javascript function colorizeJson(json) { - json = json.replace(/&/g, '&').replace(//g, '>'); - return json.replace(/("(\\u[a-zA-Z0-9]{4}|\\[^u]|[^\\"])*"(\s*:)?|\b(true|false|null)\b|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?)/g, function (match) { - var cls = ''; + json = json.replace(/&/g, "&").replace(//g, ">"); + return json.replace(/("(\\u[a-zA-Z0-9]{4}|\\[^u]|[^\\"])*"(\s*:)?|\b(true|false|null)\b|-?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?)/g, function (match) { + let cls = ""; if (/^"/.test(match)) { if (/:$/.test(match)) { - 
cls = 'o_runbot_json_key'; + cls = "o_runbot_json_key"; } else { - cls = 'o_runbot_json_value'; + cls = "o_runbot_json_value"; } } - return '' + match + ''; + return `${match}`; }); } function stringify(obj) { - return JSON.stringify(obj, null, '\t'); - } + return JSON.stringify(obj, null, "\t"); +} + export class JsonField extends TextField { static template = xml` - + @@ -54,6 +51,7 @@ export class JsonField extends TextField { `; + setup() { this.divRef = useRef("div"); this.textareaRef = useRef("textarea"); @@ -84,11 +82,9 @@ registry.category("fields").add("runbotjsonb", { export class FrontendUrl extends Component { static template = xml` -
+
`; - static components = { Many2OneField }; - static props = { ...Many2OneField.props, linkField: { type: String, optional: true }, @@ -96,21 +92,21 @@ export class FrontendUrl extends Component { get displayValue() { if (this.props.record.data[this.props.name].isLuxonDateTime){ - return formatDateTime(this.props.record.data[this.props.name]) + return formatDateTime(this.props.record.data[this.props.name]); } else { - return this.props.record.data[this.props.name] ? getFormattedValue(this.props.record, this.props.name) : '' + return this.props.record.data[this.props.name] ? getFormattedValue(this.props.record, this.props.name) : ""; } } get route() { - return this._route(this.props.linkField || this.props.name) + return this._route(this.props.linkField || this.props.name); } _route(fieldName) { const model = this.props.record.fields[fieldName].relation || "runbot.unknown"; const { id } = this.props.record.data[fieldName]; - if (model.startsWith('runbot.')){ - return '/runbot/' + model.split('.')[1] + '/' + id; + if (model.startsWith("runbot.")){ + return `/runbot/${model.split(".")[1]}/${id}`; } else { return false; } @@ -127,22 +123,20 @@ registry.category("fields").add("frontend_url", { }, }); - export class FieldCharFrontendUrl extends Component { - static template = xml` -
-
-
-
`; - - static components = { CharField } +
+
+
+
+ `; + static components = { CharField }; get route() { const model = this.props.record.resModel; const id = this.props.record.resId; - if (model.startsWith('runbot.')) { - return '/runbot/' + model.split('.')[1] + '/' + id; + if (model.startsWith("runbot.")) { + return `/runbot/${model.split(".")[1]}/${id}`; } else { return false; } @@ -154,36 +148,33 @@ registry.category("fields").add("char_frontend_url", { component: FieldCharFrontendUrl, }); - // Pull Request URL Widget const pullRequestRegex = /\/([a-zA-Z-_]+\/[a-zA-Z-_]+)\/pull\/(\d+)/; class PullRequestUrlField extends UrlField { static template = xml` `; - static components = { UrlField } + static components = { UrlField }; + get fieldProps() { - const props = {...this.props }; - const parts = pullRequestRegex.exec(this.props.record.data[props.name]) + const props = { ...this.props }; + const parts = pullRequestRegex.exec(this.props.record.data[props.name]); if (parts) { props.text = `${parts[1]}#${parts[2]}`; } - return props + return props; } } PullRequestUrlField.supportedTypes = ["char"]; - registry.category("fields").add("pull_request_url", { supportedTypes: ["char"], component: PullRequestUrlField, }); - export class Matrixx2ManyField extends X2ManyField { - static template = 'runbot.Matrixx2ManyField'; - + static template = "runbot.Matrixx2ManyField"; static components = { BooleanToggleField }; getEntry(from, to) { @@ -200,11 +191,11 @@ export class Matrixx2ManyField extends X2ManyField { return [...new Set(versions)].sort().reverse(); } } + export const matrixx2ManyField = { ...x2ManyField, component: Matrixx2ManyField, useSubView: false, }; - registry.category("fields").add("version_matrix", matrixx2ManyField); diff --git a/runbot/static/src/js/fields/history_graph.js b/runbot/static/src/js/fields/history_graph.js index e285d963c..6f6a3370c 100644 --- a/runbot/static/src/js/fields/history_graph.js +++ b/runbot/static/src/js/fields/history_graph.js @@ -1,5 +1,3 @@ -/** @odoo-module **/ -import { _lt 
} from "@web/core/l10n/translation"; import { registry } from "@web/core/registry"; import { useRef, xml, Component, useEffect } from "@odoo/owl"; @@ -9,21 +7,20 @@ export class HistoryGraph extends Component { `; + setup() { this.canvasRef = useRef("canvas"); useEffect(() => this.renderErrorGraph()); } renderErrorGraph(activeCell) { - const data = this.props.record.data[this.props.name] || {}; const errorId = data.error_id; const projectId = data.project_id; const categoryId = data.category_id; const breaking_pr_close_dates = data.breaking_pr_close_dates; const fixing_pr_close_dates = data.fixing_pr_close_dates; - - const canvas = this.canvasRef.el + const canvas = this.canvasRef.el; const ctx = canvas.getContext("2d"); const maxValue = data.max_count; const canvasBorder = 1; @@ -37,26 +34,25 @@ export class HistoryGraph extends Component { canvas.width = canvasWidth; canvas.height = canvasHeight; - function getColor(value, opacity) { if (value >= 10) { return `rgba(255, 0, 0, ${opacity})`; // red } else if (value >= 5) { return `rgba(255, 165, 0, ${opacity})`; // orange } - return `rgba(0, 170, 0, ${opacity})` // green + return `rgba(0, 170, 0, ${opacity})`; // green } ctx.clearRect(0, 0, canvasWidth, canvasHeight); ctx.fillStyle = "#EEE"; ctx.fillRect(0, 0, canvasWidth, canvasHeight); ctx.strokeStyle = "#333"; - ctx.lineWidth = canvasBorder * 2; // * 2 to account for each side, not only inner width - ctx.strokeRect(0, 0, canvasWidth, canvasHeight,); + ctx.lineWidth = canvasBorder * 2; // * 2 to account for each side, not only inner width + ctx.strokeRect(0, 0, canvasWidth, canvasHeight); data.date_labels.forEach((dateLabel, idx) => { data.version_labels.forEach((versionLabel, idy) => { - let version_id = data.versions_ids[idy] + const version_id = data.versions_ids[idy]; let value = data.daily_version_freq[idx][idy] || 0; let cellColor = "white"; let cellOpacity = 0; @@ -70,13 +66,13 @@ export class HistoryGraph extends Component { ctx.fillStyle = cellColor; 
ctx.fillRect(posX, posY, cellWidth, cellHeight); + if (activeCell && activeCell.col === idx && activeCell.row === idy) { ctx.strokeStyle = "black"; ctx.lineWidth = 2; ctx.strokeRect(posX, posY, cellWidth, cellHeight); } - if (fixing_pr_close_dates[version_id] == dateLabel) { ctx.fillStyle = "black"; ctx.font = "12px Arial"; @@ -87,13 +83,11 @@ export class HistoryGraph extends Component { ctx.font = "12px Arial"; ctx.fillText("✗", posX + cellWidth / 2 - 4, posY + cellHeight / 2 + 4); } - - }); }); if (mouseActions) { canvas.onmousemove = (event) => { - let tooltip = canvas.parentElement.querySelector('.history-graph-tooltip'); + let tooltip = canvas.parentElement.querySelector(".history-graph-tooltip"); if (tooltip) { tooltip.remove(); } @@ -101,16 +95,16 @@ export class HistoryGraph extends Component { const { col, row, value, dateLabel, versionLabel } = this.getCellFromEvent(event); if ( col >= 0 && row >= 0) { - tooltip = document.createElement('div'); - tooltip.className = 'history-graph-tooltip'; - tooltip.style.position = 'absolute'; + tooltip = document.createElement("div"); + tooltip.className = "history-graph-tooltip"; + tooltip.style.position = "absolute"; tooltip.style.left = `${canvas.offsetLeft}px`; tooltip.style.top = `${canvas.offsetTop + canvas.height}px`; - tooltip.style.background = '#fff'; - tooltip.style.border = '1px solid #333'; - tooltip.style.padding = '4px 8px'; - tooltip.style.fontSize = '12px'; - tooltip.style.pointerEvents = 'none'; + tooltip.style.background = "#fff"; + tooltip.style.border = "1px solid #333"; + tooltip.style.padding = "4px 8px"; + tooltip.style.fontSize = "12px"; + tooltip.style.pointerEvents = "none"; tooltip.style.zIndex = 1000; tooltip.innerHTML = ` Date: ${dateLabel} @@ -125,23 +119,23 @@ export class HistoryGraph extends Component { }; canvas.onmouseleave = () => { - const tooltip = canvas.parentElement.querySelector('.history-graph-tooltip'); + const tooltip = 
canvas.parentElement.querySelector(".history-graph-tooltip"); if (tooltip) { tooltip.remove(); - this.renderErrorGraph() + this.renderErrorGraph(); } }; canvas.onclick = (event) => { - const { col, row, value, dateLabel, versionLabel } = this.getCellFromEvent(event); + const { col, row, dateLabel } = this.getCellFromEvent(event); if (col >= 0 && row >= 0) { const url = `/runbot/batches/${projectId}/${categoryId}/${dateLabel}/${errorId}`; - window.open(url, '_blank'); + window.open(url, "_blank"); } - } + }; } - } + getCellFromEvent(event) { const data = this.props.record.data[this.props.name] || {}; const rect = this.canvasRef.el.getBoundingClientRect(); @@ -149,13 +143,13 @@ export class HistoryGraph extends Component { const y = event.clientY - rect.top - 1; // Adjust for canvas border const col = Math.floor(x / this.props.cellSize); const row = Math.floor(y / this.props.cellSize); - if ( col >= 0 && col < data.date_labels.length && row >= 0 && row < data.version_labels.length) { + if ( col >= 0 && col < data.date_labels.length && row >= 0 && row < data.version_labels.length) { const value = data.daily_version_freq[col][row] || 0; const dateLabel = data.date_labels[col]; const versionLabel = data.version_labels[row]; return { col, row, value, dateLabel, versionLabel }; } else { - return { col: -1, row: -1, value: 0, dateLabel: '', versionLabel: '' }; + return { col: -1, row: -1, value: 0, dateLabel: "", versionLabel: "" }; } } } diff --git a/runbot/static/src/js/fields/tracking_value.js b/runbot/static/src/js/fields/tracking_value.js index 14058b1cd..b08647d1b 100644 --- a/runbot/static/src/js/fields/tracking_value.js +++ b/runbot/static/src/js/fields/tracking_value.js @@ -1,4 +1,3 @@ -/** @odoo-module **/ import { patch } from "@web/core/utils/patch"; import { Message } from "@mail/core/common/message"; @@ -7,22 +6,27 @@ patch(Message.prototype, { super.setup(...arguments); this.kept = false; }, + isMultiline(trackingValue) { const oldValue = 
trackingValue.oldValue; const newValue = trackingValue.newValue; - return ((oldValue && typeof oldValue=== 'string' && oldValue.includes('\n')) && (newValue && typeof oldValue=== 'string' && newValue.includes('\n'))) + return ((oldValue && typeof oldValue=== "string" && oldValue.includes("\n")) && (newValue && typeof oldValue=== "string" && newValue.includes("\n"))); }, + formatTracking(trackingFieldInfo, trackingValue) { - return super.formatTracking(trackingFieldInfo, trackingValue) + return super.formatTracking(trackingFieldInfo, trackingValue); }, + toggleKept() { this.kept = !this.kept; }, + copyToClipboard(trackingValue) { return function () { navigator.clipboard.writeText(trackingValue); }; }, + lines(trackingValue) { const oldValue = trackingValue.oldValue; const newValue = trackingValue.newValue; @@ -30,45 +34,47 @@ patch(Message.prototype, { const lines = this.prepareForRendering(diff); return lines; }, + makeDiff(text1, text2) { - var dmp = new diff_match_patch(); - var a = dmp.diff_linesToChars_(text1, text2); - var lineText1 = a.chars1; - var lineText2 = a.chars2; - var lineArray = a.lineArray; - var diffs = dmp.diff_main(lineText1, lineText2, false); + const dmp = new diff_match_patch(); + const a = dmp.diff_linesToChars_(text1, text2); + const lineText1 = a.chars1; + const lineText2 = a.chars2; + const lineArray = a.lineArray; + const diffs = dmp.diff_main(lineText1, lineText2, false); dmp.diff_charsToLines_(diffs, lineArray); dmp.diff_cleanupSemantic(diffs); return diffs; }, + prepareForRendering(diffs) { - var lines = []; - var pre_line_counter = 0 - var post_line_counter = 0 - for (var x = 0; x < diffs.length; x++) { - var diff_type = diffs[x][0]; - var data = diffs[x][1]; - var data_lines = data.split('\n'); - for (var line_index in data_lines) { - var line = data_lines[line_index]; - line = line.replace(/&/g, '&'); - line = line.replace(//g, '>'); + const lines = []; + let pre_line_counter = 0; + let post_line_counter = 0; + for (let x = 0; x < 
diffs.length; x++) { + const diff_type = diffs[x][0]; + const data = diffs[x][1]; + const data_lines = data.split("\n"); + for (const line_index in data_lines) { + let line = data_lines[line_index]; + line = line.replace(/&/g, "&"); + line = line.replace(//g, ">"); //text = text.replace(/\n/g, '
'); //text = text.replace(/ /g, '  '); if (diff_type == -1) { - lines.push({type:'removed', pre_line_counter: pre_line_counter, post_line_counter: '-', line: line}) - pre_line_counter += 1 + lines.push({ type: "removed", pre_line_counter: pre_line_counter, post_line_counter: "-", line: line }); + pre_line_counter += 1; } else if (diff_type == 0) { - lines.push({type:'kept', pre_line_counter: '', post_line_counter: post_line_counter, line: line}) - pre_line_counter += 1 - post_line_counter +=1 + lines.push({ type: "kept", pre_line_counter: "", post_line_counter: post_line_counter, line: line }); + pre_line_counter += 1; + post_line_counter += 1; } else if (diff_type == 1) { - lines.push({type:'added', pre_line_counter: '+', post_line_counter: post_line_counter, line: line}) - post_line_counter +=1 + lines.push({ type: "added", pre_line_counter: "+", post_line_counter: post_line_counter, line: line }); + post_line_counter += 1; } } } return lines; - }, + }, }); diff --git a/runbot/static/src/js/runbot.js b/runbot/static/src/js/runbot.js index 58a902976..76b2a129b 100644 --- a/runbot/static/src/js/runbot.js +++ b/runbot/static/src/js/runbot.js @@ -38,3 +38,19 @@ function copyToClipboard(text) { } navigator.clipboard.writeText(text); } + +document.addEventListener('DOMContentLoaded', function() { + const collapseElement = document.getElementById('customTriggers'); + if (collapseElement) { + collapseElement.addEventListener('show.bs.collapse', function () { + const url = new URL(window.location); + url.searchParams.set('expand_custom', '1'); + window.history.replaceState({}, '', url); + }); + collapseElement.addEventListener('hide.bs.collapse', function () { + const url = new URL(window.location); + url.searchParams.delete('expand_custom'); + window.history.replaceState({}, '', url); + }); + } +}); diff --git a/runbot/static/src/js/views/form_controller.js b/runbot/static/src/js/views/form_controller.js index f7a1ea72c..4fc4d83a2 100644 --- 
a/runbot/static/src/js/views/form_controller.js +++ b/runbot/static/src/js/views/form_controller.js @@ -1,19 +1,18 @@ -/** @odoo-module **/ - -import { FormController } from '@web/views/form/form_controller'; -import { patch } from '@web/core/utils/patch'; +import { FormController } from "@web/views/form/form_controller"; +import { patch } from "@web/core/utils/patch"; patch(FormController.prototype, { // Prevent saving on tab switching beforeVisibilityChange: () => {}, + // Prevent closing page with dirty fields async beforeUnload(ev) { if (await this.model.root.isDirty()) { ev.preventDefault(); - ev.returnValue = 'Unsaved changes'; + ev.returnValue = "Unsaved changes"; } else { super.beforeUnload(ev); } - } -}) + }, +}); diff --git a/runbot/templates/batch.xml b/runbot/templates/batch.xml index 54a25393e..0896e8a1b 100644 --- a/runbot/templates/batch.xml +++ b/runbot/templates/batch.xml @@ -13,11 +13,28 @@ &emsp; + t-attf-href="/odoo/batch/{{batch.id}}" class="btn btn-default btn-sm" target="_blank" title="View Batch in Backend"> + + Priority + + + High + + + + Low + + + Normal + + Set to high + + + Category diff --git a/runbot/templates/branch.xml b/runbot/templates/branch.xml index 78e0948e0..611f06e77 100644 --- a/runbot/templates/branch.xml +++ b/runbot/templates/branch.xml @@ -12,7 +12,7 @@ diff --git a/runbot/templates/build.xml b/runbot/templates/build.xml index d9428a0d3..3c701df09 100644 --- a/runbot/templates/build.xml +++ b/runbot/templates/build.xml @@ -44,7 +44,7 @@
  • - +
  • @@ -68,7 +68,7 @@
    -
    +
    This build is referenced in bundles
    @@ -114,6 +114,9 @@
    + + Mode: Light Configure
    +
    Version: @@ -222,7 +225,7 @@ - + Build @@ -234,7 +237,7 @@ with config - ... + ... @@ -291,7 +294,7 @@ - + @@ -355,7 +358,7 @@ - + () diff --git a/runbot/templates/build_error.xml b/runbot/templates/build_error.xml index e64fc4706..87ad07804 100644 --- a/runbot/templates/build_error.xml +++ b/runbot/templates/build_error.xml @@ -36,7 +36,7 @@
    + t-attf-href="/odoo/error/{{build_error.id}}" target="_blank" title="View in Backend"> @@ -113,7 +113,7 @@

    Team + t-attf-href="/odoo/team/{{team.id}}" target="_blank" title="View in Backend">

    diff --git a/runbot/templates/build_stats.xml b/runbot/templates/build_stats.xml index 8f36b6850..7721b8490 100644 --- a/runbot/templates/build_stats.xml +++ b/runbot/templates/build_stats.xml @@ -4,7 +4,7 @@ @@ -399,8 +415,7 @@ Find similar builds - + View in backend diff --git a/runbot/tests/common.py b/runbot/tests/common.py index 9f6ba86c8..81cc8e784 100644 --- a/runbot/tests/common.py +++ b/runbot/tests/common.py @@ -12,7 +12,7 @@ class RunbotCase(TransactionCase): - def mock_git_helper(self, repo, cmd): + def mock_git_helper(self, repo, cmd, input_data=None, raw=False): """Helper that returns a mock for repo._git()""" if cmd[:2] == ['show', '-s'] or cmd[:3] == ['show', '--pretty="%H -- %s"', '-s']: return 'commit message for %s' % cmd[-1] @@ -82,7 +82,9 @@ def setUp(self): self.repo_odoo: [ ('odoo/addons', 'base', '__manifest__.py'), ('odoo/addons', 'test_lint', '__manifest__.py'), + ('addons', 'account', '__manifest__.py'), ('addons', 'mail', '__manifest__.py'), + ('addons', 'test_mail', '__manifest__.py'), ('addons', 'web', '__manifest__.py'), ('addons', 'crm', '__manifest__.py'), ('addons', 'project', '__manifest__.py'), @@ -194,8 +196,8 @@ def setUp(self): self.docker_run_calls = [] self.diff = '' - def mock_git(repo, cmd, quiet=False): - return self.mock_git_helper(repo, cmd) + def mock_git(repo, cmd, quiet=False, input_data=None, raw=False): + return self.mock_git_helper(repo, cmd, input_data=input_data, raw=raw) self.start_patcher('git_patcher', 'odoo.addons.runbot.models.repo.Repo._git', new=mock_git) self.start_patcher('hostname_patcher', 'odoo.addons.runbot.common.socket.gethostname', 'host.runbot.com') @@ -232,10 +234,10 @@ def mock_git(repo, cmd, quiet=False): self.start_patcher('_write_file', 'odoo.addons.runbot.models.build.BuildResult._write_file', None) self.start_patcher('_parse_config', 'odoo.addons.runbot.models.build.BuildResult._parse_config', {'--test-enable', '--test-tags', '--with-demo'}) - def get_available_modules(self_commit): + 
def _list_available_modules(self_commit): return self.addons_per_repo.get(self_commit.repo_id, []) - self.start_patcher('_get_available_modules', 'odoo.addons.runbot.models.commit.Commit._get_available_modules', new=get_available_modules) + self.start_patcher('_list_available_modules', 'odoo.addons.runbot.models.commit.Commit._list_available_modules', new=_list_available_modules) def no_commit(*_args, **_kwargs): _logger.info('Skipping commit') diff --git a/runbot/tests/test_build_config_step.py b/runbot/tests/test_build_config_step.py index 9035c6af0..2e52171a3 100644 --- a/runbot/tests/test_build_config_step.py +++ b/runbot/tests/test_build_config_step.py @@ -117,33 +117,48 @@ def test_get_module(self): self.assertEqual('module_addons', self.repo_enterprise._get_module('enterprise/module_addons/some/file.py')) self.assertEqual(None, self.repo_odoo._get_module('odoo/core/module1/some/file.py')) self.assertEqual(None, self.repo_odoo._get_module('odoo/core/module/some/file.py')) + def test_codeowner_regex_multiple(self): - self.diff = 'file.js\nfile.py\nfile.xml' + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml' self.config_step._run_codeowner(self.parent_build) messages = self.parent_build.log_ids.mapped('message') self.assertEqual(messages[1], 'Checking 2 codeowner regexed on 3 files') - self.assertEqual(markdown_unescape(messages[2]), 'Adding team_js to reviewers for file [odoo/file.js](https://False/blob/dfdfcfcf/file.js)') - self.assertEqual(markdown_unescape(messages[3]), 'Adding team_py to reviewers for file [odoo/file.py](https://False/blob/dfdfcfcf/file.py)') - self.assertEqual(markdown_unescape(messages[4]), 'Adding codeowner-team to reviewers for file [odoo/file.xml](https://False/blob/dfdfcfcf/file.xml)') + self.assertEqual(markdown_unescape(messages[2]), 'Adding team_js to reviewers for file [odoo/addons/module/file.js](https://False/blob/dfdfcfcf/addons/module/file.js)') + 
self.assertEqual(markdown_unescape(messages[3]), 'Adding team_py to reviewers for file [odoo/addons/module/file.py](https://False/blob/dfdfcfcf/addons/module/file.py)') + self.assertEqual(markdown_unescape(messages[4]), 'Adding codeowner-team to reviewers for file [odoo/addons/module/file.xml](https://False/blob/dfdfcfcf/addons/module/file.xml)') self.assertEqual(markdown_unescape(messages[5]), 'Requesting review for pull request [base/odoo:1234](https://example.com/base/odoo/pull/1234): codeowner-team, team_js, team_py') self.assertEqual(self.dev_pr.reviewers, 'codeowner-team,team_js,team_py') + def test_codeowner_root_file(self): + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml\ntest_file' + self.config_step._run_codeowner(self.parent_build) + messages = self.parent_build.log_ids.mapped('message') + self.assertEqual(messages[1], 'Checking 2 codeowner regexed on 4 files') + self.assertEqual(markdown_unescape(messages[2]), 'File odoo/test_file is at the root level and it looks like it could be a mistake, remove it or ensure that a codeowner rule is added for this file') + self.assertEqual(markdown_unescape(messages[3]), 'Adding team_js to reviewers for file [odoo/addons/module/file.js](https://False/blob/dfdfcfcf/addons/module/file.js)') + self.assertEqual(markdown_unescape(messages[4]), 'Adding team_py to reviewers for file [odoo/addons/module/file.py](https://False/blob/dfdfcfcf/addons/module/file.py)') + self.assertEqual(markdown_unescape(messages[5]), 'Adding codeowner-team to reviewers for file [odoo/addons/module/file.xml](https://False/blob/dfdfcfcf/addons/module/file.xml)') + self.assertEqual(markdown_unescape(messages[6]), 'No reviewer for file [odoo/test_file](https://False/blob/dfdfcfcf/test_file)') + self.assertEqual(markdown_unescape(messages[7]), 'Requesting review for pull request [base/odoo:1234](https://example.com/base/odoo/pull/1234): codeowner-team, team_js, team_py') + self.assertEqual(self.dev_pr.reviewers, 
'codeowner-team,team_js,team_py') + self.assertEqual(self.parent_build.local_result, 'ko') + def test_codeowner_regex_some_already_on(self): - self.diff = 'file.js\nfile.py\nfile.xml' + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml' self.dev_pr.reviewers = 'codeowner-team,team_js' self.config_step._run_codeowner(self.parent_build) messages = self.parent_build.log_ids.mapped('message') self.assertEqual(markdown_unescape(messages[5]), 'Requesting review for pull request [base/odoo:1234](https://example.com/base/odoo/pull/1234): team_py') def test_codeowner_regex_all_already_on(self): - self.diff = 'file.js\nfile.py\nfile.xml' + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml' self.dev_pr.reviewers = 'codeowner-team,team_js,team_py' self.config_step._run_codeowner(self.parent_build) messages = self.parent_build.log_ids.mapped('message') self.assertEqual(messages[5], 'All reviewers are already on pull request [base/odoo:1234](https://example.com/base/odoo/pull/1234)') def test_codeowner_author_in_team(self): - self.diff = 'file.js\nfile.py\nfile.xml' + self.diff = 'addons/module/file.js\naddons/module/file.py\naddons/module/file.xml' self.team1.github_team = 'team_py' self.team1.github_logins = 'some_member,another_member' self.team1.skip_team_pr = True @@ -424,25 +439,58 @@ def setUp(self): }).id, 'local_result': 'ok', }) + self.module_dependencies = { + "test_mail": ["mail"], + "mail": ["web"], + "account": ["web"], + "crm": ["web"], + "project": ["web"], + "test_l10n": ["l10n_be", "l10n_in"], + "l10n_be": ["account"], + "l10n_in": ["account"], + "web_enterprise": ["web"], + } + + def mock_git_helper(self, repo, cmd, input_data=None, raw=False): + def make_catfile_output(commit, content): + content_bytes = content.encode('utf-8') + header = f"{commit} blob {len(content_bytes)}\n".encode() + result = header + content_bytes + b"\n" + return result + + if cmd == ['cat-file', '--batch']: + if repo == 
self.repo_odoo and input_data == 'dfdfcfcf0000ffffffffffffffffffffffffffff:odoo/tests/.runbot/parallel_testing.json\n': + return make_catfile_output('dfdfcfcf0000ffffffffffffffffffffffffffff', self.config_file) + if repo == self.repo_odoo and input_data == 'dfdfcfcf0000ffffffffffffffffffffffffffff:odoo/tests/.runbot/l10n_standalone_testing.json\n': + return make_catfile_output('dfdfcfcf0000ffffffffffffffffffffffffffff', self.l10n_standalone_testing_file) + + if "__manifest__.py" in input_data: + modules_info = [ + (line, line.split(':')[-1].split('/')[-2]) + for line in input_data.splitlines() + if line.endswith('__manifest__.py') + ] + result = b"" + for original_query, module in modules_info: + content = '''{'name': '%s', 'depends': %s}''' % (module, self.module_dependencies.get(module, [])) + result += make_catfile_output(original_query.split(':')[0], content) + return result - def mock_git_helper(self, repo, cmd): - if repo == self.repo_odoo and cmd == ['show', 'dfdfcfcf0000ffffffffffffffffffffffffffff:odoo/tests/.runbot/parallel_testing.json']: - return self.config_file - elif repo == self.repo_odoo and cmd == ['show', 'dfdfcfcf0000ffffffffffffffffffffffffffff:odoo/tests/.runbot/l10n_standalone_testing.json']: - return self.l10n_standalone_testing_file - elif 'show' in cmd: + if cmd == ['cat-file', '--batch']: raise subprocess.CalledProcessError(cmd, 128) - return super().mock_git_helper(repo, cmd) + elif 'diff' in cmd: + return 'odoo/addons/crm/some/file.py\nodoo/addons/project/some/file.py' + return super().mock_git_helper(repo, cmd, input_data, raw) def test_module_filters(self): - self.assertEqual(self.build._get_modules_to_test('-> !mail'), ['base', 'crm', 'documents']) - self.assertEqual(self.build._get_modules_to_test('mail -> !web'), ['mail', 'project', 'test_l10n', 'test_lint']) + self.assertEqual(self.build._get_modules_to_test('-> !mail'), ['account', 'base', 'crm', 'documents']) + self.assertEqual(self.build._get_modules_to_test('mail -> !web'), 
['mail', 'project', 'test_l10n', 'test_lint', 'test_mail']) self.assertEqual(self.build._get_modules_to_test('web -> web'), ['web']) self.assertEqual(self.build._get_modules_to_test('!web ->'), ['web_enterprise']) - self.assertEqual(self.build._get_modules_to_test('-> !mail, -crm'), ['base', 'documents']) - self.assertEqual(self.build._get_modules_to_test('mail -> !web, !project'), ['mail', 'test_l10n', 'test_lint']) - self.assertEqual(self.build._get_modules_to_test('-*,odoo/*'), ['base', 'crm', 'hw_drivers', 'mail', 'project', 'test_l10n', 'test_lint', 'web']) - self.assertEqual(self.build._get_modules_to_test('-*,odoo/test_*'), ['test_l10n', 'test_lint']) + self.assertEqual(self.build._get_modules_to_test('-> !mail, -crm'), ['account', 'base', 'documents']) + self.assertEqual(self.build._get_modules_to_test('mail -> !web, !project'), ['mail', 'test_l10n', 'test_lint', 'test_mail']) + self.assertEqual(self.build._get_modules_to_test('-*,odoo/*'), ['account', 'base', 'crm', 'hw_drivers', 'mail', 'project', 'test_l10n', 'test_lint', 'test_mail', 'web']) + self.assertEqual(self.build._get_modules_to_test('-*,odoo/test_*'), ['test_l10n', 'test_lint', 'test_mail']) self.assertEqual(self.build._get_modules_to_test('-*,enterprise/*'), ['documents', 'l10n_be', 'l10n_in', 'web_enterprise']) self.assertEqual(self.build._get_modules_to_test('-*,web*'), ['web', 'web_enterprise']) self.assertEqual(self.build._get_modules_to_test('-*,web*,-enterprise/web*'), ['web']) @@ -452,6 +500,35 @@ def test_config_extension(self): self.assertEqual(json.loads(self.config.default_dynamic_config)['vars']['module_filter'], '*,-hw_*') self.assertEqual(self.build.dynamic_config['vars']['module_filter'], '*,-hw_*,-l10n_*') + def test_parse_dynamic_entry(self): + Step = self.env['runbot.build.config.step'] + + def check_parse(entry, expected): + res = Step._parse_dynamic_entry(entry, self.build, {'key': 'value', 'test_method': '.test_method'}) + self.assertEqual(res, expected) + 
check_parse('{{-test_*|filter_all_modules}}', 'account,base,crm,documents,hw_drivers,l10n_be,l10n_in,mail,project,web,web_enterprise') + check_parse('{{-*,web*|filter_all_modules}}', 'web,web_enterprise') + check_parse('{{-*,web*|filter_all_modules|make_module_test_tags}}', '/web,/web_enterprise') + check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|prepend("some_tag")}}', 'some_tag/web,some_tag/web_enterprise') + check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|prepend(key)}}', 'value/web,value/web_enterprise') + check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|append(".test_method")}}', '/web.test_method,/web_enterprise.test_method') + check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|append(test_method)}}', '/web.test_method,/web_enterprise.test_method') + + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm'}) + + check_parse('{{*|filter_all_modules|modified_modules}}', 'crm') + + def test_modules_dependencies(self): + self.assertEqual(self.build._get_modules_dependencies(['test_mail'], 1), ['mail', 'test_mail']) + self.assertEqual(self.build._get_modules_dependencies(['test_mail']), ['base', 'mail', 'test_mail', 'web']) + self.assertEqual(self.build._get_modules_dependencies(['test_l10n']), ['account', 'base', 'l10n_be', 'l10n_in', 'test_l10n', 'web']) + self.assertEqual(self.build._get_modules_dependencies(['test_mail', 'test_l10n']), ['account', 'base', 'l10n_be', 'l10n_in', 'mail', 'test_l10n', 'test_mail', 'web']) + self.assertEqual(self.build._get_modules_dependencies(['test_mail', 'test_l10n'], 1), ['l10n_be', 'l10n_in', 'mail', 'test_l10n', 'test_mail']) + + self.assertEqual(self.build._get_dependant_modules(['account'], 1), ['account', 'l10n_be', 'l10n_in']) + self.assertEqual(self.build._get_dependant_modules(['account']), ['account', 'l10n_be', 'l10n_in', 'test_l10n']) + self.assertEqual(self.build._get_dependant_modules(['base']), ['account', 'base', 'crm', 
'documents', 'hw_drivers', 'l10n_be', 'l10n_in', 'mail', 'project', 'test_l10n', 'test_lint', 'test_mail', 'web', 'web_enterprise']) + def check_server_cmd(self, cmd, install, test_enable, test_tags, db=None): self.assertIn('odoo/server.py', cmd) if install: @@ -522,7 +599,7 @@ def test_dynamic_step_parallel_testing(self): cmd = self.docker_run_calls[0][0] odoo_cmd = cmd.cmd self.check_server_cmd(odoo_cmd, - install=['base', 'crm', 'documents', 'mail', 'project', 'test_l10n', 'test_lint', 'web', 'web_enterprise'], + install=['account', 'base', 'crm', 'documents', 'mail', 'project', 'test_l10n', 'test_lint', 'test_mail', 'web', 'web_enterprise'], test_enable=False, test_tags=None, db=f'{build.dest}-all', @@ -557,7 +634,7 @@ def test_dynamic_step_parallel_testing(self): cmd = self.docker_run_calls[0][0] odoo_cmd = cmd.cmd self.check_server_cmd(odoo_cmd, - install=['base', 'crm', 'documents', 'mail', 'project', 'test_l10n', 'test_lint', 'web', 'web_enterprise'], + install=['account', 'base', 'crm', 'documents', 'mail', 'project', 'test_l10n', 'test_lint', 'test_mail', 'web', 'web_enterprise'], test_enable=True, test_tags='-post_install,-/test_lint', ) @@ -574,8 +651,8 @@ def test_dynamic_step_parallel_testing(self): ) for post_install, expected_tags in [ - (post_install_1, '-at_install,/base,/crm,/documents,/hw_drivers,/l10n_be,/l10n_in'), # we need the blacklisted modules here - (post_install_2, '-at_install,/mail,/project,/test_l10n,/test_lint'), + (post_install_1, '-at_install,/account,/base,/crm,/documents,/hw_drivers,/l10n_be,/l10n_in'), # we need the blacklisted modules here + (post_install_2, '-at_install,/mail,/project,/test_l10n,/test_lint,/test_mail'), (post_install_3, '-at_install,/web'), (post_install_4, '-at_install,/web_enterprise'), ]: @@ -679,7 +756,7 @@ def test_dynamic_step_l10n_standalone(self): (post_install_1, '-external,-external_l10n,post_install_l10n/l10n_hr_payroll_be,post_install_l10n/l10n_hr_payroll_in'), # we need the blacklisted modules 
here (post_install_2, '-external,-external_l10n,post_install_l10n/l10n_edi_be,post_install_l10n/l10n_edi_in'), (post_install_3, '-external,-external_l10n,post_install_l10n/l10n_reports_be,post_install_l10n/l10n_reports_in'), - (post_install_4, Like('-external,-external_l10n,post_install_l10n/base,post_install_l10n/crm,...')), + (post_install_4, Like('-external,-external_l10n,post_install_l10n/account,post_install_l10n/base,post_install_l10n/crm,...')), ]: with self.subTest(post_install=expected_tags): # 4.1 post install restore @@ -723,6 +800,7 @@ def test_foreach_module(self): self.config.step_ids[0]._run_dynamic(self.build) self.assertEqual(self.build.children_ids.mapped('description'), [ + 'Post install tests for **account**', 'Post install tests for **base**', 'Post install tests for **crm**', 'Post install tests for **documents**', @@ -752,31 +830,154 @@ def test_foreach_modified_module(self): }] }''' - self.patch(type(self.build), '_modified_modules', lambda cl: {'crm'}) + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm'}) self.config.default_dynamic_config = dynamic_config self.config.step_ids[0]._run_dynamic(self.build) self.assertEqual(self.build.children_ids.mapped('description'), - [ - 'Post install tests for **crm**', + [ + 'Post install tests for **crm**', ]) - def test_parse_dynamic_entry(self): - Step = self.env['runbot.build.config.step'] + def test_modified_existing_module(self): + dynamic_config = '''{ + "vars": { + "modified_modules": "{{*|filter_all_modules|modified_modules}}", + "test_modules": "{{modified_modules|prepend('test_')|select_existing_modules}}", + "modules_to_test": "{{modified_modules|union(test_modules)}}" + }, + "name": "Foreach module testing", + "steps": [{ + "name": "Create module builds", + "job_type": "create_build", + "children": [{ + "name": "Test single module", + "description": "Post install tests for **{{modules_to_test}}**", + "steps": [{ + "name": "Start single module test", + 
"job_type": "odoo", + "install_modules": "{{modules_to_test}}", + "test_tags": "{{modules_to_test|make_module_test_tags}}" + }] + }] + }] + }''' - def check_parse(entry, expected): - res = Step._parse_dynamic_entry(entry, self.build, {'key': 'value', 'test_method': '.test_method'}) - self.assertEqual(res, expected) - check_parse('{{-test_*|filter_all_modules}}', 'base,crm,documents,hw_drivers,l10n_be,l10n_in,mail,project,web,web_enterprise') - check_parse('{{-*,web*|filter_all_modules}}', 'web,web_enterprise') - check_parse('{{-*,web*|filter_all_modules|make_module_test_tags}}', '/web,/web_enterprise') - check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|prepend("some_tag")}}', 'some_tag/web,some_tag/web_enterprise') - check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|prepend(key)}}', 'value/web,value/web_enterprise') - check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|append(".test_method")}}', '/web.test_method,/web_enterprise.test_method') - check_parse('{{-*,web*|filter_all_modules|make_module_test_tags|append(test_method)}}', '/web.test_method,/web_enterprise.test_method') + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm', 'mail'}) + self.config.default_dynamic_config = dynamic_config + self.config.step_ids[0]._run_dynamic(self.build) + self.assertEqual(self.build.children_ids.mapped('description'), + [ + 'Post install tests for **crm,mail,test_mail**', + ]) + child_dynamic_vars = self.build.children_ids.params_id.config_data['dynamic_vars'] + self.assertEqual(child_dynamic_vars, { + 'modified_modules': 'crm,mail', + 'test_modules': 'test_mail', + 'modules_to_test': 'crm,mail,test_mail', + }) - self.patch(type(self.build), '_modified_modules', lambda cl: {'crm'}) + def test_modified_existing_module_parallel(self): + dynamic_config = '''{ + "vars": { + "modified_modules": "{{*|filter_all_modules|modified_modules}}", + "modules_to_test": 
"{{modified_modules|prepend('test_')|select_existing_modules|union(modified_modules)}}" + }, + "name": "Parallel split modified", + "steps": [{ + "name": "Create module builds", + "job_type": "create_build", + "for_each_vars": [{ + "test_module_filter": "{{modules_to_test}},->!mail" + }, + { + "test_module_filter": "{{modules_to_test}},mail->!website" + }, + { + "test_module_filter": "{{modules_to_test}},website->" + } + ], + "if": "{{child_modules_to_test}}", + "children": [{ + "vars": { + "child_modules_to_test": "{{test_module_filter|select_existing_modules}}" + }, + "name": "Test single module", + "description": "Post install tests for **{{child_modules_to_test}}**", + "steps": [{ + "name": "Start single module test", + "job_type": "odoo", + "install_modules": "{{child_modules_to_test}}", + "test_tags": "{{child_modules_to_test|make_module_test_tags}}" + }] + }] + }] + }''' - check_parse('{{*|filter_all_modules|modified_modules}}', 'crm') + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm', 'mail'}) + self.config.default_dynamic_config = dynamic_config + self.config.step_ids[0]._run_dynamic(self.build) + self.assertEqual(self.build.children_ids.mapped('description'), + [ + 'Post install tests for **crm**', + 'Post install tests for **mail,test_mail**', + ]) + + self.assertEqual(self.build.children_ids[0].params_id.config_data['dynamic_vars']['child_modules_to_test'], 'crm') + self.assertEqual(self.build.children_ids[1].params_id.config_data['dynamic_vars']['child_modules_to_test'], 'mail,test_mail') + + def test_modified_existing_module_parallel_relations(self): + dynamic_config = '''{ + "vars": [ + {"module_filter": 
"*,-hw_*,-*l10n_*,-theme_*,-account_bacs,-account_reports_cash_basis,-auth_ldap,-base_gengo,-document_ftp,-iot_drivers,-note_pad,-odoo_referral,-odoo_referral_portal,-pad,-pad_project,-pos_blackbox_be,-pos_cache,-pos_six,-social_demo,-website_gengo,-website_instantclick,test_l10n_be_hr_payroll_account,test_l10n_us_hr_payroll_account"}, + {"_modified_modules": "{{module_filter|filter_all_modules|modified_modules}}"}, + {"_modules_dependencies": "{{_modified_modules|get_dependencies(1)}}"}, + {"_dependant_modules": "{{_modified_modules|get_dependant(1)}}"}, + {"_test_modules": "{{_modified_modules|prepend('test_')|select_existing_modules}}"}, + {"_modules_to_test": "{{_modified_modules|union(_test_modules)|union(_dependant_modules)|union(_modules_dependencies)}}"} + ], + "name": "Parallel split modified", + "steps": [{ + "name": "Create module builds", + "job_type": "create_build", + "for_each_vars": [{ + "_test_module_filter": "{{_modules_to_test}},->!mail" + }, + { + "_test_module_filter": "{{_modules_to_test}},mail->!website" + }, + { + "_test_module_filter": "{{_modules_to_test}},website->" + } + ], + "if": "{{child_modules_to_test}}", + "log": "Modified modules: {{_modified_modules}}\\nDepenencies: {{_modules_dependencies}}\\nDependant: {{_dependant_modules}}\\nTest modules: {{_test_modules}}", + "children": [{ + "vars": { + "child_modules_to_test": "{{_test_module_filter|select_existing_modules}}" + }, + "name": "Test single module", + "description": "Post install tests for **{{child_modules_to_test}}**", + "steps": [{ + "name": "Start single module test", + "job_type": "odoo", + "install_modules": "{{child_modules_to_test}}", + "test_tags": "{{child_modules_to_test|make_module_test_tags}}" + }] + }] + }] + }''' + + self.patch(type(self.build), '_modified_modules', lambda cl, defaults=None: {'crm', 'mail'}) + self.config.default_dynamic_config = dynamic_config + self.config.step_ids[0]._run_dynamic(self.build) + 
self.assertEqual(self.build.children_ids.mapped('description'), + [ + 'Post install tests for **crm**', + 'Post install tests for **mail,test_mail,web**', + ]) + self.assertEqual(self.build.children_ids[0].params_id.config_data['dynamic_vars']['child_modules_to_test'], 'crm') + self.assertEqual(self.build.children_ids[1].params_id.config_data['dynamic_vars']['child_modules_to_test'], 'mail,test_mail,web') + self.assertEqual(list(self.build.children_ids[0].params_id.config_data['dynamic_vars'].keys()), ['module_filter', 'child_modules_to_test']) class TestBuildConfigStep(TestBuildConfigStepCommon): diff --git a/runbot/tests/test_build_error.py b/runbot/tests/test_build_error.py index 7368d088a..24e9ab6a2 100644 --- a/runbot/tests/test_build_error.py +++ b/runbot/tests/test_build_error.py @@ -2,7 +2,7 @@ from unittest.mock import patch from odoo import fields -from odoo.exceptions import ValidationError +from odoo.exceptions import AccessError, ValidationError from odoo.tests import new_test_user from .common import RunbotCase @@ -204,6 +204,21 @@ def test_merge_test_tags(self): self.assertEqual(error_b.test_tags, False) self.assertEqual(error_b.active, False) + def test_merge_pr_ids(self): + error_a = self.BuildError.create({ + 'content': 'foo', + }) + error_b = self.BuildError.create({ + 'content': 'bar', + 'breaking_pr_id': self.dev_pr.id, + 'fixing_pr_id': self.dev_pr.id, + }) + + error_a._merge(error_b) + + self.assertEqual(error_a.fixing_pr_id, self.dev_pr) + self.assertEqual(error_a.breaking_pr_id, self.dev_pr) + def test_relink_contents(self): build_a = self.create_test_build({'local_result': 'ko', 'local_state': 'done'}) error_content_a = self.BuildErrorContent.create({'content': 'foo bar'}) @@ -712,6 +727,40 @@ def test_build_error_notification(self): innactive_error.responsible = responsible message_notify.assert_not_called() + def test_build_error_acl(self): + self.stop_patcher('isfile') # prevent user creation + self.start_patcher('message_notify', 
'odoo.addons.mail.models.mail_thread.MailThread.message_notify') + test_team = self.env['runbot.team'].create({ + 'name': 'test-team', + 'project_id': self.project.id, + }) + responsible = new_test_user(self.env, login='fixman', name='fixman', groups='base.group_user') + user_lambda = new_test_user(self.env, login='lambda', name='lambda', groups='base.group_user') + error_manager = new_test_user(self.env, login='errorman', name='errorman', groups='base.group_user,runbot.group_runbot_error_manager') + runbot_manager = new_test_user(self.env, login='runbotman', name='runbotman', groups='base.group_user,runbot.group_runbot_admin') + + error = self.BuildError.create({}) + + # check writable fields by any user + error.with_user(user_lambda).write({ + 'responsible': responsible.id, + 'customer': error_manager.id, + 'fixing_pr_id': self.dev_pr.id, + 'breaking_pr_id': self.dev_pr.id, + 'random': True, + 'team_id': test_team.id, + }) + + # check other fields for a user lambda + with self.assertRaises(AccessError): + error.with_user(user_lambda).description = 'test description' + + # now check that an error manager can set a test_tag + error.with_user(error_manager).test_tags = 'brol' + + # and the runbot admin user can change it back + error.with_user(runbot_manager).test_tags = False + class TestErrorMerge(TestBuildErrorCommon): diff --git a/runbot/tests/test_dockerfile.py b/runbot/tests/test_dockerfile.py index 213310872..cb7539bc2 100644 --- a/runbot/tests/test_dockerfile.py +++ b/runbot/tests/test_dockerfile.py @@ -3,10 +3,12 @@ import logging import os import re +import time from psycopg2.errors import UniqueViolation +from requests.exceptions import HTTPError from odoo import Command, exceptions -from unittest.mock import patch, mock_open +from unittest.mock import patch, mock_open, MagicMock from odoo.tests.common import tagged, HttpCase, mute_logger from .common import RunbotCase @@ -149,3 +151,154 @@ def test_dockerfile_variant_unique(self): 'name': 
'Documentation2', 'parent_id': default_dockerfile.id, }) + + +@tagged('-at_install', 'post_install') +class TestDockerfileCache(RunbotCase, HttpCase): + def test_dockerfile_get_cached_content(self): + dockerfile = self.env['runbot.dockerfile'].create({ + 'name': 'TestsAddCache', + 'to_build': True, + 'layer_ids': [ + Command.create({ + 'name': 'CacheAddTest', + 'layer_type': 'raw', + 'content': 'some useless content', + }), + ], + }) + + self.start_patcher('docker_username', 'odoo.addons.runbot.models.docker.USERNAME', new='TestUser') + + expected_content = """# CacheAddTest +some useless content + +USER TestUser +""" + + self.start_patcher('hardlink_to', 'odoo.addons.runbot.models.docker.Path.hardlink_to') + self.start_patcher('path_unlink', 'odoo.addons.runbot.models.docker.Path.unlink') + content = dockerfile._get_cached_content('/tmp/fake_build_path') + self.assertEqual(content, expected_content, 'Dockerfile without "ADD" should be left unchanged') + + raw_layer = """FROM ubuntu:noble +ADD https://nowhere.example.org/nothing.txt /data/nothing.txt +""" + + expected_content = """# CacheAddTest +FROM ubuntu:noble +ADD https://nowhere.example.org/nothing.txt /data/nothing.txt + + +USER TestUser +""" + dockerfile.layer_ids[0].content = raw_layer + content = dockerfile._get_cached_content('/tmp/fake_build_path') + self.assertEqual(content, expected_content, 'Dockerfile without "#CACHE" directive should be left unchanged') + + # Here we start the useful cache tests + raw_layer = """FROM ubuntu:noble +# CACHE 60 +ADD https://nowhere.example.org/nothing.txt /data/nothing.txt +""" + + expected_content = """# CacheAddTest +FROM ubuntu:noble +# CACHE 60 +COPY _data_nothing_txt /data/nothing.txt + + +USER TestUser +""" + mock_response = MagicMock() + mock_response.iter_content.return_value = [b'small file content'] + self.start_patcher('docker_requests_get', 'odoo.addons.runbot.models.docker.requests.get', return_value=mock_response) + + # 1 - The cache file does not exists 
yet + self.start_patcher('docker_path_exists', 'odoo.addons.runbot.models.docker.Path.exists', return_value=False) + dockerfile.layer_ids[0].content = raw_layer + with patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + content = dockerfile._get_cached_content('/tmp/fake_build_path') + cache_file_mock.assert_called_once_with('wb') + self.assertEqual(content, expected_content, 'Dockerfile with "#CACHE" should change the ADD directive to COPY') + + # 2 - The cache file exists but the cache duration is expired + self.patchers['docker_path_exists'].return_value = True + self.start_patcher('docker_path_lstat', 'odoo.addons.runbot.models.docker.Path.lstat') + self.patchers['docker_path_lstat'].return_value.st_mtime = time.time() - 100 + with patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + content = dockerfile._get_cached_content('/tmp/fake_build_path') + cache_file_mock.assert_called_once_with('wb') + self.assertEqual(content, expected_content, 'Dockerfile with "#CACHE" should change the ADD directive to COPY') + + # 3 - The cache file exists but the cache duration is not expired + self.start_patcher('docker_path_touch', 'odoo.addons.runbot.models.docker.Path.touch', return_value=True) + self.patchers['docker_path_lstat'].return_value.st_mtime = time.time() - 2 + with patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + content = dockerfile._get_cached_content('/tmp/fake_build_path') + cache_file_mock.assert_not_called() + self.assertEqual(content, expected_content, 'Dockerfile with "#CACHE" should change the ADD directive to COPY') + self.patchers['docker_path_touch'].assert_not_called() + + # 4 - The cache file does not exists yet but the there is an error while downloading + self.patchers['docker_path_exists'].return_value = False + self.patchers['docker_requests_get'].side_effect = HTTPError + + dockerfile.layer_ids[0].content = raw_layer + with 
patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + with self.assertRaises(HTTPError, msg='HTTPError Exception should be reraised during cache download'): + content = dockerfile._get_cached_content('/tmp/fake_build_path') + + def test_dockerfile_build_with_cached_content(self): + dockerfile = self.env['runbot.dockerfile'].create({ + 'name': 'TestsAddCache', + 'to_build': True, + 'layer_ids': [ + Command.create({ + 'name': 'CacheAddTest', + 'layer_type': 'raw', + 'content': 'some useless content', + }), + ], + }) + + dockerfile.layer_ids[0].content = """# Cache Test +FROM ubuntu:noble +# CACHE 60 +ADD https://nowhere.example.org/nothing.txt /data/nothing.txt +""" + + expected_content = """# Cache Test +FROM ubuntu:noble +# CACHE 60 +COPY _data_nothing_txt /data/nothing.txt + + +USER TestUser +""" + + self.start_patcher('docker_username', 'odoo.addons.runbot.models.docker.USERNAME', new='TestUser') + self.start_patcher('docker_path_exists', 'odoo.addons.runbot.models.docker.Path.exists', return_value=False) + self.start_patcher('docker_path_hardlink_to', 'odoo.addons.runbot.models.docker.Path.hardlink_to') + self.start_patcher('docker_get_docker_metadata', 'odoo.addons.runbot.models.docker.Dockerfile._get_docker_metadata') + + mock_response = MagicMock() + mock_response.iter_content.return_value = [b'small file content'] + self.start_patcher('docker_requests_get', 'odoo.addons.runbot.models.docker.requests.get', return_value=mock_response) + + self.patchers['docker_build'].return_value = { + 'image_id': 'xxx', + 'success': True, + 'duration': 69, + 'image': 'd0d0caca', + 'msg': '', + } + + with patch('odoo.addons.runbot.models.docker.Path.open', mock_open()) as cache_file_mock: + with patch('builtins.open', mock_open()) as dockerfile_file: + dockerfile._build() + cache_file_mock.assert_called_once_with('wb') + dockerfile_file_handle = dockerfile_file() + dockerfile_file_handle.write.assert_called_once_with(expected_content) + 
self.patchers['docker_path_hardlink_to'].assert_called() + self.patchers['docker_get_docker_metadata'].assert_called() diff --git a/runbot/tests/test_repo.py b/runbot/tests/test_repo.py index 81c940ab0..cb8195a05 100644 --- a/runbot/tests/test_repo.py +++ b/runbot/tests/test_repo.py @@ -372,7 +372,7 @@ def setUp(self): self.fetch_count = 0 self.force_failure = False - def mock_git_helper(self, repo, cmd): + def mock_git_helper(self, repo, cmd, input_data=None, raw=False): self.assertIn('fetch', cmd) self.fetch_count += 1 if self.fetch_count < 3 or self.force_failure: @@ -457,7 +457,7 @@ def setUp(self): super().setUp() self.test_refs = [] - def mock_git_helper(self, repo, cmd): + def mock_git_helper(self, repo, cmd, input_data=None, raw=False): self.assertIn('for-each-ref', cmd) self.assertIn('refs/*/pull/*', cmd) return '\n'.join(['\x00'.join(ref_data) for ref_data in self.test_refs]) diff --git a/runbot/views/branch_views.xml b/runbot/views/branch_views.xml index 943ef6e30..3791f562e 100644 --- a/runbot/views/branch_views.xml +++ b/runbot/views/branch_views.xml @@ -46,6 +46,7 @@ Branches runbot.branch list,form + branch diff --git a/runbot/views/build_error_views.xml b/runbot/views/build_error_views.xml index d9a560cc6..cee4d2d8e 100644 --- a/runbot/views/build_error_views.xml +++ b/runbot/views/build_error_views.xml @@ -24,7 +24,7 @@ - + @@ -41,7 +41,12 @@ - +